| id (int64, 0–458k) | file_name (string, lengths 4–119) | file_path (string, lengths 14–227) | content (string, lengths 24–9.96M) | size (int64, 24–9.96M) | language (string, 1 class) | extension (string, 14 classes) | total_lines (int64, 1–219k) | avg_line_length (float64, 2.52–4.63M) | max_line_length (int64, 5–9.91M) | alphanum_fraction (float64, 0–1) | repo_name (string, lengths 7–101) | repo_stars (int64, 100–139k) | repo_forks (int64, 0–26.4k) | repo_open_issues (int64, 0–2.27k) | repo_license (string, 12 classes) | repo_extraction_date (string, 433 classes) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7,100 | tfidfmodel.py | piskvorky_gensim/gensim/models/tfidfmodel.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Radim Rehurek <radimrehurek@seznam.cz>
# Copyright (C) 2017 Mohit Rathore <mrmohitrathoremr@gmail.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""This module implements functionality related to the `Term Frequency - Inverse Document Frequency
<https://en.wikipedia.org/wiki/Tf%E2%80%93idf>`_ class of bag-of-words vector space models.
"""
import logging
from functools import partial
import re
import numpy as np
from gensim import interfaces, matutils, utils
from gensim.utils import deprecated
logger = logging.getLogger(__name__)
def resolve_weights(smartirs):
"""Check the validity of `smartirs` parameters.
Parameters
----------
smartirs : str
`smartirs` or SMART (System for the Mechanical Analysis and Retrieval of Text)
Information Retrieval System, a mnemonic scheme for denoting tf-idf weighting
variants in the vector space model. The mnemonic for representing a combination
of weights takes the form ddd, where each letter represents the term weighting of the document vector.
For more information, visit `SMART Information Retrieval System
<https://en.wikipedia.org/wiki/SMART_Information_Retrieval_System>`_.
Returns
-------
str of (local_letter, global_letter, normalization_letter)
local_letter : str
Term frequency weighting, one of:
* `b` - binary,
* `t` or `n` - raw,
* `a` - augmented,
* `l` - logarithm,
* `d` - double logarithm,
* `L` - log average.
global_letter : str
Document frequency weighting, one of:
* `x` or `n` - none,
* `f` - idf,
* `t` - zero-corrected idf,
* `p` - probabilistic idf.
normalization_letter : str
Document normalization, one of:
* `x` or `n` - none,
* `c` - cosine,
* `u` - pivoted unique,
* `b` - pivoted character length.
Raises
------
ValueError
If `smartirs` is not a string of length 3, or one of its letters is not among the permissible values.
"""
if isinstance(smartirs, str) and re.match(r"...\....", smartirs):
match = re.match(r"(?P<ddd>...)\.(?P<qqq>...)", smartirs)
raise ValueError(
"The notation {ddd}.{qqq} specifies two term-weighting schemes, "
"one for collection documents ({ddd}) and one for queries ({qqq}). "
"You must train two separate tf-idf models.".format(
ddd=match.group("ddd"),
qqq=match.group("qqq"),
)
)
if not isinstance(smartirs, str) or len(smartirs) != 3:
raise ValueError("Expected a string of length 3 got " + smartirs)
w_tf, w_df, w_n = smartirs
if w_tf not in 'btnaldL':
raise ValueError("Expected term frequency weight to be one of 'btnaldL', got {}".format(w_tf))
if w_df not in 'xnftp':
raise ValueError("Expected inverse document frequency weight to be one of 'xnftp', got {}".format(w_df))
if w_n not in 'xncub':
raise ValueError("Expected normalization weight to be one of 'xncub', got {}".format(w_n))
# resolve aliases
if w_tf == "t":
w_tf = "n"
if w_df == "x":
w_df = "n"
if w_n == "x":
w_n = "n"
return w_tf + w_df + w_n
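# Example (illustrative sketch, derived from the rules above): letters pass through unchanged
# except for the 't'/'x' aliases, and the ddd.qqq notation is rejected outright.
#
#   >>> resolve_weights("ntc")
#   'ntc'
#   >>> resolve_weights("txx")      # 't' and 'x' are aliases for 'n'
#   'nnn'
#   >>> resolve_weights("nfc.ntc")  # document/query scheme pairs need two separate models
#   Traceback (most recent call last):
#     ...
#   ValueError: The notation nfc.ntc specifies two term-weighting schemes, ...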
def df2idf(docfreq, totaldocs, log_base=2.0, add=0.0):
r"""Compute inverse-document-frequency for a term with the given document frequency `docfreq`:
:math:`idf = add + log_{log\_base} \frac{totaldocs}{docfreq}`
Parameters
----------
docfreq : {int, float}
Document frequency.
totaldocs : int
Total number of documents.
log_base : float, optional
Base of logarithm.
add : float, optional
Offset.
Returns
-------
float
Inverse document frequency.
"""
return add + np.log(float(totaldocs) / docfreq) / np.log(log_base)
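# Worked example (illustrative): with the default base-2 logarithm and no offset, a term that
# appears in 25 out of 100 documents gets idf = log2(100 / 25) = 2.0:
#
#   >>> df2idf(docfreq=25, totaldocs=100)   # -> 2.0 (a numpy float)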
def precompute_idfs(wglobal, dfs, total_docs):
"""Pre-compute the inverse document frequency mapping for all terms.
Parameters
----------
wglobal : function
Custom function for calculating the "global" (document frequency) term weight.
See for example the SMART alternatives under :func:`~gensim.models.tfidfmodel.smartirs_wglobal`.
dfs : dict
Dictionary mapping each `term_id` to the number of documents that term appeared in.
total_docs : int
Total number of documents.
Returns
-------
dict of (int, float)
Inverse document frequencies in the format `{term_id_1: idfs_1, term_id_2: idfs_2, ...}`.
"""
# not strictly necessary and could be computed on the fly in TfidfModel.__getitem__.
# this method is here just to speed things up a little.
return {termid: wglobal(df, total_docs) for termid, df in dfs.items()}
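# Example (illustrative sketch): precomputing idfs with the default global weight `df2idf`
# for a toy document-frequency mapping over 4 documents:
#
#   >>> precompute_idfs(df2idf, {0: 1, 17: 2}, total_docs=4)   # -> {0: 2.0, 17: 1.0} (numpy floats)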
def smartirs_wlocal(tf, local_scheme):
"""Calculate local term weight for a term using the weighting scheme specified in `local_scheme`.
Parameters
----------
tf : {int, numpy.ndarray}
Term frequency (the 'a', 'b' and 'L' schemes expect a numpy array).
local_scheme : {'b', 'n', 'a', 'l', 'd', 'L'}
Local transformation scheme.
Returns
-------
float
Calculated local weight.
"""
if local_scheme == "n":
return tf
elif local_scheme == "l":
return 1 + np.log2(tf)
elif local_scheme == "d":
return 1 + np.log2(1 + np.log2(tf))
elif local_scheme == "a":
return 0.5 + (0.5 * tf / tf.max(axis=0))
elif local_scheme == "b":
return tf.astype('bool').astype('int')
elif local_scheme == "L":
return (1 + np.log2(tf)) / (1 + np.log2(tf.mean(axis=0)))
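# Example (illustrative): with the logarithmic scheme 'l', raw counts [1, 2, 4] map to
# 1 + log2(tf) = [1.0, 2.0, 3.0]; the binary scheme 'b' clamps every nonzero count to 1.
#
#   >>> smartirs_wlocal(np.array([1, 2, 4]), "l")   # -> array([1., 2., 3.])
#   >>> smartirs_wlocal(np.array([1, 2, 4]), "b")   # -> array([1, 1, 1])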
def smartirs_wglobal(docfreq, totaldocs, global_scheme):
"""Calculate global document weight based on the weighting scheme specified in `global_scheme`.
Parameters
----------
docfreq : int
Document frequency.
totaldocs : int
Total number of documents.
global_scheme : {'n', 'f', 't', 'p'}
Global transformation scheme.
Returns
-------
float
Calculated global weight.
"""
if global_scheme == "n":
return 1.0
elif global_scheme == "f":
return np.log2(1.0 * totaldocs / docfreq)
elif global_scheme == "t":
return np.log2((totaldocs + 1.0) / docfreq)
elif global_scheme == "p":
return max(0, np.log2((1.0 * totaldocs - docfreq) / docfreq))
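# Worked example (illustrative): for a term occurring in 25 of 100 documents,
#   'f' (idf)               -> log2(100 / 25)              = 2.0
#   'p' (probabilistic idf) -> max(0, log2((100 - 25)/25)) = log2(3) ≈ 1.585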
@deprecated("Function will be removed in 4.0.0")
def smartirs_normalize(x, norm_scheme, return_norm=False):
"""Normalize a vector using the normalization scheme specified in `norm_scheme`.
Parameters
----------
x : numpy.ndarray
The tf-idf vector.
norm_scheme : {'n', 'c'}
Document length normalization scheme.
return_norm : bool, optional
Return the length of `x` as well?
Returns
-------
numpy.ndarray
Normalized array.
float (only if return_norm is set)
Norm of `x`.
"""
if norm_scheme == "n":
if return_norm:
_, length = matutils.unitvec(x, return_norm=return_norm)
return x, length
else:
return x
elif norm_scheme == "c":
return matutils.unitvec(x, return_norm=return_norm)
class TfidfModel(interfaces.TransformationABC):
"""Objects of this class realize the transformation between word-document co-occurrence matrix (int)
into a locally/globally weighted TF-IDF matrix (positive floats).
Examples
--------
.. sourcecode:: pycon
>>> import gensim.downloader as api
>>> from gensim.models import TfidfModel
>>> from gensim.corpora import Dictionary
>>>
>>> dataset = api.load("text8")
>>> dct = Dictionary(dataset) # fit dictionary
>>> corpus = [dct.doc2bow(line) for line in dataset] # convert corpus to BoW format
>>>
>>> model = TfidfModel(corpus) # fit model
>>> vector = model[corpus[0]] # apply model to the first corpus document
"""
def __init__(self, corpus=None, id2word=None, dictionary=None, wlocal=utils.identity,
wglobal=df2idf, normalize=True, smartirs=None, pivot=None, slope=0.25):
r"""Compute TF-IDF by multiplying a local component (term frequency) with a global component
(inverse document frequency), and normalizing the resulting documents to unit length.
Formula for non-normalized weight of term :math:`i` in document :math:`j` in a corpus of :math:`D` documents
.. math:: weight_{i,j} = frequency_{i,j} * log_2 \frac{D}{document\_freq_{i}}
or, more generally
.. math:: weight_{i,j} = wlocal(frequency_{i,j}) * wglobal(document\_freq_{i}, D)
so you can plug in your own custom :math:`wlocal` and :math:`wglobal` functions.
Parameters
----------
corpus : iterable of iterable of (int, int), optional
Input corpus
id2word : {dict, :class:`~gensim.corpora.Dictionary`}, optional
Mapping token - id, that was used for converting input data to bag of words format.
dictionary : :class:`~gensim.corpora.Dictionary`
If `dictionary` is specified, it must be a `corpora.Dictionary` object and it will be used
to directly construct the inverse document frequency mapping (then `corpus`, if specified, is ignored).
wlocal : callable, optional
Function for local weighting, default for `wlocal` is :func:`~gensim.utils.identity`
(other options: :func:`numpy.sqrt`, `lambda tf: 0.5 + (0.5 * tf / tf.max())`, etc.).
wglobal : callable, optional
Function for global weighting, default is :func:`~gensim.models.tfidfmodel.df2idf`.
normalize : {bool, callable}, optional
Normalize document vectors to unit euclidean length? You can also inject your own function into `normalize`.
smartirs : str, optional
SMART (System for the Mechanical Analysis and Retrieval of Text) Information Retrieval System,
a mnemonic scheme for denoting tf-idf weighting variants in the vector space model.
The mnemonic for representing a combination of weights takes the form XYZ,
for example 'ntc', 'bpn' and so on, where each letter represents the term weighting of the document vector.
Term frequency weighting:
* `b` - binary,
* `t` or `n` - raw,
* `a` - augmented,
* `l` - logarithm,
* `d` - double logarithm,
* `L` - log average.
Document frequency weighting:
* `x` or `n` - none,
* `f` - idf,
* `t` - zero-corrected idf,
* `p` - probabilistic idf.
Document normalization:
* `x` or `n` - none,
* `c` - cosine,
* `u` - pivoted unique,
* `b` - pivoted character length.
Default is 'nfc'.
For more information visit `SMART Information Retrieval System
<https://en.wikipedia.org/wiki/SMART_Information_Retrieval_System>`_.
pivot : float or None, optional
In information retrieval, TF-IDF is biased against long documents [1]_. Pivoted document length
normalization solves this problem by changing the norm of a document to `slope * old_norm + (1.0 -
slope) * pivot`.
You can either set the `pivot` by hand, or you can let Gensim figure it out automatically with the following
two steps:
* Set either the `u` or `b` document normalization in the `smartirs` parameter.
* Set either the `corpus` or `dictionary` parameter. The `pivot` will be automatically determined from
the properties of the `corpus` or `dictionary`.
If `pivot` is None and you don't follow steps 1 and 2, then pivoted document length normalization will be
disabled. Default is None.
See also the blog post at https://rare-technologies.com/pivoted-document-length-normalisation/.
slope : float, optional
In information retrieval, TF-IDF is biased against long documents [1]_. Pivoted document length
normalization solves this problem by changing the norm of a document to `slope * old_norm + (1.0 -
slope) * pivot`.
Setting the `slope` to 0.0 uses only the `pivot` as the norm, and setting the `slope` to 1.0 effectively
disables pivoted document length normalization. Singhal [2]_ suggests setting the `slope` between 0.2 and
0.3 for best results. Default is 0.25.
See also the blog post at https://rare-technologies.com/pivoted-document-length-normalisation/.
References
----------
.. [1] Singhal, A., Buckley, C., & Mitra, M. (1996). `Pivoted Document Length
Normalization <http://singhal.info/pivoted-dln.pdf>`_. *SIGIR Forum*, 51, 176–184.
.. [2] Singhal, A. (2001). `Modern information retrieval: A brief overview <http://singhal.info/ieee2001.pdf>`_.
*IEEE Data Eng. Bull.*, 24(4), 35–43.
"""
self.id2word = id2word
self.wlocal, self.wglobal, self.normalize = wlocal, wglobal, normalize
self.num_docs, self.num_nnz, self.idfs = None, None, None
self.smartirs = resolve_weights(smartirs) if smartirs is not None else None
self.slope = slope
self.pivot = pivot
self.eps = 1e-12
if smartirs is not None:
n_tf, n_df, n_n = self.smartirs
self.wlocal = partial(smartirs_wlocal, local_scheme=n_tf)
self.wglobal = partial(smartirs_wglobal, global_scheme=n_df)
if dictionary is not None:
# user supplied a Dictionary object, which already contains all the
# statistics we need to construct the IDF mapping. we can skip the
# step that goes through the corpus (= an optimization).
if corpus is not None:
logger.warning(
"constructor received both corpus and explicit inverse document frequencies; ignoring the corpus"
)
self.num_docs, self.num_nnz = dictionary.num_docs, dictionary.num_nnz
self.cfs = dictionary.cfs.copy()
self.dfs = dictionary.dfs.copy()
self.term_lens = {termid: len(term) for termid, term in dictionary.items()}
self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)
if id2word is None:
self.id2word = dictionary
elif corpus is not None:
self.initialize(corpus)
else:
# NOTE: everything is left uninitialized; presumably the model will
# be initialized in some other way
pass
# If smartirs is not None, override pivot and normalize
if smartirs is None:
return
if self.pivot is not None:
if n_n in 'ub':
logger.warning("constructor received pivot; ignoring smartirs[2]")
return
if n_n in 'ub' and callable(self.normalize):
logger.warning("constructor received smartirs; ignoring normalize")
if n_n in 'ub' and not dictionary and not corpus:
logger.warning("constructor received no corpus or dictionary; ignoring smartirs[2]")
elif n_n == "u":
self.pivot = 1.0 * self.num_nnz / self.num_docs
elif n_n == "b":
self.pivot = 1.0 * sum(
self.cfs[termid] * (self.term_lens[termid] + 1.0) for termid in dictionary.keys()
) / self.num_docs
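# Usage sketch (illustrative, reusing the `dct` and `corpus` objects from the class-level
# example above): pivoted document-length normalization can be requested either implicitly,
# via a 'u'/'b' normalization letter in `smartirs`, or by passing `pivot` by hand.
#
#   >>> model_auto = TfidfModel(corpus, id2word=dct, smartirs='Ltu')         # pivot inferred from corpus stats
#   >>> model_manual = TfidfModel(corpus, id2word=dct, pivot=10, slope=0.2)  # pivot supplied explicitly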
@classmethod
def load(cls, *args, **kwargs):
"""Load a previously saved TfidfModel class. Handles backwards compatibility from
older TfidfModel versions which did not use pivoted document normalization.
"""
model = super(TfidfModel, cls).load(*args, **kwargs)
if not hasattr(model, 'pivot'):
model.pivot = None
logger.info('older version of %s loaded without pivot arg', cls.__name__)
logger.info('Setting pivot to %s.', model.pivot)
if not hasattr(model, 'slope'):
model.slope = 0.65
logger.info('older version of %s loaded without slope arg', cls.__name__)
logger.info('Setting slope to %s.', model.slope)
if not hasattr(model, 'smartirs'):
model.smartirs = None
logger.info('older version of %s loaded without smartirs arg', cls.__name__)
logger.info('Setting smartirs to %s.', model.smartirs)
return model
def __str__(self):
return "%s<num_docs=%s, num_nnz=%s>" % (self.__class__.__name__, self.num_docs, self.num_nnz)
def initialize(self, corpus):
"""Compute inverse document weights, which will be used to modify term frequencies for documents.
Parameters
----------
corpus : iterable of iterable of (int, int)
Input corpus.
"""
logger.info("collecting document frequencies")
dfs = {}
numnnz, docno = 0, -1
for docno, bow in enumerate(corpus):
if docno % 10000 == 0:
logger.info("PROGRESS: processing document #%i", docno)
numnnz += len(bow)
for termid, _ in bow:
dfs[termid] = dfs.get(termid, 0) + 1
# keep some stats about the training corpus
self.num_docs = docno + 1
self.num_nnz = numnnz
self.cfs = None
self.dfs = dfs
self.term_lengths = None
# and finally compute the idf weights
self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)
self.add_lifecycle_event(
"initialize",
msg=(
f"calculated IDF weights for {self.num_docs} documents and {max(dfs.keys()) + 1 if dfs else 0}"
f" features ({self.num_nnz} matrix non-zeros)"
),
)
def __getitem__(self, bow, eps=1e-12):
"""Get the tf-idf representation of an input vector and/or corpus.
Parameters
----------
bow : {list of (int, int), iterable of iterable of (int, int)}
Input document in the `sparse Gensim bag-of-words format
<https://radimrehurek.com/gensim/intro.html#core-concepts>`_,
or a streamed corpus of such documents.
eps : float
Threshold value; all positions with a tf-idf value less than `eps` will be removed.
Returns
-------
vector : list of (int, float)
TfIdf vector, if `bow` is a single document
:class:`~gensim.interfaces.TransformedCorpus`
TfIdf corpus, if `bow` is a corpus.
"""
self.eps = eps
# if the input vector is in fact a corpus, return a transformed corpus as a result
is_corpus, bow = utils.is_corpus(bow)
if is_corpus:
return self._apply(bow)
# unknown (new) terms will be given zero weight (NOT infinity/huge weight,
# as strict application of the IDF formula would dictate)
termid_array, tf_array = [], []
for termid, tf in bow:
termid_array.append(termid)
tf_array.append(tf)
tf_array = self.wlocal(np.array(tf_array))
vector = [
(termid, tf * self.idfs.get(termid))
for termid, tf in zip(termid_array, tf_array) if abs(self.idfs.get(termid, 0.0)) > self.eps
]
# and finally, normalize the vector either to unit length, or use a
# user-defined normalization function
if self.smartirs:
n_n = self.smartirs[2]
if n_n == "n" or (n_n in 'ub' and self.pivot is None):
if self.pivot is not None:
_, old_norm = matutils.unitvec(vector, return_norm=True)
norm_vector = vector
elif n_n == "c":
if self.pivot is not None:
_, old_norm = matutils.unitvec(vector, return_norm=True)
else:
norm_vector = matutils.unitvec(vector)
elif n_n == "u":
_, old_norm = matutils.unitvec(vector, return_norm=True, norm='unique')
elif n_n == "b":
old_norm = sum(freq * (self.term_lens[termid] + 1.0) for termid, freq in bow)
else:
if self.normalize is True:
self.normalize = matutils.unitvec
elif self.normalize is False:
self.normalize = utils.identity
if self.pivot is not None:
_, old_norm = self.normalize(vector, return_norm=True)
else:
norm_vector = self.normalize(vector)
if self.pivot is None:
norm_vector = [(termid, weight) for termid, weight in norm_vector if abs(weight) > self.eps]
else:
pivoted_norm = (1 - self.slope) * self.pivot + self.slope * old_norm
norm_vector = [
(termid, weight / float(pivoted_norm))
for termid, weight in vector
if abs(weight / float(pivoted_norm)) > self.eps
]
return norm_vector
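# Worked example (illustrative) of the pivoted normalization above: with slope=0.25 and
# pivot=10, a document whose plain norm is 20 gets
#   pivoted_norm = (1 - 0.25) * 10 + 0.25 * 20 = 12.5,
# so every tf-idf weight in that document is divided by 12.5 instead of by 20.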
| 21,476 | Python | .py | 457 | 37.234136 | 120 | 0.604616 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,101 | bm25model.py | piskvorky_gensim/gensim/models/bm25model.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module implements functionality related to the `Okapi Best Matching
<https://en.wikipedia.org/wiki/Okapi_BM25>`_ class of bag-of-words vector space models.
Robertson and Zaragoza [1]_ describe the original algorithm and its modifications.
.. [1] Robertson S., Zaragoza H. (2009). `The Probabilistic Relevance Framework: BM25 and
Beyond <http://www.staff.city.ac.uk/~sb317/papers/foundations_bm25_review.pdf>`_.
*Foundations and Trends in Information Retrieval*, 3(4), 333–389.
"""
from abc import ABCMeta, abstractmethod
from collections import defaultdict
import logging
import math
from gensim import interfaces, utils
import numpy as np
logger = logging.getLogger(__name__)
class BM25ABC(interfaces.TransformationABC, metaclass=ABCMeta):
"""Objects of this abstract class realize the transformation between word-document co-occurrence
matrix (int) into a BM25 matrix (positive floats). Concrete subclasses of this abstract class
implement different BM25 scoring functions.
"""
def __init__(self, corpus=None, dictionary=None):
r"""Pre-compute the average length of a document and inverse term document frequencies,
which will be used to weight term frequencies for the documents.
Parameters
----------
corpus : iterable of iterable of (int, int) or None, optional
An input corpus, which will be used to compute the average length of a document and
inverse term document frequencies. If None, then `dictionary` will be used to compute
the statistics. If both `corpus` and `dictionary` are None, the statistics will be left
uninitialized. Default is None.
dictionary : :class:`~gensim.corpora.Dictionary`
An input dictionary, which will be used to compute the average length of a document and
inverse term document frequencies. If None, then `corpus` will be used to compute the
statistics. If both `corpus` and `dictionary` are None, the statistics will be left
uninitialized. Default is None.
Attributes
----------
avgdl : float
The average length of a document.
idfs : dict of (int, float)
A mapping from term ids to inverse term document frequencies.
"""
self.avgdl, self.idfs = None, None
if dictionary:
if corpus:
logger.warning("constructor received both corpus and dictionary; ignoring the corpus")
num_tokens = sum(dictionary.cfs.values())
self.avgdl = num_tokens / dictionary.num_docs
self.idfs = self.precompute_idfs(dictionary.dfs, dictionary.num_docs)
elif corpus:
dfs = defaultdict(lambda: 0)
num_tokens = 0
num_docs = 0
for bow in corpus:
num_tokens += len(bow)
for term_id in set(term_id for term_id, _ in bow):
dfs[term_id] += 1
num_docs += 1
self.avgdl = num_tokens / num_docs
self.idfs = self.precompute_idfs(dfs, num_docs)
else:
pass
@abstractmethod
def precompute_idfs(self, dfs, num_docs):
"""Precompute inverse term document frequencies, which will be used to weight term frequencies
for the documents.
Parameters
----------
dfs : dict of (int, int)
A mapping from term ids to term document frequencies.
num_docs : int
The total number of documents in the training corpus.
Returns
-------
idfs : dict of (int, float)
A mapping from term ids to inverse term document frequencies.
"""
pass
@abstractmethod
def get_term_weights(self, num_tokens, term_frequencies, idfs):
"""Compute vector space weights for a set of terms in a document.
Parameters
----------
num_tokens : int
The number of tokens in the document.
term_frequencies : ndarray
1D array of term frequencies.
idfs : ndarray
1D array of inverse term document frequencies.
Returns
-------
term_weights : ndarray
1D array of vector space weights.
"""
pass
def __getitem__(self, bow):
is_corpus, bow = utils.is_corpus(bow)
if is_corpus:
return self._apply(bow)
num_tokens = sum(freq for term_id, freq in bow)
term_ids, term_frequencies, idfs = [], [], []
for term_id, term_frequency in bow:
term_ids.append(term_id)
term_frequencies.append(term_frequency)
idfs.append(self.idfs.get(term_id) or 0.0)
term_frequencies, idfs = np.array(term_frequencies), np.array(idfs)
term_weights = self.get_term_weights(num_tokens, term_frequencies, idfs)
vector = [
(term_id, float(weight))
for term_id, weight
in zip(term_ids, term_weights)
]
return vector
class OkapiBM25Model(BM25ABC):
"""The original Okapi BM25 scoring function of Robertson et al. [2]_.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>> from gensim.models import OkapiBM25Model
>>> from gensim.test.utils import common_texts
>>>
>>> dictionary = Dictionary(common_texts) # fit dictionary
>>> model = OkapiBM25Model(dictionary=dictionary) # fit model
>>>
>>> corpus = [dictionary.doc2bow(line) for line in common_texts] # convert corpus to BoW format
>>> vector = model[corpus[0]] # apply model to the first corpus document
References
----------
.. [2] Robertson S. E., Walker S., Jones S., Hancock-Beaulieu M. M., Gatford M. (1995).
`Okapi at TREC-3 <http://research.microsoft.com/pubs/67649/okapi_trec3.pdf>`_.
*NIST Special Publication 500-226*.
"""
def __init__(self, corpus=None, dictionary=None, k1=1.5, b=0.75, epsilon=0.25):
r"""Pre-compute the average length of a document and inverse term document frequencies,
which will be used to weight term frequencies for the documents.
Parameters
----------
corpus : iterable of iterable of (int, int) or None, optional
An input corpus, which will be used to compute the average length of a document and
inverse term document frequencies. If None, then `dictionary` will be used to compute
the statistics. If both `corpus` and `dictionary` are None, the statistics will be left
uninitialized. Default is None.
dictionary : :class:`~gensim.corpora.Dictionary`
An input dictionary, which will be used to compute the average length of a document and
inverse term document frequencies. If None, then `corpus` will be used to compute the
statistics. If both `corpus` and `dictionary` are None, the statistics will be left
uninitialized. Default is None.
k1 : float
A positive tuning parameter that determines the impact of the term frequency on its BM25
weight. Singhal [3]_ suggests setting `k1` between 1.0 and 2.0. Default is 1.5.
b : float
A tuning parameter between 0.0 and 1.0 that determines the document length
normalization: 1.0 corresponds to full document normalization, while 0.0 corresponds to
no length normalization. Singhal [3]_ suggests setting `b` to 0.75, which is the default.
epsilon : float
A positive tuning parameter that lower-bounds an inverse document frequency.
Defaults to 0.25.
Attributes
----------
k1 : float
A positive tuning parameter that determines the impact of the term frequency on its BM25
weight. Singhal [3]_ suggests setting `k1` between 1.0 and 2.0. Default is 1.5.
b : float
A tuning parameter between 0.0 and 1.0 that determines the document length
normalization: 1.0 corresponds to full document normalization, while 0.0 corresponds to
no length normalization. Singhal [3]_ suggests setting `b` to 0.75, which is the default.
epsilon : float
A positive tuning parameter that lower-bounds an inverse document frequency.
Defaults to 0.25.
References
----------
.. [3] Singhal, A. (2001). `Modern information retrieval: A brief overview
<http://singhal.info/ieee2001.pdf>`_. *IEEE Data Eng. Bull.*, 24(4), 35–43.
"""
self.k1, self.b, self.epsilon = k1, b, epsilon
super().__init__(corpus, dictionary)
def precompute_idfs(self, dfs, num_docs):
idf_sum = 0
idfs = dict()
negative_idfs = []
for term_id, freq in dfs.items():
idf = math.log(num_docs - freq + 0.5) - math.log(freq + 0.5)
idfs[term_id] = idf
idf_sum += idf
if idf < 0:
negative_idfs.append(term_id)
average_idf = idf_sum / len(idfs)
eps = self.epsilon * average_idf
for term_id in negative_idfs:
idfs[term_id] = eps
return idfs
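# Worked example (illustrative): with num_docs=100 and a term document frequency of 25,
#   idf = ln(100 - 25 + 0.5) - ln(25 + 0.5) = ln(75.5 / 25.5) ≈ 1.085.
# A very common term with df=60 would get ln(40.5 / 60.5) ≈ -0.40, i.e. a negative idf,
# and is therefore replaced by epsilon * average_idf in the loop above.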
def get_term_weights(self, num_tokens, term_frequencies, idfs):
term_weights = idfs * (term_frequencies * (self.k1 + 1)
/ (term_frequencies + self.k1 * (1 - self.b + self.b
* num_tokens / self.avgdl)))
return term_weights
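# Worked example (illustrative): for a document of exactly average length (num_tokens == avgdl)
# the length factor (1 - b + b * num_tokens / avgdl) reduces to 1, so with k1=1.5 a term with
# frequency 2 contributes idf * 2 * (1.5 + 1) / (2 + 1.5) = idf * 10/7 ≈ 1.43 * idf.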
class LuceneBM25Model(BM25ABC):
"""The scoring function of Apache Lucene 8 [4]_.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>> from gensim.models import LuceneBM25Model
>>> from gensim.test.utils import common_texts
>>>
>>> dictionary = Dictionary(common_texts) # fit dictionary
>>> corpus = [dictionary.doc2bow(line) for line in common_texts] # convert corpus to BoW format
>>>
>>> model = LuceneBM25Model(dictionary=dictionary) # fit model
>>> vector = model[corpus[0]] # apply model to the first corpus document
References
----------
.. [4] Kamphuis, C., de Vries, A. P., Boytsov, L., Lin, J. (2020). Which
BM25 Do You Mean? `A Large-Scale Reproducibility Study of Scoring Variants
<https://doi.org/10.1007/978-3-030-45442-5_4>`_. In: Advances in Information Retrieval.
28–34.
"""
def __init__(self, corpus=None, dictionary=None, k1=1.5, b=0.75):
r"""Pre-compute the average length of a document and inverse term document frequencies,
which will be used to weight term frequencies for the documents.
Parameters
----------
corpus : iterable of iterable of (int, int) or None, optional
An input corpus, which will be used to compute the average length of a document and
inverse term document frequencies. If None, then `dictionary` will be used to compute
the statistics. If both `corpus` and `dictionary` are None, the statistics will be left
uninitialized. Default is None.
dictionary : :class:`~gensim.corpora.Dictionary`
An input dictionary, which will be used to compute the average length of a document and
inverse term document frequencies. If None, then `corpus` will be used to compute the
statistics. If both `corpus` and `dictionary` are None, the statistics will be left
uninitialized. Default is None.
k1 : float
A positive tuning parameter that determines the impact of the term frequency on its BM25
weight. Singhal [3]_ suggests setting `k1` between 1.0 and 2.0. Default is 1.5.
b : float
A tuning parameter between 0.0 and 1.0 that determines the document length
normalization: 1.0 corresponds to full document normalization, while 0.0 corresponds to
no length normalization. Singhal [3]_ suggests setting `b` to 0.75, which is the default.
Attributes
----------
k1 : float
A positive tuning parameter that determines the impact of the term frequency on its BM25
weight. Singhal [3]_ suggests setting `k1` between 1.0 and 2.0. Default is 1.5.
b : float
A tuning parameter between 0.0 and 1.0 that determines the document length
normalization: 1.0 corresponds to full document normalization, while 0.0 corresponds to
no length normalization. Singhal [3]_ suggests setting `b` to 0.75, which is the default.
"""
self.k1, self.b = k1, b
super().__init__(corpus, dictionary)
def precompute_idfs(self, dfs, num_docs):
idfs = dict()
for term_id, freq in dfs.items():
idf = math.log(num_docs + 1.0) - math.log(freq + 0.5)
idfs[term_id] = idf
return idfs
def get_term_weights(self, num_tokens, term_frequencies, idfs):
term_weights = idfs * (term_frequencies
/ (term_frequencies + self.k1 * (1 - self.b + self.b
* num_tokens / self.avgdl)))
return term_weights
class AtireBM25Model(BM25ABC):
"""The scoring function of Trotman et al. [5]_.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>> from gensim.models import AtireBM25Model
>>> from gensim.test.utils import common_texts
>>>
>>> dictionary = Dictionary(common_texts) # fit dictionary
>>> corpus = [dictionary.doc2bow(line) for line in common_texts] # convert corpus to BoW format
>>>
>>> model = AtireBM25Model(dictionary=dictionary) # fit model
>>> vector = model[corpus[0]] # apply model to the first corpus document
References
----------
.. [5] Trotman, A., Jia X., Crane M., `Towards an Efficient and Effective Search Engine
<http://www.cs.otago.ac.nz/homepages/andrew/involvement/2012-SIGIR-OSIR.pdf#page=45>`_,
In: SIGIR 2012 Workshop on Open Source Information Retrieval. 40–47.
"""
def __init__(self, corpus=None, dictionary=None, k1=1.5, b=0.75):
r"""Pre-compute the average length of a document and inverse term document frequencies,
which will be used to weight term frequencies for the documents.
Parameters
----------
corpus : iterable of iterable of (int, int) or None, optional
An input corpus, which will be used to compute the average length of a document and
inverse term document frequencies. If None, then `dictionary` will be used to compute
the statistics. If both `corpus` and `dictionary` are None, the statistics will be left
uninitialized. Default is None.
dictionary : :class:`~gensim.corpora.Dictionary`
An input dictionary, which will be used to compute the average length of a document and
inverse term document frequencies. If None, then `corpus` will be used to compute the
statistics. If both `corpus` and `dictionary` are None, the statistics will be left
uninitialized. Default is None.
k1 : float
A positive tuning parameter that determines the impact of the term frequency on its BM25
weight. Singhal [3]_ suggests setting `k1` between 1.0 and 2.0. Default is 1.5.
b : float
A tuning parameter between 0.0 and 1.0 that determines the document length
normalization: 1.0 corresponds to full document normalization, while 0.0 corresponds to
no length normalization. Singhal [3]_ suggests setting `b` to 0.75, which is the default.
Attributes
----------
k1 : float
A positive tuning parameter that determines the impact of the term frequency on its BM25
weight. Singhal [3]_ suggests setting `k1` between 1.0 and 2.0. Default is 1.5.
b : float
A tuning parameter between 0.0 and 1.0 that determines the document length
normalization: 1.0 corresponds to full document normalization, while 0.0 corresponds to
no length normalization. Singhal [3]_ suggests setting `b` to 0.75, which is the default.
"""
self.k1, self.b = k1, b
super().__init__(corpus, dictionary)
def precompute_idfs(self, dfs, num_docs):
idfs = dict()
for term_id, freq in dfs.items():
idf = math.log(num_docs) - math.log(freq)
idfs[term_id] = idf
return idfs
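# Worked example (illustrative): ATIRE uses the plain idf = ln(num_docs / df); for a term in
# 25 of 100 documents, idf = ln(4) ≈ 1.386, and it is never negative as long as df <= num_docs.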
def get_term_weights(self, num_tokens, term_frequencies, idfs):
term_weights = idfs * (term_frequencies * (self.k1 + 1)
/ (term_frequencies + self.k1 * (1 - self.b + self.b
* num_tokens / self.avgdl)))
return term_weights
| 17,136 | Python | .py | 328 | 41.926829 | 104 | 0.624649 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,102 | ldamulticore.py | piskvorky_gensim/gensim/models/ldamulticore.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Jan Zikes, Radim Rehurek
# Copyright (C) 2014 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Online Latent Dirichlet Allocation (LDA) in Python, using all CPU cores to parallelize and speed up model training.
The parallelization uses multiprocessing; in case this doesn't work for you for some reason,
try the :class:`gensim.models.ldamodel.LdaModel` class which is an equivalent, but more straightforward and single-core
implementation.
The training algorithm:
* is **streamed**: training documents may come in sequentially, no random access required,
* runs in **constant memory** w.r.t. the number of documents: size of the
training corpus does not affect memory footprint, can process corpora larger than RAM
Wall-clock `performance on the English Wikipedia <https://radimrehurek.com/gensim/wiki.html>`_ (2G corpus positions,
3.5M documents, 100K features, 0.54G non-zero entries in the final bag-of-words matrix), requesting 100 topics:
====================================================== ==============
algorithm training time
====================================================== ==============
LdaMulticore(workers=1) 2h30m
LdaMulticore(workers=2) 1h24m
LdaMulticore(workers=3) 1h6m
old LdaModel() 3h44m
simply iterating over input corpus = I/O overhead 20m
====================================================== ==============
(Measured on `this i7 server <http://www.hetzner.de/en/hosting/produkte_rootserver/ex40ssd>`_
with 4 physical cores, so that optimal `workers=3`, one less than the number of cores.)
This module allows both LDA model estimation from a training corpus and inference of topic distribution on new,
unseen documents. The model can also be updated with new documents for online training.
The core estimation code is based on the `onlineldavb.py script
<https://github.com/blei-lab/onlineldavb/blob/master/onlineldavb.py>`_, by
Matthew D. Hoffman, David M. Blei, Francis Bach:
`'Online Learning for Latent Dirichlet Allocation', NIPS 2010`_.
.. _'Online Learning for Latent Dirichlet Allocation', NIPS 2010: online-lda_
.. _'Online Learning for LDA' by Hoffman et al.: online-lda_
.. _online-lda: https://papers.neurips.cc/paper/2010/file/71f6278d140af599e06ad9bf1ba03cb0-Paper.pdf
Usage examples
--------------
The constructor estimates Latent Dirichlet Allocation model parameters based on a training corpus
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus, common_dictionary
>>>
>>> lda = LdaMulticore(common_corpus, id2word=common_dictionary, num_topics=10)
Save a model to disk, or reload a pre-trained model
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> # Save model to disk.
>>> temp_file = datapath("model")
>>> lda.save(temp_file)
>>>
>>> # Load a potentially pretrained model from disk.
>>> lda = LdaModel.load(temp_file)
Query, or update the model using new, unseen documents
.. sourcecode:: pycon
>>> other_texts = [
... ['computer', 'time', 'graph'],
... ['survey', 'response', 'eps'],
... ['human', 'system', 'computer']
... ]
>>> other_corpus = [common_dictionary.doc2bow(text) for text in other_texts]
>>>
>>> unseen_doc = other_corpus[0]
>>> vector = lda[unseen_doc] # get topic probability distribution for a document
>>>
>>> # Update the model by incrementally training on the new corpus.
>>> lda.update(other_corpus) # update the LDA model with additional documents
"""
import logging
import queue
from multiprocessing import Pool, Queue, cpu_count
import numpy as np
from gensim import utils
from gensim.models.ldamodel import LdaModel, LdaState
logger = logging.getLogger(__name__)
class LdaMulticore(LdaModel):
"""An optimized implementation of the LDA algorithm, able to harness the power of multicore CPUs.
Follows a similar API to the parent class :class:`~gensim.models.ldamodel.LdaModel`.
"""
def __init__(self, corpus=None, num_topics=100, id2word=None, workers=None,
chunksize=2000, passes=1, batch=False, alpha='symmetric',
eta=None, decay=0.5, offset=1.0, eval_every=10, iterations=50,
gamma_threshold=0.001, random_state=None, minimum_probability=0.01,
minimum_phi_value=0.01, per_word_topics=False, dtype=np.float32):
"""
Parameters
----------
corpus : {iterable of list of (int, float), scipy.sparse.csc}, optional
Stream of document vectors or sparse matrix of shape (`num_documents`, `num_terms`).
If not given, the model is left untrained (presumably because you want to call
:meth:`~gensim.models.ldamodel.LdaModel.update` manually).
num_topics : int, optional
The number of requested latent topics to be extracted from the training corpus.
id2word : {dict of (int, str), :class:`gensim.corpora.dictionary.Dictionary`}
Mapping from word IDs to words. It is used to determine the vocabulary size, as well as for
debugging and topic printing.
workers : int, optional
Number of worker processes to be used for parallelization. If None, all available cores
(as estimated by `workers=cpu_count()-1`) will be used. **Note** however that for
hyper-threaded CPUs, this estimation returns a too high number -- set `workers`
directly to the number of your **real** cores (not hyperthreads) minus one, for optimal performance.
chunksize : int, optional
Number of documents to be used in each training chunk.
passes : int, optional
Number of passes through the corpus during training.
alpha : {float, numpy.ndarray of float, list of float, str}, optional
A-priori belief on document-topic distribution, this can be:
* scalar for a symmetric prior over document-topic distribution,
* 1D array of length equal to num_topics to denote an asymmetric user defined prior for each topic.
Alternatively default prior selecting strategies can be employed by supplying a string:
* 'symmetric': (default) Uses a fixed symmetric prior of `1.0 / num_topics`,
* 'asymmetric': Uses a fixed normalized asymmetric prior of `1.0 / (topic_index + sqrt(num_topics))`.
eta : {float, numpy.ndarray of float, list of float, str}, optional
A-priori belief on topic-word distribution, this can be:
* scalar for a symmetric prior over topic-word distribution,
* 1D array of length equal to num_words to denote an asymmetric user defined prior for each word,
* matrix of shape (num_topics, num_words) to assign a probability for each word-topic combination.
Alternatively default prior selecting strategies can be employed by supplying a string:
* 'symmetric': (default) Uses a fixed symmetric prior of `1.0 / num_topics`,
* 'auto': Learns an asymmetric prior from the corpus.
decay : float, optional
A number between (0.5, 1] to weight what percentage of the previous lambda value is forgotten
when each new document is examined. Corresponds to :math:`\\kappa` from
`'Online Learning for LDA' by Hoffman et al.`_
offset : float, optional
Hyper-parameter that controls how much we slow down the first few iterations.
Corresponds to :math:`\\tau_0` from `'Online Learning for LDA' by Hoffman et al.`_
eval_every : int, optional
Log perplexity is estimated every that many updates. Setting this to one slows down training by ~2x.
iterations : int, optional
Maximum number of iterations through the corpus when inferring the topic distribution of a corpus.
gamma_threshold : float, optional
Minimum change in the value of the gamma parameters to continue iterating.
minimum_probability : float, optional
Topics with a probability lower than this threshold will be filtered out.
random_state : {np.random.RandomState, int}, optional
Either a randomState object or a seed to generate one. Useful for reproducibility.
Note that results can still vary due to non-determinism in OS scheduling of the worker processes.
minimum_phi_value : float, optional
If `per_word_topics` is True, this represents a lower bound on the term probabilities.
per_word_topics : bool
If True, the model also computes a list of topics, sorted in descending order of most likely topics for
each word, along with their phi values multiplied by the feature length (i.e. word count).
dtype : {numpy.float16, numpy.float32, numpy.float64}, optional
Data-type to use during calculations inside model. All inputs are also converted.
"""
self.workers = max(1, cpu_count() - 1) if workers is None else workers
self.batch = batch
if isinstance(alpha, str) and alpha == 'auto':
raise NotImplementedError("auto-tuning alpha not implemented in LdaMulticore; use plain LdaModel.")
super(LdaMulticore, self).__init__(
corpus=corpus, num_topics=num_topics,
id2word=id2word, chunksize=chunksize, passes=passes, alpha=alpha, eta=eta,
decay=decay, offset=offset, eval_every=eval_every, iterations=iterations,
gamma_threshold=gamma_threshold, random_state=random_state, minimum_probability=minimum_probability,
minimum_phi_value=minimum_phi_value, per_word_topics=per_word_topics, dtype=dtype,
)
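# Usage sketch (illustrative, reusing `common_corpus` / `common_dictionary` from the
# module-level examples above): on a machine with 4 physical cores, set `workers` explicitly
# to the number of real cores minus one rather than relying on the cpu_count() - 1 default.
#
#   >>> lda = LdaMulticore(common_corpus, id2word=common_dictionary, num_topics=10, workers=3)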
def update(self, corpus, chunks_as_numpy=False):
"""Train the model with new documents, by EM-iterating over `corpus` until the topics converge
(or until the maximum number of allowed iterations is reached).
Train the model with new documents, by EM-iterating over the corpus until the topics converge, or until
the maximum number of allowed iterations is reached. `corpus` must be an iterable. The E step is distributed
into the several processes.
Notes
-----
This update also supports updating an already trained model (`self`) with new documents from `corpus`;
the two models are then merged in proportion to the number of old vs. new documents.
This feature is still experimental for non-stationary input streams.
For stationary input (no topic drift in new documents), on the other hand,
this equals the online update of `'Online Learning for LDA' by Hoffman et al.`_
and is guaranteed to converge for any `decay` in (0.5, 1].
Parameters
----------
corpus : {iterable of list of (int, float), scipy.sparse.csc}, optional
Stream of document vectors or sparse matrix of shape (`num_documents`, `num_terms`) used to update the
model.
chunks_as_numpy : bool
Whether each chunk passed to the inference step should be a np.ndarray or not. Numpy can in some settings
turn the term IDs into floats, these will be converted back into integers in inference, which incurs a
performance hit. For distributed computing it may be desirable to keep the chunks as `numpy.ndarray`.
"""
try:
lencorpus = len(corpus)
except TypeError:
logger.warning("input corpus stream has no len(); counting documents")
lencorpus = sum(1 for _ in corpus)
if lencorpus == 0:
logger.warning("LdaMulticore.update() called with an empty corpus")
return
self.state.numdocs += lencorpus
if self.batch:
updatetype = "batch"
updateafter = lencorpus
else:
updatetype = "online"
updateafter = self.chunksize * self.workers
eval_every = self.eval_every or 0
evalafter = min(lencorpus, eval_every * updateafter)
updates_per_pass = max(1, lencorpus / updateafter)
logger.info(
"running %s LDA training, %s topics, %i passes over the supplied corpus of %i documents, "
"updating every %i documents, evaluating every ~%i documents, "
"iterating %ix with a convergence threshold of %f",
updatetype, self.num_topics, self.passes, lencorpus, updateafter,
evalafter, self.iterations, self.gamma_threshold
)
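# Worked example (illustrative): with the default chunksize=2000 and workers=3 in online mode,
# updateafter = 2000 * 3 = 6000, i.e. the M-step merges the workers' results roughly every
# 6,000 documents; with eval_every=10 perplexity is then logged about every 60,000 documents
# (or once per pass on a smaller corpus, because of the min(lencorpus, ...) above).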
if updates_per_pass * self.passes < 10:
logger.warning(
"too few updates, training might not converge; "
"consider increasing the number of passes or iterations to improve accuracy"
)
job_queue = Queue(maxsize=2 * self.workers)
result_queue = Queue()
# rho is the "speed" of updating; TODO try other fncs
# pass_ + num_updates handles increasing the starting t for each pass,
# while allowing it to "reset" on the first pass of each update
def rho():
return pow(self.offset + pass_ + (self.num_updates / self.chunksize), -self.decay)
def process_result_queue(force=False):
"""
Clear the result queue, merging all intermediate results, and update the
LDA model if necessary.
"""
merged_new = False
while not result_queue.empty():
other.merge(result_queue.get())
queue_size[0] -= 1
merged_new = True
if (force and merged_new and queue_size[0] == 0) or (other.numdocs >= updateafter):
self.do_mstep(rho(), other, pass_ > 0)
other.reset()
if eval_every > 0 and (force or (self.num_updates / updateafter) % eval_every == 0):
self.log_perplexity(chunk, total_docs=lencorpus)
logger.info("training LDA model using %i processes", self.workers)
pool = Pool(self.workers, worker_e_step, (job_queue, result_queue, self))
for pass_ in range(self.passes):
queue_size, reallen = [0], 0
other = LdaState(self.eta, self.state.sstats.shape)
chunk_stream = utils.grouper(corpus, self.chunksize, as_numpy=chunks_as_numpy)
for chunk_no, chunk in enumerate(chunk_stream):
reallen += len(chunk) # keep track of how many documents we've processed so far
# put the chunk into the workers' input job queue
while True:
try:
job_queue.put((chunk_no, chunk, self.state), block=False)
queue_size[0] += 1
logger.info(
"PROGRESS: pass %i, dispatched chunk #%i = documents up to #%i/%i, "
"outstanding queue size %i",
pass_, chunk_no, chunk_no * self.chunksize + len(chunk), lencorpus, queue_size[0]
)
break
except queue.Full:
# in case the input job queue is full, keep clearing the
# result queue, to make sure we don't deadlock
process_result_queue()
process_result_queue()
# endfor single corpus pass
# wait for all outstanding jobs to finish
while queue_size[0] > 0:
process_result_queue(force=True)
if reallen != lencorpus:
raise RuntimeError("input corpus size changed during training (don't use generators as input)")
# endfor entire update
pool.terminate()
def worker_e_step(input_queue, result_queue, worker_lda):
"""Perform E-step for each job.
Parameters
----------
input_queue : queue of (int, list of (int, float), :class:`~gensim.models.ldamodel.LdaState`)
Each element is a job characterized by its ID, the corpus chunk to be processed in BOW format, and the model
state with which to process it.
result_queue : queue of :class:`~gensim.models.ldamodel.LdaState`
After the worker finished the job, the state of the resulting (trained) worker model is appended to this queue.
worker_lda : :class:`~gensim.models.ldamulticore.LdaMulticore`
LDA instance on which the E-step is performed.
"""
logger.debug("worker process entering E-step loop")
while True:
logger.debug("getting a new job")
chunk_no, chunk, w_state = input_queue.get()
logger.debug("processing chunk #%i of %i documents", chunk_no, len(chunk))
worker_lda.state = w_state
worker_lda.sync_state()
worker_lda.state.reset()
worker_lda.do_estep(chunk) # TODO: auto-tune alpha?
del chunk
logger.debug("processed chunk, queuing the result")
result_queue.put(worker_lda.state)
worker_lda.state = None
logger.debug("result put")
| 17,462 | Python | .py | 291 | 50.223368 | 119 | 0.644848 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,103 | phrases.py | piskvorky_gensim/gensim/models/phrases.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""
Automatically detect common phrases -- aka multi-word expressions, word n-gram collocations -- from
a stream of sentences.
Inspired by:
* `Mikolov, et. al: "Distributed Representations of Words and Phrases and their Compositionality"
<https://arxiv.org/abs/1310.4546>`_
* `"Normalized (Pointwise) Mutual Information in Collocation Extraction" by Gerlof Bouma
<https://svn.spraakdata.gu.se/repos/gerlof/pub/www/Docs/npmi-pfd.pdf>`_
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> from gensim.models.word2vec import Text8Corpus
>>> from gensim.models.phrases import Phrases, ENGLISH_CONNECTOR_WORDS
>>>
>>> # Create training corpus. Must be a sequence of sentences (e.g. an iterable or a generator).
>>> sentences = Text8Corpus(datapath('testcorpus.txt'))
>>> # Each sentence must be a list of string tokens:
>>> first_sentence = next(iter(sentences))
>>> print(first_sentence[:10])
['computer', 'human', 'interface', 'computer', 'response', 'survey', 'system', 'time', 'user', 'interface']
>>>
>>> # Train a toy phrase model on our training corpus.
>>> phrase_model = Phrases(sentences, min_count=1, threshold=1, connector_words=ENGLISH_CONNECTOR_WORDS)
>>>
>>> # Apply the trained phrases model to a new, unseen sentence.
>>> new_sentence = ['trees', 'graph', 'minors']
>>> phrase_model[new_sentence]
['trees_graph', 'minors']
>>> # The toy model considered "trees graph" a single phrase => joined the two
>>> # tokens into a single "phrase" token, using our selected `_` delimiter.
>>>
>>> # Apply the trained model to each sentence of a corpus, using the same [] syntax:
>>> for sent in phrase_model[sentences]:
... pass
>>>
>>> # Update the model with two new sentences on the fly.
>>> phrase_model.add_vocab([["hello", "world"], ["meow"]])
>>>
>>> # Export the trained model = use less RAM, faster processing. Model updates no longer possible.
>>> frozen_model = phrase_model.freeze()
>>> # Apply the frozen model; same results as before:
>>> frozen_model[new_sentence]
['trees_graph', 'minors']
>>>
>>> # Save / load models.
>>> frozen_model.save("/tmp/my_phrase_model.pkl")
>>> model_reloaded = Phrases.load("/tmp/my_phrase_model.pkl")
>>> model_reloaded[['trees', 'graph', 'minors']] # apply the reloaded model to a sentence
['trees_graph', 'minors']
"""
import logging
import itertools
from math import log
import pickle
from inspect import getfullargspec as getargspec
import time
from gensim import utils, interfaces
logger = logging.getLogger(__name__)
NEGATIVE_INFINITY = float('-inf')
# Words from this set are "ignored" during phrase detection:
# 1) Phrases may not start nor end with these words.
# 2) Phrases may include any number of these words inside.
ENGLISH_CONNECTOR_WORDS = frozenset(
" a an the " # articles; we never care about these in MWEs
" for of with without at from to in on by " # prepositions; incomplete on purpose, to minimize FNs
" and or " # conjunctions; incomplete on purpose, to minimize FNs
.split()
)
def original_scorer(worda_count, wordb_count, bigram_count, len_vocab, min_count, corpus_word_count):
r"""Bigram scoring function, based on the original `Mikolov, et. al: "Distributed Representations
of Words and Phrases and their Compositionality" <https://arxiv.org/abs/1310.4546>`_.
Parameters
----------
worda_count : int
Number of occurrences for first word.
wordb_count : int
Number of occurrences for second word.
bigram_count : int
Number of co-occurrences for phrase "worda_wordb".
len_vocab : int
Size of vocabulary.
min_count: int
Minimum collocation count threshold.
corpus_word_count : int
Not used in this particular scoring technique.
Returns
-------
float
Score for given phrase. Can be negative.
Notes
-----
Formula: :math:`\frac{(bigram\_count - min\_count) * len\_vocab }{ (worda\_count * wordb\_count)}`.
"""
denom = worda_count * wordb_count
if denom == 0:
return NEGATIVE_INFINITY
return (bigram_count - min_count) / float(denom) * len_vocab
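# Worked example (illustrative, with made-up counts): with len_vocab=10,000 and min_count=10,
# a bigram seen 60 times whose first word occurs 1,000 times and second word 500 times scores
#   (60 - 10) / (1000 * 500) * 10000 = 1.0;
# a higher score means the pair is more likely to be joined into a phrase.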
def npmi_scorer(worda_count, wordb_count, bigram_count, len_vocab, min_count, corpus_word_count):
r"""Calculation NPMI score based on `"Normalized (Pointwise) Mutual Information in Colocation Extraction"
by Gerlof Bouma <https://svn.spraakdata.gu.se/repos/gerlof/pub/www/Docs/npmi-pfd.pdf>`_.
Parameters
----------
worda_count : int
Number of occurrences for first word.
wordb_count : int
Number of occurrences for second word.
bigram_count : int
Number of co-occurrences for phrase "worda_wordb".
len_vocab : int
Not used.
min_count: int
Ignore all bigrams with total collected count lower than this value.
corpus_word_count : int
Total number of words in the corpus.
Returns
-------
float
If bigram_count >= min_count, return the collocation score, in the range -1 to 1.
Otherwise return -inf.
Notes
-----
Formula: :math:`\frac{\ln(prob(word_a, word_b) / (prob(word_a) * prob(word_b)))}{-\ln(prob(word_a, word_b))}`,
where :math:`prob(word) = \frac{word\_count}{corpus\_word\_count}`.
"""
if bigram_count >= min_count:
corpus_word_count = float(corpus_word_count)
pa = worda_count / corpus_word_count
pb = wordb_count / corpus_word_count
pab = bigram_count / corpus_word_count
try:
return log(pab / (pa * pb)) / -log(pab)
except ValueError: # some of the counts were zero => never a phrase
return NEGATIVE_INFINITY
else:
# Return -infinity to make sure that no phrases will be created
# from bigrams less frequent than min_count.
return NEGATIVE_INFINITY
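# Worked example (illustrative, with made-up counts): in a corpus of 10,000 tokens where the
# first word occurs 100 times, the second 50 times and the bigram 20 times (min_count=5):
#   pa = 0.01, pb = 0.005, pab = 0.002
#   score = ln(0.002 / (0.01 * 0.005)) / -ln(0.002) = ln(40) / 6.215 ≈ 0.59,
# i.e. well on the "collocation" side of the -1..1 NPMI range.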
def _is_single(obj):
"""Check whether `obj` is a single document or an entire corpus.
Parameters
----------
obj : object
Return
------
(bool, object)
A 2-tuple ``(is_single_document, new_obj)``, where `new_obj`
yields the same sequence as the original `obj`.
Notes
-----
`obj` is a single document if it is an iterable of strings. It is a corpus if it is an iterable of documents.
"""
obj_iter = iter(obj)
temp_iter = obj_iter
try:
peek = next(obj_iter)
obj_iter = itertools.chain([peek], obj_iter)
except StopIteration:
# An empty object is interpreted as a single document (not a corpus).
return True, obj
if isinstance(peek, str):
# First item is a string => obj is a single document for sure.
return True, obj_iter
if temp_iter is obj:
# An iterator / generator => interpret input as a corpus.
return False, obj_iter
# If the first item isn't a string, assume obj is an iterable corpus.
return False, obj
class _PhrasesTransformation(interfaces.TransformationABC):
"""
Abstract base class for :class:`~gensim.models.phrases.Phrases` and
:class:`~gensim.models.phrases.FrozenPhrases`.
"""
def __init__(self, connector_words):
self.connector_words = frozenset(connector_words)
def score_candidate(self, word_a, word_b, in_between):
"""Score a single phrase candidate.
Returns
-------
(str, float)
2-tuple of ``(delimiter-joined phrase, phrase score)`` for a phrase,
or ``(None, None)`` if not a phrase.
"""
raise NotImplementedError("ABC: override this method in child classes")
def analyze_sentence(self, sentence):
"""Analyze a sentence, concatenating any detected phrases into a single token.
Parameters
----------
sentence : iterable of str
Token sequence representing the sentence to be analyzed.
Yields
------
(str, {float, None})
Iterate through the input sentence tokens and yield 2-tuples of:
- ``(concatenated_phrase_tokens, score)`` for token sequences that form a phrase.
- ``(word, None)`` if the token is not a part of a phrase.
"""
start_token, in_between = None, []
for word in sentence:
if word not in self.connector_words:
# The current word is a normal token, not a connector word, which means it's a potential
# beginning (or end) of a phrase.
if start_token:
# We're inside a potential phrase, of which this word is the end.
phrase, score = self.score_candidate(start_token, word, in_between)
if score is not None:
# Phrase detected!
yield phrase, score
start_token, in_between = None, []
else:
# Not a phrase after all. Dissolve the candidate's constituent tokens as individual words.
yield start_token, None
for w in in_between:
yield w, None
start_token, in_between = word, [] # new potential phrase starts here
else:
# Not inside a phrase yet; start a new phrase candidate here.
start_token, in_between = word, []
else: # We're a connector word.
if start_token:
# We're inside a potential phrase: add the connector word and keep growing the phrase.
in_between.append(word)
else:
# Not inside a phrase: emit the connector word and move on.
yield word, None
# Emit any non-phrase tokens at the end.
if start_token:
yield start_token, None
for w in in_between:
yield w, None
def __getitem__(self, sentence):
"""Convert the input sequence of tokens ``sentence`` into a sequence of tokens where adjacent
tokens are replaced by a single token if they form a bigram collocation.
If `sentence` is an entire corpus (iterable of sentences rather than a single
sentence), return an iterable that converts each of the corpus' sentences
into phrases on the fly, one after another.
Parameters
----------
sentence : {list of str, iterable of list of str}
Input sentence or a stream of sentences.
Return
------
{list of str, iterable of list of str}
Sentence with phrase tokens joined by ``self.delimiter``, if input was a single sentence.
A generator of such sentences if input was a corpus.
s """
is_single, sentence = _is_single(sentence)
if not is_single:
# If the input is an entire corpus (rather than a single sentence),
# return an iterable stream.
return self._apply(sentence)
return [token for token, _ in self.analyze_sentence(sentence)]
def find_phrases(self, sentences):
"""Get all unique phrases (multi-word expressions) that appear in ``sentences``, and their scores.
Parameters
----------
sentences : iterable of list of str
Text corpus.
Returns
-------
dict(str, float)
Unique phrases found in ``sentences``, mapped to their scores.
Example
-------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> from gensim.models.word2vec import Text8Corpus
>>> from gensim.models.phrases import Phrases, ENGLISH_CONNECTOR_WORDS
>>>
>>> sentences = Text8Corpus(datapath('testcorpus.txt'))
>>> phrases = Phrases(sentences, min_count=1, threshold=0.1, connector_words=ENGLISH_CONNECTOR_WORDS)
>>>
>>> for phrase, score in phrases.find_phrases(sentences).items():
... print(phrase, score)
"""
result = {}
for sentence in sentences:
for phrase, score in self.analyze_sentence(sentence):
if score is not None:
result[phrase] = score
return result
@classmethod
def load(cls, *args, **kwargs):
"""Load a previously saved :class:`~gensim.models.phrases.Phrases` /
:class:`~gensim.models.phrases.FrozenPhrases` model.
Handles backwards compatibility from older versions which did not support pluggable scoring functions.
Parameters
----------
args : object
See :class:`~gensim.utils.SaveLoad.load`.
kwargs : object
See :class:`~gensim.utils.SaveLoad.load`.
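        Example
        -------
        A minimal sketch; the file name is hypothetical and ``phrases`` is a previously trained model:
        .. sourcecode:: pycon
            >>> phrases.save("my_phrases.pkl")
            >>> phrases = Phrases.load("my_phrases.pkl")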
"""
model = super(_PhrasesTransformation, cls).load(*args, **kwargs)
# Upgrade FrozenPhrases
try:
phrasegrams = getattr(model, "phrasegrams", {})
component, score = next(iter(phrasegrams.items()))
if isinstance(score, tuple):
# Value in phrasegrams used to be a tuple; keep only the 2nd tuple component = score.
model.phrasegrams = {
str(model.delimiter.join(key), encoding='utf8'): val[1]
for key, val in phrasegrams.items()
}
elif isinstance(component, tuple): # 3.8 => 4.0: phrasegram keys are strings, not tuples with bytestrings
model.phrasegrams = {
str(model.delimiter.join(key), encoding='utf8'): val
for key, val in phrasegrams.items()
}
except StopIteration:
# no phrasegrams, nothing to upgrade
pass
# If no scoring parameter, use default scoring.
if not hasattr(model, 'scoring'):
logger.warning('older version of %s loaded without scoring function', cls.__name__)
logger.warning('setting pluggable scoring method to original_scorer for compatibility')
model.scoring = original_scorer
# If there is a scoring parameter, and it's a text value, load the proper scoring function.
if hasattr(model, 'scoring'):
if isinstance(model.scoring, str):
if model.scoring == 'default':
logger.warning('older version of %s loaded with "default" scoring parameter', cls.__name__)
logger.warning('setting scoring method to original_scorer for compatibility')
model.scoring = original_scorer
elif model.scoring == 'npmi':
logger.warning('older version of %s loaded with "npmi" scoring parameter', cls.__name__)
logger.warning('setting scoring method to npmi_scorer for compatibility')
model.scoring = npmi_scorer
else:
raise ValueError(f'failed to load {cls.__name__} model, unknown scoring "{model.scoring}"')
        # common_terms didn't exist pre-3.?, and was renamed to connector_words in 4.0.0.
if not hasattr(model, "connector_words"):
if hasattr(model, "common_terms"):
model.connector_words = model.common_terms
del model.common_terms
else:
logger.warning('loaded older version of %s, setting connector_words to an empty set', cls.__name__)
model.connector_words = frozenset()
if not hasattr(model, 'corpus_word_count'):
logger.warning('older version of %s loaded without corpus_word_count', cls.__name__)
logger.warning('setting corpus_word_count to 0, do not use it in your scoring function')
model.corpus_word_count = 0
# Before 4.0.0, we stored strings as UTF8 bytes internally, to save RAM. Since 4.0.0, we use strings.
if getattr(model, 'vocab', None):
word = next(iter(model.vocab)) # get a random key – any key will do
if not isinstance(word, str):
logger.info("old version of %s loaded, upgrading %i words in memory", cls.__name__, len(model.vocab))
logger.info("re-save the loaded model to avoid this upgrade in the future")
vocab = {}
for key, value in model.vocab.items(): # needs lots of extra RAM temporarily!
vocab[str(key, encoding='utf8')] = value
model.vocab = vocab
if not isinstance(model.delimiter, str):
model.delimiter = str(model.delimiter, encoding='utf8')
return model
class Phrases(_PhrasesTransformation):
"""Detect phrases based on collocation counts."""
def __init__(
self, sentences=None, min_count=5, threshold=10.0,
max_vocab_size=40000000, delimiter='_', progress_per=10000,
scoring='default', connector_words=frozenset(),
):
"""
Parameters
----------
sentences : iterable of list of str, optional
The `sentences` iterable can be simply a list, but for larger corpora, consider a generator that streams
the sentences directly from disk/network, See :class:`~gensim.models.word2vec.BrownCorpus`,
:class:`~gensim.models.word2vec.Text8Corpus` or :class:`~gensim.models.word2vec.LineSentence`
for such examples.
min_count : float, optional
Ignore all words and bigrams with total collected count lower than this value.
threshold : float, optional
Represent a score threshold for forming the phrases (higher means fewer phrases).
A phrase of words `a` followed by `b` is accepted if the score of the phrase is greater than threshold.
Heavily depends on concrete scoring-function, see the `scoring` parameter.
max_vocab_size : int, optional
Maximum size (number of tokens) of the vocabulary. Used to control pruning of less common words,
to keep memory under control. The default of 40M needs about 3.6GB of RAM. Increase/decrease
`max_vocab_size` depending on how much available memory you have.
delimiter : str, optional
Glue character used to join collocation tokens.
scoring : {'default', 'npmi', function}, optional
Specify how potential phrases are scored. `scoring` can be set with either a string that refers to a
built-in scoring function, or with a function with the expected parameter names.
Two built-in scoring functions are available by setting `scoring` to a string:
#. "default" - :func:`~gensim.models.phrases.original_scorer`.
#. "npmi" - :func:`~gensim.models.phrases.npmi_scorer`.
connector_words : set of str, optional
Set of words that may be included within a phrase, without affecting its scoring.
No phrase can start nor end with a connector word; a phrase may contain any number of
connector words in the middle.
**If your texts are in English, set** ``connector_words=phrases.ENGLISH_CONNECTOR_WORDS``.
This will cause phrases to include common English articles, prepositions and
            conjunctions, such as `bank_of_america` or `eye_of_the_beholder`.
For other languages or specific applications domains, use custom ``connector_words``
that make sense there: ``connector_words=frozenset("der die das".split())`` etc.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> from gensim.models.word2vec import Text8Corpus
>>> from gensim.models.phrases import Phrases, ENGLISH_CONNECTOR_WORDS
>>>
>>> # Load corpus and train a model.
>>> sentences = Text8Corpus(datapath('testcorpus.txt'))
>>> phrases = Phrases(sentences, min_count=1, threshold=1, connector_words=ENGLISH_CONNECTOR_WORDS)
>>>
>>> # Use the model to detect phrases in a new sentence.
>>> sent = [u'trees', u'graph', u'minors']
>>> print(phrases[sent])
[u'trees_graph', u'minors']
>>>
>>> # Or transform multiple sentences at once.
>>> sents = [[u'trees', u'graph', u'minors'], [u'graph', u'minors']]
>>> for phrase in phrases[sents]:
... print(phrase)
[u'trees_graph', u'minors']
[u'graph_minors']
>>>
>>> # Export a FrozenPhrases object that is more efficient but doesn't allow any more training.
>>> frozen_phrases = phrases.freeze()
>>> print(frozen_phrases[sent])
[u'trees_graph', u'minors']
Notes
-----
The ``scoring="npmi"`` is more robust when dealing with common words that form part of common bigrams, and
ranges from -1 to 1, but is slower to calculate than the default ``scoring="default"``.
The default is the PMI-like scoring as described in `Mikolov, et. al: "Distributed
Representations of Words and Phrases and their Compositionality" <https://arxiv.org/abs/1310.4546>`_.
To use your own custom ``scoring`` function, pass in a function with the following signature:
* ``worda_count`` - number of corpus occurrences in `sentences` of the first token in the bigram being scored
* ``wordb_count`` - number of corpus occurrences in `sentences` of the second token in the bigram being scored
* ``bigram_count`` - number of occurrences in `sentences` of the whole bigram
* ``len_vocab`` - the number of unique tokens in `sentences`
* ``min_count`` - the `min_count` setting of the Phrases class
* ``corpus_word_count`` - the total number of tokens (non-unique) in `sentences`
The scoring function must accept all these parameters, even if it doesn't use them in its scoring.
The scoring function **must be pickleable**.
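        To illustrate (a minimal sketch, not a recommended scorer), a custom function that scores each
        candidate by its raw corpus frequency could look like this, reusing the ``sentences`` stream from
        the example above:
        .. sourcecode:: pycon
            >>> def count_scorer(worda_count, wordb_count, bigram_count,
            ...                  len_vocab, min_count, corpus_word_count):
            ...     return bigram_count  # must accept all six parameters, even if unused
            >>>
            >>> phrases = Phrases(sentences, min_count=1, threshold=1, scoring=count_scorer)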
"""
super().__init__(connector_words=connector_words)
if min_count <= 0:
raise ValueError("min_count should be at least 1")
if threshold <= 0 and scoring == 'default':
raise ValueError("threshold should be positive for default scoring")
if scoring == 'npmi' and (threshold < -1 or threshold > 1):
raise ValueError("threshold should be between -1 and 1 for npmi scoring")
# Set scoring based on string.
        # Intentionally override the value of the scoring parameter rather than set self.scoring here,
# to still run the check of scoring function parameters in the next code block.
if isinstance(scoring, str):
if scoring == 'default':
scoring = original_scorer
elif scoring == 'npmi':
scoring = npmi_scorer
else:
raise ValueError(f'unknown scoring method string {scoring} specified')
scoring_params = [
'worda_count', 'wordb_count', 'bigram_count', 'len_vocab', 'min_count', 'corpus_word_count',
]
if callable(scoring):
missing = [param for param in scoring_params if param not in getargspec(scoring)[0]]
if not missing:
self.scoring = scoring
else:
raise ValueError(f'scoring function missing expected parameters {missing}')
self.min_count = min_count
self.threshold = threshold
self.max_vocab_size = max_vocab_size
self.vocab = {} # mapping between token => its count
self.min_reduce = 1 # ignore any tokens with count smaller than this
self.delimiter = delimiter
self.progress_per = progress_per
self.corpus_word_count = 0
# Ensure picklability of the scorer.
try:
pickle.loads(pickle.dumps(self.scoring))
except pickle.PickleError:
raise pickle.PickleError(f'Custom scoring function in {self.__class__.__name__} must be pickle-able')
if sentences is not None:
start = time.time()
self.add_vocab(sentences)
self.add_lifecycle_event("created", msg=f"built {self} in {time.time() - start:.2f}s")
def __str__(self):
return "%s<%i vocab, min_count=%s, threshold=%s, max_vocab_size=%s>" % (
self.__class__.__name__, len(self.vocab), self.min_count,
self.threshold, self.max_vocab_size,
)
@staticmethod
def _learn_vocab(sentences, max_vocab_size, delimiter, connector_words, progress_per):
"""Collect unigram and bigram counts from the `sentences` iterable."""
sentence_no, total_words, min_reduce = -1, 0, 1
vocab = {}
logger.info("collecting all words and their counts")
for sentence_no, sentence in enumerate(sentences):
if sentence_no % progress_per == 0:
logger.info(
"PROGRESS: at sentence #%i, processed %i words and %i word types",
sentence_no, total_words, len(vocab),
)
start_token, in_between = None, []
for word in sentence:
if word not in connector_words:
vocab[word] = vocab.get(word, 0) + 1
if start_token is not None:
phrase_tokens = itertools.chain([start_token], in_between, [word])
joined_phrase_token = delimiter.join(phrase_tokens)
vocab[joined_phrase_token] = vocab.get(joined_phrase_token, 0) + 1
start_token, in_between = word, [] # treat word as both end of a phrase AND beginning of another
elif start_token is not None:
in_between.append(word)
total_words += 1
if len(vocab) > max_vocab_size:
utils.prune_vocab(vocab, min_reduce)
min_reduce += 1
logger.info(
"collected %i token types (unigram + bigrams) from a corpus of %i words and %i sentences",
len(vocab), total_words, sentence_no + 1,
)
return min_reduce, vocab, total_words
def add_vocab(self, sentences):
"""Update model parameters with new `sentences`.
Parameters
----------
sentences : iterable of list of str
Text corpus to update this model's parameters from.
Example
-------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> from gensim.models.word2vec import Text8Corpus
>>> from gensim.models.phrases import Phrases, ENGLISH_CONNECTOR_WORDS
>>>
>>> # Train a phrase detector from a text corpus.
>>> sentences = Text8Corpus(datapath('testcorpus.txt'))
>>> phrases = Phrases(sentences, connector_words=ENGLISH_CONNECTOR_WORDS) # train model
>>> assert len(phrases.vocab) == 37
>>>
>>> more_sentences = [
... [u'the', u'mayor', u'of', u'new', u'york', u'was', u'there'],
... [u'machine', u'learning', u'can', u'be', u'new', u'york', u'sometimes'],
... ]
>>>
>>> phrases.add_vocab(more_sentences) # add new sentences to model
>>> assert len(phrases.vocab) == 60
"""
# Uses a separate vocab to collect the token counts from `sentences`.
# This consumes more RAM than merging new sentences into `self.vocab`
# directly, but gives the new sentences a fighting chance to collect
# sufficient counts, before being pruned out by the (large) accumulated
# counts collected in previous learn_vocab runs.
min_reduce, vocab, total_words = self._learn_vocab(
sentences, max_vocab_size=self.max_vocab_size, delimiter=self.delimiter,
progress_per=self.progress_per, connector_words=self.connector_words,
)
self.corpus_word_count += total_words
if self.vocab:
logger.info("merging %i counts into %s", len(vocab), self)
self.min_reduce = max(self.min_reduce, min_reduce)
for word, count in vocab.items():
self.vocab[word] = self.vocab.get(word, 0) + count
if len(self.vocab) > self.max_vocab_size:
utils.prune_vocab(self.vocab, self.min_reduce)
self.min_reduce += 1
else:
# Optimization for a common case: the current vocab is empty, so apply
# the new vocab directly, no need to double it in memory.
self.vocab = vocab
logger.info("merged %s", self)
def score_candidate(self, word_a, word_b, in_between):
# Micro optimization: check for quick early-out conditions, before the actual scoring.
word_a_cnt = self.vocab.get(word_a, 0)
if word_a_cnt <= 0:
return None, None
word_b_cnt = self.vocab.get(word_b, 0)
if word_b_cnt <= 0:
return None, None
phrase = self.delimiter.join([word_a] + in_between + [word_b])
# XXX: Why do we care about *all* phrase tokens? Why not just score the start+end bigram?
phrase_cnt = self.vocab.get(phrase, 0)
if phrase_cnt <= 0:
return None, None
score = self.scoring(
worda_count=word_a_cnt, wordb_count=word_b_cnt, bigram_count=phrase_cnt,
len_vocab=len(self.vocab), min_count=self.min_count, corpus_word_count=self.corpus_word_count,
)
if score <= self.threshold:
return None, None
return phrase, score
def freeze(self):
"""
Return an object that contains the bare minimum of information while still allowing
phrase detection. See :class:`~gensim.models.phrases.FrozenPhrases`.
Use this "frozen model" to dramatically reduce RAM footprint if you don't plan to
make any further changes to your `Phrases` model.
Returns
-------
:class:`~gensim.models.phrases.FrozenPhrases`
Exported object that's smaller, faster, but doesn't support model updates.
"""
return FrozenPhrases(self)
def export_phrases(self):
"""Extract all found phrases.
Returns
        -------
dict(str, float)
Mapping between phrases and their scores.
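        Example
        -------
        A minimal sketch, assuming a trained ``phrases`` model as in the class documentation:
        .. sourcecode:: pycon
            >>> for phrase, score in phrases.export_phrases().items():
            ...     print(phrase, score)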
"""
result, source_vocab = {}, self.vocab
for token in source_vocab:
unigrams = token.split(self.delimiter)
if len(unigrams) < 2:
continue # no phrases here
phrase, score = self.score_candidate(unigrams[0], unigrams[-1], unigrams[1:-1])
if score is not None:
result[phrase] = score
return result
class FrozenPhrases(_PhrasesTransformation):
"""Minimal state & functionality exported from a trained :class:`~gensim.models.phrases.Phrases` model.
The goal of this class is to cut down memory consumption of `Phrases`, by discarding model state
not strictly needed for the phrase detection task.
Use this instead of `Phrases` if you do not need to update the bigram statistics with new documents any more.
"""
def __init__(self, phrases_model):
"""
Parameters
----------
phrases_model : :class:`~gensim.models.phrases.Phrases`
Trained phrases instance, to extract all phrases from.
Notes
-----
After the one-time initialization, a :class:`~gensim.models.phrases.FrozenPhrases` will be much
smaller and faster than using the full :class:`~gensim.models.phrases.Phrases` model.
Examples
        --------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> from gensim.models.word2vec import Text8Corpus
>>> from gensim.models.phrases import Phrases, ENGLISH_CONNECTOR_WORDS
>>>
>>> # Load corpus and train a model.
>>> sentences = Text8Corpus(datapath('testcorpus.txt'))
>>> phrases = Phrases(sentences, min_count=1, threshold=1, connector_words=ENGLISH_CONNECTOR_WORDS)
>>>
>>> # Export a FrozenPhrases object that is more efficient but doesn't allow further training.
>>> frozen_phrases = phrases.freeze()
            >>> sent = [u'trees', u'graph', u'minors']
            >>> print(frozen_phrases[sent])
[u'trees_graph', u'minors']
"""
self.threshold = phrases_model.threshold
self.min_count = phrases_model.min_count
self.delimiter = phrases_model.delimiter
self.scoring = phrases_model.scoring
self.connector_words = phrases_model.connector_words
logger.info('exporting phrases from %s', phrases_model)
start = time.time()
self.phrasegrams = phrases_model.export_phrases()
self.add_lifecycle_event("created", msg=f"exported {self} from {phrases_model} in {time.time() - start:.2f}s")
def __str__(self):
return "%s<%i phrases, min_count=%s, threshold=%s>" % (
self.__class__.__name__, len(self.phrasegrams), self.min_count, self.threshold,
)
def score_candidate(self, word_a, word_b, in_between):
phrase = self.delimiter.join([word_a] + in_between + [word_b])
score = self.phrasegrams.get(phrase, NEGATIVE_INFINITY)
if score > self.threshold:
return phrase, score
return None, None
Phraser = FrozenPhrases # alias for backward compatibility
# File: piskvorky_gensim/gensim/models/ensemblelda.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Authors: Tobias Brigl <github.com/sezanzeb>, Alex Salles <alex.salles@gmail.com>,
# Alex Loosley <aloosley@alumni.brown.edu>, Data Reply Munich
# Copyright (C) 2021 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Ensemble Latent Dirichlet Allocation (eLDA), an algorithm for extracting reliable topics.
The aim of topic modelling is to find a set of topics that represent the global structure of a corpus of documents. One
issue that occurs with topics extracted from an NMF or LDA model is reproducibility. That is, if the topic model is
trained repeatedly, allowing only the random seed to change, would the same (or a similar) topic representation be
reliably learned? Unreliable topics are undesirable because they are not a good representation of the corpus.
Ensemble LDA addresses the issue by training an ensemble of topic models and throwing out topics that do not reoccur
across the ensemble. In this regard, the topics extracted are more reliable and there is the added benefit over many
topic models that the user does not need to know the exact number of topics ahead of time.
For more information, see the :ref:`citation section <Citation>` below, watch our `Machine Learning Prague 2019 talk
<https://slideslive.com/38913528/solving-the-text-labeling-challenge-with-ensemblelda-and-active-learning?locale=cs>`_,
or view our `Machine Learning Summer School poster
<https://github.com/aloosley/ensembleLDA/blob/master/mlss/mlss_poster_v2.pdf>`_.
Usage examples
--------------
Train an ensemble of LdaModels using a Gensim corpus:
.. sourcecode:: pycon
>>> from gensim.test.utils import common_texts
>>> from gensim.corpora.dictionary import Dictionary
>>> from gensim.models import EnsembleLda
>>>
>>> # Create a corpus from a list of texts
>>> common_dictionary = Dictionary(common_texts)
>>> common_corpus = [common_dictionary.doc2bow(text) for text in common_texts]
>>>
>>> # Train the model on the corpus. corpus has to be provided as a
>>> # keyword argument, as they are passed through to the children.
>>> elda = EnsembleLda(corpus=common_corpus, id2word=common_dictionary, num_topics=10, num_models=4)
Save a model to disk, or reload a pre-trained model:
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> # Save model to disk.
>>> temp_file = datapath("model")
>>> elda.save(temp_file)
>>>
>>> # Load a potentially pretrained model from disk.
>>> elda = EnsembleLda.load(temp_file)
Query the model using new, unseen documents:
.. sourcecode:: pycon
>>> # Create a new corpus, made of previously unseen documents.
>>> other_texts = [
... ['computer', 'time', 'graph'],
... ['survey', 'response', 'eps'],
... ['human', 'system', 'computer']
... ]
>>> other_corpus = [common_dictionary.doc2bow(text) for text in other_texts]
>>>
>>> unseen_doc = other_corpus[0]
>>> vector = elda[unseen_doc] # get topic probability distribution for a document
Increase the ensemble size by adding a new model. Make sure it uses the same dictionary:
.. sourcecode:: pycon
>>> from gensim.models import LdaModel
>>> elda.add_model(LdaModel(common_corpus, id2word=common_dictionary, num_topics=10))
>>> elda.recluster()
>>> vector = elda[unseen_doc]
To optimize the ensemble for your specific case, the children can be clustered again using
different hyperparameters:
.. sourcecode:: pycon
>>> elda.recluster(eps=0.2)
.. _Citation:
Citation
--------
BRIGL, Tobias, 2019, Extracting Reliable Topics using Ensemble Latent Dirichlet Allocation [Bachelor Thesis].
Technische Hochschule Ingolstadt. Munich: Data Reply GmbH. Supervised by Alex Loosley. Available from:
https://www.sezanzeb.de/machine_learning/ensemble_LDA/
"""
import logging
import os
from multiprocessing import Process, Pipe, ProcessError
import importlib
from typing import Set, Optional, List
import numpy as np
from scipy.spatial.distance import cosine
from dataclasses import dataclass
from gensim import utils
from gensim.models import ldamodel, ldamulticore, basemodel
from gensim.utils import SaveLoad
logger = logging.getLogger(__name__)
# _COSINE_DISTANCE_CALCULATION_THRESHOLD is used so that cosine distance calculations can be sped up by skipping
# distance calculations for highly masked topic-term distributions
_COSINE_DISTANCE_CALCULATION_THRESHOLD = 0.05
# numpy's maximum random state of 2**32 - 1 is too large for Windows
_MAX_RANDOM_STATE = np.iinfo(np.int32).max
@dataclass
class Topic:
is_core: bool # if the topic has enough neighbors
neighboring_labels: Set[int] # which other clusters are close by
neighboring_topic_indices: Set[int] # which other topics are close by
label: Optional[int] # to which cluster this topic belongs
num_neighboring_labels: int # how many different labels a core has as parents
valid_neighboring_labels: Set[int] # A set of labels of close by clusters that are large enough
@dataclass
class Cluster:
max_num_neighboring_labels: int # the max number of parent labels among each topic of a given cluster
neighboring_labels: List[Set[int]] # a concatenated list of the neighboring_labels sets of each topic
label: int # the unique identifier of the cluster
num_cores: int # how many topics in the cluster are cores
def _is_valid_core(topic):
"""Check if the topic is a valid core, i.e. no neighboring valid cluster is overlapping with it.
Parameters
----------
topic : :class:`Topic`
topic to validate
"""
return topic.is_core and (topic.valid_neighboring_labels == {topic.label})
def _remove_from_all_sets(label, clusters):
"""Remove a label from every set in "neighboring_labels" for each core in ``clusters``."""
for cluster in clusters:
for neighboring_labels_set in cluster.neighboring_labels:
if label in neighboring_labels_set:
neighboring_labels_set.remove(label)
def _contains_isolated_cores(label, cluster, min_cores):
"""Check if the cluster has at least ``min_cores`` of cores that belong to no other cluster."""
return sum([neighboring_labels == {label} for neighboring_labels in cluster.neighboring_labels]) >= min_cores
def _aggregate_topics(grouped_by_labels):
"""Aggregate the labeled topics to a list of clusters.
Parameters
----------
grouped_by_labels : dict of (int, list of :class:`Topic`)
The return value of _group_by_labels. A mapping of the label to a list of each topic which belongs to the
label.
Returns
-------
list of :class:`Cluster`
        One element for each cluster.
"""
clusters = []
for label, topics in grouped_by_labels.items():
max_num_neighboring_labels = 0
neighboring_labels = [] # will be a list of sets
for topic in topics:
max_num_neighboring_labels = max(topic.num_neighboring_labels, max_num_neighboring_labels)
neighboring_labels.append(topic.neighboring_labels)
neighboring_labels = [x for x in neighboring_labels if len(x) > 0]
clusters.append(Cluster(
max_num_neighboring_labels=max_num_neighboring_labels,
neighboring_labels=neighboring_labels,
label=label,
num_cores=len([topic for topic in topics if topic.is_core]),
))
logger.info("found %s clusters", len(clusters))
return clusters
def _group_by_labels(cbdbscan_topics):
"""Group all the learned cores by their label, which was assigned in the cluster_model.
Parameters
----------
cbdbscan_topics : list of :class:`Topic`
A list of topic data resulting from fitting a :class:`~CBDBSCAN` object.
After calling .fit on a CBDBSCAN model, the results can be retrieved from it by accessing the .results
member, which can be used as the argument to this function. It is a list of infos gathered during
the clustering step and each element in the list corresponds to a single topic.
Returns
-------
dict of (int, list of :class:`Topic`)
A mapping of the label to a list of topics that belong to that particular label. Also adds
a new member to each topic called num_neighboring_labels, which is the number of
neighboring_labels of that topic.
"""
grouped_by_labels = {}
for topic in cbdbscan_topics:
if topic.is_core:
topic.num_neighboring_labels = len(topic.neighboring_labels)
label = topic.label
if label not in grouped_by_labels:
grouped_by_labels[label] = []
grouped_by_labels[label].append(topic)
return grouped_by_labels
def _teardown(pipes, processes):
"""Close pipes and terminate processes.
Parameters
----------
pipes : {list of :class:`multiprocessing.Pipe`}
list of pipes that the processes use to communicate with the parent
processes : {list of :class:`multiprocessing.Process`}
list of worker processes
"""
for parent_conn, child_conn in pipes:
child_conn.close()
parent_conn.close()
for process in processes:
if process.is_alive():
process.terminate()
del process
def mass_masking(a, threshold=None):
"""Original masking method. Returns a new binary mask."""
if threshold is None:
threshold = 0.95
sorted_a = np.sort(a)[::-1]
largest_mass = sorted_a.cumsum() < threshold
smallest_valid = sorted_a[largest_mass][-1]
return a >= smallest_valid
def rank_masking(a, threshold=None):
"""Faster masking method. Returns a new binary mask."""
if threshold is None:
threshold = 0.11
return a > np.sort(a)[::-1][int(len(a) * threshold)]
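# Illustration only (not executed anywhere in this module): for a topic-term row
# a = np.array([0.4, 0.3, 0.2, 0.1]),
#   * mass_masking(a, threshold=0.95) keeps the top terms whose cumulative mass stays below 0.95,
#     i.e. it returns array([True, True, True, False]) because 0.4 + 0.3 + 0.2 = 0.9 < 0.95;
#   * rank_masking(a, threshold=0.5) keeps terms strictly larger than the value at index
#     int(len(a) * 0.5) == 2 of the descending sort (0.2), i.e. array([True, True, False, False]).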
def _validate_clusters(clusters, min_cores):
"""Check which clusters from the cbdbscan step are significant enough. is_valid is set accordingly."""
# Clusters with noisy invalid neighbors may have a harder time being marked as stable, so start with the
# easy ones and potentially already remove some noise by also sorting smaller clusters to the front.
# This clears up the clusters a bit before checking the ones with many neighbors.
def _cluster_sort_key(cluster):
return cluster.max_num_neighboring_labels, cluster.num_cores, cluster.label
sorted_clusters = sorted(clusters, key=_cluster_sort_key, reverse=False)
for cluster in sorted_clusters:
cluster.is_valid = None
if cluster.num_cores < min_cores:
cluster.is_valid = False
_remove_from_all_sets(cluster.label, sorted_clusters)
# now that invalid clusters are removed, check which clusters contain enough cores that don't belong to any
# other cluster.
for cluster in [cluster for cluster in sorted_clusters if cluster.is_valid is None]:
label = cluster.label
if _contains_isolated_cores(label, cluster, min_cores):
cluster.is_valid = True
else:
cluster.is_valid = False
_remove_from_all_sets(label, sorted_clusters)
return [cluster for cluster in sorted_clusters if cluster.is_valid]
def _generate_topic_models_multiproc(ensemble, num_models, ensemble_workers):
"""Generate the topic models to form the ensemble in a multiprocessed way.
Depending on the used topic model this can result in a speedup.
Parameters
----------
ensemble: EnsembleLda
the ensemble
num_models : int
how many models to train in the ensemble
ensemble_workers : int
        into how many processes to split the models; capped at min(ensemble_workers, num_models), so that no
        worker is tasked with training 0 models.
        To get maximum performance, set this to the number of your cores if non-parallelized models
        (LdaModel) are used in the ensemble.
        For LdaMulticore, the performance gain is small and grows for significantly smaller corpora;
        in that case, ensemble_workers=2 can be used.
"""
# the way random_states is handled needs to prevent getting different results when multiprocessing is on,
# or getting the same results in every lda children. so it is solved by generating a list of state seeds before
# multiprocessing is started.
random_states = [ensemble.random_state.randint(_MAX_RANDOM_STATE) for _ in range(num_models)]
# each worker has to work on at least one model.
# Don't spawn idle workers:
workers = min(ensemble_workers, num_models)
# create worker processes:
# from what I know this is basically forking with a jump to a target function in each child
# so modifying the ensemble object will not modify the one in the parent because of no shared memory
processes = []
pipes = []
num_models_unhandled = num_models # how many more models need to be trained by workers?
for i in range(workers):
parent_conn, child_conn = Pipe()
num_subprocess_models = 0
        if i == workers - 1:  # i is an index, hence -1
# is this the last worker that needs to be created?
# then task that worker with all the remaining models
num_subprocess_models = num_models_unhandled
else:
num_subprocess_models = int(num_models_unhandled / (workers - i))
# get the chunk from the random states that is meant to be for those models
random_states_for_worker = random_states[-num_models_unhandled:][:num_subprocess_models]
args = (ensemble, num_subprocess_models, random_states_for_worker, child_conn)
try:
process = Process(target=_generate_topic_models_worker, args=args)
processes.append(process)
pipes.append((parent_conn, child_conn))
process.start()
num_models_unhandled -= num_subprocess_models
except ProcessError:
logger.error(f"could not start process {i}")
_teardown(pipes, processes)
raise
# aggregate results
# will also block until workers are finished
for parent_conn, _ in pipes:
answer = parent_conn.recv()
parent_conn.close()
# this does basically the same as the _generate_topic_models function (concatenate all the ttdas):
if not ensemble.memory_friendly_ttda:
ensemble.tms += answer
ttda = np.concatenate([m.get_topics() for m in answer])
else:
ttda = answer
ensemble.ttda = np.concatenate([ensemble.ttda, ttda])
for process in processes:
process.terminate()
def _generate_topic_models(ensemble, num_models, random_states=None):
"""Train the topic models that form the ensemble.
Parameters
----------
ensemble: EnsembleLda
the ensemble
num_models : int
number of models to be generated
random_states : list
        list of numbers or np.random.RandomState objects. Will be autogenerated based on the ensemble's
RandomState if None (default).
"""
if random_states is None:
random_states = [ensemble.random_state.randint(_MAX_RANDOM_STATE) for _ in range(num_models)]
assert len(random_states) == num_models
kwargs = ensemble.gensim_kw_args.copy()
tm = None # remember one of the topic models from the following
# loop, in order to collect some properties from it afterwards.
for i in range(num_models):
kwargs["random_state"] = random_states[i]
tm = ensemble.get_topic_model_class()(**kwargs)
# adds the lambda (that is the unnormalized get_topics) to ttda, which is
# a list of all those lambdas
ensemble.ttda = np.concatenate([ensemble.ttda, tm.get_topics()])
# only saves the model if it is not "memory friendly"
if not ensemble.memory_friendly_ttda:
ensemble.tms += [tm]
# use one of the tms to get some info that will be needed later
ensemble.sstats_sum = tm.state.sstats.sum()
ensemble.eta = tm.eta
def _generate_topic_models_worker(ensemble, num_models, random_states, pipe):
"""Wrapper for _generate_topic_models to write the results into a pipe.
This is intended to be used inside a subprocess."""
#
# Same as _generate_topic_models, but runs in a separate subprocess, and
    # sends the updated ensemble state to the parent process via a pipe.
#
logger.info(f"spawned worker to generate {num_models} topic models")
_generate_topic_models(ensemble=ensemble, num_models=num_models, random_states=random_states)
# send the ttda that is in the child/workers version of the memory into the pipe
# available, after _generate_topic_models has been called in the worker
if ensemble.memory_friendly_ttda:
# remember that this code is inside the worker processes memory,
# so self.ttda is the ttda of only a chunk of models
pipe.send(ensemble.ttda)
else:
pipe.send(ensemble.tms)
pipe.close()
def _calculate_asymmetric_distance_matrix_chunk(
ttda1,
ttda2,
start_index,
masking_method,
masking_threshold,
):
"""Calculate an (asymmetric) distance from each topic in ``ttda1`` to each topic in ``ttda2``.
Parameters
----------
ttda1 and ttda2: 2D arrays of floats
Two ttda matrices that are going to be used for distance calculation. Each row in ttda corresponds to one
topic. Each cell in the resulting matrix corresponds to the distance between a topic pair.
start_index : int
this function might be used in multiprocessing, so start_index has to be set as ttda1 is a chunk of the
complete ttda in that case. start_index would be 0 if ``ttda1 == self.ttda``. When self.ttda is split into
        two pieces, each 100 ttdas long, then start_index should be 100 for the second piece. Default is 0.
masking_method: function
masking_threshold: float
Returns
-------
2D numpy.ndarray of floats
Asymmetric distance matrix of size ``len(ttda1)`` by ``len(ttda2)``.
"""
# initialize the distance matrix. ndarray is faster than zeros
distances = np.ndarray((len(ttda1), len(ttda2)))
if ttda1.shape[0] > 0 and ttda2.shape[0] > 0:
# the worker might not have received a ttda because it was chunked up too much
# some help to find a better threshold by useful log messages
avg_mask_size = 0
# now iterate over each topic
for ttd1_idx, ttd1 in enumerate(ttda1):
# create mask from ttd1 that removes noise from a and keeps the largest terms
mask = masking_method(ttd1, masking_threshold)
ttd1_masked = ttd1[mask]
avg_mask_size += mask.sum()
# now look at every possible pair for topic a:
for ttd2_idx, ttd2 in enumerate(ttda2):
# distance to itself is 0
if ttd1_idx + start_index == ttd2_idx:
distances[ttd1_idx][ttd2_idx] = 0
continue
# now mask b based on a, which will force the shape of a onto b
ttd2_masked = ttd2[mask]
# Smart distance calculation avoids calculating cosine distance for highly masked topic-term
# distributions that will have distance values near 1.
if ttd2_masked.sum() <= _COSINE_DISTANCE_CALCULATION_THRESHOLD:
distance = 1
else:
distance = cosine(ttd1_masked, ttd2_masked)
distances[ttd1_idx][ttd2_idx] = distance
percent = round(100 * avg_mask_size / ttda1.shape[0] / ttda1.shape[1], 1)
logger.info(f'the given threshold of {masking_threshold} covered on average {percent}% of tokens')
return distances
def _asymmetric_distance_matrix_worker(
worker_id,
entire_ttda,
ttdas_sent,
n_ttdas,
masking_method,
masking_threshold,
pipe,
):
"""Worker that computes the distance to all other nodes from a chunk of nodes."""
logger.info(f"spawned worker {worker_id} to generate {n_ttdas} rows of the asymmetric distance matrix")
# the chunk of ttda that's going to be calculated:
ttda1 = entire_ttda[ttdas_sent:ttdas_sent + n_ttdas]
distance_chunk = _calculate_asymmetric_distance_matrix_chunk(
ttda1=ttda1,
ttda2=entire_ttda,
start_index=ttdas_sent,
masking_method=masking_method,
masking_threshold=masking_threshold,
)
pipe.send((worker_id, distance_chunk)) # remember that this code is inside the workers memory
pipe.close()
def _calculate_assymetric_distance_matrix_multiproc(
workers,
entire_ttda,
masking_method,
masking_threshold,
):
processes = []
pipes = []
ttdas_sent = 0
for i in range(workers):
try:
parent_conn, child_conn = Pipe()
# Load Balancing, for example if there are 9 ttdas and 4 workers, the load will be balanced 2, 2, 2, 3.
n_ttdas = 0
            if i == workers - 1:  # i is an index, hence -1
# is this the last worker that needs to be created?
# then task that worker with all the remaining models
n_ttdas = len(entire_ttda) - ttdas_sent
else:
n_ttdas = int((len(entire_ttda) - ttdas_sent) / (workers - i))
args = (i, entire_ttda, ttdas_sent, n_ttdas, masking_method, masking_threshold, child_conn)
process = Process(target=_asymmetric_distance_matrix_worker, args=args)
ttdas_sent += n_ttdas
processes.append(process)
pipes.append((parent_conn, child_conn))
process.start()
except ProcessError:
logger.error(f"could not start process {i}")
_teardown(pipes, processes)
raise
distances = []
# note, that the following loop maintains order in how the ttda will be concatenated
# which is very important. Ordering in ttda has to be the same as when using only one process
for parent_conn, _ in pipes:
worker_id, distance_chunk = parent_conn.recv()
parent_conn.close() # child conn will be closed from inside the worker
# this does basically the same as the _generate_topic_models function (concatenate all the ttdas):
distances.append(distance_chunk)
for process in processes:
process.terminate()
return np.concatenate(distances)
class EnsembleLda(SaveLoad):
"""Ensemble Latent Dirichlet Allocation (eLDA), a method of training a topic model ensemble.
Extracts stable topics that are consistently learned across multiple LDA models. eLDA has the added benefit that
the user does not need to know the exact number of topics the topic model should extract ahead of time.
"""
def __init__(
self, topic_model_class="ldamulticore", num_models=3,
min_cores=None, # default value from _generate_stable_topics()
epsilon=0.1, ensemble_workers=1, memory_friendly_ttda=True,
min_samples=None, masking_method=mass_masking, masking_threshold=None,
distance_workers=1, random_state=None, **gensim_kw_args,
):
"""Create and train a new EnsembleLda model.
        Will start training immediately, except if iterations, passes or num_models is 0, or if the corpus is missing.
Parameters
----------
topic_model_class : str, topic model, optional
Examples:
* 'ldamulticore' (default, recommended)
* 'lda'
* ldamodel.LdaModel
* ldamulticore.LdaMulticore
num_models : int, optional
How many LDA models to train in this ensemble.
Default: 3
min_cores : int, optional
Minimum cores a cluster of topics has to contain so that it is recognized as stable topic.
epsilon : float, optional
Defaults to 0.1. Epsilon for the CBDBSCAN clustering that generates the stable topics.
ensemble_workers : int, optional
Spawns that many processes and distributes the models from the ensemble to those as evenly as possible.
num_models should be a multiple of ensemble_workers.
            Setting it to 0 or 1 will both use the non-multiprocessing version. Default: 1
memory_friendly_ttda : boolean, optional
If True, the models in the ensemble are deleted after training and only a concatenation of each model's
topic term distribution (called ttda) is kept to save memory.
Defaults to True. When False, trained models are stored in a list in self.tms, and no models that are not
of a gensim model type can be added to this ensemble using the add_model function.
            If True, any topic term matrix can be supplied to add_model.
min_samples : int, optional
            Required number of nearby topics for a topic to be considered as 'core' in the CBDBSCAN clustering.
masking_method : function, optional
Choose one of :meth:`~gensim.models.ensemblelda.mass_masking` (default) or
:meth:`~gensim.models.ensemblelda.rank_masking` (percentile, faster).
For clustering, distances between topic-term distributions are asymmetric. In particular, the distance
(technically a divergence) from distribution A to B is more of a measure of if A is contained in B. At a
high level, this involves using distribution A to mask distribution B and then calculating the cosine
distance between the two. The masking can be done in two ways:
1. mass: forms mask by taking the top ranked terms until their cumulative mass reaches the
'masking_threshold'
2. rank: forms mask by taking the top ranked terms (by mass) until the 'masking_threshold' is reached.
For example, a ranking threshold of 0.11 means the top 0.11 terms by weight are used to form a mask.
masking_threshold : float, optional
Default: None, which uses ``0.95`` for "mass", and ``0.11`` for masking_method "rank". In general, too
small a mask threshold leads to inaccurate calculations (no signal) and too big a mask leads to noisy
distance calculations. Defaults are often a good sweet spot for this hyperparameter.
distance_workers : int, optional
When ``distance_workers`` is ``None``, it defaults to ``os.cpu_count()`` for maximum performance. Default is
1, which is not multiprocessed. Set to ``> 1`` to enable multiprocessing.
**gensim_kw_args
Parameters for each gensim model (e.g. :py:class:`gensim.models.LdaModel`) in the ensemble.
"""
if "id2word" not in gensim_kw_args:
gensim_kw_args["id2word"] = None
if "corpus" not in gensim_kw_args:
gensim_kw_args["corpus"] = None
if gensim_kw_args["id2word"] is None and not gensim_kw_args["corpus"] is None:
logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
gensim_kw_args["id2word"] = utils.dict_from_corpus(gensim_kw_args["corpus"])
if gensim_kw_args["id2word"] is None and gensim_kw_args["corpus"] is None:
raise ValueError(
"at least one of corpus/id2word must be specified, to establish "
"input space dimensionality. Corpus should be provided using the "
"`corpus` keyword argument."
)
#
# The following conditional makes no sense, but we're in a rush to
# release and we don't care about this submodule enough to deal with it
# properly, so we disable flake8 for the following line.
#
if type(topic_model_class) == type and issubclass(topic_model_class, ldamodel.LdaModel): # noqa
self.topic_model_class = topic_model_class
else:
kinds = {
"lda": ldamodel.LdaModel,
"ldamulticore": ldamulticore.LdaMulticore
}
if topic_model_class not in kinds:
raise ValueError(
"topic_model_class should be one of 'lda', 'ldamulticode' or a model "
"inheriting from LdaModel"
)
self.topic_model_class = kinds[topic_model_class]
self.num_models = num_models
self.gensim_kw_args = gensim_kw_args
self.memory_friendly_ttda = memory_friendly_ttda
self.distance_workers = distance_workers
self.masking_threshold = masking_threshold
self.masking_method = masking_method
# this will provide the gensim api to the ensemble basically
self.classic_model_representation = None
        # the ensemble's state
self.random_state = utils.get_random_state(random_state)
self.sstats_sum = 0
self.eta = None
self.tms = []
# initialize empty 2D topic term distribution array (ttda) (number of topics x number of terms)
self.ttda = np.empty((0, len(gensim_kw_args["id2word"])))
self.asymmetric_distance_matrix_outdated = True
# in case the model will not train due to some
# parameters, stop here and don't train.
if num_models <= 0:
return
if gensim_kw_args.get("corpus") is None:
return
if "iterations" in gensim_kw_args and gensim_kw_args["iterations"] <= 0:
return
if "passes" in gensim_kw_args and gensim_kw_args["passes"] <= 0:
return
logger.info(f"generating {num_models} topic models using {ensemble_workers} workers")
if ensemble_workers > 1:
_generate_topic_models_multiproc(self, num_models, ensemble_workers)
else:
_generate_topic_models(self, num_models)
self._generate_asymmetric_distance_matrix()
self._generate_topic_clusters(epsilon, min_samples)
self._generate_stable_topics(min_cores)
# create model that can provide the usual gensim api to the stable topics from the ensemble
self.generate_gensim_representation()
def get_topic_model_class(self):
"""Get the class that is used for :meth:`gensim.models.EnsembleLda.generate_gensim_representation`."""
if self.topic_model_class is None:
instruction = (
'Try setting topic_model_class manually to what the individual models were based on, '
'e.g. LdaMulticore.'
)
try:
module = importlib.import_module(self.topic_model_module_string)
self.topic_model_class = getattr(module, self.topic_model_class_string)
del self.topic_model_module_string
del self.topic_model_class_string
except ModuleNotFoundError:
logger.error(
                    f'Could not import the "{self.topic_model_module_string}" module in order to provide the '
f'"{self.topic_model_class_string}" class as "topic_model_class" attribute. {instruction}'
)
except AttributeError:
logger.error(
f'Could not import the "{self.topic_model_class_string}" class from the '
f'"{self.topic_model_module_string}" module in order to set the "topic_model_class" attribute. '
f'{instruction}'
)
return self.topic_model_class
def save(self, *args, **kwargs):
if self.get_topic_model_class() is not None:
self.topic_model_module_string = self.topic_model_class.__module__
self.topic_model_class_string = self.topic_model_class.__name__
kwargs['ignore'] = frozenset(kwargs.get('ignore', ())).union(('topic_model_class', ))
super(EnsembleLda, self).save(*args, **kwargs)
save.__doc__ = SaveLoad.save.__doc__
def convert_to_memory_friendly(self):
"""Remove the stored gensim models and only keep their ttdas.
This frees up memory, but you won't have access to the individual models anymore if you intended to use them
outside of the ensemble.
"""
self.tms = []
self.memory_friendly_ttda = True
def generate_gensim_representation(self):
"""Create a gensim model from the stable topics.
The returned representation is an Gensim LdaModel (:py:class:`gensim.models.LdaModel`) that has been
instantiated with an A-priori belief on word probability, eta, that represents the topic-term distributions of
any stable topics the were found by clustering over the ensemble of topic distributions.
When no stable topics have been detected, None is returned.
Returns
-------
:py:class:`gensim.models.LdaModel`
A Gensim LDA Model classic_model_representation for which:
``classic_model_representation.get_topics() == self.get_topics()``
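        Example
        -------
        A minimal sketch, assuming a trained ``elda`` ensemble as in the module-level examples:
        .. sourcecode:: pycon
            >>> import numpy as np
            >>>
            >>> lda = elda.generate_gensim_representation()
            >>> if lda is not None:  # None is returned when no stable topics were found
            ...     assert np.allclose(lda.get_topics(), elda.get_topics())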
"""
logger.info("generating classic gensim model representation based on results from the ensemble")
sstats_sum = self.sstats_sum
# if sstats_sum (which is the number of words actually) should be wrong for some fantastic funny reason
# that makes you want to peel your skin off, recreate it (takes a while):
        if sstats_sum == 0 and "corpus" in self.gensim_kw_args and self.gensim_kw_args["corpus"] is not None:
for document in self.gensim_kw_args["corpus"]:
for token in document:
sstats_sum += token[1]
self.sstats_sum = sstats_sum
stable_topics = self.get_topics()
num_stable_topics = len(stable_topics)
if num_stable_topics == 0:
logger.error(
"the model did not detect any stable topic. You can try to adjust epsilon: "
"recluster(eps=...)"
)
self.classic_model_representation = None
return
# create a new gensim model
params = self.gensim_kw_args.copy()
params["eta"] = self.eta
params["num_topics"] = num_stable_topics
# adjust params in a way that no training happens
params["passes"] = 0 # no training
# iterations is needed for inference, pass it to the model
classic_model_representation = self.get_topic_model_class()(**params)
# when eta was None, use what gensim generates as default eta for the following tasks:
eta = classic_model_representation.eta
if sstats_sum == 0:
sstats_sum = classic_model_representation.state.sstats.sum()
self.sstats_sum = sstats_sum
# the following is important for the denormalization
# to generate the proper sstats for the new gensim model:
# transform to dimensionality of stable_topics. axis=1 is summed
eta_sum = 0
if isinstance(eta, (int, float)):
eta_sum = [eta * len(stable_topics[0])] * num_stable_topics
else:
if len(eta.shape) == 1: # [e1, e2, e3]
eta_sum = [[eta.sum()]] * num_stable_topics
if len(eta.shape) > 1: # [[e11, e12, ...], [e21, e22, ...], ...]
eta_sum = np.array(eta.sum(axis=1)[:, None])
# the factor, that will be used when get_topics() is used, for normalization
# will never change, because the sum for eta as well as the sum for sstats is constant.
# Therefore predicting normalization_factor becomes super easy.
# corpus is a mapping of id to occurrences
# so one can also easily calculate the
# right sstats, so that get_topics() will return the stable topics no
# matter eta.
normalization_factor = np.array([[sstats_sum / num_stable_topics]] * num_stable_topics) + eta_sum
sstats = stable_topics * normalization_factor
sstats -= eta
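        # Sketch of why this recovers the stable topics: LdaModel.get_topics() row-normalizes
        # lambda = eta + sstats. Here eta + sstats equals stable_topics * normalization_factor, and
        # since every stable topic row sums to 1, each lambda row sums to its normalization factor,
        # so the row-normalization in get_topics() returns the stable topics unchanged.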
classic_model_representation.state.sstats = sstats.astype(np.float32)
# fix expElogbeta.
classic_model_representation.sync_state()
self.classic_model_representation = classic_model_representation
return classic_model_representation
def add_model(self, target, num_new_models=None):
"""Add the topic term distribution array (ttda) of another model to the ensemble.
This way, multiple topic models can be connected to an ensemble manually. Make sure that all the models use
the exact same dictionary/idword mapping.
        In order to generate new stable topics afterwards, call
        ``self.``:meth:`~gensim.models.ensemblelda.EnsembleLda.recluster`.
The ttda of another ensemble can also be used, in that case set ``num_new_models`` to the ``num_models``
parameter of the ensemble, that means the number of classic models in the ensemble that generated the ttda.
This is important, because that information is used to estimate "min_samples" for _generate_topic_clusters.
If you trained this ensemble in the past with a certain Dictionary that you want to reuse for other
models, you can get it from: ``self.id2word``.
Parameters
----------
target : {see description}
1. A single EnsembleLda object
2. List of EnsembleLda objects
3. A single Gensim topic model (e.g. (:py:class:`gensim.models.LdaModel`)
4. List of Gensim topic models
if memory_friendly_ttda is True, target can also be:
5. topic-term-distribution-array
example: [[0.1, 0.1, 0.8], [...], ...]
[topic1, topic2, ...]
with topic being an array of probabilities:
[token1, token2, ...]
                token probabilities in a single topic sum to one, therefore the whole array sums to len(ttda)
num_new_models : integer, optional
the model keeps track of how many models were used in this ensemble. Set higher if ttda contained topics
from more than one model. Default: None, which takes care of it automatically.
If target is a 2D-array of float values, it assumes 1.
If the ensemble has ``memory_friendly_ttda`` set to False, then it will always use the number of models in
the target parameter.
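        Example
        -------
        A minimal sketch of merging the ttda of another memory friendly ensemble, assuming two trained
        ensembles ``elda`` and ``other_elda`` that share the same id2word mapping:
        .. sourcecode:: pycon
            >>> elda.add_model(other_elda.ttda, num_new_models=other_elda.num_models)
            >>> elda.recluster()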
"""
# If the model has never seen a ttda before, initialize.
# If it has, append.
# Be flexible. Can be a single element or a list of elements
# make sure it is a numpy array
if not isinstance(target, (np.ndarray, list)):
target = np.array([target])
else:
target = np.array(target)
assert len(target) > 0
if self.memory_friendly_ttda:
# for memory friendly models/ttdas, append the ttdas to itself
detected_num_models = 0
ttda = []
# 1. ttda array, because that's the only accepted input that contains numbers
if isinstance(target.dtype.type(), (np.number, float)):
ttda = target
detected_num_models = 1
# 2. list of ensemblelda objects
elif isinstance(target[0], type(self)):
ttda = np.concatenate([ensemble.ttda for ensemble in target], axis=0)
detected_num_models = sum([ensemble.num_models for ensemble in target])
# 3. list of gensim models
elif isinstance(target[0], basemodel.BaseTopicModel):
ttda = np.concatenate([model.get_topics() for model in target], axis=0)
detected_num_models = len(target)
# unknown
else:
raise ValueError(f"target is of unknown type or a list of unknown types: {type(target[0])}")
# new models were added, increase num_models
            # if the user didn't provide a custom number to use
if num_new_models is None:
self.num_models += detected_num_models
else:
self.num_models += num_new_models
else: # memory unfriendly ensembles
ttda = []
# 1. ttda array
if isinstance(target.dtype.type(), (np.number, float)):
raise ValueError(
'ttda arrays cannot be added to ensembles, for which memory_friendly_ttda=False, '
'you can call convert_to_memory_friendly, but it will discard the stored gensim '
'models and only keep the relevant topic term distributions from them.'
)
# 2. list of ensembles
elif isinstance(target[0], type(self)):
for ensemble in target:
self.tms += ensemble.tms
ttda = np.concatenate([ensemble.ttda for ensemble in target], axis=0)
# 3. list of gensim models
elif isinstance(target[0], basemodel.BaseTopicModel):
self.tms += target.tolist()
ttda = np.concatenate([model.get_topics() for model in target], axis=0)
# unknown
else:
raise ValueError(f"target is of unknown type or a list of unknown types: {type(target[0])}")
# in this case, len(self.tms) should
# always match self.num_models
if num_new_models is not None and num_new_models + self.num_models != len(self.tms):
logger.info(
'num_new_models will be ignored. num_models should match the number of '
'stored models for a memory unfriendly ensemble'
)
self.num_models = len(self.tms)
logger.info(f"ensemble contains {self.num_models} models and {len(self.ttda)} topics now")
if self.ttda.shape[1] != ttda.shape[1]:
raise ValueError(
f"target ttda dimensions do not match. Topics must be {self.ttda.shape[-1]} but was {ttda.shape[-1]} "
f"elements large"
)
self.ttda = np.append(self.ttda, ttda, axis=0)
# tell recluster that the distance matrix needs to be regenerated
self.asymmetric_distance_matrix_outdated = True
def _generate_asymmetric_distance_matrix(self):
"""Calculate the pairwise distance matrix for all the ttdas from the ensemble.
Returns the asymmetric pairwise distance matrix that is used in the DBSCAN clustering.
Afterwards, the model needs to be reclustered for this generated matrix to take effect.
"""
workers = self.distance_workers
# matrix is up to date afterwards
self.asymmetric_distance_matrix_outdated = False
logger.info(f"generating a {len(self.ttda)} x {len(self.ttda)} asymmetric distance matrix...")
if workers is not None and workers <= 1:
self.asymmetric_distance_matrix = _calculate_asymmetric_distance_matrix_chunk(
ttda1=self.ttda,
ttda2=self.ttda,
start_index=0,
masking_method=self.masking_method,
masking_threshold=self.masking_threshold,
)
else:
# best performance on 2-core machine: 2 workers
if workers is None:
workers = os.cpu_count()
self.asymmetric_distance_matrix = _calculate_assymetric_distance_matrix_multiproc(
workers=workers,
entire_ttda=self.ttda,
masking_method=self.masking_method,
masking_threshold=self.masking_threshold,
)
def _generate_topic_clusters(self, eps=0.1, min_samples=None):
"""Run the CBDBSCAN algorithm on all the detected topics and label them with label-indices.
The final approval and generation of stable topics is done in ``_generate_stable_topics()``.
Parameters
----------
eps : float
dbscan distance scale
min_samples : int, optional
defaults to ``int(self.num_models / 2)``, dbscan min neighbours threshold required to consider
a topic to be a core. Should scale with the number of models, ``self.num_models``
"""
if min_samples is None:
min_samples = int(self.num_models / 2)
logger.info("fitting the clustering model, using %s for min_samples", min_samples)
else:
logger.info("fitting the clustering model")
self.cluster_model = CBDBSCAN(eps=eps, min_samples=min_samples)
self.cluster_model.fit(self.asymmetric_distance_matrix)
def _generate_stable_topics(self, min_cores=None):
"""Generate stable topics out of the clusters.
        The function finds clusters of topics using a variant of DBSCAN. If a cluster has enough core topics
        (cf. parameter ``min_cores``), then this cluster represents a stable topic. The stable topic is specifically
calculated as the average over all topic-term distributions of the core topics in the cluster.
        This function is the last step that has to be done in the ensemble. Once it is complete,
        stable topics can be retrieved using the :meth:`~gensim.models.ensemblelda.EnsembleLda.get_topics`
        method.
Parameters
----------
min_cores : int
Minimum number of core topics needed to form a cluster that represents a stable topic.
Using ``None`` defaults to ``min_cores = min(3, max(1, int(self.num_models /4 +1)))``
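
        A toy illustration of the final averaging step (hypothetical numbers; two core topics from
        one cluster over a three-term vocabulary):

        .. sourcecode:: pycon

            >>> import numpy as np
            >>>
            >>> core_ttdas = np.array([[0.8, 0.1, 0.1],
            ...                        [0.6, 0.3, 0.1]])
            >>> core_ttdas.mean(axis=0)  # the resulting stable topic-term distribution
            array([0.7, 0.2, 0.1])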
"""
# min_cores being 0 makes no sense. there has to be a core for a cluster
# or there is no cluster
if min_cores == 0:
min_cores = 1
if min_cores is None:
# min_cores is a number between 1 and 3, depending on the number of models
min_cores = min(3, max(1, int(self.num_models / 4 + 1)))
logger.info("generating stable topics, using %s for min_cores", min_cores)
else:
logger.info("generating stable topics")
cbdbscan_topics = self.cluster_model.results
grouped_by_labels = _group_by_labels(cbdbscan_topics)
clusters = _aggregate_topics(grouped_by_labels)
valid_clusters = _validate_clusters(clusters, min_cores)
valid_cluster_labels = {cluster.label for cluster in valid_clusters}
for topic in cbdbscan_topics:
topic.valid_neighboring_labels = {
label for label in topic.neighboring_labels
if label in valid_cluster_labels
}
# keeping only VALID cores
valid_core_mask = np.vectorize(_is_valid_core)(cbdbscan_topics)
valid_topics = self.ttda[valid_core_mask]
topic_labels = np.array([topic.label for topic in cbdbscan_topics])[valid_core_mask]
unique_labels = np.unique(topic_labels)
num_stable_topics = len(unique_labels)
stable_topics = np.empty((num_stable_topics, len(self.id2word)))
# for each cluster
for label_index, label in enumerate(unique_labels):
# mean of all the topics that are of that cluster
topics_of_cluster = np.array([topic for t, topic in enumerate(valid_topics) if topic_labels[t] == label])
stable_topics[label_index] = topics_of_cluster.mean(axis=0)
self.valid_clusters = valid_clusters
self.stable_topics = stable_topics
logger.info("found %s stable topics", len(stable_topics))
def recluster(self, eps=0.1, min_samples=None, min_cores=None):
"""Reapply CBDBSCAN clustering and stable topic generation.
Stable topics can be retrieved using :meth:`~gensim.models.ensemblelda.EnsembleLda.get_topics`.
Parameters
----------
eps : float
epsilon for the CBDBSCAN algorithm, having the same meaning as in classic DBSCAN clustering.
default: ``0.1``
min_samples : int
The minimum number of samples in the neighborhood of a topic to be considered a core in CBDBSCAN.
default: ``int(self.num_models / 2)``
min_cores : int
            How many cores a cluster has to have to be treated as a stable topic, i.e. how many
            similar-looking topics have to be present so that their average can be used as a stable topic.
default: ``min(3, max(1, int(self.num_models /4 +1)))``
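
        Examples
        --------
        A small, hedged end-to-end sketch on the toy corpus from ``gensim.test.utils`` (parameter values
        are illustrative only; with such a tiny corpus the ensemble may find few or even no stable topics):

        .. sourcecode:: pycon

            >>> from gensim.test.utils import common_corpus, common_dictionary
            >>> from gensim.models import EnsembleLda
            >>>
            >>> elda = EnsembleLda(corpus=common_corpus, id2word=common_dictionary, num_topics=2, num_models=3)
            >>> elda.recluster(eps=0.1, min_samples=2, min_cores=2)  # rerun clustering with explicit thresholds
            >>> stable_topics = elda.get_topics()  # one row per stable topic found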
"""
# if new models were added to the ensemble, the distance matrix needs to be generated again
if self.asymmetric_distance_matrix_outdated:
logger.info("asymmetric distance matrix is outdated due to add_model")
self._generate_asymmetric_distance_matrix()
# Run CBDBSCAN to get topic clusters:
self._generate_topic_clusters(eps, min_samples)
# Interpret the results of CBDBSCAN to identify stable topics:
self._generate_stable_topics(min_cores)
# Create gensim LdaModel representation of topic model with stable topics (can be used for inference):
self.generate_gensim_representation()
# GENSIM API
# to make using the ensemble in place of a gensim model as easy as possible
def get_topics(self):
"""Return only the stable topics from the ensemble.
Returns
-------
        2D numpy.ndarray of floats
            Array of stable topic-term distributions, one row per stable topic.
"""
return self.stable_topics
def _ensure_gensim_representation(self):
"""Check if stable topics and the internal gensim representation exist. Raise an error if not."""
if self.classic_model_representation is None:
if len(self.stable_topics) == 0:
raise ValueError("no stable topic was detected")
else:
raise ValueError("use generate_gensim_representation() first")
def __getitem__(self, i):
"""See :meth:`gensim.models.LdaModel.__getitem__`."""
self._ensure_gensim_representation()
return self.classic_model_representation[i]
def inference(self, *posargs, **kwargs):
"""See :meth:`gensim.models.LdaModel.inference`."""
self._ensure_gensim_representation()
return self.classic_model_representation.inference(*posargs, **kwargs)
def log_perplexity(self, *posargs, **kwargs):
"""See :meth:`gensim.models.LdaModel.log_perplexity`."""
self._ensure_gensim_representation()
return self.classic_model_representation.log_perplexity(*posargs, **kwargs)
def print_topics(self, *posargs, **kwargs):
"""See :meth:`gensim.models.LdaModel.print_topics`."""
self._ensure_gensim_representation()
return self.classic_model_representation.print_topics(*posargs, **kwargs)
@property
def id2word(self):
"""Return the :py:class:`gensim.corpora.dictionary.Dictionary` object used in the model."""
return self.gensim_kw_args["id2word"]
class CBDBSCAN:
"""A Variation of the DBSCAN algorithm called Checkback DBSCAN (CBDBSCAN).
The algorithm works based on DBSCAN-like parameters 'eps' and 'min_samples' that respectively define how far a
"nearby" point is, and the minimum number of nearby points needed to label a candidate datapoint a core of a
cluster. (See https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html).
The algorithm works as follows:
1. (A)symmetric distance matrix provided at fit-time (called 'amatrix').
    For the sake of the example below, assume there are only five topics (amatrix contains distances with dim 5x5),
T_1, T_2, T_3, T_4, T_5:
2. Start by scanning a candidate topic with respect to a parent topic
(e.g. T_1 with respect to parent None)
3. Check which topics are nearby the candidate topic using 'self.eps' as a threshold and call them neighbours
(e.g. assume T_3, T_4, and T_5 are nearby and become neighbours)
    4. If there are at least 'self.min_samples' neighbours, the candidate topic becomes a core candidate for a cluster
(e.g. if 'min_samples'=1, then T_1 becomes the first core of a cluster)
    5. If the candidate is a core, CheckBack (CB): find the fraction of the parent and the parent's neighbours that
       are also close to the candidate. If at least 25% of them are close, give the candidate the same label as its
       parent; otherwise start a new cluster.
(e.g. in the trivial case there is no parent (or neighbours of that parent), a new incremental label is given)
6. If candidate is a core, recursively scan the next nearby topic (e.g. scan T_3) labeling the previous topic as
the parent and the previous neighbours as the parent_neighbours - repeat steps 2-6:
2. (e.g. Scan candidate T_3 with respect to parent T_1 that has parent_neighbours T_3, T_4, and T_5)
       3. (e.g. T_5 is the only neighbour)
4. (e.g. number of neighbours is 1, therefore candidate T_3 becomes a core)
       5. (e.g. CheckBack finds that two of the four parent and parent neighbours are close to candidate T_3,
          which is at least 25%, therefore the candidate T_3 keeps the same label as its parent T_1)
6. (e.g. Scan candidate T_5 with respect to parent T_3 that has parent_neighbours T_5)
The CB step has the effect that it enforces cluster compactness and allows the model to avoid creating clusters for
unstable topics made of a composition of multiple stable topics.
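
    A self-contained toy run on a hand-made asymmetric distance matrix (normally
    :class:`~gensim.models.ensemblelda.EnsembleLda` builds this matrix and drives CBDBSCAN for you;
    importing the class directly is shown purely for illustration):

    .. sourcecode:: pycon

        >>> import numpy as np
        >>> from gensim.models.ensemblelda import CBDBSCAN
        >>>
        >>> # two tight pairs of topics: distance 0.05 within a pair, 0.9 across pairs
        >>> amatrix = np.array([
        ...     [0.0, 0.05, 0.9, 0.9],
        ...     [0.05, 0.0, 0.9, 0.9],
        ...     [0.9, 0.9, 0.0, 0.05],
        ...     [0.9, 0.9, 0.05, 0.0],
        ... ])
        >>> clustering = CBDBSCAN(eps=0.1, min_samples=1)
        >>> clustering.fit(amatrix)
        >>> [topic.label for topic in clustering.results]
        [0, 0, 1, 1]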
"""
def __init__(self, eps, min_samples):
"""Create a new CBDBSCAN object. Call fit in order to train it on an asymmetric distance matrix.
Parameters
----------
eps : float
epsilon for the CBDBSCAN algorithm, having the same meaning as in classic DBSCAN clustering.
min_samples : int
The minimum number of samples in the neighborhood of a topic to be considered a core in CBDBSCAN.
"""
self.eps = eps
self.min_samples = min_samples
def fit(self, amatrix):
"""Apply the algorithm to an asymmetric distance matrix."""
self.next_label = 0
topic_clustering_results = [
Topic(
is_core=False,
neighboring_labels=set(),
neighboring_topic_indices=set(),
label=None,
num_neighboring_labels=0,
valid_neighboring_labels=set()
) for i in range(len(amatrix))
]
amatrix_copy = amatrix.copy()
# to avoid the problem of comparing the topic with itself
np.fill_diagonal(amatrix_copy, 1)
min_distance_per_topic = [(distance, index) for index, distance in enumerate(amatrix_copy.min(axis=1))]
min_distance_per_topic_sorted = sorted(min_distance_per_topic, key=lambda distance: distance[0])
ordered_min_similarity = [index for distance, index in min_distance_per_topic_sorted]
def scan_topic(topic_index, current_label=None, parent_neighbors=None):
"""Extend the cluster in one direction.
Results are accumulated to ``self.results``.
Parameters
----------
topic_index : int
The topic that might be added to the existing cluster, or which might create a new cluster if necessary.
            current_label : int, optional
                The label of the cluster that might be suitable for ``topic_index``.
            parent_neighbors : list of int, optional
                Neighbouring topic indices of the parent topic, used for the checkback step.
"""
neighbors_sorted = sorted(
[
(distance, index)
for index, distance in enumerate(amatrix_copy[topic_index])
],
key=lambda x: x[0],
)
neighboring_topic_indices = [index for distance, index in neighbors_sorted if distance < self.eps]
num_neighboring_topics = len(neighboring_topic_indices)
# If the number of neighbor indices of a topic is large enough, it is considered a core.
            # This also counts neighbor indices that have already been identified as cores.
if num_neighboring_topics >= self.min_samples:
# This topic is a core!
topic_clustering_results[topic_index].is_core = True
# if current_label is none, then this is the first core
# of a new cluster (hence next_label is used)
if current_label is None:
# next_label is initialized with 0 in fit() for the first cluster
current_label = self.next_label
self.next_label += 1
else:
                    # In case the core has a parent, check the distance to the parent's neighbors (since the
                    # matrix is asymmetric, this takes the return distances into account here).
# If less than 25% of the elements are close enough, then create a new cluster rather than further
# growing the current cluster in that direction.
close_parent_neighbors_mask = amatrix_copy[topic_index][parent_neighbors] < self.eps
if close_parent_neighbors_mask.mean() < 0.25:
# start new cluster by changing current_label
current_label = self.next_label
self.next_label += 1
topic_clustering_results[topic_index].label = current_label
for neighboring_topic_index in neighboring_topic_indices:
if topic_clustering_results[neighboring_topic_index].label is None:
ordered_min_similarity.remove(neighboring_topic_index)
# try to extend the cluster into the direction of the neighbor
scan_topic(neighboring_topic_index, current_label, neighboring_topic_indices + [topic_index])
topic_clustering_results[neighboring_topic_index].neighboring_topic_indices.add(topic_index)
topic_clustering_results[neighboring_topic_index].neighboring_labels.add(current_label)
else:
# this topic is not a core!
if current_label is None:
topic_clustering_results[topic_index].label = -1
else:
topic_clustering_results[topic_index].label = current_label
        # elements are removed from this list inside scan_topic; loop until it is empty
while len(ordered_min_similarity) != 0:
next_topic_index = ordered_min_similarity.pop(0)
scan_topic(next_topic_index)
self.results = topic_clustering_results
# piskvorky_gensim/gensim/models/word2vec.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Gensim Contributors
# Copyright (C) 2018 RaRe Technologies s.r.o.
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""
Introduction
============
This module implements the word2vec family of algorithms, using highly optimized C routines,
data streaming and Pythonic interfaces.
The word2vec algorithms include skip-gram and CBOW models, using either
hierarchical softmax or negative sampling: `Tomas Mikolov et al: Efficient Estimation of Word Representations
in Vector Space <https://arxiv.org/pdf/1301.3781.pdf>`_, `Tomas Mikolov et al: Distributed Representations of Words
and Phrases and their Compositionality <https://arxiv.org/abs/1310.4546>`_.
Other embeddings
================
There are more ways to train word vectors in Gensim than just Word2Vec.
See also :class:`~gensim.models.doc2vec.Doc2Vec`, :class:`~gensim.models.fasttext.FastText`.
The training algorithms were originally ported from the C package https://code.google.com/p/word2vec/
and extended with additional functionality and
`optimizations <https://rare-technologies.com/parallelizing-word2vec-in-python/>`_ over the years.
For a tutorial on Gensim word2vec, with an interactive web app trained on GoogleNews,
visit https://rare-technologies.com/word2vec-tutorial/.
Usage examples
==============
Initialize a model with e.g.:
.. sourcecode:: pycon
>>> from gensim.test.utils import common_texts
>>> from gensim.models import Word2Vec
>>>
>>> model = Word2Vec(sentences=common_texts, vector_size=100, window=5, min_count=1, workers=4)
>>> model.save("word2vec.model")
**The training is streamed, so ``sentences`` can be an iterable**, reading input data
from the disk or network on-the-fly, without loading your entire corpus into RAM.
Note the ``sentences`` iterable must be *restartable* (not just a generator), to allow the algorithm
to stream over your dataset multiple times. For some examples of streamed iterables,
see :class:`~gensim.models.word2vec.BrownCorpus`,
:class:`~gensim.models.word2vec.Text8Corpus` or :class:`~gensim.models.word2vec.LineSentence`.
If you save the model you can continue training it later:
.. sourcecode:: pycon
>>> model = Word2Vec.load("word2vec.model")
>>> model.train([["hello", "world"]], total_examples=1, epochs=1)
(0, 2)
The trained word vectors are stored in a :class:`~gensim.models.keyedvectors.KeyedVectors` instance, as `model.wv`:
.. sourcecode:: pycon
>>> vector = model.wv['computer'] # get numpy vector of a word
>>> sims = model.wv.most_similar('computer', topn=10) # get other similar words
The reason for separating the trained vectors into `KeyedVectors` is that if you don't
need the full model state any more (don't need to continue training), its state can be discarded,
keeping just the vectors and their keys proper.
This results in a much smaller and faster object that can be mmapped for lightning
fast loading and sharing the vectors in RAM between processes:
.. sourcecode:: pycon
>>> from gensim.models import KeyedVectors
>>>
>>> # Store just the words + their trained embeddings.
>>> word_vectors = model.wv
>>> word_vectors.save("word2vec.wordvectors")
>>>
>>> # Load back with memory-mapping = read-only, shared across processes.
>>> wv = KeyedVectors.load("word2vec.wordvectors", mmap='r')
>>>
>>> vector = wv['computer'] # Get numpy vector of a word
Gensim can also load word vectors in the "word2vec C format", as a
:class:`~gensim.models.keyedvectors.KeyedVectors` instance:
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> # Load a word2vec model stored in the C *text* format.
>>> wv_from_text = KeyedVectors.load_word2vec_format(datapath('word2vec_pre_kv_c'), binary=False)
>>> # Load a word2vec model stored in the C *binary* format.
>>> wv_from_bin = KeyedVectors.load_word2vec_format(datapath("euclidean_vectors.bin"), binary=True)
It is impossible to continue training the vectors loaded from the C format because the hidden weights,
vocabulary frequencies and the binary tree are missing. To continue training, you'll need the
full :class:`~gensim.models.word2vec.Word2Vec` object state, as stored by :meth:`~gensim.models.word2vec.Word2Vec.save`,
not just the :class:`~gensim.models.keyedvectors.KeyedVectors`.
You can perform various NLP tasks with a trained model. Some of the operations
are already built-in - see :mod:`gensim.models.keyedvectors`.
If you're finished training a model (i.e. no more updates, only querying),
you can switch to the :class:`~gensim.models.keyedvectors.KeyedVectors` instance:
.. sourcecode:: pycon
>>> word_vectors = model.wv
>>> del model
to trim unneeded model state = use much less RAM and allow fast loading and memory sharing (mmap).
Embeddings with multiword ngrams
================================
There is a :mod:`gensim.models.phrases` module which lets you automatically
detect phrases longer than one word, using collocation statistics.
Using phrases, you can learn a word2vec model where "words" are actually multiword expressions,
such as `new_york_times` or `financial_crisis`:
.. sourcecode:: pycon
>>> from gensim.models import Phrases
>>>
>>> # Train a bigram detector.
>>> bigram_transformer = Phrases(common_texts)
>>>
>>> # Apply the trained MWE detector to a corpus, using the result to train a Word2vec model.
>>> model = Word2Vec(bigram_transformer[common_texts], min_count=1)
Pretrained models
=================
Gensim comes with several already pre-trained models, in the
`Gensim-data repository <https://github.com/RaRe-Technologies/gensim-data>`_:
.. sourcecode:: pycon
>>> import gensim.downloader
>>> # Show all available models in gensim-data
>>> print(list(gensim.downloader.info()['models'].keys()))
['fasttext-wiki-news-subwords-300',
'conceptnet-numberbatch-17-06-300',
'word2vec-ruscorpora-300',
'word2vec-google-news-300',
'glove-wiki-gigaword-50',
'glove-wiki-gigaword-100',
'glove-wiki-gigaword-200',
'glove-wiki-gigaword-300',
'glove-twitter-25',
'glove-twitter-50',
'glove-twitter-100',
'glove-twitter-200',
'__testing_word2vec-matrix-synopsis']
>>>
>>> # Download the "glove-twitter-25" embeddings
>>> glove_vectors = gensim.downloader.load('glove-twitter-25')
>>>
>>> # Use the downloaded vectors as usual:
>>> glove_vectors.most_similar('twitter')
[('facebook', 0.948005199432373),
('tweet', 0.9403423070907593),
('fb', 0.9342358708381653),
('instagram', 0.9104824066162109),
('chat', 0.8964964747428894),
('hashtag', 0.8885937333106995),
('tweets', 0.8878158330917358),
('tl', 0.8778461217880249),
('link', 0.8778210878372192),
('internet', 0.8753897547721863)]
"""
from __future__ import division # py3 "true division"
import logging
import sys
import os
import heapq
from timeit import default_timer
from collections import defaultdict, namedtuple
from collections.abc import Iterable
from types import GeneratorType
import threading
import itertools
import copy
from queue import Queue, Empty
from numpy import float32 as REAL
import numpy as np
from gensim.utils import keep_vocab_item, call_on_class_only, deprecated
from gensim.models.keyedvectors import KeyedVectors, pseudorandom_weak_vector
from gensim import utils, matutils
# This import is required by pickle to load models stored by Gensim < 4.0, such as Gensim 3.8.3.
from gensim.models.keyedvectors import Vocab # noqa
from smart_open.compression import get_supported_extensions
logger = logging.getLogger(__name__)
try:
from gensim.models.word2vec_inner import ( # noqa: F401
train_batch_sg,
train_batch_cbow,
score_sentence_sg,
score_sentence_cbow,
MAX_WORDS_IN_BATCH,
FAST_VERSION,
)
except ImportError:
raise utils.NO_CYTHON
try:
from gensim.models.word2vec_corpusfile import train_epoch_sg, train_epoch_cbow, CORPUSFILE_VERSION
except ImportError:
# file-based word2vec is not supported
CORPUSFILE_VERSION = -1
def train_epoch_sg(
model, corpus_file, offset, _cython_vocab, _cur_epoch, _expected_examples, _expected_words,
_work, _neu1, compute_loss,
):
raise RuntimeError("Training with corpus_file argument is not supported")
def train_epoch_cbow(
model, corpus_file, offset, _cython_vocab, _cur_epoch, _expected_examples, _expected_words,
_work, _neu1, compute_loss,
):
raise RuntimeError("Training with corpus_file argument is not supported")
class Word2Vec(utils.SaveLoad):
def __init__(
self, sentences=None, corpus_file=None, vector_size=100, alpha=0.025, window=5, min_count=5,
max_vocab_size=None, sample=1e-3, seed=1, workers=3, min_alpha=0.0001,
sg=0, hs=0, negative=5, ns_exponent=0.75, cbow_mean=1, hashfxn=hash, epochs=5, null_word=0,
trim_rule=None, sorted_vocab=1, batch_words=MAX_WORDS_IN_BATCH, compute_loss=False, callbacks=(),
comment=None, max_final_vocab=None, shrink_windows=True,
):
"""Train, use and evaluate neural networks described in https://code.google.com/p/word2vec/.
Once you're finished training a model (=no more updates, only querying)
store and use only the :class:`~gensim.models.keyedvectors.KeyedVectors` instance in ``self.wv``
to reduce memory.
The full model can be stored/loaded via its :meth:`~gensim.models.word2vec.Word2Vec.save` and
:meth:`~gensim.models.word2vec.Word2Vec.load` methods.
The trained word vectors can also be stored/loaded from a format compatible with the
original word2vec implementation via `self.wv.save_word2vec_format`
and :meth:`gensim.models.keyedvectors.KeyedVectors.load_word2vec_format`.
Parameters
----------
sentences : iterable of iterables, optional
The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
See also the `tutorial on data streaming in Python
<https://rare-technologies.com/data-streaming-in-python-generators-iterators-iterables/>`_.
If you don't supply `sentences`, the model is left uninitialized -- use if you plan to initialize it
in some other way.
corpus_file : str, optional
Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
            You may use this argument instead of `sentences` to get a performance boost. Only one of the `sentences`
            or `corpus_file` arguments needs to be passed (or neither, in which case the model is left uninitialized).
vector_size : int, optional
Dimensionality of the word vectors.
window : int, optional
Maximum distance between the current and predicted word within a sentence.
min_count : int, optional
Ignores all words with total frequency lower than this.
workers : int, optional
            Use this many worker threads to train the model (=faster training with multicore machines).
sg : {0, 1}, optional
Training algorithm: 1 for skip-gram; otherwise CBOW.
hs : {0, 1}, optional
If 1, hierarchical softmax will be used for model training.
If 0, hierarchical softmax will not be used for model training.
negative : int, optional
If > 0, negative sampling will be used, the int for negative specifies how many "noise words"
should be drawn (usually between 5-20).
If 0, negative sampling will not be used.
ns_exponent : float, optional
The exponent used to shape the negative sampling distribution. A value of 1.0 samples exactly in proportion
to the frequencies, 0.0 samples all words equally, while a negative value samples low-frequency words more
than high-frequency words. The popular default value of 0.75 was chosen by the original Word2Vec paper.
More recently, in https://arxiv.org/abs/1804.04212, Caselles-Dupré, Lesaint, & Royo-Letelier suggest that
other values may perform better for recommendation applications.
cbow_mean : {0, 1}, optional
If 0, use the sum of the context word vectors. If 1, use the mean, only applies when cbow is used.
alpha : float, optional
The initial learning rate.
min_alpha : float, optional
Learning rate will linearly drop to `min_alpha` as training progresses.
seed : int, optional
Seed for the random number generator. Initial vectors for each word are seeded with a hash of
the concatenation of word + `str(seed)`. Note that for a fully deterministically-reproducible run,
you must also limit the model to a single worker thread (`workers=1`), to eliminate ordering jitter
from OS thread scheduling. (In Python 3, reproducibility between interpreter launches also requires
use of the `PYTHONHASHSEED` environment variable to control hash randomization).
max_vocab_size : int, optional
Limits the RAM during vocabulary building; if there are more unique
words than this, then prune the infrequent ones. Every 10 million word types need about 1GB of RAM.
Set to `None` for no limit.
max_final_vocab : int, optional
Limits the vocab to a target vocab size by automatically picking a matching min_count. If the specified
min_count is more than the calculated min_count, the specified min_count will be used.
Set to `None` if not required.
sample : float, optional
The threshold for configuring which higher-frequency words are randomly downsampled,
useful range is (0, 1e-5).
hashfxn : function, optional
Hash function to use to randomly initialize weights, for increased training reproducibility.
epochs : int, optional
Number of iterations (epochs) over the corpus. (Formerly: `iter`)
trim_rule : function, optional
Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
or a callable that accepts parameters (word, count, min_count) and returns either
:attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as part of the
model.
The input parameters are of the following types:
* `word` (str) - the word we are examining
* `count` (int) - the word's frequency count in the corpus
* `min_count` (int) - the minimum count threshold.
sorted_vocab : {0, 1}, optional
If 1, sort the vocabulary by descending frequency before assigning word indexes.
See :meth:`~gensim.models.keyedvectors.KeyedVectors.sort_by_descending_frequency()`.
batch_words : int, optional
Target size (in words) for batches of examples passed to worker threads (and
            thus cython routines). (Larger batches will be passed if individual
texts are longer than 10000 words, but the standard cython code truncates to that maximum.)
compute_loss: bool, optional
If True, computes and stores loss value which can be retrieved using
:meth:`~gensim.models.word2vec.Word2Vec.get_latest_training_loss`.
callbacks : iterable of :class:`~gensim.models.callbacks.CallbackAny2Vec`, optional
Sequence of callbacks to be executed at specific stages during training.
shrink_windows : bool, optional
New in 4.1. Experimental.
If True, the effective window size is uniformly sampled from [1, `window`]
for each target word during training, to match the original word2vec algorithm's
approximate weighting of context words by distance. Otherwise, the effective
window size is always fixed to `window` words to either side.
Examples
--------
Initialize and train a :class:`~gensim.models.word2vec.Word2Vec` model
.. sourcecode:: pycon
>>> from gensim.models import Word2Vec
>>> sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
>>> model = Word2Vec(sentences, min_count=1)
Attributes
----------
wv : :class:`~gensim.models.keyedvectors.KeyedVectors`
This object essentially contains the mapping between words and embeddings. After training, it can be used
directly to query those embeddings in various ways. See the module level docstring for examples.
"""
corpus_iterable = sentences
self.vector_size = int(vector_size)
self.workers = int(workers)
self.epochs = epochs
self.train_count = 0
self.total_train_time = 0
self.batch_words = batch_words
self.sg = int(sg)
self.alpha = float(alpha)
self.min_alpha = float(min_alpha)
self.window = int(window)
self.shrink_windows = bool(shrink_windows)
self.random = np.random.RandomState(seed)
self.hs = int(hs)
self.negative = int(negative)
self.ns_exponent = ns_exponent
self.cbow_mean = int(cbow_mean)
self.compute_loss = bool(compute_loss)
self.running_training_loss = 0
self.min_alpha_yet_reached = float(alpha)
self.corpus_count = 0
self.corpus_total_words = 0
self.max_final_vocab = max_final_vocab
self.max_vocab_size = max_vocab_size
self.min_count = min_count
self.sample = sample
self.sorted_vocab = sorted_vocab
self.null_word = null_word
self.cum_table = None # for negative sampling
self.raw_vocab = None
if not hasattr(self, 'wv'): # set unless subclass already set (eg: FastText)
self.wv = KeyedVectors(vector_size)
# EXPERIMENTAL lockf feature; create minimal no-op lockf arrays (1 element of 1.0)
# advanced users should directly resize/adjust as desired after any vocab growth
self.wv.vectors_lockf = np.ones(1, dtype=REAL) # 0.0 values suppress word-backprop-updates; 1.0 allows
self.hashfxn = hashfxn
self.seed = seed
if not hasattr(self, 'layer1_size'): # set unless subclass already set (as for Doc2Vec dm_concat mode)
self.layer1_size = vector_size
self.comment = comment
self.load = call_on_class_only
if corpus_iterable is not None or corpus_file is not None:
self._check_corpus_sanity(corpus_iterable=corpus_iterable, corpus_file=corpus_file, passes=(epochs + 1))
self.build_vocab(corpus_iterable=corpus_iterable, corpus_file=corpus_file, trim_rule=trim_rule)
self.train(
corpus_iterable=corpus_iterable, corpus_file=corpus_file, total_examples=self.corpus_count,
total_words=self.corpus_total_words, epochs=self.epochs, start_alpha=self.alpha,
end_alpha=self.min_alpha, compute_loss=self.compute_loss, callbacks=callbacks)
else:
if trim_rule is not None:
logger.warning(
"The rule, if given, is only used to prune vocabulary during build_vocab() "
"and is not stored as part of the model. Model initialized without sentences. "
"trim_rule provided, if any, will be ignored.")
if callbacks:
logger.warning(
"Callbacks are no longer retained by the model, so must be provided whenever "
"training is triggered, as in initialization with a corpus or calling `train()`. "
"The callbacks provided in this initialization without triggering train will "
"be ignored.")
self.add_lifecycle_event("created", params=str(self))
def build_vocab(
self, corpus_iterable=None, corpus_file=None, update=False, progress_per=10000,
keep_raw_vocab=False, trim_rule=None, **kwargs,
):
"""Build vocabulary from a sequence of sentences (can be a once-only generator stream).
Parameters
----------
corpus_iterable : iterable of list of str
Can be simply a list of lists of tokens, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
or :class:`~gensim.models.word2vec.LineSentence` module for such examples.
corpus_file : str, optional
Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
            You may use this argument instead of `sentences` to get a performance boost. Only one of the `sentences`
            or `corpus_file` arguments needs to be passed (not both of them).
update : bool
If true, the new words in `sentences` will be added to model's vocab.
progress_per : int, optional
Indicates how many words to process before showing/updating the progress.
keep_raw_vocab : bool, optional
If False, the raw vocabulary will be deleted after the scaling is done to free up RAM.
trim_rule : function, optional
Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
or a callable that accepts parameters (word, count, min_count) and returns either
:attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
The rule, if given, is only used to prune vocabulary during current method call and is not stored as part
of the model.
The input parameters are of the following types:
* `word` (str) - the word we are examining
* `count` (int) - the word's frequency count in the corpus
* `min_count` (int) - the minimum count threshold.
**kwargs : object
Keyword arguments propagated to `self.prepare_vocab`.
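
        Examples
        --------
        A minimal sketch on toy sentences:

        .. sourcecode:: pycon

            >>> from gensim.models import Word2Vec
            >>>
            >>> sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
            >>> model = Word2Vec(min_count=1)  # no corpus passed yet, so the vocabulary is still empty
            >>> model.build_vocab(sentences)
            >>> len(model.wv)  # number of unique words retained
            5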
"""
self._check_corpus_sanity(corpus_iterable=corpus_iterable, corpus_file=corpus_file, passes=1)
total_words, corpus_count = self.scan_vocab(
corpus_iterable=corpus_iterable, corpus_file=corpus_file, progress_per=progress_per, trim_rule=trim_rule)
self.corpus_count = corpus_count
self.corpus_total_words = total_words
report_values = self.prepare_vocab(update=update, keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule, **kwargs)
report_values['memory'] = self.estimate_memory(vocab_size=report_values['num_retained_words'])
self.prepare_weights(update=update)
self.add_lifecycle_event("build_vocab", update=update, trim_rule=str(trim_rule))
def build_vocab_from_freq(
self, word_freq, keep_raw_vocab=False, corpus_count=None, trim_rule=None, update=False,
):
"""Build vocabulary from a dictionary of word frequencies.
Parameters
----------
word_freq : dict of (str, int)
A mapping from a word in the vocabulary to its frequency count.
keep_raw_vocab : bool, optional
If False, delete the raw vocabulary after the scaling is done to free up RAM.
corpus_count : int, optional
Even if no corpus is provided, this argument can set corpus_count explicitly.
trim_rule : function, optional
Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
or a callable that accepts parameters (word, count, min_count) and returns either
:attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
The rule, if given, is only used to prune vocabulary during current method call and is not stored as part
of the model.
The input parameters are of the following types:
* `word` (str) - the word we are examining
* `count` (int) - the word's frequency count in the corpus
* `min_count` (int) - the minimum count threshold.
update : bool, optional
If true, the new provided words in `word_freq` dict will be added to model's vocab.
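
        Examples
        --------
        A minimal sketch with hypothetical frequency counts:

        .. sourcecode:: pycon

            >>> from gensim.models import Word2Vec
            >>>
            >>> model = Word2Vec(min_count=1)
            >>> model.build_vocab_from_freq({"cat": 10, "dog": 7, "fish": 1})
            >>> "cat" in model.wv
            True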
"""
logger.info("Processing provided word frequencies")
        # Instead of scanning text, this assigns the provided word-frequency dictionary (word_freq)
        # directly as the raw vocab.
raw_vocab = word_freq
logger.info(
"collected %i unique word types, with total frequency of %i",
len(raw_vocab), sum(raw_vocab.values()),
)
# Since no sentences are provided, this is to control the corpus_count.
self.corpus_count = corpus_count or 0
self.raw_vocab = raw_vocab
# trim by min_count & precalculate downsampling
report_values = self.prepare_vocab(keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule, update=update)
report_values['memory'] = self.estimate_memory(vocab_size=report_values['num_retained_words'])
self.prepare_weights(update=update) # build tables & arrays
def _scan_vocab(self, sentences, progress_per, trim_rule):
sentence_no = -1
total_words = 0
min_reduce = 1
vocab = defaultdict(int)
checked_string_types = 0
for sentence_no, sentence in enumerate(sentences):
if not checked_string_types:
if isinstance(sentence, str):
logger.warning(
"Each 'sentences' item should be a list of words (usually unicode strings). "
"First item here is instead plain %s.",
type(sentence),
)
checked_string_types += 1
if sentence_no % progress_per == 0:
logger.info(
"PROGRESS: at sentence #%i, processed %i words, keeping %i word types",
sentence_no, total_words, len(vocab),
)
for word in sentence:
vocab[word] += 1
total_words += len(sentence)
if self.max_vocab_size and len(vocab) > self.max_vocab_size:
utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)
min_reduce += 1
corpus_count = sentence_no + 1
self.raw_vocab = vocab
return total_words, corpus_count
def scan_vocab(self, corpus_iterable=None, corpus_file=None, progress_per=10000, workers=None, trim_rule=None):
logger.info("collecting all words and their counts")
if corpus_file:
corpus_iterable = LineSentence(corpus_file)
total_words, corpus_count = self._scan_vocab(corpus_iterable, progress_per, trim_rule)
logger.info(
"collected %i word types from a corpus of %i raw words and %i sentences",
len(self.raw_vocab), total_words, corpus_count
)
return total_words, corpus_count
def prepare_vocab(
self, update=False, keep_raw_vocab=False, trim_rule=None,
min_count=None, sample=None, dry_run=False,
):
"""Apply vocabulary settings for `min_count` (discarding less-frequent words)
and `sample` (controlling the downsampling of more-frequent words).
Calling with `dry_run=True` will only simulate the provided settings and
report the size of the retained vocabulary, effective corpus length, and
estimated memory requirements. Results are both printed via logging and
returned as a dict.
Delete the raw vocabulary after the scaling is done to free up RAM,
unless `keep_raw_vocab` is set.
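
        For instance, a dry run that simulates a stricter ``min_count`` on an already built vocabulary
        (toy data; ``keep_raw_vocab=True`` is needed during :meth:`~gensim.models.word2vec.Word2Vec.build_vocab`
        so the raw counts are still available):

        .. sourcecode:: pycon

            >>> from gensim.models import Word2Vec
            >>>
            >>> model = Word2Vec(min_count=1)
            >>> model.build_vocab([["cat", "say", "meow"], ["dog", "say", "woof"]], keep_raw_vocab=True)
            >>> report = model.prepare_vocab(min_count=2, dry_run=True)  # only "say" occurs at least twice
            >>> report['num_retained_words']
            1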
"""
min_count = min_count or self.min_count
sample = sample or self.sample
drop_total = drop_unique = 0
# set effective_min_count to min_count in case max_final_vocab isn't set
self.effective_min_count = min_count
# If max_final_vocab is specified instead of min_count,
# pick a min_count which satisfies max_final_vocab as well as possible.
if self.max_final_vocab is not None:
sorted_vocab = sorted(self.raw_vocab.keys(), key=lambda word: self.raw_vocab[word], reverse=True)
calc_min_count = 1
if self.max_final_vocab < len(sorted_vocab):
calc_min_count = self.raw_vocab[sorted_vocab[self.max_final_vocab]] + 1
self.effective_min_count = max(calc_min_count, min_count)
self.add_lifecycle_event(
"prepare_vocab",
msg=(
f"max_final_vocab={self.max_final_vocab} and min_count={min_count} resulted "
f"in calc_min_count={calc_min_count}, effective_min_count={self.effective_min_count}"
)
)
if not update:
logger.info("Creating a fresh vocabulary")
retain_total, retain_words = 0, []
# Discard words less-frequent than min_count
if not dry_run:
self.wv.index_to_key = []
# make stored settings match these applied settings
self.min_count = min_count
self.sample = sample
self.wv.key_to_index = {}
for word, v in self.raw_vocab.items():
if keep_vocab_item(word, v, self.effective_min_count, trim_rule=trim_rule):
retain_words.append(word)
retain_total += v
if not dry_run:
self.wv.key_to_index[word] = len(self.wv.index_to_key)
self.wv.index_to_key.append(word)
else:
drop_unique += 1
drop_total += v
if not dry_run:
# now update counts
for word in self.wv.index_to_key:
self.wv.set_vecattr(word, 'count', self.raw_vocab[word])
original_unique_total = len(retain_words) + drop_unique
retain_unique_pct = len(retain_words) * 100 / max(original_unique_total, 1)
self.add_lifecycle_event(
"prepare_vocab",
msg=(
f"effective_min_count={self.effective_min_count} retains {len(retain_words)} unique "
f"words ({retain_unique_pct:.2f}% of original {original_unique_total}, drops {drop_unique})"
),
)
original_total = retain_total + drop_total
retain_pct = retain_total * 100 / max(original_total, 1)
self.add_lifecycle_event(
"prepare_vocab",
msg=(
f"effective_min_count={self.effective_min_count} leaves {retain_total} word corpus "
f"({retain_pct:.2f}% of original {original_total}, drops {drop_total})"
),
)
else:
logger.info("Updating model with new vocabulary")
new_total = pre_exist_total = 0
new_words = []
pre_exist_words = []
for word, v in self.raw_vocab.items():
if keep_vocab_item(word, v, self.effective_min_count, trim_rule=trim_rule):
if self.wv.has_index_for(word):
pre_exist_words.append(word)
pre_exist_total += v
if not dry_run:
pass
else:
new_words.append(word)
new_total += v
if not dry_run:
self.wv.key_to_index[word] = len(self.wv.index_to_key)
self.wv.index_to_key.append(word)
else:
drop_unique += 1
drop_total += v
if not dry_run:
# now update counts
self.wv.allocate_vecattrs(attrs=['count'], types=[type(0)])
for word in self.wv.index_to_key:
self.wv.set_vecattr(word, 'count', self.wv.get_vecattr(word, 'count') + self.raw_vocab.get(word, 0))
original_unique_total = len(pre_exist_words) + len(new_words) + drop_unique
pre_exist_unique_pct = len(pre_exist_words) * 100 / max(original_unique_total, 1)
new_unique_pct = len(new_words) * 100 / max(original_unique_total, 1)
self.add_lifecycle_event(
"prepare_vocab",
msg=(
f"added {len(new_words)} new unique words ({new_unique_pct:.2f}% of original "
f"{original_unique_total}) and increased the count of {len(pre_exist_words)} "
f"pre-existing words ({pre_exist_unique_pct:.2f}% of original {original_unique_total})"
),
)
retain_words = new_words + pre_exist_words
retain_total = new_total + pre_exist_total
# Precalculate each vocabulary item's threshold for sampling
if not sample:
# no words downsampled
threshold_count = retain_total
elif sample < 1.0:
# traditional meaning: set parameter as proportion of total
threshold_count = sample * retain_total
else:
# new shorthand: sample >= 1 means downsample all words with higher count than sample
threshold_count = int(sample * (3 + np.sqrt(5)) / 2)
downsample_total, downsample_unique = 0, 0
for w in retain_words:
v = self.raw_vocab[w]
word_probability = (np.sqrt(v / threshold_count) + 1) * (threshold_count / v)
if word_probability < 1.0:
downsample_unique += 1
downsample_total += word_probability * v
else:
word_probability = 1.0
downsample_total += v
if not dry_run:
self.wv.set_vecattr(w, 'sample_int', np.uint32(word_probability * (2**32 - 1)))
if not dry_run and not keep_raw_vocab:
logger.info("deleting the raw counts dictionary of %i items", len(self.raw_vocab))
self.raw_vocab = defaultdict(int)
logger.info("sample=%g downsamples %i most-common words", sample, downsample_unique)
self.add_lifecycle_event(
"prepare_vocab",
msg=(
f"downsampling leaves estimated {downsample_total} word corpus "
f"({downsample_total * 100.0 / max(retain_total, 1):.1f}%% of prior {retain_total})"
),
)
# return from each step: words-affected, resulting-corpus-size, extra memory estimates
report_values = {
'drop_unique': drop_unique, 'retain_total': retain_total, 'downsample_unique': downsample_unique,
'downsample_total': int(downsample_total), 'num_retained_words': len(retain_words)
}
if self.null_word:
# create null pseudo-word for padding when using concatenative L1 (run-of-words)
# this word is only ever input – never predicted – so count, huffman-point, etc doesn't matter
self.add_null_word()
if self.sorted_vocab and not update:
self.wv.sort_by_descending_frequency()
if self.hs:
# add info about each word's Huffman encoding
self.create_binary_tree()
if self.negative:
# build the table for drawing random words (for negative sampling)
self.make_cum_table()
return report_values
def estimate_memory(self, vocab_size=None, report=None):
"""Estimate required memory for a model using current settings and provided vocabulary size.
Parameters
----------
vocab_size : int, optional
Number of unique tokens in the vocabulary
report : dict of (str, int), optional
A dictionary from string representations of the model's memory consuming members to their size in bytes.
Returns
-------
dict of (str, int)
A dictionary from string representations of the model's memory consuming members to their size in bytes.
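
        Examples
        --------
        A quick sketch on a toy vocabulary (the reported numbers are rough heuristics):

        .. sourcecode:: pycon

            >>> from gensim.models import Word2Vec
            >>>
            >>> model = Word2Vec(min_count=1)
            >>> model.build_vocab([["cat", "say", "meow"], ["dog", "say", "woof"]])
            >>> report = model.estimate_memory()
            >>> sorted(report)  # negative sampling is on by default, hence 'syn1neg'
            ['syn1neg', 'total', 'vectors', 'vocab']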
"""
vocab_size = vocab_size or len(self.wv)
report = report or {}
report['vocab'] = vocab_size * (700 if self.hs else 500)
report['vectors'] = vocab_size * self.vector_size * np.dtype(REAL).itemsize
if self.hs:
report['syn1'] = vocab_size * self.layer1_size * np.dtype(REAL).itemsize
if self.negative:
report['syn1neg'] = vocab_size * self.layer1_size * np.dtype(REAL).itemsize
report['total'] = sum(report.values())
logger.info(
"estimated required memory for %i words and %i dimensions: %i bytes",
vocab_size, self.vector_size, report['total'],
)
return report
def add_null_word(self):
word = '\0'
self.wv.key_to_index[word] = len(self.wv)
self.wv.index_to_key.append(word)
self.wv.set_vecattr(word, 'count', 1)
def create_binary_tree(self):
"""Create a `binary Huffman tree <https://en.wikipedia.org/wiki/Huffman_coding>`_ using stored vocabulary
word counts. Frequent words will have shorter binary codes.
Called internally from :meth:`~gensim.models.word2vec.Word2VecVocab.build_vocab`.
"""
_assign_binary_codes(self.wv)
def make_cum_table(self, domain=2**31 - 1):
"""Create a cumulative-distribution table using stored vocabulary word counts for
drawing random words in the negative-sampling training routines.
To draw a word index, choose a random integer up to the maximum value in the table (cum_table[-1]),
        then find that integer's sorted insertion point (as if by `bisect_left` or `ndarray.searchsorted()`).
That insertion point is the drawn index, coming up in proportion equal to the increment at that slot.
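
        A rough, self-contained sketch of drawing one word index this way (toy corpus;
        ``numpy.searchsorted`` stands in here for the optimized Cython routines, and the drawn index
        depends on the random draw):

        .. sourcecode:: pycon

            >>> import numpy as np
            >>> from gensim.models import Word2Vec
            >>>
            >>> model = Word2Vec([["cat", "say", "meow"], ["dog", "say", "woof"]], min_count=1)
            >>> rng = np.random.default_rng(0)
            >>> draw = rng.integers(model.cum_table[-1])  # random integer below cum_table[-1]
            >>> word_index = int(np.searchsorted(model.cum_table, draw, side='right'))
            >>> 0 <= word_index < len(model.wv)
            True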
"""
vocab_size = len(self.wv.index_to_key)
self.cum_table = np.zeros(vocab_size, dtype=np.uint32)
# compute sum of all power (Z in paper)
train_words_pow = 0.0
for word_index in range(vocab_size):
count = self.wv.get_vecattr(word_index, 'count')
train_words_pow += count**float(self.ns_exponent)
cumulative = 0.0
for word_index in range(vocab_size):
count = self.wv.get_vecattr(word_index, 'count')
cumulative += count**float(self.ns_exponent)
self.cum_table[word_index] = round(cumulative / train_words_pow * domain)
if len(self.cum_table) > 0:
assert self.cum_table[-1] == domain
def prepare_weights(self, update=False):
"""Build tables and model weights based on final vocabulary settings."""
# set initial input/projection and hidden weights
if not update:
self.init_weights()
else:
self.update_weights()
@deprecated("Use gensim.models.keyedvectors.pseudorandom_weak_vector() directly")
def seeded_vector(self, seed_string, vector_size):
return pseudorandom_weak_vector(vector_size, seed_string=seed_string, hashfxn=self.hashfxn)
def init_weights(self):
"""Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary."""
logger.info("resetting layer weights")
self.wv.resize_vectors(seed=self.seed)
if self.hs:
self.syn1 = np.zeros((len(self.wv), self.layer1_size), dtype=REAL)
if self.negative:
self.syn1neg = np.zeros((len(self.wv), self.layer1_size), dtype=REAL)
def update_weights(self):
"""Copy all the existing weights, and reset the weights for the newly added vocabulary."""
logger.info("updating layer weights")
# Raise an error if an online update is run before initial training on a corpus
if not len(self.wv.vectors):
raise RuntimeError(
"You cannot do an online vocabulary-update of a model which has no prior vocabulary. "
"First build the vocabulary of your model with a corpus before doing an online update."
)
preresize_count = len(self.wv.vectors)
self.wv.resize_vectors(seed=self.seed)
gained_vocab = len(self.wv.vectors) - preresize_count
if self.hs:
self.syn1 = np.vstack([self.syn1, np.zeros((gained_vocab, self.layer1_size), dtype=REAL)])
if self.negative:
pad = np.zeros((gained_vocab, self.layer1_size), dtype=REAL)
self.syn1neg = np.vstack([self.syn1neg, pad])
@deprecated(
"Gensim 4.0.0 implemented internal optimizations that make calls to init_sims() unnecessary. "
"init_sims() is now obsoleted and will be completely removed in future versions. "
"See https://github.com/RaRe-Technologies/gensim/wiki/Migrating-from-Gensim-3.x-to-4"
)
def init_sims(self, replace=False):
"""
Precompute L2-normalized vectors. Obsoleted.
If you need a single unit-normalized vector for some key, call
:meth:`~gensim.models.keyedvectors.KeyedVectors.get_vector` instead:
``word2vec_model.wv.get_vector(key, norm=True)``.
To refresh norms after you performed some atypical out-of-band vector tampering,
        call :meth:`~gensim.models.keyedvectors.KeyedVectors.fill_norms` instead.
Parameters
----------
replace : bool
If True, forget the original trained vectors and only keep the normalized ones.
You lose information if you do this.
"""
self.wv.init_sims(replace=replace)
def _do_train_epoch(
self, corpus_file, thread_id, offset, cython_vocab, thread_private_mem, cur_epoch,
total_examples=None, total_words=None, **kwargs,
):
work, neu1 = thread_private_mem
if self.sg:
examples, tally, raw_tally = train_epoch_sg(
self, corpus_file, offset, cython_vocab, cur_epoch,
total_examples, total_words, work, neu1, self.compute_loss
)
else:
examples, tally, raw_tally = train_epoch_cbow(
self, corpus_file, offset, cython_vocab, cur_epoch,
total_examples, total_words, work, neu1, self.compute_loss
)
return examples, tally, raw_tally
def _do_train_job(self, sentences, alpha, inits):
"""Train the model on a single batch of sentences.
Parameters
----------
sentences : iterable of list of str
Corpus chunk to be used in this training batch.
alpha : float
The learning rate used in this batch.
inits : (np.ndarray, np.ndarray)
            Each worker thread's private working memory.
Returns
-------
(int, int)
2-tuple (effective word count after ignoring unknown words and sentence length trimming, total word count).
"""
work, neu1 = inits
tally = 0
if self.sg:
tally += train_batch_sg(self, sentences, alpha, work, self.compute_loss)
else:
tally += train_batch_cbow(self, sentences, alpha, work, neu1, self.compute_loss)
return tally, self._raw_word_count(sentences)
def _clear_post_train(self):
"""Clear any cached values that training may have invalidated."""
self.wv.norms = None
def train(
self, corpus_iterable=None, corpus_file=None, total_examples=None, total_words=None,
epochs=None, start_alpha=None, end_alpha=None, word_count=0,
queue_factor=2, report_delay=1.0, compute_loss=False, callbacks=(),
**kwargs,
):
"""Update the model's neural weights from a sequence of sentences.
Notes
-----
To support linear learning-rate decay from (initial) `alpha` to `min_alpha`, and accurate
progress-percentage logging, either `total_examples` (count of sentences) or `total_words` (count of
raw words in sentences) **MUST** be provided. If `sentences` is the same corpus
that was provided to :meth:`~gensim.models.word2vec.Word2Vec.build_vocab` earlier,
you can simply use `total_examples=self.corpus_count`.
Warnings
--------
To avoid common mistakes around the model's ability to do multiple training passes itself, an
explicit `epochs` argument **MUST** be provided. In the common and recommended case
where :meth:`~gensim.models.word2vec.Word2Vec.train` is only called once, you can set `epochs=self.epochs`.
Parameters
----------
corpus_iterable : iterable of list of str
The ``corpus_iterable`` can be simply a list of lists of tokens, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network, to limit RAM usage.
See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
See also the `tutorial on data streaming in Python
<https://rare-technologies.com/data-streaming-in-python-generators-iterators-iterables/>`_.
corpus_file : str, optional
Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
            You may use this argument instead of `sentences` to get a performance boost. Only one of the `sentences`
            or `corpus_file` arguments needs to be passed (not both of them).
total_examples : int
Count of sentences.
total_words : int
Count of raw words in sentences.
epochs : int
Number of iterations (epochs) over the corpus.
start_alpha : float, optional
Initial learning rate. If supplied, replaces the starting `alpha` from the constructor,
            for this one call to `train()`.
Use only if making multiple calls to `train()`, when you want to manage the alpha learning-rate yourself
(not recommended).
end_alpha : float, optional
Final learning rate. Drops linearly from `start_alpha`.
If supplied, this replaces the final `min_alpha` from the constructor, for this one call to `train()`.
Use only if making multiple calls to `train()`, when you want to manage the alpha learning-rate yourself
(not recommended).
word_count : int, optional
Count of words already trained. Set this to 0 for the usual
case of training on all words in sentences.
queue_factor : int, optional
Multiplier for size of queue (number of workers * queue_factor).
report_delay : float, optional
Seconds to wait before reporting progress.
compute_loss: bool, optional
If True, computes and stores loss value which can be retrieved using
:meth:`~gensim.models.word2vec.Word2Vec.get_latest_training_loss`.
callbacks : iterable of :class:`~gensim.models.callbacks.CallbackAny2Vec`, optional
Sequence of callbacks to be executed at specific stages during training.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.models import Word2Vec
>>> sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
>>>
>>> model = Word2Vec(min_count=1)
>>> model.build_vocab(sentences) # prepare the model vocabulary
>>> model.train(sentences, total_examples=model.corpus_count, epochs=model.epochs) # train word vectors
(1, 30)
"""
self.alpha = start_alpha or self.alpha
self.min_alpha = end_alpha or self.min_alpha
self.epochs = epochs
self._check_training_sanity(epochs=epochs, total_examples=total_examples, total_words=total_words)
self._check_corpus_sanity(corpus_iterable=corpus_iterable, corpus_file=corpus_file, passes=epochs)
self.add_lifecycle_event(
"train",
msg=(
f"training model with {self.workers} workers on {len(self.wv)} vocabulary and "
f"{self.layer1_size} features, using sg={self.sg} hs={self.hs} sample={self.sample} "
f"negative={self.negative} window={self.window} shrink_windows={self.shrink_windows}"
),
)
self.compute_loss = compute_loss
self.running_training_loss = 0.0
for callback in callbacks:
callback.on_train_begin(self)
trained_word_count = 0
raw_word_count = 0
start = default_timer() - 0.00001
job_tally = 0
for cur_epoch in range(self.epochs):
for callback in callbacks:
callback.on_epoch_begin(self)
if corpus_iterable is not None:
trained_word_count_epoch, raw_word_count_epoch, job_tally_epoch = self._train_epoch(
corpus_iterable, cur_epoch=cur_epoch, total_examples=total_examples,
total_words=total_words, queue_factor=queue_factor, report_delay=report_delay,
callbacks=callbacks, **kwargs)
else:
trained_word_count_epoch, raw_word_count_epoch, job_tally_epoch = self._train_epoch_corpusfile(
corpus_file, cur_epoch=cur_epoch, total_examples=total_examples, total_words=total_words,
callbacks=callbacks, **kwargs)
trained_word_count += trained_word_count_epoch
raw_word_count += raw_word_count_epoch
job_tally += job_tally_epoch
for callback in callbacks:
callback.on_epoch_end(self)
# Log overall time
total_elapsed = default_timer() - start
self._log_train_end(raw_word_count, trained_word_count, total_elapsed, job_tally)
self.train_count += 1 # number of times train() has been called
self._clear_post_train()
for callback in callbacks:
callback.on_train_end(self)
return trained_word_count, raw_word_count
def _worker_loop_corpusfile(
self, corpus_file, thread_id, offset, cython_vocab, progress_queue, cur_epoch=0,
total_examples=None, total_words=None, **kwargs,
):
"""Train the model on a `corpus_file` in LineSentence format.
This function will be called in parallel by multiple workers (threads or processes) to make
optimal use of multicore machines.
Parameters
----------
corpus_file : str
Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
thread_id : int
Thread index starting from 0 to `number of workers - 1`.
offset : int
Offset (in bytes) in the `corpus_file` for particular worker.
cython_vocab : :class:`~gensim.models.word2vec_inner.CythonVocab`
Copy of the vocabulary in order to access it without GIL.
progress_queue : Queue of (int, int, int)
A queue of progress reports. Each report is represented as a tuple of these 3 elements:
* Size of data chunk processed, for example number of sentences in the corpus chunk.
* Effective word count used in training (after ignoring unknown words and trimming the sentence length).
* Total word count used in training.
**kwargs : object
Additional key word parameters for the specific model inheriting from this class.
"""
thread_private_mem = self._get_thread_working_mem()
examples, tally, raw_tally = self._do_train_epoch(
corpus_file, thread_id, offset, cython_vocab, thread_private_mem, cur_epoch,
total_examples=total_examples, total_words=total_words, **kwargs)
progress_queue.put((examples, tally, raw_tally))
progress_queue.put(None)
def _worker_loop(self, job_queue, progress_queue):
"""Train the model, lifting batches of data from the queue.
This function will be called in parallel by multiple workers (threads or processes) to make
optimal use of multicore machines.
Parameters
----------
job_queue : Queue of (list of objects, float)
A queue of jobs still to be processed. The worker will take up jobs from this queue.
Each job is represented by a tuple where the first element is the corpus chunk to be processed and
the second is the floating-point learning rate.
progress_queue : Queue of (int, int, int)
A queue of progress reports. Each report is represented as a tuple of these 3 elements:
* Size of data chunk processed, for example number of sentences in the corpus chunk.
* Effective word count used in training (after ignoring unknown words and trimming the sentence length).
* Total word count used in training.
"""
thread_private_mem = self._get_thread_working_mem()
jobs_processed = 0
while True:
job = job_queue.get()
if job is None:
progress_queue.put(None)
break # no more jobs => quit this worker
data_iterable, alpha = job
tally, raw_tally = self._do_train_job(data_iterable, alpha, thread_private_mem)
progress_queue.put((len(data_iterable), tally, raw_tally)) # report back progress
jobs_processed += 1
logger.debug("worker exiting, processed %i jobs", jobs_processed)
def _job_producer(self, data_iterator, job_queue, cur_epoch=0, total_examples=None, total_words=None):
"""Fill the jobs queue using the data found in the input stream.
Each job is represented by a tuple where the first element is the corpus chunk to be processed and
the second is a dictionary of parameters.
Parameters
----------
data_iterator : iterable of list of objects
The input dataset. This will be split in chunks and these chunks will be pushed to the queue.
job_queue : Queue of (list of object, float)
A queue of jobs still to be processed. The worker will take up jobs from this queue.
Each job is represented by a tuple where the first element is the corpus chunk to be processed and
the second is the floating-point learning rate.
cur_epoch : int, optional
The current training epoch, needed to compute the training parameters for each job.
For example in many implementations the learning rate would be dropping with the number of epochs.
total_examples : int, optional
Count of objects in the `data_iterator`. In the usual case this would correspond to the number of sentences
in a corpus. Used to log progress.
total_words : int, optional
Count of total objects in `data_iterator`. In the usual case this would correspond to the number of raw
words in a corpus. Used to log progress.
"""
job_batch, batch_size = [], 0
pushed_words, pushed_examples = 0, 0
next_alpha = self._get_next_alpha(0.0, cur_epoch)
job_no = 0
for data_idx, data in enumerate(data_iterator):
data_length = self._raw_word_count([data])
# can we fit this sentence into the existing job batch?
if batch_size + data_length <= self.batch_words:
# yes => add it to the current job
job_batch.append(data)
batch_size += data_length
else:
job_no += 1
job_queue.put((job_batch, next_alpha))
# update the learning rate for the next job
if total_examples:
# examples-based decay
pushed_examples += len(job_batch)
epoch_progress = 1.0 * pushed_examples / total_examples
else:
# words-based decay
pushed_words += self._raw_word_count(job_batch)
epoch_progress = 1.0 * pushed_words / total_words
next_alpha = self._get_next_alpha(epoch_progress, cur_epoch)
# add the sentence that didn't fit as the first item of a new job
job_batch, batch_size = [data], data_length
# add the last job too (may be significantly smaller than batch_words)
if job_batch:
job_no += 1
job_queue.put((job_batch, next_alpha))
if job_no == 0 and self.train_count == 0:
            logger.warning(
                "train() called with an empty iterator (if not intended, "
                "be sure to provide a corpus that offers restartable iteration: "
                "a re-iterable sequence rather than a one-shot generator)."
)
# give the workers heads up that they can finish -- no more work!
for _ in range(self.workers):
job_queue.put(None)
logger.debug("job loop exiting, total %i jobs", job_no)
def _log_epoch_progress(
self, progress_queue=None, job_queue=None, cur_epoch=0, total_examples=None,
total_words=None, report_delay=1.0, is_corpus_file_mode=None,
):
"""Get the progress report for a single training epoch.
Parameters
----------
progress_queue : Queue of (int, int, int)
A queue of progress reports. Each report is represented as a tuple of these 3 elements:
* size of data chunk processed, for example number of sentences in the corpus chunk.
* Effective word count used in training (after ignoring unknown words and trimming the sentence length).
* Total word count used in training.
job_queue : Queue of (list of object, float)
A queue of jobs still to be processed. The worker will take up jobs from this queue.
Each job is represented by a tuple where the first element is the corpus chunk to be processed and
the second is the floating-point learning rate.
cur_epoch : int, optional
The current training epoch, needed to compute the training parameters for each job.
For example in many implementations the learning rate would be dropping with the number of epochs.
total_examples : int, optional
Count of objects in the `data_iterator`. In the usual case this would correspond to the number of sentences
in a corpus. Used to log progress.
total_words : int, optional
Count of total objects in `data_iterator`. In the usual case this would correspond to the number of raw
words in a corpus. Used to log progress.
report_delay : float, optional
Number of seconds between two consecutive progress report messages in the logger.
is_corpus_file_mode : bool, optional
Whether training is file-based (corpus_file argument) or not.
Returns
-------
(int, int, int)
The epoch report consisting of three elements:
* size of data chunk processed, for example number of sentences in the corpus chunk.
* Effective word count used in training (after ignoring unknown words and trimming the sentence length).
* Total word count used in training.
"""
example_count, trained_word_count, raw_word_count = 0, 0, 0
start, next_report = default_timer() - 0.00001, 1.0
job_tally = 0
unfinished_worker_count = self.workers
while unfinished_worker_count > 0:
report = progress_queue.get() # blocks if workers too slow
if report is None: # a thread reporting that it finished
unfinished_worker_count -= 1
logger.debug("worker thread finished; awaiting finish of %i more threads", unfinished_worker_count)
continue
examples, trained_words, raw_words = report
job_tally += 1
# update progress stats
example_count += examples
trained_word_count += trained_words # only words in vocab & sampled
raw_word_count += raw_words
# log progress once every report_delay seconds
elapsed = default_timer() - start
if elapsed >= next_report:
self._log_progress(
job_queue, progress_queue, cur_epoch, example_count, total_examples,
raw_word_count, total_words, trained_word_count, elapsed)
next_report = elapsed + report_delay
# all done; report the final stats
elapsed = default_timer() - start
self._log_epoch_end(
cur_epoch, example_count, total_examples, raw_word_count, total_words,
trained_word_count, elapsed, is_corpus_file_mode)
self.total_train_time += elapsed
return trained_word_count, raw_word_count, job_tally
def _train_epoch_corpusfile(
self, corpus_file, cur_epoch=0, total_examples=None, total_words=None, callbacks=(), **kwargs,
):
"""Train the model for a single epoch.
Parameters
----------
corpus_file : str
Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
cur_epoch : int, optional
The current training epoch, needed to compute the training parameters for each job.
For example in many implementations the learning rate would be dropping with the number of epochs.
total_examples : int, optional
Count of objects in the `data_iterator`. In the usual case this would correspond to the number of sentences
in a corpus, used to log progress.
total_words : int
Count of total objects in `data_iterator`. In the usual case this would correspond to the number of raw
words in a corpus, used to log progress. Must be provided in order to seek in `corpus_file`.
**kwargs : object
Additional key word parameters for the specific model inheriting from this class.
Returns
-------
(int, int, int)
The training report for this epoch consisting of three elements:
* Size of data chunk processed, for example number of sentences in the corpus chunk.
* Effective word count used in training (after ignoring unknown words and trimming the sentence length).
* Total word count used in training.
"""
if not total_words:
raise ValueError("total_words must be provided alongside corpus_file argument.")
from gensim.models.word2vec_corpusfile import CythonVocab
from gensim.models.fasttext import FastText
cython_vocab = CythonVocab(self.wv, hs=self.hs, fasttext=isinstance(self, FastText))
progress_queue = Queue()
corpus_file_size = os.path.getsize(corpus_file)
thread_kwargs = copy.copy(kwargs)
thread_kwargs['cur_epoch'] = cur_epoch
thread_kwargs['total_examples'] = total_examples
thread_kwargs['total_words'] = total_words
workers = [
threading.Thread(
target=self._worker_loop_corpusfile,
args=(
corpus_file, thread_id, corpus_file_size / self.workers * thread_id, cython_vocab, progress_queue
),
kwargs=thread_kwargs
) for thread_id in range(self.workers)
]
for thread in workers:
thread.daemon = True
thread.start()
trained_word_count, raw_word_count, job_tally = self._log_epoch_progress(
progress_queue=progress_queue, job_queue=None, cur_epoch=cur_epoch,
total_examples=total_examples, total_words=total_words, is_corpus_file_mode=True)
return trained_word_count, raw_word_count, job_tally
def _train_epoch(
self, data_iterable, cur_epoch=0, total_examples=None, total_words=None,
queue_factor=2, report_delay=1.0, callbacks=(),
):
"""Train the model for a single epoch.
Parameters
----------
data_iterable : iterable of list of object
The input corpus. This will be split in chunks and these chunks will be pushed to the queue.
cur_epoch : int, optional
The current training epoch, needed to compute the training parameters for each job.
For example in many implementations the learning rate would be dropping with the number of epochs.
total_examples : int, optional
Count of objects in the `data_iterator`. In the usual case this would correspond to the number of sentences
in a corpus, used to log progress.
total_words : int, optional
Count of total objects in `data_iterator`. In the usual case this would correspond to the number of raw
words in a corpus, used to log progress.
queue_factor : int, optional
Multiplier for size of queue -> size = number of workers * queue_factor.
report_delay : float, optional
Number of seconds between two consecutive progress report messages in the logger.
Returns
-------
(int, int, int)
The training report for this epoch consisting of three elements:
* Size of data chunk processed, for example number of sentences in the corpus chunk.
* Effective word count used in training (after ignoring unknown words and trimming the sentence length).
* Total word count used in training.
"""
job_queue = Queue(maxsize=queue_factor * self.workers)
progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)
workers = [
threading.Thread(
target=self._worker_loop,
args=(job_queue, progress_queue,))
for _ in range(self.workers)
]
workers.append(threading.Thread(
target=self._job_producer,
args=(data_iterable, job_queue),
kwargs={'cur_epoch': cur_epoch, 'total_examples': total_examples, 'total_words': total_words}))
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
trained_word_count, raw_word_count, job_tally = self._log_epoch_progress(
progress_queue, job_queue, cur_epoch=cur_epoch, total_examples=total_examples,
total_words=total_words, report_delay=report_delay, is_corpus_file_mode=False,
)
return trained_word_count, raw_word_count, job_tally
def _get_next_alpha(self, epoch_progress, cur_epoch):
"""Get the correct learning rate for the next iteration.
Parameters
----------
epoch_progress : float
Ratio of finished work in the current epoch.
cur_epoch : int
Number of current iteration.
Returns
-------
float
The learning rate to be used in the next training epoch.
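        Notes
        -----
        The decay is linear in overall training progress. As a purely illustrative worked
        example (the numbers are arbitrary): with ``alpha=0.025``, ``min_alpha=0.0001``,
        ``epochs=5``, ``cur_epoch=2`` and ``epoch_progress=0.5``, the method returns
        ``0.025 - (0.025 - 0.0001) * (2 + 0.5) / 5 = 0.01255``.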
"""
start_alpha = self.alpha
end_alpha = self.min_alpha
progress = (cur_epoch + epoch_progress) / self.epochs
next_alpha = start_alpha - (start_alpha - end_alpha) * progress
next_alpha = max(end_alpha, next_alpha)
self.min_alpha_yet_reached = next_alpha
return next_alpha
def _get_thread_working_mem(self):
"""Computes the memory used per worker thread.
Returns
-------
(np.ndarray, np.ndarray)
            The worker thread's private working memory: the `work` and `neu1` arrays.
"""
work = matutils.zeros_aligned(self.layer1_size, dtype=REAL) # per-thread private work memory
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
return work, neu1
def _raw_word_count(self, job):
"""Get the number of words in a given job.
Parameters
----------
job: iterable of list of str
The corpus chunk processed in a single batch.
Returns
-------
int
Number of raw words in the corpus chunk.
"""
return sum(len(sentence) for sentence in job)
def _check_corpus_sanity(self, corpus_iterable=None, corpus_file=None, passes=1):
"""Checks whether the corpus parameters make sense."""
if corpus_file is None and corpus_iterable is None:
raise TypeError("Either one of corpus_file or corpus_iterable value must be provided")
if corpus_file is not None and corpus_iterable is not None:
raise TypeError("Both corpus_file and corpus_iterable must not be provided at the same time")
if corpus_iterable is None and not os.path.isfile(corpus_file):
raise TypeError("Parameter corpus_file must be a valid path to a file, got %r instead" % corpus_file)
if corpus_iterable is not None and not isinstance(corpus_iterable, Iterable):
raise TypeError(
"The corpus_iterable must be an iterable of lists of strings, got %r instead" % corpus_iterable)
if corpus_iterable is not None and isinstance(corpus_iterable, GeneratorType) and passes > 1:
raise TypeError(
f"Using a generator as corpus_iterable can't support {passes} passes. Try a re-iterable sequence.")
if corpus_iterable is None:
_, corpus_ext = os.path.splitext(corpus_file)
if corpus_ext.lower() in get_supported_extensions():
raise TypeError(
f"Training from compressed files is not supported with the `corpus_path` argument. "
f"Please decompress {corpus_file} or use `corpus_iterable` instead."
)
def _check_training_sanity(self, epochs=0, total_examples=None, total_words=None, **kwargs):
"""Checks whether the training parameters make sense.
Parameters
----------
epochs : int
Number of training epochs. A positive integer.
total_examples : int, optional
Number of documents in the corpus. Either `total_examples` or `total_words` **must** be supplied.
total_words : int, optional
Number of words in the corpus. Either `total_examples` or `total_words` **must** be supplied.
**kwargs : object
Unused. Present to preserve signature among base and inherited implementations.
Raises
------
RuntimeError
            If one of the required training pre/post-processing steps has not been performed.
ValueError
If the combination of input parameters is inconsistent.
"""
if (not self.hs) and (not self.negative):
raise ValueError(
"You must set either 'hs' or 'negative' to be positive for proper training. "
"When both 'hs=0' and 'negative=0', there will be no training."
)
if self.hs and self.negative:
logger.warning(
"Both hierarchical softmax and negative sampling are activated. "
"This is probably a mistake. You should set either 'hs=0' "
"or 'negative=0' to disable one of them. "
)
if self.alpha > self.min_alpha_yet_reached:
logger.warning("Effective 'alpha' higher than previous training cycles")
if not self.wv.key_to_index: # should be set by `build_vocab`
raise RuntimeError("you must first build vocabulary before training the model")
if not len(self.wv.vectors):
raise RuntimeError("you must initialize vectors before training the model")
if total_words is None and total_examples is None:
raise ValueError(
"You must specify either total_examples or total_words, for proper learning-rate "
"and progress calculations. "
"If you've just built the vocabulary using the same corpus, using the count cached "
"in the model is sufficient: total_examples=model.corpus_count."
)
if epochs is None or epochs <= 0:
raise ValueError("You must specify an explicit epochs count. The usual value is epochs=model.epochs.")
def _log_progress(
self, job_queue, progress_queue, cur_epoch, example_count, total_examples,
raw_word_count, total_words, trained_word_count, elapsed
):
"""Callback used to log progress for long running jobs.
Parameters
----------
job_queue : Queue of (list of object, float)
The queue of jobs still to be performed by workers. Each job is represented as a tuple containing
the batch of data to be processed and the floating-point learning rate.
progress_queue : Queue of (int, int, int)
A queue of progress reports. Each report is represented as a tuple of these 3 elements:
* size of data chunk processed, for example number of sentences in the corpus chunk.
* Effective word count used in training (after ignoring unknown words and trimming the sentence length).
* Total word count used in training.
cur_epoch : int
The current training iteration through the corpus.
example_count : int
Number of examples (could be sentences for example) processed until now.
total_examples : int
Number of all examples present in the input corpus.
raw_word_count : int
Number of words used in training until now.
total_words : int
Number of all words in the input corpus.
trained_word_count : int
Number of effective words used in training until now (after ignoring unknown words and trimming
the sentence length).
elapsed : int
Elapsed time since the beginning of training in seconds.
Notes
-----
        If you train the model via the `corpus_file` argument, there is no job_queue, so the reported
        job_queue size will always be equal to -1.
"""
if total_examples:
# examples-based progress %
logger.info(
"EPOCH %i - PROGRESS: at %.2f%% examples, %.0f words/s, in_qsize %i, out_qsize %i",
cur_epoch, 100.0 * example_count / total_examples, trained_word_count / elapsed,
-1 if job_queue is None else utils.qsize(job_queue), utils.qsize(progress_queue)
)
else:
# words-based progress %
logger.info(
"EPOCH %i - PROGRESS: at %.2f%% words, %.0f words/s, in_qsize %i, out_qsize %i",
cur_epoch, 100.0 * raw_word_count / total_words, trained_word_count / elapsed,
-1 if job_queue is None else utils.qsize(job_queue), utils.qsize(progress_queue)
)
def _log_epoch_end(
self, cur_epoch, example_count, total_examples, raw_word_count, total_words,
trained_word_count, elapsed, is_corpus_file_mode
):
"""Callback used to log the end of a training epoch.
Parameters
----------
cur_epoch : int
The current training iteration through the corpus.
example_count : int
Number of examples (could be sentences for example) processed until now.
total_examples : int
Number of all examples present in the input corpus.
raw_word_count : int
Number of words used in training until now.
total_words : int
Number of all words in the input corpus.
trained_word_count : int
Number of effective words used in training until now (after ignoring unknown words and trimming
the sentence length).
elapsed : int
Elapsed time since the beginning of training in seconds.
is_corpus_file_mode : bool
Whether training is file-based (corpus_file argument) or not.
Warnings
--------
        A warning is logged if the corpus appears to have changed while the epoch was running,
        i.e. the observed example/word counts do not match the supplied totals.
"""
logger.info(
"EPOCH %i: training on %i raw words (%i effective words) took %.1fs, %.0f effective words/s",
cur_epoch, raw_word_count, trained_word_count, elapsed, trained_word_count / elapsed,
)
# don't warn if training in file-based mode, because it's expected behavior
if is_corpus_file_mode:
return
# check that the input corpus hasn't changed during iteration
if total_examples and total_examples != example_count:
logger.warning(
"EPOCH %i: supplied example count (%i) did not equal expected count (%i)", cur_epoch,
example_count, total_examples
)
if total_words and total_words != raw_word_count:
logger.warning(
"EPOCH %i: supplied raw word count (%i) did not equal expected count (%i)", cur_epoch,
raw_word_count, total_words
)
def _log_train_end(self, raw_word_count, trained_word_count, total_elapsed, job_tally):
"""Callback to log the end of training.
Parameters
----------
raw_word_count : int
Number of words used in the whole training.
trained_word_count : int
Number of effective words used in training (after ignoring unknown words and trimming the sentence length).
total_elapsed : int
Total time spent during training in seconds.
job_tally : int
Total number of jobs processed during training.
"""
self.add_lifecycle_event("train", msg=(
f"training on {raw_word_count} raw words ({trained_word_count} effective words) "
f"took {total_elapsed:.1f}s, {trained_word_count / total_elapsed:.0f} effective words/s"
))
def score(self, sentences, total_sentences=int(1e6), chunksize=100, queue_factor=2, report_delay=1):
"""Score the log probability for a sequence of sentences.
This does not change the fitted model in any way (see :meth:`~gensim.models.word2vec.Word2Vec.train` for that).
Gensim has currently only implemented score for the hierarchical softmax scheme,
so you need to have run word2vec with `hs=1` and `negative=0` for this to work.
        Note that you should specify `total_sentences`: scoring stops (with a warning) once that many
        sentences have been seen, while setting the value far too high is inefficient because the
        scores array is pre-allocated to that size.
See the `article by Matt Taddy: "Document Classification by Inversion of Distributed Language Representations"
<https://arxiv.org/pdf/1504.07295.pdf>`_ and the
`gensim demo <https://github.com/piskvorky/gensim/blob/develop/docs/notebooks/deepir.ipynb>`_ for examples of
how to use such scores in document classification.
Parameters
----------
sentences : iterable of list of str
The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
total_sentences : int, optional
            Maximum number of sentences that will be scored; also the size of the pre-allocated scores array.
        chunksize : int, optional
            Number of sentences per scoring job.
queue_factor : int, optional
Multiplier for size of queue (number of workers * queue_factor).
report_delay : float, optional
Seconds to wait before reporting progress.
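        Examples
        --------
        A minimal sketch, assuming the tiny bundled ``common_texts`` corpus; the scored sentence
        below is an arbitrary illustration, not a meaningful benchmark:
        .. sourcecode:: pycon
            >>> from gensim.test.utils import common_texts
            >>> from gensim.models import Word2Vec
            >>> model = Word2Vec(common_texts, hs=1, negative=0, min_count=1)
            >>> log_probs = model.score([['system', 'response']], total_sentences=1)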
"""
logger.info(
"scoring sentences with %i workers on %i vocabulary and %i features, "
"using sg=%s hs=%s sample=%s and negative=%s",
self.workers, len(self.wv), self.layer1_size, self.sg, self.hs,
self.sample, self.negative
)
if not self.wv.key_to_index:
raise RuntimeError("you must first build vocabulary before scoring new data")
if not self.hs:
raise RuntimeError(
"We have currently only implemented score for the hierarchical softmax scheme, "
"so you need to have run word2vec with hs=1 and negative=0 for this to work."
)
def worker_loop():
"""Compute log probability for each sentence, lifting lists of sentences from the jobs queue."""
work = np.zeros(1, dtype=REAL) # for sg hs, we actually only need one memory loc (running sum)
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
while True:
job = job_queue.get()
if job is None: # signal to finish
break
ns = 0
for sentence_id, sentence in job:
if sentence_id >= total_sentences:
break
if self.sg:
score = score_sentence_sg(self, sentence, work)
else:
score = score_sentence_cbow(self, sentence, work, neu1)
sentence_scores[sentence_id] = score
ns += 1
progress_queue.put(ns) # report progress
start, next_report = default_timer(), 1.0
        # buffer ahead only a limited number of jobs... this is the reason we can't simply use ThreadPool :(
job_queue = Queue(maxsize=queue_factor * self.workers)
progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)
workers = [threading.Thread(target=worker_loop) for _ in range(self.workers)]
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
sentence_count = 0
sentence_scores = matutils.zeros_aligned(total_sentences, dtype=REAL)
push_done = False
done_jobs = 0
jobs_source = enumerate(utils.grouper(enumerate(sentences), chunksize))
# fill jobs queue with (id, sentence) job items
while True:
try:
job_no, items = next(jobs_source)
if (job_no - 1) * chunksize > total_sentences:
logger.warning(
"terminating after %i sentences (set higher total_sentences if you want more).",
total_sentences
)
job_no -= 1
raise StopIteration()
logger.debug("putting job #%i in the queue", job_no)
job_queue.put(items)
except StopIteration:
logger.info("reached end of input; waiting to finish %i outstanding jobs", job_no - done_jobs + 1)
for _ in range(self.workers):
job_queue.put(None) # give the workers heads up that they can finish -- no more work!
push_done = True
try:
while done_jobs < (job_no + 1) or not push_done:
ns = progress_queue.get(push_done) # only block after all jobs pushed
sentence_count += ns
done_jobs += 1
elapsed = default_timer() - start
if elapsed >= next_report:
logger.info(
"PROGRESS: at %.2f%% sentences, %.0f sentences/s",
                            100.0 * sentence_count / total_sentences, sentence_count / elapsed
)
next_report = elapsed + report_delay # don't flood log, wait report_delay seconds
else:
# loop ended by job count; really done
break
except Empty:
pass # already out of loop; continue to next push
elapsed = default_timer() - start
self.wv.norms = None # clear any cached lengths
logger.info(
"scoring %i sentences took %.1fs, %.0f sentences/s",
sentence_count, elapsed, sentence_count / elapsed
)
return sentence_scores[:sentence_count]
def predict_output_word(self, context_words_list, topn=10):
"""Get the probability distribution of the center word given context words.
Note this performs a CBOW-style propagation, even in SG models,
and doesn't quite weight the surrounding words the same as in
training -- so it's just one crude way of using a trained model
as a predictor.
Parameters
----------
context_words_list : list of (str and/or int)
List of context words, which may be words themselves (str)
or their index in `self.wv.vectors` (int).
topn : int, optional
Return `topn` words and their probabilities.
Returns
-------
list of (str, float)
`topn` length list of tuples of (word, probability).
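        Examples
        --------
        A minimal sketch, assuming the tiny bundled ``common_texts`` corpus; the context words
        are arbitrary illustrations:
        .. sourcecode:: pycon
            >>> from gensim.test.utils import common_texts
            >>> from gensim.models import Word2Vec
            >>> model = Word2Vec(common_texts, negative=5, min_count=1)
            >>> predictions = model.predict_output_word(['system', 'graph'], topn=3)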
"""
if not self.negative:
raise RuntimeError(
"We have currently only implemented predict_output_word for the negative sampling scheme, "
"so you need to have run word2vec with negative > 0 for this to work."
)
if not hasattr(self.wv, 'vectors') or not hasattr(self, 'syn1neg'):
raise RuntimeError("Parameters required for predicting the output words not found.")
word2_indices = [self.wv.get_index(w) for w in context_words_list if w in self.wv]
if not word2_indices:
logger.warning("All the input context words are out-of-vocabulary for the current model.")
return None
l1 = np.sum(self.wv.vectors[word2_indices], axis=0)
if word2_indices and self.cbow_mean:
l1 /= len(word2_indices)
# propagate hidden -> output and take softmax to get probabilities
prob_values = np.exp(np.dot(l1, self.syn1neg.T))
prob_values /= np.sum(prob_values)
top_indices = matutils.argsort(prob_values, topn=topn, reverse=True)
# returning the most probable output words with their probabilities
return [(self.wv.index_to_key[index1], prob_values[index1]) for index1 in top_indices]
def reset_from(self, other_model):
"""Borrow shareable pre-built structures from `other_model` and reset hidden layer weights.
Structures copied are:
* Vocabulary
* Index to word mapping
* Cumulative frequency table (used for negative sampling)
* Cached corpus length
Useful when testing multiple models on the same corpus in parallel. However, as the models
then share all vocabulary-related structures other than vectors, neither should then
expand their vocabulary (which could leave the other in an inconsistent, broken state).
And, any changes to any per-word 'vecattr' will affect both models.
Parameters
----------
other_model : :class:`~gensim.models.word2vec.Word2Vec`
Another model to copy the internal structures from.
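        Examples
        --------
        A sketch of the intended workflow (hyperparameters are arbitrary and the corpus is
        the tiny bundled ``common_texts``):
        .. sourcecode:: pycon
            >>> from gensim.test.utils import common_texts
            >>> from gensim.models import Word2Vec
            >>> base = Word2Vec(common_texts, vector_size=24, min_count=1)
            >>> other = Word2Vec(vector_size=24, min_count=1)
            >>> other.reset_from(base)  # reuse base's vocabulary, but start from fresh weights
            >>> counts = other.train(common_texts, total_examples=other.corpus_count, epochs=other.epochs)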
"""
self.wv = KeyedVectors(self.vector_size)
self.wv.index_to_key = other_model.wv.index_to_key
self.wv.key_to_index = other_model.wv.key_to_index
self.wv.expandos = other_model.wv.expandos
self.cum_table = other_model.cum_table
self.corpus_count = other_model.corpus_count
self.init_weights()
def __str__(self):
"""Human readable representation of the model's state.
Returns
-------
str
Human readable representation of the model's state, including the vocabulary size, vector size
and learning rate.
"""
return "%s<vocab=%s, vector_size=%s, alpha=%s>" % (
self.__class__.__name__, len(self.wv.index_to_key), self.wv.vector_size, self.alpha,
)
def save(self, *args, **kwargs):
"""Save the model.
This saved model can be loaded again using :func:`~gensim.models.word2vec.Word2Vec.load`, which supports
online training and getting vectors for vocabulary words.
Parameters
----------
fname : str
Path to the file.
"""
super(Word2Vec, self).save(*args, **kwargs)
def _save_specials(self, fname, separately, sep_limit, ignore, pickle_protocol, compress, subname):
"""Arrange any special handling for the `gensim.utils.SaveLoad` protocol."""
# don't save properties that are merely calculated from others
ignore = set(ignore).union(['cum_table', ])
return super(Word2Vec, self)._save_specials(
fname, separately, sep_limit, ignore, pickle_protocol, compress, subname)
@classmethod
def load(cls, *args, rethrow=False, **kwargs):
"""Load a previously saved :class:`~gensim.models.word2vec.Word2Vec` model.
See Also
--------
:meth:`~gensim.models.word2vec.Word2Vec.save`
Save model.
Parameters
----------
fname : str
Path to the saved file.
Returns
-------
:class:`~gensim.models.word2vec.Word2Vec`
Loaded model.
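        Examples
        --------
        A save/load round trip, sketched with a temporary file (the filename is arbitrary):
        .. sourcecode:: pycon
            >>> from gensim.test.utils import common_texts, get_tmpfile
            >>> from gensim.models import Word2Vec
            >>> path = get_tmpfile("toy_word2vec.model")
            >>> Word2Vec(common_texts, min_count=1).save(path)
            >>> model = Word2Vec.load(path)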
"""
try:
model = super(Word2Vec, cls).load(*args, **kwargs)
if not isinstance(model, Word2Vec):
rethrow = True
raise AttributeError("Model of type %s can't be loaded by %s" % (type(model), str(cls)))
return model
except AttributeError as ae:
if rethrow:
raise ae
logger.error(
"Model load error. Was model saved using code from an older Gensim Version? "
"Try loading older model using gensim-3.8.3, then re-saving, to restore "
"compatibility with current code.")
raise ae
def _load_specials(self, *args, **kwargs):
"""Handle special requirements of `.load()` protocol, usually up-converting older versions."""
super(Word2Vec, self)._load_specials(*args, **kwargs)
# for backward compatibility, add/rearrange properties from prior versions
if not hasattr(self, 'ns_exponent'):
self.ns_exponent = 0.75
if self.negative and hasattr(self.wv, 'index_to_key'):
self.make_cum_table() # rebuild cum_table from vocabulary
if not hasattr(self, 'corpus_count'):
self.corpus_count = None
if not hasattr(self, 'corpus_total_words'):
self.corpus_total_words = None
if not hasattr(self.wv, 'vectors_lockf') and hasattr(self.wv, 'vectors'):
self.wv.vectors_lockf = np.ones(1, dtype=REAL)
if not hasattr(self, 'random'):
# use new instance of numpy's recommended generator/algorithm
self.random = np.random.default_rng(seed=self.seed)
if not hasattr(self, 'train_count'):
self.train_count = 0
self.total_train_time = 0
if not hasattr(self, 'epochs'):
self.epochs = self.iter
del self.iter
if not hasattr(self, 'max_final_vocab'):
self.max_final_vocab = None
if hasattr(self, 'vocabulary'): # re-integrate state that had been moved
for a in ('max_vocab_size', 'min_count', 'sample', 'sorted_vocab', 'null_word', 'raw_vocab'):
setattr(self, a, getattr(self.vocabulary, a))
del self.vocabulary
if hasattr(self, 'trainables'): # re-integrate state that had been moved
for a in ('hashfxn', 'layer1_size', 'seed', 'syn1neg', 'syn1'):
if hasattr(self.trainables, a):
setattr(self, a, getattr(self.trainables, a))
del self.trainables
if not hasattr(self, 'shrink_windows'):
self.shrink_windows = True
def get_latest_training_loss(self):
"""Get current value of the training loss.
Returns
-------
float
Current training loss.
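        Examples
        --------
        Loss tracking must be requested at training time via ``compute_loss=True``; a minimal
        sketch on the bundled ``common_texts`` corpus:
        .. sourcecode:: pycon
            >>> from gensim.test.utils import common_texts
            >>> from gensim.models import Word2Vec
            >>> model = Word2Vec(common_texts, min_count=1, compute_loss=True)
            >>> loss = model.get_latest_training_loss()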
"""
return self.running_training_loss
class BrownCorpus:
def __init__(self, dirname):
"""Iterate over sentences from the `Brown corpus <https://en.wikipedia.org/wiki/Brown_Corpus>`_
(part of `NLTK data <https://www.nltk.org/data.html>`_).
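        Examples
        --------
        Illustrative only; the directory below is a hypothetical NLTK data location, so point
        it at your own local copy of the Brown corpus:
        .. sourcecode:: pycon
            >>> from gensim.models.word2vec import BrownCorpus
            >>> corpus = BrownCorpus('/path/to/nltk_data/corpora/brown')
            >>> for sentence in corpus:  # doctest: +SKIP
            ...     pass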
"""
self.dirname = dirname
def __iter__(self):
for fname in os.listdir(self.dirname):
fname = os.path.join(self.dirname, fname)
if not os.path.isfile(fname):
continue
with utils.open(fname, 'rb') as fin:
for line in fin:
line = utils.to_unicode(line)
# each file line is a single sentence in the Brown corpus
# each token is WORD/POS_TAG
token_tags = [t.split('/') for t in line.split() if len(t.split('/')) == 2]
# ignore words with non-alphabetic tags like ",", "!" etc (punctuation, weird stuff)
words = ["%s/%s" % (token.lower(), tag[:2]) for token, tag in token_tags if tag[:2].isalpha()]
if not words: # don't bother sending out empty sentences
continue
yield words
class Text8Corpus:
def __init__(self, fname, max_sentence_length=MAX_WORDS_IN_BATCH):
"""Iterate over sentences from the "text8" corpus, unzipped from https://mattmahoney.net/dc/text8.zip."""
self.fname = fname
self.max_sentence_length = max_sentence_length
def __iter__(self):
# the entire corpus is one gigantic line -- there are no sentence marks at all
        # so just split the sequence of tokens arbitrarily: 1 sentence = `max_sentence_length` tokens
sentence, rest = [], b''
with utils.open(self.fname, 'rb') as fin:
while True:
text = rest + fin.read(8192) # avoid loading the entire file (=1 line) into RAM
if text == rest: # EOF
words = utils.to_unicode(text).split()
sentence.extend(words) # return the last chunk of words, too (may be shorter/longer)
if sentence:
yield sentence
break
last_token = text.rfind(b' ') # last token may have been split in two... keep for next iteration
words, rest = (utils.to_unicode(text[:last_token]).split(),
text[last_token:].strip()) if last_token >= 0 else ([], text)
sentence.extend(words)
while len(sentence) >= self.max_sentence_length:
yield sentence[:self.max_sentence_length]
sentence = sentence[self.max_sentence_length:]
class LineSentence:
def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):
"""Iterate over a file that contains sentences: one line = one sentence.
Words must be already preprocessed and separated by whitespace.
Parameters
----------
source : string or a file-like object
Path to the file on disk, or an already-open file object (must support `seek(0)`).
limit : int or None
Clip the file to the first `limit` lines. Do no clipping if `limit is None` (the default).
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> sentences = LineSentence(datapath('lee_background.cor'))
>>> for sentence in sentences:
... pass
"""
self.source = source
self.max_sentence_length = max_sentence_length
self.limit = limit
def __iter__(self):
"""Iterate through the lines in the source."""
try:
# Assume it is a file-like object and try treating it as such
# Things that don't have seek will trigger an exception
self.source.seek(0)
for line in itertools.islice(self.source, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i: i + self.max_sentence_length]
i += self.max_sentence_length
except AttributeError:
# If it didn't work like a file, use it as a string filename
with utils.open(self.source, 'rb') as fin:
for line in itertools.islice(fin, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i: i + self.max_sentence_length]
i += self.max_sentence_length
class PathLineSentences:
def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):
"""Like :class:`~gensim.models.word2vec.LineSentence`, but process all files in a directory
in alphabetical order by filename.
The directory must only contain files that can be read by :class:`gensim.models.word2vec.LineSentence`:
.bz2, .gz, and text files. Any file not ending with .bz2 or .gz is assumed to be a text file.
The format of files (either text, or compressed text files) in the path is one sentence = one line,
with words already preprocessed and separated by whitespace.
Warnings
--------
Does **not recurse** into subdirectories.
Parameters
----------
source : str
Path to the directory.
limit : int or None
Read only the first `limit` lines from each file. Read all if limit is None (the default).
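        Examples
        --------
        A minimal sketch; in practice `source` is a directory of text files, but a single file
        also works, so a bundled test file is used here to keep the snippet self-contained:
        .. sourcecode:: pycon
            >>> from gensim.test.utils import datapath
            >>> from gensim.models.word2vec import PathLineSentences
            >>> sentences = PathLineSentences(datapath('lee_background.cor'))
            >>> for sentence in sentences:
            ...     pass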
"""
self.source = source
self.max_sentence_length = max_sentence_length
self.limit = limit
if os.path.isfile(self.source):
logger.debug('single file given as source, rather than a directory of files')
logger.debug('consider using models.word2vec.LineSentence for a single file')
self.input_files = [self.source] # force code compatibility with list of files
elif os.path.isdir(self.source):
self.source = os.path.join(self.source, '') # ensures os-specific slash at end of path
logger.info('reading directory %s', self.source)
self.input_files = os.listdir(self.source)
self.input_files = [self.source + filename for filename in self.input_files] # make full paths
self.input_files.sort() # makes sure it happens in filename order
else: # not a file or a directory, then we can't do anything with it
            raise ValueError('input is neither a file nor a directory')
        logger.info('files read into PathLineSentences: %s', '\n'.join(self.input_files))
def __iter__(self):
"""iterate through the files"""
for file_name in self.input_files:
logger.info('reading file %s', file_name)
with utils.open(file_name, 'rb') as fin:
for line in itertools.islice(fin, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i:i + self.max_sentence_length]
i += self.max_sentence_length
class Word2VecVocab(utils.SaveLoad):
"""Obsolete class retained for now as load-compatibility state capture."""
pass
class Word2VecTrainables(utils.SaveLoad):
"""Obsolete class retained for now as load-compatibility state capture."""
pass
class Heapitem(namedtuple('Heapitem', 'count, index, left, right')):
def __lt__(self, other):
return self.count < other.count
def _build_heap(wv):
heap = list(Heapitem(wv.get_vecattr(i, 'count'), i, None, None) for i in range(len(wv.index_to_key)))
heapq.heapify(heap)
for i in range(len(wv) - 1):
min1, min2 = heapq.heappop(heap), heapq.heappop(heap)
heapq.heappush(
heap, Heapitem(count=min1.count + min2.count, index=i + len(wv), left=min1, right=min2)
)
return heap
def _assign_binary_codes(wv):
"""
    Assign a binary Huffman code (and the corresponding tree path) to each vocabulary term.
Parameters
----------
wv : KeyedVectors
A collection of word-vectors.
    Sets the 'code' and 'point' vector attributes of each vocabulary word.
    Each code is a numpy.array of 0s and 1s giving the left/right turns on the path from the root.
    Each point is a numpy.array with the indexes of the inner (non-leaf) nodes along that path.
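    Examples
    --------
    A toy sketch (words and counts are arbitrary; in normal use this helper is invoked
    indirectly via ``Word2Vec(..., hs=1)`` rather than called directly):
    .. sourcecode:: pycon
        >>> from gensim.models import KeyedVectors
        >>> from gensim.models.word2vec import _assign_binary_codes
        >>> kv = KeyedVectors(vector_size=2)
        >>> kv.add_vectors(['a', 'b', 'c'], [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
        >>> for word, count in zip(['a', 'b', 'c'], [5, 3, 1]):
        ...     kv.set_vecattr(word, 'count', count)
        >>> _assign_binary_codes(kv)  # rarer words receive codes at least as long as frequent ones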
"""
logger.info("constructing a huffman tree from %i words", len(wv))
heap = _build_heap(wv)
if not heap:
#
# TODO: how can we end up with an empty heap?
#
logger.info("built huffman tree with maximum node depth 0")
return
# recurse over the tree, assigning a binary code to each vocabulary word
max_depth = 0
stack = [(heap[0], [], [])]
while stack:
node, codes, points = stack.pop()
if node[1] < len(wv): # node[1] = index
# leaf node => store its path from the root
k = node[1]
wv.set_vecattr(k, 'code', codes)
wv.set_vecattr(k, 'point', points)
# node.code, node.point = codes, points
max_depth = max(len(codes), max_depth)
else:
# inner node => continue recursion
points = np.array(list(points) + [node.index - len(wv)], dtype=np.uint32)
stack.append((node.left, np.array(list(codes) + [0], dtype=np.uint8), points))
stack.append((node.right, np.array(list(codes) + [1], dtype=np.uint8), points))
logger.info("built huffman tree with maximum node depth %i", max_depth)
# Example: ./word2vec.py -train data.txt -output vec.txt -size 200 -window 5 -sample 1e-4 \
# -negative 5 -hs 0 -binary 0 -cbow 1 -iter 3
if __name__ == "__main__":
import argparse
logging.basicConfig(
format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s',
level=logging.INFO
)
logger.info("running %s", " ".join(sys.argv))
# check and process cmdline input
program = os.path.basename(sys.argv[0])
if len(sys.argv) < 2:
print(globals()['__doc__'] % locals())
sys.exit(1)
from gensim.models.word2vec import Word2Vec # noqa:F811 avoid referencing __main__ in pickle
np.seterr(all='raise') # don't ignore numpy errors
parser = argparse.ArgumentParser()
parser.add_argument("-train", help="Use text data from file TRAIN to train the model", required=True)
parser.add_argument("-output", help="Use file OUTPUT to save the resulting word vectors")
parser.add_argument("-window", help="Set max skip length WINDOW between words; default is 5", type=int, default=5)
parser.add_argument("-size", help="Set size of word vectors; default is 100", type=int, default=100)
parser.add_argument(
"-sample",
help="Set threshold for occurrence of words. "
"Those that appear with higher frequency in the training data will be randomly down-sampled;"
" default is 1e-3, useful range is (0, 1e-5)",
type=float, default=1e-3
)
parser.add_argument(
"-hs", help="Use Hierarchical Softmax; default is 0 (not used)",
type=int, default=0, choices=[0, 1]
)
parser.add_argument(
"-negative", help="Number of negative examples; default is 5, common values are 3 - 10 (0 = not used)",
type=int, default=5
)
parser.add_argument("-threads", help="Use THREADS threads (default 12)", type=int, default=12)
parser.add_argument("-iter", help="Run more training iterations (default 5)", type=int, default=5)
parser.add_argument(
"-min_count", help="This will discard words that appear less than MIN_COUNT times; default is 5",
type=int, default=5
)
parser.add_argument(
"-cbow", help="Use the continuous bag of words model; default is 1 (use 0 for skip-gram model)",
type=int, default=1, choices=[0, 1]
)
parser.add_argument(
"-binary", help="Save the resulting vectors in binary mode; default is 0 (off)",
type=int, default=0, choices=[0, 1]
)
parser.add_argument("-accuracy", help="Use questions from file ACCURACY to evaluate the model")
args = parser.parse_args()
if args.cbow == 0:
skipgram = 1
else:
skipgram = 0
corpus = LineSentence(args.train)
model = Word2Vec(
corpus, vector_size=args.size, min_count=args.min_count, workers=args.threads,
window=args.window, sample=args.sample, sg=skipgram, hs=args.hs,
negative=args.negative, cbow_mean=1, epochs=args.iter,
)
if args.output:
outfile = args.output
model.wv.save_word2vec_format(outfile, binary=args.binary)
else:
outfile = args.train
model.save(outfile + '.model')
if args.binary == 1:
model.wv.save_word2vec_format(outfile + '.model.bin', binary=True)
else:
model.wv.save_word2vec_format(outfile + '.model.txt', binary=False)
if args.accuracy:
        model.wv.evaluate_word_analogies(args.accuracy)
logger.info("finished running %s", program)
# piskvorky_gensim/gensim/models/word2vec_inner.pyx
#!/usr/bin/env cython
# cython: language_level=3
# cython: boundscheck=False
# cython: wraparound=False
# cython: cdivision=True
# cython: embedsignature=True
# coding: utf-8
#
# Copyright (C) 2013 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Optimized cython functions for training :class:`~gensim.models.word2vec.Word2Vec` model."""
import cython
import numpy as np
cimport numpy as np
from libc.math cimport exp
from libc.math cimport log
from libc.string cimport memset
import scipy.linalg.blas as fblas
REAL = np.float32
DEF MAX_SENTENCE_LEN = 10000
cdef scopy_ptr scopy=<scopy_ptr>PyCObject_AsVoidPtr(fblas.scopy._cpointer) # y = x
cdef saxpy_ptr saxpy=<saxpy_ptr>PyCObject_AsVoidPtr(fblas.saxpy._cpointer) # y += alpha * x
cdef sdot_ptr sdot=<sdot_ptr>PyCObject_AsVoidPtr(fblas.sdot._cpointer) # float = dot(x, y)
cdef dsdot_ptr dsdot=<dsdot_ptr>PyCObject_AsVoidPtr(fblas.sdot._cpointer) # double = dot(x, y)
cdef snrm2_ptr snrm2=<snrm2_ptr>PyCObject_AsVoidPtr(fblas.snrm2._cpointer) # sqrt(x^2)
cdef sscal_ptr sscal=<sscal_ptr>PyCObject_AsVoidPtr(fblas.sscal._cpointer) # x = alpha * x
DEF EXP_TABLE_SIZE = 1000
DEF MAX_EXP = 6
cdef REAL_t[EXP_TABLE_SIZE] EXP_TABLE
cdef REAL_t[EXP_TABLE_SIZE] LOG_TABLE
cdef int ONE = 1
cdef REAL_t ONEF = <REAL_t>1.0
# for when fblas.sdot returns a double
cdef REAL_t our_dot_double(const int *N, const float *X, const int *incX, const float *Y, const int *incY) nogil:
return <REAL_t>dsdot(N, X, incX, Y, incY)
# for when fblas.sdot returns a float
cdef REAL_t our_dot_float(const int *N, const float *X, const int *incX, const float *Y, const int *incY) nogil:
return <REAL_t>sdot(N, X, incX, Y, incY)
# for when no blas available
cdef REAL_t our_dot_noblas(const int *N, const float *X, const int *incX, const float *Y, const int *incY) nogil:
# not a true full dot()-implementation: just enough for our cases
cdef int i
cdef REAL_t a
a = <REAL_t>0.0
for i from 0 <= i < N[0] by 1:
a += X[i] * Y[i]
return a
# for when no blas available
cdef void our_saxpy_noblas(const int *N, const float *alpha, const float *X, const int *incX, float *Y, const int *incY) nogil:
cdef int i
for i from 0 <= i < N[0] by 1:
Y[i * (incY[0])] = (alpha[0]) * X[i * (incX[0])] + Y[i * (incY[0])]
cdef void w2v_fast_sentence_sg_hs(
const np.uint32_t *word_point, const np.uint8_t *word_code, const int codelen,
REAL_t *syn0, REAL_t *syn1, const int size,
const np.uint32_t word2_index, const REAL_t alpha, REAL_t *work, REAL_t *words_lockf,
const np.uint32_t lockf_len, const int _compute_loss, REAL_t *_running_training_loss_param) nogil:
"""Train on a single effective word from the current batch, using the Skip-Gram model.
In this model we are using a given word to predict a context word (a word that is
close to the one we are using as training). Hierarchical softmax is used to speed-up
training.
Parameters
----------
    word_point
        Indexes of the inner (non-leaf) Huffman-tree nodes on the path to the current word.
    word_code
        Binary Huffman code of the current word, as an array of uint8 values (0s and 1s).
    codelen
        Length of the Huffman code of the current word.
syn0
Embeddings for the words in the vocabulary (`model.wv.vectors`)
syn1
Weights of the hidden layer in the model's trainable neural network.
size
Length of the embeddings.
word2_index
Index of the context word in the vocabulary.
alpha
Learning rate.
work
Private working memory for each worker.
words_lockf
Lock factors for each word. A value of 0 will block training.
_compute_loss
Whether or not the loss should be computed at this step.
_running_training_loss_param
Running loss, used to debug or inspect how training progresses.
"""
cdef long long a, b
cdef long long row1 = <long long>word2_index * <long long>size, row2, sgn
cdef REAL_t f, g, f_dot, lprob
memset(work, 0, size * cython.sizeof(REAL_t))
for b in range(codelen):
row2 = <long long>word_point[b] * <long long>size
f_dot = our_dot(&size, &syn0[row1], &ONE, &syn1[row2], &ONE)
if f_dot <= -MAX_EXP or f_dot >= MAX_EXP:
continue
f = EXP_TABLE[<int>((f_dot + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
g = (1 - word_code[b] - f) * alpha
if _compute_loss == 1:
sgn = (-1)**word_code[b] # ch function: 0-> 1, 1 -> -1
lprob = sgn*f_dot
if lprob <= -MAX_EXP or lprob >= MAX_EXP:
continue
lprob = LOG_TABLE[<int>((lprob + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
_running_training_loss_param[0] = _running_training_loss_param[0] - lprob
our_saxpy(&size, &g, &syn1[row2], &ONE, work, &ONE)
our_saxpy(&size, &g, &syn0[row1], &ONE, &syn1[row2], &ONE)
our_saxpy(&size, &words_lockf[word2_index % lockf_len], work, &ONE, &syn0[row1], &ONE)
# to support random draws from negative-sampling cum_table
cdef inline unsigned long long bisect_left(np.uint32_t *a, unsigned long long x, unsigned long long lo, unsigned long long hi) nogil:
cdef unsigned long long mid
while hi > lo:
mid = (lo + hi) >> 1
if a[mid] >= x:
hi = mid
else:
lo = mid + 1
return lo
# this quick & dirty RNG apparently matches Java's (non-Secure)Random
# note this function side-effects next_random to set up the next number
cdef inline unsigned long long random_int32(unsigned long long *next_random) nogil:
cdef unsigned long long this_random = next_random[0] >> 16
next_random[0] = (next_random[0] * <unsigned long long>25214903917ULL + 11) & 281474976710655ULL
return this_random
cdef unsigned long long w2v_fast_sentence_sg_neg(
const int negative, np.uint32_t *cum_table, unsigned long long cum_table_len,
REAL_t *syn0, REAL_t *syn1neg, const int size, const np.uint32_t word_index,
const np.uint32_t word2_index, const REAL_t alpha, REAL_t *work,
unsigned long long next_random, REAL_t *words_lockf,
const np.uint32_t lockf_len, const int _compute_loss, REAL_t *_running_training_loss_param) nogil:
"""Train on a single effective word from the current batch, using the Skip-Gram model.
In this model we are using a given word to predict a context word (a word that is
close to the one we are using as training). Negative sampling is used to speed-up
training.
Parameters
----------
negative
Number of negative words to be sampled.
cum_table
Cumulative-distribution table using stored vocabulary word counts for
drawing random words (with a negative label).
cum_table_len
Length of the `cum_table`
syn0
Embeddings for the words in the vocabulary (`model.wv.vectors`)
syn1neg
Weights of the hidden layer in the model's trainable neural network.
size
Length of the embeddings.
word_index
Index of the current training word in the vocabulary.
word2_index
Index of the context word in the vocabulary.
alpha
Learning rate.
work
Private working memory for each worker.
next_random
Seed to produce the index for the next word to be randomly sampled.
words_lockf
Lock factors for each word. A value of 0 will block training.
_compute_loss
Whether or not the loss should be computed at this step.
_running_training_loss_param
Running loss, used to debug or inspect how training progresses.
Returns
-------
    Seed to draw the training word for the next iteration of the same routine.
    """
"""
cdef long long a
cdef long long row1 = <long long>word2_index * <long long>size, row2
cdef unsigned long long modulo = 281474976710655ULL
cdef REAL_t f, g, label, f_dot, log_e_f_dot
cdef np.uint32_t target_index
cdef int d
memset(work, 0, size * cython.sizeof(REAL_t))
for d in range(negative+1):
if d == 0:
target_index = word_index
label = ONEF
else:
target_index = bisect_left(cum_table, (next_random >> 16) % cum_table[cum_table_len-1], 0, cum_table_len)
next_random = (next_random * <unsigned long long>25214903917ULL + 11) & modulo
if target_index == word_index:
continue
label = <REAL_t>0.0
row2 = <long long>target_index * <long long>size
f_dot = our_dot(&size, &syn0[row1], &ONE, &syn1neg[row2], &ONE)
if f_dot <= -MAX_EXP or f_dot >= MAX_EXP:
continue
f = EXP_TABLE[<int>((f_dot + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
g = (label - f) * alpha
if _compute_loss == 1:
f_dot = (f_dot if d == 0 else -f_dot)
if f_dot <= -MAX_EXP or f_dot >= MAX_EXP:
continue
log_e_f_dot = LOG_TABLE[<int>((f_dot + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
_running_training_loss_param[0] = _running_training_loss_param[0] - log_e_f_dot
our_saxpy(&size, &g, &syn1neg[row2], &ONE, work, &ONE)
our_saxpy(&size, &g, &syn0[row1], &ONE, &syn1neg[row2], &ONE)
our_saxpy(&size, &words_lockf[word2_index % lockf_len], work, &ONE, &syn0[row1], &ONE)
return next_random
cdef void w2v_fast_sentence_cbow_hs(
const np.uint32_t *word_point, const np.uint8_t *word_code, int codelens[MAX_SENTENCE_LEN],
REAL_t *neu1, REAL_t *syn0, REAL_t *syn1, const int size,
const np.uint32_t indexes[MAX_SENTENCE_LEN], const REAL_t alpha, REAL_t *work,
int i, int j, int k, int cbow_mean, REAL_t *words_lockf, const np.uint32_t lockf_len,
const int _compute_loss, REAL_t *_running_training_loss_param) nogil:
"""Train on a single effective word from the current batch, using the CBOW method.
Using this method we train the trainable neural network by attempting to predict a
given word by its context (words surrounding the one we are trying to predict).
Hierarchical softmax method is used to speed-up training.
Parameters
----------
    word_point
        Indexes of the inner (non-leaf) Huffman-tree nodes on the path to the predicted word.
    word_code
        Binary Huffman code of the predicted word, as an array of uint8 values (0s and 1s).
    codelens
        Huffman code lengths for all words in the current sentence.
neu1
Private working memory for every worker.
syn0
Embeddings for the words in the vocabulary (`model.wv.vectors`)
syn1
Weights of the hidden layer in the model's trainable neural network.
size
Length of the embeddings.
    indexes
        Vocabulary indexes of the words in the current sentence.
alpha
Learning rate.
work
Private working memory for each worker.
i
Index of the word to be predicted from the context.
j
Index of the word at the beginning of the context window.
k
Index of the word at the end of the context window.
cbow_mean
If 0, use the sum of the context word vectors as the prediction. If 1, use the mean.
words_lockf
Lock factors for each word. A value of 0 will block training.
_compute_loss
Whether or not the loss should be computed at this step.
_running_training_loss_param
Running loss, used to debug or inspect how training progresses.
"""
cdef long long a, b
cdef long long row2, sgn
cdef REAL_t f, g, count, inv_count = 1.0, f_dot, lprob
cdef int m
memset(neu1, 0, size * cython.sizeof(REAL_t))
count = <REAL_t>0.0
for m in range(j, k):
if m == i:
continue
else:
count += ONEF
our_saxpy(&size, &ONEF, &syn0[<long long>indexes[m] * <long long>size], &ONE, neu1, &ONE)
if count > (<REAL_t>0.5):
inv_count = ONEF/count
if cbow_mean:
sscal(&size, &inv_count, neu1, &ONE) # (does this need BLAS-variants like saxpy?)
memset(work, 0, size * cython.sizeof(REAL_t))
for b in range(codelens[i]):
row2 = <long long>word_point[b] * <long long>size
f_dot = our_dot(&size, neu1, &ONE, &syn1[row2], &ONE)
if f_dot <= -MAX_EXP or f_dot >= MAX_EXP:
continue
f = EXP_TABLE[<int>((f_dot + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
g = (1 - word_code[b] - f) * alpha
if _compute_loss == 1:
sgn = (-1)**word_code[b] # ch function: 0-> 1, 1 -> -1
lprob = sgn*f_dot
if lprob <= -MAX_EXP or lprob >= MAX_EXP:
continue
lprob = LOG_TABLE[<int>((lprob + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
_running_training_loss_param[0] = _running_training_loss_param[0] - lprob
our_saxpy(&size, &g, &syn1[row2], &ONE, work, &ONE)
our_saxpy(&size, &g, neu1, &ONE, &syn1[row2], &ONE)
if not cbow_mean: # divide error over summed window vectors
sscal(&size, &inv_count, work, &ONE) # (does this need BLAS-variants like saxpy?)
for m in range(j, k):
if m == i:
continue
else:
our_saxpy(&size, &words_lockf[indexes[m] % lockf_len], work, &ONE, &syn0[<long long>indexes[m] * <long long>size], &ONE)
cdef unsigned long long w2v_fast_sentence_cbow_neg(
const int negative, np.uint32_t *cum_table, unsigned long long cum_table_len, int codelens[MAX_SENTENCE_LEN],
REAL_t *neu1, REAL_t *syn0, REAL_t *syn1neg, const int size,
const np.uint32_t indexes[MAX_SENTENCE_LEN], const REAL_t alpha, REAL_t *work,
int i, int j, int k, int cbow_mean, unsigned long long next_random, REAL_t *words_lockf,
const np.uint32_t lockf_len, const int _compute_loss, REAL_t *_running_training_loss_param) nogil:
"""Train on a single effective word from the current batch, using the CBOW method.
Using this method we train the trainable neural network by attempting to predict a
given word by its context (words surrounding the one we are trying to predict).
Negative sampling is used to speed-up training.
Parameters
----------
negative
Number of negative words to be sampled.
cum_table
Cumulative-distribution table using stored vocabulary word counts for
drawing random words (with a negative label).
cum_table_len
Length of the `cum_table`
codelens
Number of characters (length) for all words in the context.
neu1
Private working memory for every worker.
syn0
Embeddings for the words in the vocabulary (`model.wv.vectors`)
syn1neg
Weights of the hidden layer in the model's trainable neural network.
size
Length of the embeddings.
indexes
Indexes of the context words in the vocabulary.
alpha
Learning rate.
work
Private working memory for each worker.
i
Index of the word to be predicted from the context.
j
Index of the word at the beginning of the context window.
k
Index of the word at the end of the context window.
cbow_mean
If 0, use the sum of the context word vectors as the prediction. If 1, use the mean.
next_random
Seed for the drawing the predicted word for the next iteration of the same routine.
words_lockf
Lock factors for each word. A value of 0 will block training.
_compute_loss
Whether or not the loss should be computed at this step.
_running_training_loss_param
Running loss, used to debug or inspect how training progresses.
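    Notes
    -----
    The negative-sample draw walks the cumulative-count table with a bisection search; it is
    equivalent to the following pure-Python sketch (the toy numbers are illustrative only):
    .. sourcecode:: pycon
        >>> import numpy as np
        >>> cum_table = np.array([10, 30, 100], dtype=np.uint32)  # toy cumulative counts
        >>> draw = 57  # plays the role of (next_random >> 16) % cum_table[-1]
        >>> int(np.searchsorted(cum_table, draw, side='left'))  # index of the sampled word
        2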
"""
cdef long long a
cdef long long row2
cdef unsigned long long modulo = 281474976710655ULL
cdef REAL_t f, g, count, inv_count = 1.0, label, log_e_f_dot, f_dot
cdef np.uint32_t target_index, word_index
cdef int d, m
word_index = indexes[i]
memset(neu1, 0, size * cython.sizeof(REAL_t))
count = <REAL_t>0.0
for m in range(j, k):
if m == i:
continue
else:
count += ONEF
our_saxpy(&size, &ONEF, &syn0[<long long>indexes[m] * <long long>size], &ONE, neu1, &ONE)
if count > (<REAL_t>0.5):
inv_count = ONEF/count
if cbow_mean:
sscal(&size, &inv_count, neu1, &ONE) # (does this need BLAS-variants like saxpy?)
memset(work, 0, size * cython.sizeof(REAL_t))
for d in range(negative+1):
if d == 0:
target_index = word_index
label = ONEF
else:
target_index = bisect_left(cum_table, (next_random >> 16) % cum_table[cum_table_len-1], 0, cum_table_len)
next_random = (next_random * <unsigned long long>25214903917ULL + 11) & modulo
if target_index == word_index:
continue
label = <REAL_t>0.0
row2 = <long long>target_index * <long long>size
f_dot = our_dot(&size, neu1, &ONE, &syn1neg[row2], &ONE)
if f_dot <= -MAX_EXP or f_dot >= MAX_EXP:
continue
f = EXP_TABLE[<int>((f_dot + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
g = (label - f) * alpha
if _compute_loss == 1:
f_dot = (f_dot if d == 0 else -f_dot)
if f_dot <= -MAX_EXP or f_dot >= MAX_EXP:
continue
log_e_f_dot = LOG_TABLE[<int>((f_dot + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
_running_training_loss_param[0] = _running_training_loss_param[0] - log_e_f_dot
our_saxpy(&size, &g, &syn1neg[row2], &ONE, work, &ONE)
our_saxpy(&size, &g, neu1, &ONE, &syn1neg[row2], &ONE)
if not cbow_mean: # divide error over summed window vectors
sscal(&size, &inv_count, work, &ONE) # (does this need BLAS-variants like saxpy?)
for m in range(j,k):
if m == i:
continue
else:
our_saxpy(&size, &words_lockf[indexes[m] % lockf_len], work, &ONE, &syn0[<long long>indexes[m] * <long long>size], &ONE)
return next_random
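# The negative-sampling branch above draws `negative` noise words from the unigram
# distribution encoded in `cum_table`: a cumulative count table searched with
# bisect_left, indexed by the high bits of a 48-bit linear-congruential random state.
# A rough, hedged pure-Python sketch of the same idea (toy counts; names here are
# illustrative, not part of the gensim API):
#
# >>> import numpy as np
# >>> counts = np.array([50, 30, 15, 5])              # per-word frequencies (toy data)
# >>> cum_table = np.cumsum(counts)                   # [50, 80, 95, 100]
# >>> state = 2 ** 40 + 12345                         # any 48-bit seed
# >>> draws = []
# >>> for _ in range(5):
# ...     target = int(np.searchsorted(cum_table, (state >> 16) % cum_table[-1], side='left'))
# ...     state = (state * 25214903917 + 11) & 0xFFFFFFFFFFFF   # same LCG update as above
# ...     draws.append(target)
# >>> all(0 <= t < len(counts) for t in draws)
# True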
cdef init_w2v_config(Word2VecConfig *c, model, alpha, compute_loss, _work, _neu1=None):
c[0].hs = model.hs
c[0].negative = model.negative
c[0].sample = (model.sample != 0)
c[0].cbow_mean = model.cbow_mean
c[0].window = model.window
c[0].workers = model.workers
c[0].compute_loss = (1 if compute_loss else 0)
c[0].running_training_loss = model.running_training_loss
c[0].syn0 = <REAL_t *>(np.PyArray_DATA(model.wv.vectors))
c[0].words_lockf = <REAL_t *>(np.PyArray_DATA(model.wv.vectors_lockf))
c[0].words_lockf_len = len(model.wv.vectors_lockf)
c[0].alpha = alpha
c[0].size = model.wv.vector_size
if c[0].hs:
c[0].syn1 = <REAL_t *>(np.PyArray_DATA(model.syn1))
if c[0].negative:
c[0].syn1neg = <REAL_t *>(np.PyArray_DATA(model.syn1neg))
c[0].cum_table = <np.uint32_t *>(np.PyArray_DATA(model.cum_table))
c[0].cum_table_len = len(model.cum_table)
if c[0].negative or c[0].sample:
c[0].next_random = (2**24) * model.random.randint(0, 2**24) + model.random.randint(0, 2**24)
# convert Python structures to primitive types, so we can release the GIL
c[0].work = <REAL_t *>np.PyArray_DATA(_work)
if _neu1 is not None:
c[0].neu1 = <REAL_t *>np.PyArray_DATA(_neu1)
def train_batch_sg(model, sentences, alpha, _work, compute_loss):
"""Update skip-gram model by training on a batch of sentences.
Called internally from :meth:`~gensim.models.word2vec.Word2Vec.train`.
Parameters
----------
model : :class:`~gensim.models.word2Vec.Word2Vec`
The Word2Vec model instance to train.
sentences : iterable of list of str
The corpus used to train the model.
alpha : float
The learning rate
_work : np.ndarray
Private working memory for each worker.
compute_loss : bool
Whether or not the training loss should be computed in this batch.
Returns
-------
int
        Number of words in the vocabulary actually used for training (they already existed in the vocabulary
        and were not discarded by frequent-word downsampling).
"""
cdef Word2VecConfig c
cdef int i, j, k
cdef int effective_words = 0, effective_sentences = 0
cdef int sent_idx, idx_start, idx_end
cdef np.uint32_t *vocab_sample_ints
init_w2v_config(&c, model, alpha, compute_loss, _work)
if c.sample:
vocab_sample_ints = <np.uint32_t *>np.PyArray_DATA(model.wv.expandos['sample_int'])
if c.hs:
vocab_codes = model.wv.expandos['code']
vocab_points = model.wv.expandos['point']
# prepare C structures so we can go "full C" and release the Python GIL
c.sentence_idx[0] = 0 # indices of the first sentence always start at 0
for sent in sentences:
if not sent:
continue # ignore empty sentences; leave effective_sentences unchanged
for token in sent:
if token not in model.wv.key_to_index:
continue # leaving `effective_words` unchanged = shortening the sentence = expanding the window
word_index = model.wv.key_to_index[token]
if c.sample and vocab_sample_ints[word_index] < random_int32(&c.next_random):
continue
c.indexes[effective_words] = word_index
if c.hs:
c.codelens[effective_words] = <int>len(vocab_codes[word_index])
c.codes[effective_words] = <np.uint8_t *>np.PyArray_DATA(vocab_codes[word_index])
c.points[effective_words] = <np.uint32_t *>np.PyArray_DATA(vocab_points[word_index])
effective_words += 1
if effective_words == MAX_SENTENCE_LEN:
break # TODO: log warning, tally overflow?
# keep track of which words go into which sentence, so we don't train
# across sentence boundaries.
        # indices of sentence number X are between <sentence_idx[X], sentence_idx[X + 1])
effective_sentences += 1
c.sentence_idx[effective_sentences] = effective_words
if effective_words == MAX_SENTENCE_LEN:
break # TODO: log warning, tally overflow?
# precompute "reduced window" offsets in a single randint() call
if model.shrink_windows:
for i, item in enumerate(model.random.randint(0, c.window, effective_words)):
c.reduced_windows[i] = item
else:
for i in range(effective_words):
c.reduced_windows[i] = 0
# release GIL & train on all sentences
with nogil:
for sent_idx in range(effective_sentences):
idx_start = c.sentence_idx[sent_idx]
idx_end = c.sentence_idx[sent_idx + 1]
for i in range(idx_start, idx_end):
j = i - c.window + c.reduced_windows[i]
if j < idx_start:
j = idx_start
k = i + c.window + 1 - c.reduced_windows[i]
if k > idx_end:
k = idx_end
for j in range(j, k):
if j == i:
continue
if c.hs:
w2v_fast_sentence_sg_hs(c.points[i], c.codes[i], c.codelens[i], c.syn0, c.syn1, c.size, c.indexes[j], c.alpha, c.work, c.words_lockf, c.words_lockf_len, c.compute_loss, &c.running_training_loss)
if c.negative:
c.next_random = w2v_fast_sentence_sg_neg(c.negative, c.cum_table, c.cum_table_len, c.syn0, c.syn1neg, c.size, c.indexes[i], c.indexes[j], c.alpha, c.work, c.next_random, c.words_lockf, c.words_lockf_len, c.compute_loss, &c.running_training_loss)
model.running_training_loss = c.running_training_loss
return effective_words
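# A hedged usage sketch: this batch routine is driven internally by Word2Vec.train();
# end users normally just construct a skip-gram model. The parameter values below are
# arbitrary illustrations, not recommendations:
#
# >>> from gensim.models import Word2Vec
# >>> from gensim.test.utils import common_texts
# >>> model = Word2Vec(common_texts, vector_size=24, sg=1, negative=5,
# ...                  min_count=1, compute_loss=True, epochs=5)
# >>> loss = model.get_latest_training_loss()   # tallied via c.running_training_loss above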
def train_batch_cbow(model, sentences, alpha, _work, _neu1, compute_loss):
"""Update CBOW model by training on a batch of sentences.
Called internally from :meth:`~gensim.models.word2vec.Word2Vec.train`.
Parameters
----------
model : :class:`~gensim.models.word2vec.Word2Vec`
The Word2Vec model instance to train.
sentences : iterable of list of str
The corpus used to train the model.
alpha : float
The learning rate.
_work : np.ndarray
Private working memory for each worker.
_neu1 : np.ndarray
Private working memory for each worker.
compute_loss : bool
Whether or not the training loss should be computed in this batch.
Returns
-------
int
        Number of words in the vocabulary actually used for training (they already existed in the vocabulary
        and were not discarded by frequent-word downsampling).
"""
cdef Word2VecConfig c
cdef int i, j, k
cdef int effective_words = 0, effective_sentences = 0
cdef int sent_idx, idx_start, idx_end
cdef np.uint32_t *vocab_sample_ints
init_w2v_config(&c, model, alpha, compute_loss, _work, _neu1)
if c.sample:
vocab_sample_ints = <np.uint32_t *>np.PyArray_DATA(model.wv.expandos['sample_int'])
if c.hs:
vocab_codes = model.wv.expandos['code']
vocab_points = model.wv.expandos['point']
# prepare C structures so we can go "full C" and release the Python GIL
c.sentence_idx[0] = 0 # indices of the first sentence always start at 0
for sent in sentences:
if not sent:
continue # ignore empty sentences; leave effective_sentences unchanged
for token in sent:
if token not in model.wv.key_to_index:
continue # leaving `effective_words` unchanged = shortening the sentence = expanding the window
word_index = model.wv.key_to_index[token]
if c.sample and vocab_sample_ints[word_index] < random_int32(&c.next_random):
continue
c.indexes[effective_words] = word_index
if c.hs:
c.codelens[effective_words] = <int>len(vocab_codes[word_index])
c.codes[effective_words] = <np.uint8_t *>np.PyArray_DATA(vocab_codes[word_index])
c.points[effective_words] = <np.uint32_t *>np.PyArray_DATA(vocab_points[word_index])
effective_words += 1
if effective_words == MAX_SENTENCE_LEN:
break # TODO: log warning, tally overflow?
# keep track of which words go into which sentence, so we don't train
# across sentence boundaries.
        # indices of sentence number X are between <sentence_idx[X], sentence_idx[X + 1])
effective_sentences += 1
c.sentence_idx[effective_sentences] = effective_words
if effective_words == MAX_SENTENCE_LEN:
break # TODO: log warning, tally overflow?
# precompute "reduced window" offsets in a single randint() call
if model.shrink_windows:
for i, item in enumerate(model.random.randint(0, c.window, effective_words)):
c.reduced_windows[i] = item
else:
for i in range(effective_words):
c.reduced_windows[i] = 0
# release GIL & train on all sentences
with nogil:
for sent_idx in range(effective_sentences):
idx_start = c.sentence_idx[sent_idx]
idx_end = c.sentence_idx[sent_idx + 1]
for i in range(idx_start, idx_end):
j = i - c.window + c.reduced_windows[i]
if j < idx_start:
j = idx_start
k = i + c.window + 1 - c.reduced_windows[i]
if k > idx_end:
k = idx_end
if c.hs:
w2v_fast_sentence_cbow_hs(c.points[i], c.codes[i], c.codelens, c.neu1, c.syn0, c.syn1, c.size, c.indexes, c.alpha, c.work, i, j, k, c.cbow_mean, c.words_lockf, c.words_lockf_len, c.compute_loss, &c.running_training_loss)
if c.negative:
c.next_random = w2v_fast_sentence_cbow_neg(c.negative, c.cum_table, c.cum_table_len, c.codelens, c.neu1, c.syn0, c.syn1neg, c.size, c.indexes, c.alpha, c.work, i, j, k, c.cbow_mean, c.next_random, c.words_lockf, c.words_lockf_len, c.compute_loss, &c.running_training_loss)
model.running_training_loss = c.running_training_loss
return effective_words
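# The CBOW counterpart is selected with sg=0; cbow_mean toggles between averaging (1,
# the default) and summing (0) the context vectors, matching the inv_count scaling in
# the routines above. A minimal, hedged example with toy parameters:
#
# >>> from gensim.models import Word2Vec
# >>> from gensim.test.utils import common_texts
# >>> model = Word2Vec(common_texts, vector_size=24, sg=0, cbow_mean=1, min_count=1)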
def score_sentence_sg(model, sentence, _work):
"""Obtain likelihood score for a single sentence in a fitted skip-gram representation.
Notes
-----
This scoring function is only implemented for hierarchical softmax (`model.hs == 1`).
    The model should have been trained using the skip-gram model (`model.sg == 1`).
Parameters
----------
model : :class:`~gensim.models.word2vec.Word2Vec`
The trained model. It **MUST** have been trained using hierarchical softmax and the skip-gram algorithm.
sentence : list of str
The words comprising the sentence to be scored.
_work : np.ndarray
Private working memory for each worker.
Returns
-------
float
The probability assigned to this sentence by the Skip-Gram model.
"""
cdef Word2VecConfig c
c.syn0 = <REAL_t *>(np.PyArray_DATA(model.wv.vectors))
c.size = model.wv.vector_size
c.window = model.window
cdef int i, j, k
cdef long result = 0
cdef int sentence_len
c.syn1 = <REAL_t *>(np.PyArray_DATA(model.syn1))
# convert Python structures to primitive types, so we can release the GIL
c.work = <REAL_t *>np.PyArray_DATA(_work)
vocab_codes = model.wv.expandos['code']
vocab_points = model.wv.expandos['point']
i = 0
for token in sentence:
word_index = model.wv.key_to_index[token] if token in model.wv.key_to_index else None
if word_index is None:
# For score, should this be a default negative value?
#
# See comment by @gojomo at https://github.com/RaRe-Technologies/gensim/pull/2698/files#r445827846 :
#
# These 'score' functions are a long-ago contribution from @mataddy whose
# current function/utility is unclear.
# I've continued to apply mechanical updates to match other changes, and the code
# still compiles & passes the one (trivial, form-but-not-function) unit test. But it's an
# idiosyncratic technique, and only works for the non-default hs mode. Here, in lieu of the
            # previous cryptic "# should drop the" comment, I've asked if for the purposes of this
# particular kind of 'scoring' (really, loss-tallying indicating how divergent this new
# text is from what the model learned during training), shouldn't completely missing
# words imply something very negative, as opposed to nothing-at-all? But probably, this
# functionality should be dropped. (And ultimately, a talented cleanup of the largely-broken
# loss-tallying functions might provide a cleaner window into this same measure of how
# well a text contrasts with model expectations - such as a way to report loss from a
            # single invocation of one of the inner train methods, without changing the model.)
continue
c.indexes[i] = word_index
c.codelens[i] = <int>len(vocab_codes[word_index])
c.codes[i] = <np.uint8_t *>np.PyArray_DATA(vocab_codes[word_index])
c.points[i] = <np.uint32_t *>np.PyArray_DATA(vocab_points[word_index])
result += 1
i += 1
if i == MAX_SENTENCE_LEN:
break # TODO: log warning, tally overflow?
sentence_len = i
# release GIL & train on the sentence
c.work[0] = 0.0
with nogil:
for i in range(sentence_len):
if c.codelens[i] == 0:
continue
j = i - c.window
if j < 0:
j = 0
k = i + c.window + 1
if k > sentence_len:
k = sentence_len
for j in range(j, k):
if j == i or c.codelens[j] == 0:
continue
score_pair_sg_hs(c.points[i], c.codes[i], c.codelens[i], c.syn0, c.syn1, c.size, c.indexes[j], c.work)
return c.work[0]
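# Scoring is exposed as Word2Vec.score() and, as noted above, only works for a
# hierarchical-softmax skip-gram model. A hedged sketch; corpus and sizes are toy:
#
# >>> from gensim.models import Word2Vec
# >>> from gensim.test.utils import common_texts
# >>> model = Word2Vec(common_texts, vector_size=24, sg=1, hs=1, negative=0, min_count=1)
# >>> log_likelihoods = model.score(common_texts)
# >>> len(log_likelihoods) == len(common_texts)
# True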
cdef void score_pair_sg_hs(
const np.uint32_t *word_point, const np.uint8_t *word_code, const int codelen,
REAL_t *syn0, REAL_t *syn1, const int size,
const np.uint32_t word2_index, REAL_t *work) nogil:
cdef long long b
cdef long long row1 = <long long>word2_index * <long long>size, row2, sgn
cdef REAL_t f
for b in range(codelen):
row2 = <long long>word_point[b] * <long long>size
f = our_dot(&size, &syn0[row1], &ONE, &syn1[row2], &ONE)
sgn = (-1)**word_code[b] # ch function: 0-> 1, 1 -> -1
f *= sgn
if f <= -MAX_EXP or f >= MAX_EXP:
continue
f = LOG_TABLE[<int>((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
work[0] += f
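# What the loop above tallies, in plain numpy terms: for each node b on the word's
# Huffman path, add log(sigmoid(sgn * dot)), where sgn is +1 for code bit 0 and -1 for
# code bit 1. A small illustrative check with toy numbers (not gensim API):
#
# >>> import numpy as np
# >>> code = np.array([0, 1, 1], dtype=np.uint8)
# >>> dots = np.array([0.7, -0.2, 1.3])     # dot(context_vec, node_vec) per path node
# >>> sgn = 1.0 - 2.0 * code                # 0 -> +1, 1 -> -1
# >>> log_prob = np.log(1.0 / (1.0 + np.exp(-sgn * dots))).sum()
# >>> round(float(log_prob), 2)
# -2.54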
def score_sentence_cbow(model, sentence, _work, _neu1):
"""Obtain likelihood score for a single sentence in a fitted CBOW representation.
Notes
-----
This scoring function is only implemented for hierarchical softmax (`model.hs == 1`).
    The model should have been trained using the CBOW method (`model.sg == 0`).
Parameters
----------
model : :class:`~gensim.models.word2vec.Word2Vec`
The trained model. It **MUST** have been trained using hierarchical softmax and the CBOW algorithm.
sentence : list of str
The words comprising the sentence to be scored.
_work : np.ndarray
Private working memory for each worker.
_neu1 : np.ndarray
Private working memory for each worker.
Returns
-------
float
        The probability assigned to this sentence by the CBOW model.
"""
cdef Word2VecConfig c
c.cbow_mean = model.cbow_mean
c.syn0 = <REAL_t *>(np.PyArray_DATA(model.wv.vectors))
c.size = model.wv.vector_size
c.window = model.window
cdef int i, j, k
cdef long result = 0
c.syn1 = <REAL_t *>(np.PyArray_DATA(model.syn1))
# convert Python structures to primitive types, so we can release the GIL
c.work = <REAL_t *>np.PyArray_DATA(_work)
c.neu1 = <REAL_t *>np.PyArray_DATA(_neu1)
vocab_codes = model.wv.expandos['code']
vocab_points = model.wv.expandos['point']
i = 0
for token in sentence:
word_index = model.wv.key_to_index[token] if token in model.wv.key_to_index else None
if word_index is None:
continue # for score, should this be a default negative value?
c.indexes[i] = word_index
c.codelens[i] = <int>len(vocab_codes[word_index])
c.codes[i] = <np.uint8_t *>np.PyArray_DATA(vocab_codes[word_index])
c.points[i] = <np.uint32_t *>np.PyArray_DATA(vocab_points[word_index])
result += 1
i += 1
if i == MAX_SENTENCE_LEN:
break # TODO: log warning, tally overflow?
sentence_len = i
# release GIL & train on the sentence
c.work[0] = 0.0
with nogil:
for i in range(sentence_len):
if c.codelens[i] == 0:
continue
j = i - c.window
if j < 0:
j = 0
k = i + c.window + 1
if k > sentence_len:
k = sentence_len
score_pair_cbow_hs(c.points[i], c.codes[i], c.codelens, c.neu1, c.syn0, c.syn1, c.size, c.indexes, c.work, i, j, k, c.cbow_mean)
return c.work[0]
cdef void score_pair_cbow_hs(
const np.uint32_t *word_point, const np.uint8_t *word_code, int codelens[MAX_SENTENCE_LEN],
REAL_t *neu1, REAL_t *syn0, REAL_t *syn1, const int size,
const np.uint32_t indexes[MAX_SENTENCE_LEN], REAL_t *work,
int i, int j, int k, int cbow_mean) nogil:
cdef long long a, b
cdef long long row2
cdef REAL_t f, g, count, inv_count, sgn
cdef int m
memset(neu1, 0, size * cython.sizeof(REAL_t))
count = <REAL_t>0.0
for m in range(j, k):
if m == i or codelens[m] == 0:
continue
else:
count += ONEF
our_saxpy(&size, &ONEF, &syn0[<long long>indexes[m] * <long long>size], &ONE, neu1, &ONE)
if count > (<REAL_t>0.5):
inv_count = ONEF/count
if cbow_mean:
sscal(&size, &inv_count, neu1, &ONE)
for b in range(codelens[i]):
row2 = <long long>word_point[b] * <long long>size
f = our_dot(&size, neu1, &ONE, &syn1[row2], &ONE)
sgn = (-1)**word_code[b] # ch function: 0-> 1, 1 -> -1
f *= sgn
if f <= -MAX_EXP or f >= MAX_EXP:
continue
f = LOG_TABLE[<int>((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
work[0] += f
def init():
"""Precompute function `sigmoid(x) = 1 / (1 + exp(-x))`, for x values discretized into table EXP_TABLE.
Also calculate log(sigmoid(x)) into LOG_TABLE.
Returns
-------
{0, 1, 2}
Enumeration to signify underlying data type returned by the BLAS dot product calculation.
0 signifies double, 1 signifies float, and 2 signifies that custom cython loops were used
instead of BLAS.
"""
global our_dot
global our_saxpy
cdef int i
cdef float *x = [<float>10.0]
cdef float *y = [<float>0.01]
cdef float expected = <float>0.1
cdef int size = 1
cdef double d_res
cdef float *p_res
# build the sigmoid table
for i in range(EXP_TABLE_SIZE):
EXP_TABLE[i] = <REAL_t>exp((i / <REAL_t>EXP_TABLE_SIZE * 2 - 1) * MAX_EXP)
EXP_TABLE[i] = <REAL_t>(EXP_TABLE[i] / (EXP_TABLE[i] + 1))
LOG_TABLE[i] = <REAL_t>log( EXP_TABLE[i] )
# check whether sdot returns double or float
d_res = dsdot(&size, x, &ONE, y, &ONE)
p_res = <float *>&d_res
if abs(d_res - expected) < 0.0001:
our_dot = our_dot_double
our_saxpy = saxpy
return 0 # double
elif abs(p_res[0] - expected) < 0.0001:
our_dot = our_dot_float
our_saxpy = saxpy
return 1 # float
else:
# neither => use cython loops, no BLAS
# actually, the BLAS is so messed up we'll probably have segfaulted above and never even reach here
our_dot = our_dot_noblas
our_saxpy = our_saxpy_noblas
return 2
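# For reference, the table built above discretizes sigmoid(x) over [-MAX_EXP, MAX_EXP)
# into EXP_TABLE_SIZE buckets, so a table lookup replaces exp() during training. A
# hedged numpy sketch of the same construction; 6 and 1000 stand in for MAX_EXP and
# EXP_TABLE_SIZE, which are defined elsewhere in this module:
#
# >>> import numpy as np
# >>> MAX_EXP, EXP_TABLE_SIZE = 6, 1000
# >>> x = (np.arange(EXP_TABLE_SIZE) / EXP_TABLE_SIZE * 2 - 1) * MAX_EXP
# >>> exp_table = 1.0 / (1.0 + np.exp(-x))                        # sigmoid values
# >>> idx = int((0.5 + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2)) # lookup for x = 0.5
# >>> abs(exp_table[idx] - 1.0 / (1.0 + np.exp(-0.5))) < 1e-2
# True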
FAST_VERSION = init() # initialize the module
MAX_WORDS_IN_BATCH = MAX_SENTENCE_LEN
# ===== piskvorky_gensim/gensim/models/nmf.py (Python, LGPL-2.1, piskvorky/gensim) =====
"""Online Non-Negative Matrix Factorization.
Implementation of the efficient incremental algorithm of Renbo Zhao, Vincent Y. F. Tan et al.
`[PDF] <https://arxiv.org/abs/1604.02634>`_.
This NMF implementation updates in a streaming fashion and works best with sparse corpora.
- W is a word-topic matrix
- h is a topic-document matrix
- v is an input corpus batch, word-document matrix
- A, B - matrices that accumulate information from every consecutive chunk. A = h.dot(ht), B = v.dot(ht).
The idea of the algorithm is as follows:
.. code-block:: text
Initialize W, A and B matrices
Input the corpus
Split the corpus into batches
for v in batches:
infer h:
do coordinate gradient descent step to find h that minimizes (v - Wh) l2 norm
bound h so that it is non-negative
update A and B:
A = h.dot(ht)
B = v.dot(ht)
update W:
            do a gradient descent step to find W that minimizes 0.5*trace(WtWA) - trace(WtB)
Examples
--------
Train an NMF model using a Gensim corpus
.. sourcecode:: pycon
>>> from gensim.models import Nmf
>>> from gensim.test.utils import common_texts
>>> from gensim.corpora.dictionary import Dictionary
>>>
>>> # Create a corpus from a list of texts
>>> common_dictionary = Dictionary(common_texts)
>>> common_corpus = [common_dictionary.doc2bow(text) for text in common_texts]
>>>
>>> # Train the model on the corpus.
>>> nmf = Nmf(common_corpus, num_topics=10)
Save a model to disk, or reload a pre-trained model
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> # Save model to disk.
>>> temp_file = datapath("model")
>>> nmf.save(temp_file)
>>>
>>> # Load a potentially pretrained model from disk.
>>> nmf = Nmf.load(temp_file)
Infer vectors for new documents
.. sourcecode:: pycon
>>> # Create a new corpus, made of previously unseen documents.
>>> other_texts = [
... ['computer', 'time', 'graph'],
... ['survey', 'response', 'eps'],
... ['human', 'system', 'computer']
... ]
>>> other_corpus = [common_dictionary.doc2bow(text) for text in other_texts]
>>>
>>> unseen_doc = other_corpus[0]
    >>> vector = nmf[unseen_doc] # get topic probability distribution for a document
Update the model by incrementally training on the new corpus
.. sourcecode:: pycon
>>> nmf.update(other_corpus)
>>> vector = nmf[unseen_doc]
A lot of parameters can be tuned to optimize training for your specific case
.. sourcecode:: pycon
>>> nmf = Nmf(common_corpus, num_topics=50, kappa=0.1, eval_every=5) # decrease training step size
NMF should be used whenever one needs an extremely fast and memory-optimized topic model.
"""
import collections.abc
import logging
import numpy as np
import scipy.sparse
from scipy.stats import halfnorm
from gensim import interfaces
from gensim import matutils
from gensim import utils
from gensim.interfaces import TransformedCorpus
from gensim.models import basemodel, CoherenceModel
from gensim.models.nmf_pgd import solve_h
logger = logging.getLogger(__name__)
def version_tuple(version, prefix=2):
return tuple(map(int, version.split(".")[:prefix]))
OLD_SCIPY = version_tuple(scipy.__version__) <= (0, 18)
class Nmf(interfaces.TransformationABC, basemodel.BaseTopicModel):
"""Online Non-Negative Matrix Factorization.
`Renbo Zhao et al :"Online Nonnegative Matrix Factorization with Outliers" <https://arxiv.org/abs/1604.02634>`_
"""
def __init__(
self,
corpus=None,
num_topics=100,
id2word=None,
chunksize=2000,
passes=1,
kappa=1.0,
minimum_probability=0.01,
w_max_iter=200,
w_stop_condition=1e-4,
h_max_iter=50,
h_stop_condition=1e-3,
eval_every=10,
normalize=True,
random_state=None,
):
r"""
Parameters
----------
corpus : iterable of list of (int, float) or `csc_matrix` with the shape (n_tokens, n_documents), optional
Training corpus.
Can be either iterable of documents, which are lists of `(word_id, word_count)`,
or a sparse csc matrix of BOWs for each document.
            If not specified, the model is left uninitialized (presumably, to be trained later with `self.update()`).
num_topics : int, optional
Number of topics to extract.
id2word: {dict of (int, str), :class:`gensim.corpora.dictionary.Dictionary`}
Mapping from word IDs to words. It is used to determine the vocabulary size, as well as for
debugging and topic printing.
chunksize: int, optional
Number of documents to be used in each training chunk.
passes: int, optional
Number of full passes over the training corpus.
Leave at default `passes=1` if your input is an iterator.
kappa : float, optional
Gradient descent step size.
Larger value makes the model train faster, but could lead to non-convergence if set too large.
minimum_probability:
If `normalize` is True, topics with smaller probabilities are filtered out.
If `normalize` is False, topics with smaller factors are filtered out.
If set to None, a value of 1e-8 is used to prevent 0s.
w_max_iter: int, optional
Maximum number of iterations to train W per each batch.
w_stop_condition: float, optional
If error difference gets less than that, training of ``W`` stops for the current batch.
h_max_iter: int, optional
Maximum number of iterations to train h per each batch.
h_stop_condition: float
If error difference gets less than that, training of ``h`` stops for the current batch.
eval_every: int, optional
Number of batches after which l2 norm of (v - Wh) is computed. Decreases performance if set too low.
normalize: bool or None, optional
            Whether to normalize the result. Allows for estimation of perplexity, coherence, etc.
random_state: {np.random.RandomState, int}, optional
Seed for random generator. Needed for reproducibility.
"""
self.num_topics = num_topics
self.id2word = id2word
self.chunksize = chunksize
self.passes = passes
self._kappa = kappa
self.minimum_probability = minimum_probability
self._w_max_iter = w_max_iter
self._w_stop_condition = w_stop_condition
self._h_max_iter = h_max_iter
self._h_stop_condition = h_stop_condition
self.eval_every = eval_every
self.normalize = normalize
self.random_state = utils.get_random_state(random_state)
self.v_max = None
if self.id2word is None:
self.id2word = utils.dict_from_corpus(corpus)
self.num_tokens = len(self.id2word)
self.A = None
self.B = None
self._W = None
self.w_std = None
self._w_error = np.inf
self._h = None
if corpus is not None:
self.update(corpus)
def get_topics(self, normalize=None):
"""Get the term-topic matrix learned during inference.
Parameters
----------
normalize: bool or None, optional
            Whether to normalize the result. Allows for estimation of perplexity, coherence, etc.
Returns
-------
numpy.ndarray
The probability for each word in each topic, shape (`num_topics`, `vocabulary_size`).
"""
dense_topics = self._W.T
if normalize is None:
normalize = self.normalize
if normalize:
return dense_topics / dense_topics.sum(axis=1).reshape(-1, 1)
return dense_topics
def __getitem__(self, bow, eps=None):
return self.get_document_topics(bow, eps)
def show_topics(self, num_topics=10, num_words=10, log=False, formatted=True, normalize=None):
"""Get the topics sorted by sparsity.
Parameters
----------
num_topics : int, optional
Number of topics to be returned. Unlike LSA, there is no natural ordering between the topics in NMF.
            The returned subset of all topics is therefore arbitrary and may change between two NMF
training runs.
num_words : int, optional
Number of words to be presented for each topic. These will be the most relevant words (assigned the highest
probability for each topic).
log : bool, optional
Whether the result is also logged, besides being returned.
formatted : bool, optional
Whether the topic representations should be formatted as strings. If False, they are returned as
            2-tuples of (word, probability).
normalize: bool or None, optional
            Whether to normalize the result. Allows for estimation of perplexity, coherence, etc.
Returns
-------
list of {str, tuple of (str, float)}
a list of topics, each represented either as a string (when `formatted` == True) or word-probability
pairs.
"""
if normalize is None:
normalize = self.normalize
# Compute fraction of zero elements in each column
sparsity = np.zeros(self._W.shape[1])
for row in self._W:
sparsity += (row == 0)
sparsity /= self._W.shape[0]
if num_topics < 0 or num_topics >= self.num_topics:
num_topics = self.num_topics
chosen_topics = range(num_topics)
else:
num_topics = min(num_topics, self.num_topics)
sorted_topics = list(matutils.argsort(sparsity))
chosen_topics = (
sorted_topics[: num_topics // 2] + sorted_topics[-num_topics // 2:]
)
shown = []
topics = self.get_topics(normalize=normalize)
for i in chosen_topics:
topic = topics[i]
bestn = matutils.argsort(topic, num_words, reverse=True).ravel()
topic = [(self.id2word[id], topic[id]) for id in bestn]
if formatted:
topic = " + ".join(['%.3f*"%s"' % (v, k) for k, v in topic])
shown.append((i, topic))
if log:
logger.info("topic #%i (%.3f): %s", i, sparsity[i], topic)
return shown
def show_topic(self, topicid, topn=10, normalize=None):
"""Get the representation for a single topic. Words here are the actual strings, in constrast to
:meth:`~gensim.models.nmf.Nmf.get_topic_terms` that represents words by their vocabulary ID.
Parameters
----------
topicid : int
The ID of the topic to be returned
topn : int, optional
Number of the most significant words that are associated with the topic.
normalize: bool or None, optional
            Whether to normalize the result. Allows for estimation of perplexity, coherence, etc.
Returns
-------
list of (str, float)
Word - probability pairs for the most relevant words generated by the topic.
"""
if normalize is None:
normalize = self.normalize
return [
(self.id2word[id], value)
for id, value in self.get_topic_terms(topicid, topn,
normalize=normalize)
]
def get_topic_terms(self, topicid, topn=10, normalize=None):
"""Get the representation for a single topic. Words the integer IDs, in constrast to
:meth:`~gensim.models.nmf.Nmf.show_topic` that represents words by the actual strings.
Parameters
----------
topicid : int
The ID of the topic to be returned
topn : int, optional
Number of the most significant words that are associated with the topic.
normalize: bool or None, optional
            Whether to normalize the result. Allows for estimation of perplexity, coherence, etc.
Returns
-------
list of (int, float)
Word ID - probability pairs for the most relevant words generated by the topic.
"""
topic = self._W[:, topicid]
if normalize is None:
normalize = self.normalize
if normalize:
topic /= topic.sum()
bestn = matutils.argsort(topic, topn, reverse=True)
return [(idx, topic[idx]) for idx in bestn]
def top_topics(self, corpus, texts=None, dictionary=None, window_size=None,
coherence='u_mass', topn=20, processes=-1):
"""Get the topics sorted by coherence.
Parameters
----------
corpus : iterable of list of (int, float) or `csc_matrix` with the shape (n_tokens, n_documents)
Training corpus.
Can be either iterable of documents, which are lists of `(word_id, word_count)`,
or a sparse csc matrix of BOWs for each document.
If not specified, the model is left uninitialized (presumably, to be trained later with `self.train()`).
texts : list of list of str, optional
            Tokenized texts, needed for coherence models that use a sliding window based probability
            estimator (i.e. coherence=`c_something`).
dictionary : {dict of (int, str), :class:`gensim.corpora.dictionary.Dictionary`}, optional
            Dictionary mapping from word IDs to words, used to create the corpus.
If `model.id2word` is present, this is not needed. If both are provided, passed `dictionary` will be used.
window_size : int, optional
            The size of the window to be used for coherence measures that use a boolean sliding window as their
probability estimator. For 'u_mass' this doesn't matter.
If None - the default window sizes are used which are: 'c_v' - 110, 'c_uci' - 10, 'c_npmi' - 10.
coherence : {'u_mass', 'c_v', 'c_uci', 'c_npmi'}, optional
Coherence measure to be used.
Fastest method - 'u_mass', 'c_uci' also known as `c_pmi`.
For 'u_mass' corpus should be provided, if texts is provided, it will be converted to corpus
using the dictionary. For 'c_v', 'c_uci' and 'c_npmi' `texts` should be provided (`corpus` isn't needed)
topn : int, optional
Integer corresponding to the number of top words to be extracted from each topic.
processes : int, optional
Number of processes to use for probability estimation phase, any value less than 1 will be interpreted as
num_cpus - 1.
Returns
-------
list of (list of (int, str), float)
Each element in the list is a pair of a topic representation and its coherence score. Topic representations
are distributions of words, represented as a list of pairs of word IDs and their probabilities.
"""
cm = CoherenceModel(
model=self, corpus=corpus, texts=texts, dictionary=dictionary,
window_size=window_size, coherence=coherence, topn=topn,
processes=processes
)
coherence_scores = cm.get_coherence_per_topic()
str_topics = []
for topic in self.get_topics(): # topic = array of vocab_size floats, one per term
bestn = matutils.argsort(topic, topn=topn, reverse=True) # top terms for topic
beststr = [(topic[_id], self.id2word[_id]) for _id in bestn] # membership, token
str_topics.append(beststr) # list of topn (float membership, token) tuples
scored_topics = zip(str_topics, coherence_scores)
return sorted(scored_topics, key=lambda tup: tup[1], reverse=True)
def get_term_topics(self, word_id, minimum_probability=None, normalize=None):
"""Get the most relevant topics to the given word.
Parameters
----------
word_id : int
The word for which the topic distribution will be computed.
minimum_probability : float, optional
If `normalize` is True, topics with smaller probabilities are filtered out.
If `normalize` is False, topics with smaller factors are filtered out.
If set to None, a value of 1e-8 is used to prevent 0s.
normalize: bool or None, optional
            Whether to normalize the result. Allows for estimation of perplexity, coherence, etc.
Returns
-------
list of (int, float)
The relevant topics represented as pairs of their ID and their assigned probability, sorted
by relevance to the given word.
"""
if minimum_probability is None:
minimum_probability = self.minimum_probability
minimum_probability = max(minimum_probability, 1e-8)
# if user enters word instead of id in vocab, change to get id
if isinstance(word_id, str):
word_id = self.id2word.doc2bow([word_id])[0][0]
values = []
word_topics = self._W[word_id]
if normalize is None:
normalize = self.normalize
if normalize and word_topics.sum() > 0:
word_topics /= word_topics.sum()
for topic_id in range(0, self.num_topics):
word_coef = word_topics[topic_id]
if word_coef >= minimum_probability:
values.append((topic_id, word_coef))
return values
def get_document_topics(self, bow, minimum_probability=None,
normalize=None):
"""Get the topic distribution for the given document.
Parameters
----------
bow : list of (int, float)
The document in BOW format.
minimum_probability : float
If `normalize` is True, topics with smaller probabilities are filtered out.
If `normalize` is False, topics with smaller factors are filtered out.
If set to None, a value of 1e-8 is used to prevent 0s.
normalize: bool or None, optional
            Whether to normalize the result. Allows for estimation of perplexity, coherence, etc.
Returns
-------
list of (int, float)
Topic distribution for the whole document. Each element in the list is a pair of a topic's id, and
the probability that was assigned to it.
"""
if minimum_probability is None:
minimum_probability = self.minimum_probability
minimum_probability = max(minimum_probability, 1e-8)
# if the input vector is a corpus, return a transformed corpus
is_corpus, corpus = utils.is_corpus(bow)
if is_corpus:
kwargs = dict(minimum_probability=minimum_probability)
return self._apply(corpus, **kwargs)
v = matutils.corpus2csc([bow], self.num_tokens)
h = self._solveproj(v, self._W, v_max=np.inf)
if normalize is None:
normalize = self.normalize
if normalize:
the_sum = h.sum()
if the_sum:
h /= the_sum
return [
(idx, proba)
for idx, proba in enumerate(h[:, 0])
if not minimum_probability or proba > minimum_probability
]
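    # A hedged usage sketch for the method above (mirrors the module docstring's pycon
    # examples; the corpus is toy data and parameter values are illustrative only):
    #
    # >>> from gensim.models import Nmf
    # >>> from gensim.corpora import Dictionary
    # >>> from gensim.test.utils import common_texts
    # >>> dictionary = Dictionary(common_texts)
    # >>> corpus = [dictionary.doc2bow(text) for text in common_texts]
    # >>> nmf = Nmf(corpus, num_topics=5, id2word=dictionary, random_state=42)
    # >>> topics = nmf.get_document_topics(corpus[0], minimum_probability=0.05)
    # >>> all(0 <= topic_id < 5 for topic_id, _ in topics)
    # True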
def _setup(self, v):
"""Infer info from the first batch and initialize the matrices.
Parameters
----------
v : `csc_matrix` with the shape (n_tokens, chunksize)
Batch of bows.
"""
self.w_std = np.sqrt(v.mean() / (self.num_tokens * self.num_topics))
self._W = np.abs(
self.w_std
* halfnorm.rvs(
size=(self.num_tokens, self.num_topics), random_state=self.random_state
)
)
self.A = np.zeros((self.num_topics, self.num_topics))
self.B = np.zeros((self.num_tokens, self.num_topics))
def l2_norm(self, v):
Wt = self._W.T
l2 = 0
for doc, doc_topics in zip(v.T, self._h.T):
l2 += np.sum(np.square((doc - doc_topics.dot(Wt))))
return np.sqrt(l2)
def update(self, corpus, chunksize=None, passes=None, eval_every=None):
"""Train the model with new documents.
Parameters
----------
corpus : iterable of list of (int, float) or `csc_matrix` with the shape (n_tokens, n_documents)
Training corpus.
Can be either iterable of documents, which are lists of `(word_id, word_count)`,
or a sparse csc matrix of BOWs for each document.
If not specified, the model is left uninitialized (presumably, to be trained later with `self.train()`).
chunksize: int, optional
Number of documents to be used in each training chunk.
passes: int, optional
Number of full passes over the training corpus.
Leave at default `passes=1` if your input is an iterator.
eval_every: int, optional
Number of batches after which l2 norm of (v - Wh) is computed. Decreases performance if set too low.
"""
# use parameters given in constructor, unless user explicitly overrode them
if passes is None:
passes = self.passes
if eval_every is None:
eval_every = self.eval_every
lencorpus = np.inf
if isinstance(corpus, scipy.sparse.csc.csc_matrix):
lencorpus = corpus.shape[1]
else:
try:
lencorpus = len(corpus)
except TypeError:
logger.info("input corpus stream has no len()")
if chunksize is None:
chunksize = min(lencorpus, self.chunksize)
evalafter = min(lencorpus, (eval_every or 0) * chunksize)
if lencorpus == 0:
logger.warning("Nmf.update() called with an empty corpus")
return
if isinstance(corpus, collections.abc.Iterator) and self.passes > 1:
raise ValueError("Corpus is an iterator, only `passes=1` is valid.")
logger.info(
"running NMF training, %s topics, %i passes over the supplied corpus of %s documents, evaluating L2 "
"norm every %i documents",
self.num_topics, passes, "unknown number of" if lencorpus is None else lencorpus, evalafter,
)
chunk_overall_idx = 1
for pass_ in range(passes):
if isinstance(corpus, scipy.sparse.csc.csc_matrix):
grouper = (
# Older scipy (0.19 etc) throw an error when slicing beyond the actual sparse array dimensions, so
# we clip manually with min() here.
corpus[:, col_idx:min(corpus.shape[1], col_idx + self.chunksize)]
for col_idx
in range(0, corpus.shape[1], self.chunksize)
)
else:
grouper = utils.grouper(corpus, self.chunksize)
for chunk_idx, chunk in enumerate(grouper):
if isinstance(corpus, scipy.sparse.csc.csc_matrix):
v = chunk[:, self.random_state.permutation(chunk.shape[1])]
chunk_len = v.shape[1]
else:
self.random_state.shuffle(chunk)
v = matutils.corpus2csc(
chunk,
num_terms=self.num_tokens,
)
chunk_len = len(chunk)
if np.isinf(lencorpus):
logger.info(
"PROGRESS: pass %i, at document #%i",
pass_, chunk_idx * chunksize + chunk_len
)
else:
logger.info(
"PROGRESS: pass %i, at document #%i/%i",
pass_, chunk_idx * chunksize + chunk_len, lencorpus
)
if self._W is None:
# If `self._W` is not set (i.e. the first batch being handled), compute the initial matrix using the
# batch mean.
self._setup(v)
self._h = self._solveproj(v, self._W, h=self._h, v_max=self.v_max)
h = self._h
if eval_every and (((chunk_idx + 1) * chunksize >= lencorpus) or (chunk_idx + 1) % eval_every == 0):
logger.info("L2 norm: %s", self.l2_norm(v))
self.print_topics(5)
self.A *= chunk_overall_idx - 1
self.A += h.dot(h.T)
self.A /= chunk_overall_idx
self.B *= chunk_overall_idx - 1
self.B += v.dot(h.T)
self.B /= chunk_overall_idx
self._solve_w()
chunk_overall_idx += 1
logger.info("W error: %s", self._w_error)
def _solve_w(self):
"""Update W."""
def error(WA):
"""An optimized version of 0.5 * trace(WtWA) - trace(WtB)."""
return 0.5 * np.einsum('ij,ij', WA, self._W) - np.einsum('ij,ij', self._W, self.B)
eta = self._kappa / np.linalg.norm(self.A)
for iter_number in range(self._w_max_iter):
logger.debug("w_error: %s", self._w_error)
WA = self._W.dot(self.A)
self._W -= eta * (WA - self.B)
self._transform()
error_ = error(WA)
if (
self._w_error < np.inf
and np.abs((error_ - self._w_error) / self._w_error) < self._w_stop_condition
):
self._w_error = error_
break
self._w_error = error_
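    # The einsum expressions in `error` above rely on the identity trace(X.T.dot(Y)) ==
    # np.einsum('ij,ij', X, Y), which avoids materializing the matrix product. A quick
    # hedged numpy check of that identity on random toy matrices:
    #
    # >>> import numpy as np
    # >>> rng = np.random.default_rng(1)
    # >>> X, Y = rng.random((4, 3)), rng.random((4, 3))
    # >>> bool(np.isclose(np.trace(X.T.dot(Y)), np.einsum('ij,ij', X, Y)))
    # True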
def _apply(self, corpus, chunksize=None, **kwargs):
"""Apply the transformation to a whole corpus and get the result as another corpus.
Parameters
----------
corpus : iterable of list of (int, float) or `csc_matrix` with the shape (n_tokens, n_documents)
Training corpus.
Can be either iterable of documents, which are lists of `(word_id, word_count)`,
or a sparse csc matrix of BOWs for each document.
If not specified, the model is left uninitialized (presumably, to be trained later with `self.train()`).
chunksize : int, optional
            If provided, more efficient processing will be performed.
Returns
-------
:class:`~gensim.interfaces.TransformedCorpus`
Transformed corpus.
"""
return TransformedCorpus(self, corpus, chunksize, **kwargs)
def _transform(self):
"""Apply boundaries on W."""
np.clip(self._W, 0, self.v_max, out=self._W)
sumsq = np.sqrt(np.einsum('ij,ij->j', self._W, self._W))
np.maximum(sumsq, 1, out=sumsq)
self._W /= sumsq
@staticmethod
def _dense_dot_csc(dense, csc):
if OLD_SCIPY:
return (csc.T.dot(dense.T)).T
else:
return scipy.sparse.csc_matrix.dot(dense, csc)
def _solveproj(self, v, W, h=None, v_max=None):
"""Update residuals and representation (h) matrices.
Parameters
----------
v : scipy.sparse.csc_matrix
Subset of training corpus.
W : ndarray
Dictionary matrix.
h : ndarray
Representation matrix.
v_max : float
Maximum possible value in matrices.
"""
m, n = W.shape
if v_max is not None:
self.v_max = v_max
elif self.v_max is None:
self.v_max = v.max()
batch_size = v.shape[1]
hshape = (n, batch_size)
if h is None or h.shape != hshape:
h = np.zeros(hshape)
Wt = W.T
WtW = Wt.dot(W)
h_error = None
for iter_number in range(self._h_max_iter):
logger.debug("h_error: %s", h_error)
Wtv = self._dense_dot_csc(Wt, v)
permutation = self.random_state.permutation(self.num_topics).astype(np.int32)
error_ = solve_h(h, Wtv, WtW, permutation, self._kappa)
error_ /= m
if h_error and np.abs(h_error - error_) < self._h_stop_condition:
break
h_error = error_
return h
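    # `solve_h` (compiled in gensim.models.nmf_pgd) performs the coordinate gradient
    # descent step described in the module docstring, keeping h non-negative. A hedged
    # pure-numpy sketch of one projected gradient step for the objective ||v - W h||^2
    # (illustrative only, not the exact compiled kernel):
    #
    # >>> import numpy as np
    # >>> rng = np.random.default_rng(2)
    # >>> W = np.abs(rng.random((20, 4)))     # tokens x topics
    # >>> v = np.abs(rng.random((20, 1)))     # one toy document, dense here
    # >>> h = np.zeros((4, 1))
    # >>> eta = 1.0 / np.linalg.norm(W.T.dot(W))
    # >>> for _ in range(50):
    # ...     h = np.maximum(h - eta * (W.T.dot(W).dot(h) - W.T.dot(v)), 0.0)
    # >>> bool((h >= 0).all())
    # True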
# ===== piskvorky_gensim/gensim/models/lsi_dispatcher.py (Python, LGPL-2.1, piskvorky/gensim) =====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Dispatcher process which orchestrates distributed :class:`~gensim.models.lsimodel.LsiModel` computations.
Run this script only once, on any node in your cluster.
Notes
-----
The dispatcher expects to find worker scripts already running. Make sure you run as many workers as you like on
your machines **before** launching the dispatcher.
How to use distributed LSI
--------------------------
#. Install needed dependencies (Pyro4) ::
pip install gensim[distributed]
#. Setup serialization (on each machine) ::
export PYRO_SERIALIZERS_ACCEPTED=pickle
export PYRO_SERIALIZER=pickle
#. Run nameserver ::
python -m Pyro4.naming -n 0.0.0.0 &
#. Run workers (on each machine) ::
python -m gensim.models.lsi_worker &
#. Run dispatcher ::
python -m gensim.models.lsi_dispatcher &
#. Run :class:`~gensim.models.lsimodel.LsiModel` in distributed mode:
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.models import LsiModel
>>>
>>> model = LsiModel(common_corpus, id2word=common_dictionary, distributed=True)
Command line arguments
----------------------
.. program-output:: python -m gensim.models.lsi_dispatcher --help
:ellipsis: 0, -5
"""
import os
import sys
import logging
import argparse
import threading
import time
from queue import Queue
import Pyro4
from gensim import utils
logger = logging.getLogger(__name__)
# How many jobs (=chunks of N documents) to keep "pre-fetched" in a queue?
# A small number is usually enough, unless iteration over the corpus is very very
# slow (slower than the actual computation of LSI), in which case you can override
# this value from the command line, e.g. run "python ./lsi_dispatcher.py 100"
MAX_JOBS_QUEUE = 10
# timeout for the Queue object put/get blocking methods.
# it should really be infinity, but then keyboard interrupts don't work.
# so this is really just a hack, see http://bugs.python.org/issue1360
HUGE_TIMEOUT = 365 * 24 * 60 * 60 # one year
class Dispatcher:
"""Dispatcher object that communicates and coordinates individual workers.
Warnings
--------
There should never be more than one dispatcher running at any one time.
"""
def __init__(self, maxsize=0):
"""Partly initialize the dispatcher.
A full initialization (including initialization of the workers) requires a call to
:meth:`~gensim.models.lsi_dispatcher.Dispatcher.initialize`
Parameters
----------
maxsize : int, optional
Maximum number of jobs to be kept pre-fetched in the queue.
"""
self.maxsize = maxsize
self.workers = {}
self.callback = None # a pyro proxy to this object (unknown at init time, but will be set later)
@Pyro4.expose
def initialize(self, **model_params):
"""Fully initialize the dispatcher and all its workers.
Parameters
----------
**model_params
Keyword parameters used to initialize individual workers
(gets handed all the way down to :meth:`gensim.models.lsi_worker.Worker.initialize`).
See :class:`~gensim.models.lsimodel.LsiModel`.
Raises
------
RuntimeError
            When no workers are found (the :mod:`gensim.models.lsi_worker` script must be run beforehand).
"""
self.jobs = Queue(maxsize=self.maxsize)
self.lock_update = threading.Lock()
self._jobsdone = 0
self._jobsreceived = 0
# locate all available workers and store their proxies, for subsequent RMI calls
self.workers = {}
with utils.getNS() as ns:
self.callback = Pyro4.Proxy('PYRONAME:gensim.lsi_dispatcher') # = self
for name, uri in ns.list(prefix='gensim.lsi_worker').items():
try:
worker = Pyro4.Proxy(uri)
workerid = len(self.workers)
# make time consuming methods work asynchronously
logger.info("registering worker #%i from %s", workerid, uri)
worker.initialize(workerid, dispatcher=self.callback, **model_params)
self.workers[workerid] = worker
except Pyro4.errors.PyroError:
logger.exception("unresponsive worker at %s, deleting it from the name server", uri)
ns.remove(name)
if not self.workers:
raise RuntimeError('no workers found; run some lsi_worker scripts on your machines first!')
@Pyro4.expose
def getworkers(self):
"""Get pyro URIs of all registered workers.
Returns
-------
list of URIs
The pyro URIs for each worker.
"""
return [worker._pyroUri for worker in self.workers.values()]
@Pyro4.expose
def getjob(self, worker_id):
"""Atomically pop a job from the queue.
Parameters
----------
worker_id : int
The worker that requested the job.
Returns
-------
iterable of iterable of (int, float)
The corpus in BoW format.
"""
logger.info("worker #%i requesting a new job", worker_id)
job = self.jobs.get(block=True, timeout=1)
logger.info("worker #%i got a new job (%i left)", worker_id, self.jobs.qsize())
return job
@Pyro4.expose
def putjob(self, job):
"""Atomically add a job to the queue.
Parameters
----------
job : iterable of list of (int, float)
The corpus in BoW format.
"""
self._jobsreceived += 1
self.jobs.put(job, block=True, timeout=HUGE_TIMEOUT)
logger.info("added a new job (len(queue)=%i items)", self.jobs.qsize())
@Pyro4.expose
def getstate(self):
"""Merge projections from across all workers and get the final projection.
Returns
-------
:class:`~gensim.models.lsimodel.Projection`
The current projection of the total model.
"""
logger.info("end of input, assigning all remaining jobs")
logger.debug("jobs done: %s, jobs received: %s", self._jobsdone, self._jobsreceived)
while self._jobsdone < self._jobsreceived:
time.sleep(0.5) # check every half a second
# TODO: merge in parallel, so that we're done in `log_2(workers)` merges,
# and not `workers - 1` merges!
# but merging only takes place once, after all input data has been processed,
# so the overall effect would be small... compared to the amount of coding :-)
logger.info("merging states from %i workers", len(self.workers))
workers = list(self.workers.items())
result = workers[0][1].getstate()
for workerid, worker in workers[1:]:
logger.info("pulling state from worker %s", workerid)
result.merge(worker.getstate())
logger.info("sending out merged projection")
return result
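    # The TODO above suggests a pairwise (tournament) reduction instead of the linear
    # merge loop. A hedged sketch of that control flow, assuming each element supports
    # the same .merge() used above (sequential here; the pairing only shows where
    # concurrent dispatch could go):
    #
    # >>> def pairwise_merge(states):
    # ...     states = list(states)
    # ...     while len(states) > 1:
    # ...         merged = []
    # ...         for a, b in zip(states[::2], states[1::2]):
    # ...             a.merge(b)          # could be issued concurrently per pair
    # ...             merged.append(a)
    # ...         if len(states) % 2:     # odd one out advances to the next round
    # ...             merged.append(states[-1])
    # ...         states = merged
    # ...     return states[0]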
@Pyro4.expose
def reset(self):
"""Re-initialize all workers for a new decomposition."""
for workerid, worker in self.workers.items():
logger.info("resetting worker %s", workerid)
worker.reset()
worker.requestjob()
self._jobsdone = 0
self._jobsreceived = 0
@Pyro4.expose
@Pyro4.oneway
@utils.synchronous('lock_update')
def jobdone(self, workerid):
"""A worker has finished its job. Log this event and then asynchronously transfer control back to the worker.
Callback used by workers to notify when their job is done.
        The job done event is logged and then control is asynchronously transferred back to the worker
(who can then request another job). In this way, control flow basically oscillates between
:meth:`gensim.models.lsi_dispatcher.Dispatcher.jobdone` and :meth:`gensim.models.lsi_worker.Worker.requestjob`.
Parameters
----------
workerid : int
The ID of the worker that finished the job (used for logging).
"""
self._jobsdone += 1
logger.info("worker #%s finished job #%i", workerid, self._jobsdone)
worker = self.workers[workerid]
worker.requestjob() # tell the worker to ask for another job, asynchronously (one-way)
def jobsdone(self):
"""Wrap :attr:`~gensim.models.lsi_dispatcher.Dispatcher._jobsdone`, needed for remote access through proxies.
Returns
-------
int
Number of jobs already completed.
"""
return self._jobsdone
@Pyro4.oneway
def exit(self):
"""Terminate all registered workers and then the dispatcher."""
for workerid, worker in self.workers.items():
logger.info("terminating worker %s", workerid)
worker.exit()
logger.info("terminating dispatcher")
os._exit(0) # exit the whole process (not just this thread ala sys.exit())
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
parser = argparse.ArgumentParser(description=__doc__[:-135], formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'maxsize',
nargs='?',
type=int,
help='Maximum number of jobs to be kept pre-fetched in the queue.',
default=MAX_JOBS_QUEUE,
)
args = parser.parse_args()
logger.info("running %s", " ".join(sys.argv))
utils.pyro_daemon('gensim.lsi_dispatcher', Dispatcher(maxsize=args.maxsize))
logger.info("finished running %s", parser.prog)
# ===== piskvorky_gensim/gensim/models/rpmodel.py (Python, LGPL-2.1, piskvorky/gensim) =====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Random Projections (also known as Random Indexing).
For theoretical background on Random Projections, see [1]_.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.models import RpModel
>>> from gensim.corpora import Dictionary
>>> from gensim.test.utils import common_texts, temporary_file
>>>
>>> dictionary = Dictionary(common_texts) # fit dictionary
>>> corpus = [dictionary.doc2bow(text) for text in common_texts] # convert texts to BoW format
>>>
>>> model = RpModel(corpus, id2word=dictionary) # fit model
>>> result = model[corpus[3]] # apply model to document, result is vector in BoW format
>>>
>>> with temporary_file("model_file") as fname:
... model.save(fname) # save model to file
... loaded_model = RpModel.load(fname) # load model
References
----------
.. [1] Kanerva et al., 2000, Random indexing of text samples for Latent Semantic Analysis,
https://cloudfront.escholarship.org/dist/prd/content/qt5644k0w6/qt5644k0w6.pdf
"""
import logging
import numpy as np
from gensim import interfaces, matutils, utils
logger = logging.getLogger(__name__)
class RpModel(interfaces.TransformationABC):
def __init__(self, corpus, id2word=None, num_topics=300):
"""
Parameters
----------
corpus : iterable of iterable of (int, int)
Input corpus.
id2word : {dict of (int, str), :class:`~gensim.corpora.dictionary.Dictionary`}, optional
            Mapping `token_id` -> `token`, will be determined from the corpus if `id2word == None`.
num_topics : int, optional
Number of topics.
"""
self.id2word = id2word
self.num_topics = num_topics
if corpus is not None:
self.initialize(corpus)
self.add_lifecycle_event("created", msg=f"created {self}")
def __str__(self):
return "%s<num_terms=%s, num_topics=%s>" % (self.__class__.__name__, self.num_terms, self.num_topics)
def initialize(self, corpus):
"""Initialize the random projection matrix.
Parameters
----------
corpus : iterable of iterable of (int, int)
Input corpus.
"""
if self.id2word is None:
logger.info("no word id mapping provided; initializing from corpus, assuming identity")
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
elif self.id2word:
self.num_terms = 1 + max(self.id2word)
else:
self.num_terms = 0
shape = self.num_topics, self.num_terms
logger.info("constructing %s random matrix", str(shape))
# Now construct the projection matrix itself.
        # Here I use a particular form, derived in "Achlioptas: Database-friendly random projection",
# and his (1) scenario of Theorem 1.1 in particular (all entries are +1/-1).
randmat = 1 - 2 * np.random.binomial(1, 0.5, shape) # convert from 0/1 to +1/-1
# convert from int32 to floats, for faster multiplications
self.projection = np.asfortranarray(randmat, dtype=np.float32)
# TODO: check whether the Fortran-order shenanigans still make sense. In the original
# code (~2010), this made a BIG difference for np BLAS implementations; perhaps now the wrappers
# are smarter and this is no longer needed?
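    # A hedged numpy sketch of the projection built above: entries are +1/-1 with equal
    # probability (Achlioptas-style), and applying it while scaling by 1/sqrt(num_topics),
    # as __getitem__ does below, approximately preserves relative distances. Toy
    # dimensions only:
    #
    # >>> import numpy as np
    # >>> rng = np.random.default_rng(3)
    # >>> num_terms, num_topics = 1000, 300
    # >>> randmat = 1 - 2 * rng.binomial(1, 0.5, (num_topics, num_terms))   # +1/-1 entries
    # >>> x = rng.random(num_terms)
    # >>> projected = randmat.dot(x) / np.sqrt(num_topics)
    # >>> projected.shape
    # (300,)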
def __getitem__(self, bow):
"""Get random-projection representation of the input vector or corpus.
Parameters
----------
bow : {list of (int, int), iterable of list of (int, int)}
Input document or corpus.
Returns
-------
list of (int, float)
if `bow` is document OR
:class:`~gensim.interfaces.TransformedCorpus`
if `bow` is corpus.
Examples
----------
.. sourcecode:: pycon
>>> from gensim.models import RpModel
>>> from gensim.corpora import Dictionary
>>> from gensim.test.utils import common_texts
>>>
>>> dictionary = Dictionary(common_texts) # fit dictionary
>>> corpus = [dictionary.doc2bow(text) for text in common_texts] # convert texts to BoW format
>>>
>>> model = RpModel(corpus, id2word=dictionary) # fit model
>>>
>>> # apply model to document, result is vector in BoW format, i.e. [(1, 0.3), ... ]
>>> result = model[corpus[0]]
"""
# if the input vector is in fact a corpus, return a transformed corpus as result
is_corpus, bow = utils.is_corpus(bow)
if is_corpus:
return self._apply(bow)
if getattr(self, 'freshly_loaded', False):
# This is a hack to work around a bug in np, where a FORTRAN-order array
# unpickled from disk segfaults on using it.
self.freshly_loaded = False
self.projection = self.projection.copy('F') # simply making a fresh copy fixes the broken array
vec = matutils.sparse2full(bow, self.num_terms).reshape(self.num_terms, 1) / np.sqrt(self.num_topics)
vec = np.asfortranarray(vec, dtype=np.float32)
topic_dist = np.dot(self.projection, vec) # (k, d) * (d, 1) = (k, 1)
return [
(topicid, float(topicvalue)) for topicid, topicvalue in enumerate(topic_dist.flat)
if np.isfinite(topicvalue) and not np.allclose(topicvalue, 0.0)
]
def __setstate__(self, state):
"""Sets the internal state and updates freshly_loaded to True, called when unpicked.
Parameters
----------
state : dict
State of the class.
"""
self.__dict__ = state
self.freshly_loaded = True
# ===== piskvorky_gensim/gensim/models/doc2vec_corpusfile.pyx (Python/Cython, LGPL-2.1, piskvorky/gensim) =====
#!/usr/bin/env cython
# cython: language_level=3
# cython: boundscheck=False
# cython: wraparound=False
# cython: cdivision=True
# cython: embedsignature=True
# coding: utf-8
#
# Copyright (C) 2018 Dmitry Persiyanov <dmitry.persiyanov@gmail.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Optimized cython functions for file-based training :class:`~gensim.models.doc2vec.Doc2Vec` model."""
import cython
import numpy as np
cimport numpy as np
from libcpp.string cimport string
from libcpp.vector cimport vector
from libc.string cimport memset, memcpy
# scipy <= 0.15
try:
from scipy.linalg.blas import fblas
except ImportError:
# in scipy > 0.15, fblas function has been removed
import scipy.linalg.blas as fblas
from gensim.models.doc2vec_inner cimport (
fast_document_dbow_hs,
fast_document_dbow_neg,
fast_document_dm_hs,
fast_document_dm_neg,
fast_document_dmc_hs,
fast_document_dmc_neg,
init_d2v_config,
Doc2VecConfig
)
from gensim.models.word2vec_inner cimport random_int32, sscal, REAL_t, our_saxpy
from gensim.models.word2vec_corpusfile cimport (
VocabItem,
CythonVocab,
CythonLineSentence,
get_alpha,
get_next_alpha,
cvocab_t
)
DEF MAX_DOCUMENT_LEN = 10000
cdef int ONE = 1
cdef REAL_t ONEF = <REAL_t>1.0
cdef void prepare_c_structures_for_batch(
vector[string] &doc_words, int sample, int hs, int window, long long *total_words,
int *effective_words, unsigned long long *next_random, cvocab_t *vocab,
np.uint32_t *indexes, int *codelens, np.uint8_t **codes, np.uint32_t **points,
np.uint32_t *reduced_windows, int *document_len, int train_words,
int docvecs_count, int doc_tag, int shrink_windows,
) nogil:
cdef VocabItem predict_word
cdef string token
cdef int i = 0
total_words[0] += doc_words.size()
for token in doc_words:
if vocab[0].find(token) == vocab[0].end(): # shrink document to leave out word
continue # leaving i unchanged
predict_word = vocab[0][token]
if sample and predict_word.sample_int < random_int32(next_random):
continue
indexes[i] = predict_word.index
if hs:
codelens[i] = predict_word.code_len
codes[i] = predict_word.code
points[i] = predict_word.point
effective_words[0] += 1
i += 1
if i == MAX_DOCUMENT_LEN:
break # TODO: log warning, tally overflow?
document_len[0] = i
if train_words and reduced_windows != NULL:
if shrink_windows:
for i in range(document_len[0]):
reduced_windows[i] = random_int32(next_random) % window
else:
for i in range(document_len[0]):
reduced_windows[i] = 0
if doc_tag < docvecs_count:
effective_words[0] += 1
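# Hedged pure-Python sketch (illustration only; not part of the original .pyx) of what
# prepare_c_structures_for_batch does at the C level: drop out-of-vocabulary tokens,
# probabilistically subsample frequent words, cap the document length, and draw one
# reduced window per surviving position. `vocab` is assumed to map token -> an object
# with `.index` and `.sample_int`, mirroring VocabItem above; the helper name is ours.
def _prepare_batch_sketch(doc_words, vocab, sample, window, shrink_windows, max_document_len=10000, seed=0):
    import random
    rng = random.Random(seed)
    indexes, reduced_windows = [], []
    for token in doc_words:
        item = vocab.get(token)
        if item is None:
            continue  # out-of-vocabulary: shrink the document around it
        if sample and item.sample_int < rng.getrandbits(32):
            continue  # frequent-word subsampling
        indexes.append(item.index)
        if len(indexes) == max_document_len:
            break  # same cap as MAX_DOCUMENT_LEN
    for _ in indexes:
        reduced_windows.append(rng.randrange(window) if (shrink_windows and window > 0) else 0)
    return indexes, reduced_windows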
def d2v_train_epoch_dbow(
model, corpus_file, offset, start_doctag, _cython_vocab, _cur_epoch, _expected_examples,
_expected_words, work, neu1, docvecs_count, word_vectors=None, words_lockf=None,
train_words=False, learn_doctags=True, learn_words=True, learn_hidden=True,
doctag_vectors=None, doctags_lockf=None,
):
"""Train distributed bag of words model ("PV-DBOW") by training on a corpus file.
Called internally from :meth:`~gensim.models.doc2vec.Doc2Vec.train`.
Parameters
----------
model : :class:`~gensim.models.doc2vec.Doc2Vec`
The Doc2Vec model instance to train.
corpus_file : str
Path to corpus file.
_cur_epoch : int
Current epoch number. Used for calculating and decaying learning rate.
work : np.ndarray
Private working memory for each worker.
neu1 : np.ndarray
Private working memory for each worker.
train_words : bool, optional
Word vectors will be updated exactly as per Word2Vec skip-gram training only if **both** `learn_words`
and `train_words` are set to True.
learn_doctags : bool, optional
Whether the tag vectors should be updated.
learn_words : bool, optional
Word vectors will be updated exactly as per Word2Vec skip-gram training only if **both**
`learn_words` and `train_words` are set to True.
learn_hidden : bool, optional
Whether or not the weights of the hidden layer will be updated.
word_vectors : numpy.ndarray, optional
The vector representation for each word in the vocabulary. If None, these will be retrieved from the model.
words_lockf : numpy.ndarray, optional
EXPERIMENTAL. A learning lock factor for each word-vector, value 0.0 completely blocks updates, a value
of 1.0 allows normal updates to word-vectors.
doctag_vectors : numpy.ndarray, optional
Vector representations of the tags. If None, these will be retrieved from the model.
doctags_lockf : numpy.ndarray, optional
EXPERIMENTAL. The lock factors for each tag, same as `words_lockf`, but for document-vectors.
Returns
-------
tuple of (int, int, int)
Number of documents processed, number of effective words used for training, and total number of words
seen during this epoch.
"""
cdef Doc2VecConfig c
cdef int cur_epoch = _cur_epoch
cdef int num_epochs = model.epochs
cdef long long expected_examples = (-1 if _expected_examples is None else _expected_examples)
cdef long long expected_words = (-1 if _expected_words is None else _expected_words)
cdef REAL_t start_alpha = model.alpha
cdef REAL_t end_alpha = model.min_alpha
cdef REAL_t _alpha = get_alpha(model.alpha, end_alpha, cur_epoch, num_epochs)
cdef CythonLineSentence input_stream = CythonLineSentence(corpus_file, offset)
cdef CythonVocab vocab = _cython_vocab
cdef int i, j, document_len
cdef int effective_words = 0
cdef long long total_documents = 0
cdef long long total_effective_words = 0, total_words = 0
cdef int sent_idx, idx_start, idx_end
cdef int shrink_windows = int(model.shrink_windows)
cdef vector[string] doc_words
cdef long long _doc_tag = start_doctag
init_d2v_config(
&c, model, _alpha, learn_doctags, learn_words, learn_hidden, train_words=train_words,
work=work, neu1=neu1, word_vectors=word_vectors, words_lockf=words_lockf,
doctag_vectors=doctag_vectors, doctags_lockf=doctags_lockf, docvecs_count=docvecs_count)
# release GIL & train on the full corpus, document by document
with nogil:
input_stream.reset()
while not (input_stream.is_eof() or total_words > expected_words / c.workers):
effective_words = 0
doc_words = input_stream.read_sentence()
if doc_words.empty():
continue
prepare_c_structures_for_batch(
doc_words, c.sample, c.hs, c.window, &total_words, &effective_words,
&c.next_random, vocab.get_vocab_ptr(), c.indexes, c.codelens, c.codes, c.points,
c.reduced_windows, &document_len, c.train_words, c.docvecs_count, _doc_tag, shrink_windows)
for i in range(document_len):
if c.train_words: # simultaneous skip-gram wordvec-training
j = i - c.window + c.reduced_windows[i]
if j < 0:
j = 0
k = i + c.window + 1 - c.reduced_windows[i]
if k > document_len:
k = document_len
for j in range(j, k):
if j == i:
continue
if c.hs:
# we reuse the DBOW function, as it is equivalent to skip-gram for this purpose
fast_document_dbow_hs(
c.points[i], c.codes[i], c.codelens[i], c.word_vectors, c.syn1, c.layer1_size,
c.indexes[j], c.alpha, c.work, c.learn_words, c.learn_hidden, c.words_lockf,
c.words_lockf_len)
if c.negative:
# we reuse the DBOW function, as it is equivalent to skip-gram for this purpose
c.next_random = fast_document_dbow_neg(
c.negative, c.cum_table, c.cum_table_len, c.word_vectors, c.syn1neg,
c.layer1_size, c.indexes[i], c.indexes[j], c.alpha, c.work,
c.next_random, c.learn_words, c.learn_hidden, c.words_lockf, c.words_lockf_len)
# docvec-training
if _doc_tag < c.docvecs_count:
if c.hs:
fast_document_dbow_hs(
c.points[i], c.codes[i], c.codelens[i], c.doctag_vectors, c.syn1, c.layer1_size,
_doc_tag, c.alpha, c.work, c.learn_doctags, c.learn_hidden, c.doctags_lockf,
c.doctags_lockf_len)
if c.negative:
c.next_random = fast_document_dbow_neg(
c.negative, c.cum_table, c.cum_table_len, c.doctag_vectors, c.syn1neg,
c.layer1_size, c.indexes[i], _doc_tag, c.alpha, c.work, c.next_random,
c.learn_doctags, c.learn_hidden, c.doctags_lockf, c.doctags_lockf_len)
total_documents += 1
total_effective_words += effective_words
_doc_tag += 1
c.alpha = get_next_alpha(
start_alpha, end_alpha, total_documents, total_words,
expected_examples, expected_words, cur_epoch, num_epochs)
return total_documents, total_effective_words, total_words
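# Hedged helper sketch (not in the original file): the DBOW inner loop above clamps the
# skip-gram context to [max(0, i - window + r), min(document_len, i + window + 1 - r)),
# where r is the per-position reduced window. This makes that index arithmetic explicit.
def _context_positions_sketch(i, window, reduced, document_len):
    j = max(0, i - window + reduced)                    # left edge, clipped at 0
    k = min(document_len, i + window + 1 - reduced)     # right edge, clipped at the document end
    return [m for m in range(j, k) if m != i]           # context positions, excluding the centre word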
def d2v_train_epoch_dm(
model, corpus_file, offset, start_doctag, _cython_vocab, _cur_epoch, _expected_examples,
_expected_words, work, neu1, docvecs_count, word_vectors=None, words_lockf=None,
learn_doctags=True, learn_words=True, learn_hidden=True, doctag_vectors=None, doctags_lockf=None,
):
"""Train distributed memory model ("PV-DM") by training on a corpus file.
This method implements the DM model with a projection (input) layer that is either the sum or mean of the context
vectors, depending on the model's `dm_mean` configuration field.
Called internally from :meth:`~gensim.models.doc2vec.Doc2Vec.train`.
Parameters
----------
model : :class:`~gensim.models.doc2vec.Doc2Vec`
The Doc2Vec model instance to train.
corpus_file : str
Path to corpus file.
_cur_epoch : int
Current epoch number. Used for calculating and decaying learning rate.
work : np.ndarray
Private working memory for each worker.
neu1 : np.ndarray
Private working memory for each worker.
learn_doctags : bool, optional
Whether the tag vectors should be updated.
learn_words : bool, optional
Whether the word vectors should be updated.
learn_hidden : bool, optional
Whether or not the weights of the hidden layer will be updated.
word_vectors : numpy.ndarray, optional
The vector representation for each word in the vocabulary. If None, these will be retrieved from the model.
words_lockf : numpy.ndarray, optional
EXPERIMENTAL. A learning lock factor for each word-vector, value 0.0 completely blocks updates, a value
of 1.0 allows normal updates to word-vectors.
doctag_vectors : numpy.ndarray, optional
Vector representations of the tags. If None, these will be retrieved from the model.
doctags_lockf : numpy.ndarray, optional
EXPERIMENTAL. The lock factors for each tag, same as `words_lockf`, but for document-vectors.
Returns
-------
tuple of (int, int, int)
Number of documents processed, number of effective words used for training, and total number of words
seen during this epoch.
"""
cdef Doc2VecConfig c
cdef int cur_epoch = _cur_epoch
cdef int num_epochs = model.epochs
cdef long long expected_examples = (-1 if _expected_examples is None else _expected_examples)
cdef long long expected_words = (-1 if _expected_words is None else _expected_words)
cdef REAL_t start_alpha = model.alpha
cdef REAL_t end_alpha = model.min_alpha
cdef REAL_t _alpha = get_alpha(model.alpha, end_alpha, cur_epoch, num_epochs)
cdef CythonLineSentence input_stream = CythonLineSentence(corpus_file, offset)
cdef CythonVocab vocab = _cython_vocab
cdef int i, j, k, m, document_len
cdef int effective_words = 0
cdef long long total_documents = 0
cdef long long total_effective_words = 0, total_words = 0
cdef int sent_idx, idx_start, idx_end
cdef REAL_t count, inv_count = 1.0
cdef int shrink_windows = int(model.shrink_windows)
cdef vector[string] doc_words
cdef long long _doc_tag = start_doctag
init_d2v_config(
&c, model, _alpha, learn_doctags, learn_words, learn_hidden, train_words=False,
work=work, neu1=neu1, word_vectors=word_vectors, words_lockf=words_lockf,
doctag_vectors=doctag_vectors, doctags_lockf=doctags_lockf, docvecs_count=docvecs_count)
# release GIL & train on the full corpus, document by document
with nogil:
input_stream.reset()
while not (input_stream.is_eof() or total_words > expected_words / c.workers):
effective_words = 0
doc_words = input_stream.read_sentence()
if doc_words.empty():
continue
prepare_c_structures_for_batch(
doc_words, c.sample, c.hs, c.window, &total_words, &effective_words, &c.next_random,
vocab.get_vocab_ptr(), c.indexes, c.codelens, c.codes, c.points, c.reduced_windows,
&document_len, c.train_words, c.docvecs_count, _doc_tag, shrink_windows)
for i in range(document_len):
j = i - c.window + c.reduced_windows[i]
if j < 0:
j = 0
k = i + c.window + 1 - c.reduced_windows[i]
if k > document_len:
k = document_len
# compose l1 (in _neu1) & clear _work
memset(c.neu1, 0, c.layer1_size * cython.sizeof(REAL_t))
count = <REAL_t>0.0
for m in range(j, k):
if m == i:
continue
else:
count += ONEF
our_saxpy(&c.layer1_size, &ONEF, &c.word_vectors[c.indexes[m] * c.layer1_size], &ONE, c.neu1, &ONE)
if _doc_tag < c.docvecs_count:
count += ONEF
our_saxpy(&c.layer1_size, &ONEF, &c.doctag_vectors[_doc_tag * c.layer1_size], &ONE, c.neu1, &ONE)
if count > (<REAL_t>0.5):
inv_count = ONEF/count
if c.cbow_mean:
sscal(&c.layer1_size, &inv_count, c.neu1, &ONE) # (does this need BLAS-variants like saxpy?)
memset(c.work, 0, c.layer1_size * cython.sizeof(REAL_t)) # work to accumulate l1 error
if c.hs:
fast_document_dm_hs(
c.points[i], c.codes[i], c.codelens[i], c.neu1,
c.syn1, c.alpha, c.work, c.layer1_size, c.learn_hidden)
if c.negative:
c.next_random = fast_document_dm_neg(
c.negative, c.cum_table, c.cum_table_len, c.next_random, c.neu1,
c.syn1neg, c.indexes[i], c.alpha, c.work, c.layer1_size, c.learn_hidden)
if not c.cbow_mean:
sscal(&c.layer1_size, &inv_count, c.work, &ONE) # (does this need BLAS-variants like saxpy?)
# apply accumulated error in work
if c.learn_doctags and _doc_tag < c.docvecs_count:
our_saxpy(
&c.layer1_size, &c.doctags_lockf[_doc_tag % c.doctags_lockf_len], c.work,
&ONE, &c.doctag_vectors[_doc_tag * c.layer1_size], &ONE)
if c.learn_words:
for m in range(j, k):
if m == i:
continue
else:
our_saxpy(
&c.layer1_size, &c.words_lockf[c.indexes[m] % c.words_lockf_len], c.work, &ONE,
&c.word_vectors[c.indexes[m] * c.layer1_size], &ONE)
total_documents += 1
total_effective_words += effective_words
_doc_tag += 1
c.alpha = get_next_alpha(
start_alpha, end_alpha, total_documents, total_words, expected_examples,
expected_words, cur_epoch, num_epochs)
return total_documents, total_effective_words, total_words
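# Hedged NumPy sketch (illustration only) of how the PV-DM projection layer (`neu1`) is
# composed above: sum the in-window word vectors plus the document vector, then divide by
# the count when the model averages rather than sums (cbow_mean / dm_mean == 1).
def _compose_dm_input_sketch(word_vectors, context_indexes, doctag_vector, cbow_mean=True):
    import numpy as np
    neu1 = np.zeros_like(doctag_vector, dtype=np.float32)
    for idx in context_indexes:
        neu1 += word_vectors[idx]                       # accumulate context word vectors
    neu1 += doctag_vector                               # add the document (tag) vector
    count = len(context_indexes) + 1
    if cbow_mean and count > 0:
        neu1 /= count                                   # mean of context + doc vector
    return neu1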
def d2v_train_epoch_dm_concat(
model, corpus_file, offset, start_doctag, _cython_vocab, _cur_epoch, _expected_examples,
_expected_words, work, neu1, docvecs_count, word_vectors=None, words_lockf=None,
learn_doctags=True, learn_words=True, learn_hidden=True, doctag_vectors=None,
doctags_lockf=None,
):
"""Train distributed memory model ("PV-DM") by training on a corpus file, using a concatenation of the context
window word vectors (rather than a sum or average).
This might be slower since the input at each batch will be significantly larger.
Called internally from :meth:`~gensim.models.doc2vec.Doc2Vec.train`.
Parameters
----------
model : :class:`~gensim.models.doc2vec.Doc2Vec`
The Doc2Vec model instance to train.
corpus_file : str
Path to corpus file.
_cur_epoch : int
Current epoch number. Used for calculating and decaying learning rate.
work : np.ndarray
Private working memory for each worker.
neu1 : np.ndarray
Private working memory for each worker.
learn_doctags : bool, optional
Whether the tag vectors should be updated.
learn_words : bool, optional
Whether the word vectors should be updated.
learn_hidden : bool, optional
Whether or not the weights of the hidden layer will be updated.
word_vectors : numpy.ndarray, optional
The vector representation for each word in the vocabulary. If None, these will be retrieved from the model.
words_lockf : numpy.ndarray, optional
EXPERIMENTAL. A learning lock factor for each word-vector, value 0.0 completely blocks updates, a value
of 1.0 allows normal updates to word-vectors.
doctag_vectors : numpy.ndarray, optional
Vector representations of the tags. If None, these will be retrieved from the model.
doctags_lockf : numpy.ndarray, optional
EXPERIMENTAL. The lock factors for each tag, same as `words_lockf`, but for document-vectors.
Returns
-------
tuple of (int, int, int)
Number of documents processed, number of effective words used for training, and total number of words
seen during this epoch.
"""
cdef Doc2VecConfig c
cdef int cur_epoch = _cur_epoch
cdef int num_epochs = model.epochs
cdef long long expected_examples = (-1 if _expected_examples is None else _expected_examples)
cdef long long expected_words = (-1 if _expected_words is None else _expected_words)
cdef REAL_t start_alpha = model.alpha
cdef REAL_t end_alpha = model.min_alpha
cdef REAL_t _alpha = get_alpha(model.alpha, end_alpha, cur_epoch, num_epochs)
cdef CythonLineSentence input_stream = CythonLineSentence(corpus_file, offset)
cdef CythonVocab vocab = _cython_vocab
cdef int i, j, k, m, n, document_len
cdef int effective_words = 0
cdef long long total_documents = 0
cdef long long total_effective_words = 0, total_words = 0
cdef int sent_idx, idx_start, idx_end
cdef int shrink_windows = int(model.shrink_windows)
cdef vector[string] doc_words
cdef long long _doc_tag = start_doctag
init_d2v_config(
&c, model, _alpha, learn_doctags, learn_words, learn_hidden, train_words=False,
work=work, neu1=neu1, word_vectors=word_vectors, words_lockf=words_lockf,
doctag_vectors=doctag_vectors, doctags_lockf=doctags_lockf, docvecs_count=docvecs_count)
# release GIL & train on the full corpus, document by document
with nogil:
input_stream.reset()
while not (input_stream.is_eof() or total_words > expected_words / c.workers):
effective_words = 0
doc_words = input_stream.read_sentence()
# FIXME? These next 2 lines look fishy to me (gojomo). First, skipping to
# 'total_documents' (end) seems it'd do nothing useful. Second, assigning
# into what is typically a count (`doctag_len`) from a boolean test is
# sketchy, even if in the current limitations of this mode (corpus_file)
# only '1' is a workable value. But, this code seems to pass at least
# one real has-some-function test (test_dmc_hs_fromfile), and this mode
# is rarely used, & I haven't written this code & would prefer to see the
# whole duplicate-logic of corpus_file mode removed in favor of an approach
# with less duplication. So I'm not sure anything is broken & it's far from
# a near-term priority - thus leaving this note.
_doc_tag = total_documents
c.doctag_len = _doc_tag < c.docvecs_count
# skip doc either empty or without expected number of tags
if doc_words.empty() or c.expected_doctag_len != c.doctag_len:
continue
prepare_c_structures_for_batch(
doc_words, c.sample, c.hs, c.window, &total_words, &effective_words,
&c.next_random, vocab.get_vocab_ptr(), c.indexes, c.codelens, c.codes,
c.points, NULL, &document_len, c.train_words, c.docvecs_count, _doc_tag,
shrink_windows)
for i in range(document_len):
j = i - c.window # negative OK: will pad with null word
k = i + c.window + 1 # past document end OK: will pad with null word
# compose l1 & clear work
if _doc_tag < c.docvecs_count:
# doc vector(s)
memcpy(&c.neu1[0], &c.doctag_vectors[_doc_tag * c.vector_size], c.vector_size * cython.sizeof(REAL_t))
n = 0
for m in range(j, k):
# word vectors in window
if m == i:
continue
if m < 0 or m >= document_len:
c.window_indexes[n] = c.null_word_index
else:
c.window_indexes[n] = c.indexes[m]
n += 1
for m in range(2 * c.window):
memcpy(
&c.neu1[(c.doctag_len + m) * c.vector_size], &c.word_vectors[c.window_indexes[m] * c.vector_size],
c.vector_size * cython.sizeof(REAL_t))
memset(c.work, 0, c.layer1_size * cython.sizeof(REAL_t)) # work to accumulate l1 error
if c.hs:
fast_document_dmc_hs(
c.points[i], c.codes[i], c.codelens[i], c.neu1, c.syn1,
c.alpha, c.work, c.layer1_size, c.vector_size, c.learn_hidden)
if c.negative:
c.next_random = fast_document_dmc_neg(
c.negative, c.cum_table, c.cum_table_len, c.next_random, c.neu1, c.syn1neg,
c.indexes[i], c.alpha, c.work, c.layer1_size, c.vector_size, c.learn_hidden)
if c.learn_doctags and _doc_tag < c.docvecs_count:
our_saxpy(
&c.vector_size, &c.doctags_lockf[_doc_tag % c.doctags_lockf_len], &c.work[m * c.vector_size],
&ONE, &c.doctag_vectors[_doc_tag * c.vector_size], &ONE)
if c.learn_words:
for m in range(2 * c.window):
our_saxpy(
&c.vector_size, &c.words_lockf[c.window_indexes[m] % c.words_lockf_len], &c.work[(c.doctag_len + m) * c.vector_size],
&ONE, &c.word_vectors[c.window_indexes[m] * c.vector_size], &ONE)
total_documents += 1
total_effective_words += effective_words
_doc_tag += 1
c.alpha = get_next_alpha(
start_alpha, end_alpha, total_documents, total_words, expected_examples,
expected_words, cur_epoch, num_epochs)
return total_documents, total_effective_words, total_words
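# Hedged NumPy sketch (not part of the original file) of the PV-DM/concat input layout
# built above: the layer-1 input is the document vector followed by exactly 2 * window
# word vectors, with a designated null word padding any position that falls outside the
# document. `null_vector` stands in for word_vectors[null_word_index]; the helper is ours.
def _compose_dmc_input_sketch(word_vectors, indexes, i, window, doctag_vector, null_vector):
    import numpy as np
    parts = [doctag_vector]
    for m in range(i - window, i + window + 1):
        if m == i:
            continue                                    # the centre word is the prediction target
        if m < 0 or m >= len(indexes):
            parts.append(null_vector)                   # pad positions outside the document
        else:
            parts.append(word_vectors[indexes[m]])
    return np.concatenate(parts)                        # length (1 + 2 * window) * vector_size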
CORPUSFILE_VERSION = 1
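# Hedged sketch (illustration only, based on our reading of get_alpha / get_next_alpha used
# above): the learning rate decays linearly from start_alpha to end_alpha over all epochs,
# measured by overall training progress, and never drops below end_alpha.
def _linear_alpha_sketch(start_alpha, end_alpha, epoch_progress, cur_epoch, num_epochs):
    overall_progress = (cur_epoch + epoch_progress) / max(1, num_epochs)   # epoch_progress in [0, 1]
    next_alpha = start_alpha - (start_alpha - end_alpha) * overall_progress
    return max(end_alpha, next_alpha)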
| size: 24,931 | language: Python | extension: .py | total_lines: 470 | avg_line_length: 41.565957 | max_line_length: 145 | alphanum_fraction: 0.611891 | repo_name: piskvorky/gensim | repo_stars: 15,546 | repo_forks: 4,374 | repo_open_issues: 408 | repo_license: LGPL-2.1 | repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| id: 7,111 | file_name: translation_matrix.py | file_path: piskvorky_gensim/gensim/models/translation_matrix.py |
#!/usr/bin/env python
# encoding: utf-8
"""Produce a translation matrix to translate words from one language to another, using either
a standard nearest neighbour method or a globally corrected neighbour retrieval method [1]_.
This method can be used to augment the existing phrase tables with more candidate translations, or
filter out errors from the translation tables and known dictionaries [2]_. It also works
for any two sets of named vectors for which some paired guideposts are available to learn the transformation.
Examples
--------
How to translate between two sets of word vectors
=======================================================
Initialize two word-vector models
.. sourcecode:: pycon
>>> from gensim.models import KeyedVectors
>>> from gensim.test.utils import datapath
>>>
>>> model_en = KeyedVectors.load_word2vec_format(datapath("EN.1-10.cbow1_wind5_hs0_neg10_size300_smpl1e-05.txt"))
>>> model_it = KeyedVectors.load_word2vec_format(datapath("IT.1-10.cbow1_wind5_hs0_neg10_size300_smpl1e-05.txt"))
Define word pairs (that will be used for construction of translation matrix)
.. sourcecode:: pycon
>>> word_pairs = [
... ("one", "uno"), ("two", "due"), ("three", "tre"), ("four", "quattro"), ("five", "cinque"),
... ("seven", "sette"), ("eight", "otto"),
... ("dog", "cane"), ("pig", "maiale"), ("fish", "cavallo"), ("birds", "uccelli"),
... ("apple", "mela"), ("orange", "arancione"), ("grape", "acino"), ("banana", "banana")
... ]
Fit :class:`~gensim.models.translation_matrix.TranslationMatrix`
.. sourcecode:: pycon
>>> trans_model = TranslationMatrix(model_en, model_it, word_pairs=word_pairs)
Apply model (translate words "dog" and "one")
.. sourcecode:: pycon
>>> trans_model.translate(["dog", "one"], topn=3)
OrderedDict([('dog', [u'cane', u'gatto', u'cavallo']), ('one', [u'uno', u'due', u'tre'])])
Save / load model
.. sourcecode:: pycon
>>> with temporary_file("model_file") as fname:
... trans_model.save(fname) # save model to file
... loaded_trans_model = TranslationMatrix.load(fname) # load model
How to make translation between two :class:`~gensim.models.doc2vec.Doc2Vec` models
==================================================================================
Prepare data and models
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> from gensim.test.test_translation_matrix import read_sentiment_docs
>>> from gensim.models import Doc2Vec
>>>
>>> data = read_sentiment_docs(datapath("alldata-id-10.txt"))[:5]
>>> src_model = Doc2Vec.load(datapath("small_tag_doc_5_iter50"))
>>> dst_model = Doc2Vec.load(datapath("large_tag_doc_10_iter50"))
Train backward translation
.. sourcecode:: pycon
>>> model_trans = BackMappingTranslationMatrix(data, src_model, dst_model)
>>> trans_matrix = model_trans.train(data)
Apply model
.. sourcecode:: pycon
>>> result = model_trans.infer_vector(dst_model.dv[data[3].tags])
References
----------
.. [1] Dinu, Georgiana, Angeliki Lazaridou, and Marco Baroni. "Improving zero-shot learning by mitigating the
hubness problem", https://arxiv.org/abs/1412.6568
.. [2] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean.
"Distributed Representations of Words and Phrases and their Compositionality", https://arxiv.org/abs/1310.4546
"""
import warnings
from collections import OrderedDict
import numpy as np
from gensim import utils
class Space:
"""An auxiliary class for storing the the words space."""
def __init__(self, matrix, index2word):
"""
Parameters
----------
matrix : iterable of numpy.ndarray
Matrix that contains word-vectors.
index2word : list of str
Words which correspond to the `matrix`.
"""
self.mat = matrix
self.index2word = index2word
# build a dict to map word to index
self.word2index = {}
for idx, word in enumerate(self.index2word):
self.word2index[word] = idx
@classmethod
def build(cls, lang_vec, lexicon=None):
"""Construct a space class for the lexicon, if it's provided.
Parameters
----------
lang_vec : :class:`~gensim.models.keyedvectors.KeyedVectors`
Model from which the vectors will be extracted.
lexicon : list of str, optional
Words contained in `lang_vec`. If `lexicon` is None, all of `lang_vec`'s words are used.
Returns
-------
:class:`~gensim.models.translation_matrix.Space`
Object that stored word-vectors
"""
# `words` to store all the words
# `mat` to store the word vector for each word in the 'words' list
words = []
mat = []
if lexicon is not None:
# extract vectors only for the words listed in the provided lexicon
for item in lexicon:
words.append(item)
mat.append(lang_vec.vectors[lang_vec.get_index(item)])
else:
for item in lang_vec.index_to_key:
words.append(item)
mat.append(lang_vec.vectors[lang_vec.get_index(item)])
return Space(mat, words)
def normalize(self):
"""Normalize the word vectors matrix."""
self.mat = self.mat / np.sqrt(np.sum(np.square(self.mat), axis=1, keepdims=True))
class TranslationMatrix(utils.SaveLoad):
"""Objects of this class realize the translation matrix which maps the source language to the target language.
Given a source-language word vector x, we map it into the target language space by computing z = Wx,
and then return the target words whose representations are closest to z.
For details on use, see the tutorial notebook [3]_
Examples
--------
.. sourcecode:: pycon
>>> from gensim.models import KeyedVectors
>>> from gensim.test.utils import datapath
>>> en = datapath("EN.1-10.cbow1_wind5_hs0_neg10_size300_smpl1e-05.txt")
>>> it = datapath("IT.1-10.cbow1_wind5_hs0_neg10_size300_smpl1e-05.txt")
>>> model_en = KeyedVectors.load_word2vec_format(en)
>>> model_it = KeyedVectors.load_word2vec_format(it)
>>>
>>> word_pairs = [
... ("one", "uno"), ("two", "due"), ("three", "tre"), ("four", "quattro"), ("five", "cinque"),
... ("seven", "sette"), ("eight", "otto"),
... ("dog", "cane"), ("pig", "maiale"), ("fish", "cavallo"), ("birds", "uccelli"),
... ("apple", "mela"), ("orange", "arancione"), ("grape", "acino"), ("banana", "banana")
... ]
>>>
>>> trans_model = TranslationMatrix(model_en, model_it)
>>> trans_model.train(word_pairs)
>>> trans_model.translate(["dog", "one"], topn=3)
OrderedDict([('dog', [u'cane', u'gatto', u'cavallo']), ('one', [u'uno', u'due', u'tre'])])
References
----------
.. [3] https://github.com/RaRe-Technologies/gensim/blob/3.2.0/docs/notebooks/translation_matrix.ipynb
"""
def __init__(self, source_lang_vec, target_lang_vec, word_pairs=None, random_state=None):
"""
Parameters
----------
source_lang_vec : :class:`~gensim.models.keyedvectors.KeyedVectors`
Word vectors for source language.
target_lang_vec : :class:`~gensim.models.keyedvectors.KeyedVectors`
Word vectors for target language.
word_pairs : list of (str, str), optional
Pairs of words that will be used for training.
random_state : {None, int, array_like}, optional
Seed for random state.
"""
self.source_word = None
self.target_word = None
self.source_lang_vec = source_lang_vec
self.target_lang_vec = target_lang_vec
self.random_state = utils.get_random_state(random_state)
self.translation_matrix = None
self.source_space = None
self.target_space = None
if word_pairs is not None:
if len(word_pairs[0]) != 2:
raise ValueError("Each training data item must contain two different language words.")
self.train(word_pairs)
def train(self, word_pairs):
"""Build the translation matrix to map from source space to target space.
Parameters
----------
word_pairs : list of (str, str)
Pairs of words that will be used for training.
"""
self.source_word, self.target_word = zip(*word_pairs)
self.source_space = Space.build(self.source_lang_vec, set(self.source_word))
self.target_space = Space.build(self.target_lang_vec, set(self.target_word))
self.source_space.normalize()
self.target_space.normalize()
m1 = self.source_space.mat[[self.source_space.word2index[item] for item in self.source_word], :]
m2 = self.target_space.mat[[self.target_space.word2index[item] for item in self.target_word], :]
self.translation_matrix = np.linalg.lstsq(m1, m2, -1)[0]
def save(self, *args, **kwargs):
"""Save the model to a file. Ignores (doesn't store) the `source_space` and `target_space` attributes."""
kwargs['ignore'] = kwargs.get('ignore', ['source_space', 'target_space'])
super(TranslationMatrix, self).save(*args, **kwargs)
def apply_transmat(self, words_space):
"""Map the source word vector to the target word vector using translation matrix.
Parameters
----------
words_space : :class:`~gensim.models.translation_matrix.Space`
`Space` object constructed for the words to be translated.
Returns
-------
:class:`~gensim.models.translation_matrix.Space`
`Space` object constructed for the mapped words.
"""
return Space(np.dot(words_space.mat, self.translation_matrix), words_space.index2word)
def translate(self, source_words, topn=5, gc=0, sample_num=None, source_lang_vec=None, target_lang_vec=None):
"""Translate the word from the source language to the target language.
Parameters
----------
source_words : {str, list of str}
Single word or a list of words to be translated
topn : int, optional
Number of words that will be returned as translation for each `source_words`
gc : int, optional
Defines the translation algorithm: if `gc == 0`, use standard nearest-neighbour retrieval;
otherwise, use the globally corrected neighbour retrieval method (as described in [1]_).
sample_num : int, optional
Number of words to sample from the source lexicon; **must** be provided when `gc == 1`.
source_lang_vec : :class:`~gensim.models.keyedvectors.KeyedVectors`, optional
New source language vectors for translation; by default, the model's source language vectors are used.
target_lang_vec : :class:`~gensim.models.keyedvectors.KeyedVectors`, optional
New target language vectors for translation; by default, the model's target language vectors are used.
Returns
-------
:class:`collections.OrderedDict`
Ordered dict where each item is `word`: [`translated_word_1`, `translated_word_2`, ...]
"""
if isinstance(source_words, str):
# pass only one word to translate
source_words = [source_words]
# If the language word vectors are not provided by the user, fall back to
# the model's own vectors as default.
if source_lang_vec is None:
warnings.warn(
"The parameter source_lang_vec isn't specified, "
"use the model's source language word vector as default."
)
source_lang_vec = self.source_lang_vec
if target_lang_vec is None:
warnings.warn(
"The parameter target_lang_vec isn't specified, "
"use the model's target language word vector as default."
)
target_lang_vec = self.target_lang_vec
# If gc is enabled, enlarge the source space with additional words sampled from the source language vocabulary.
if gc:
if sample_num is None:
raise RuntimeError(
"When using the globally corrected neighbour retrieval method, "
"the `sample_num` parameter(i.e. the number of words sampled from source space) must be provided."
)
lexicon = set(source_lang_vec.index_to_key)
addition = min(sample_num, len(lexicon) - len(source_words))
lexicon = self.random_state.choice(list(lexicon.difference(source_words)), addition)
source_space = Space.build(source_lang_vec, set(source_words).union(set(lexicon)))
else:
source_space = Space.build(source_lang_vec, source_words)
target_space = Space.build(target_lang_vec)
# Normalize the source vector and target vector
source_space.normalize()
target_space.normalize()
# Map the source language to the target language
mapped_source_space = self.apply_transmat(source_space)
# Vectors are unit-length, so the dot product is cosine similarity; negate it so argsort ranks the most similar first
sim_matrix = -np.dot(target_space.mat, mapped_source_space.mat.T)
# If `gc=1`, using corrected retrieval method
if gc:
srtd_idx = np.argsort(np.argsort(sim_matrix, axis=1), axis=1)
sim_matrix_idx = np.argsort(srtd_idx + sim_matrix, axis=0)
else:
sim_matrix_idx = np.argsort(sim_matrix, axis=0)
# Translate the words and for each word return the `topn` similar words
translated_word = OrderedDict()
for idx, word in enumerate(source_words):
translated_target_word = []
# Search the most `topn` similar words
for j in range(topn):
map_space_id = sim_matrix_idx[j, source_space.word2index[word]]
translated_target_word.append(target_space.index2word[map_space_id])
translated_word[word] = translated_target_word
return translated_word
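# Hedged NumPy sketches (not from the original module) of the two numerical steps above.
# `_fit_translation_matrix_sketch` mirrors train(): solve the least-squares problem
# min_W ||m1 @ W - m2||_F, so that a normalised source vector x maps to roughly x @ W.
# `_retrieval_sketch` mirrors translate(): plain nearest-neighbour ranks targets by cosine
# similarity alone, while the globally corrected variant adds each target word's rank across
# all queries, penalising "hub" words that are close to everything. Both helper names are ours.
def _fit_translation_matrix_sketch(source_vectors, target_vectors):
    import numpy as np
    W, _residuals, _rank, _sv = np.linalg.lstsq(source_vectors, target_vectors, rcond=None)
    return W
def _retrieval_sketch(target_mat, mapped_source_mat, globally_corrected=False):
    import numpy as np
    neg_sim = -np.dot(target_mat, mapped_source_mat.T)           # negated cosine similarity (rows normalised)
    if globally_corrected:
        ranks = np.argsort(np.argsort(neg_sim, axis=1), axis=1)  # per-target rank of each query
        return np.argsort(ranks + neg_sim, axis=0)               # rank-corrected ordering per query
    return np.argsort(neg_sim, axis=0)                           # standard nearest-neighbour ordering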
class BackMappingTranslationMatrix(utils.SaveLoad):
"""Realize the BackMapping translation matrix which maps the source model's document vector
to the target model's document vector (old model).
BackMapping translation matrix is used to learn a mapping for two document vector spaces which we
specify as source document vector and target document vector. The target document vectors are trained
on a superset corpus of source document vectors; we can incrementally increase the vector in
the old model through the BackMapping translation matrix.
For details on use, see the tutorial notebook [3]_.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> from gensim.test.test_translation_matrix import read_sentiment_docs
>>> from gensim.models import Doc2Vec, BackMappingTranslationMatrix
>>>
>>> data = read_sentiment_docs(datapath("alldata-id-10.txt"))[:5]
>>> src_model = Doc2Vec.load(datapath("small_tag_doc_5_iter50"))
>>> dst_model = Doc2Vec.load(datapath("large_tag_doc_10_iter50"))
>>>
>>> model_trans = BackMappingTranslationMatrix(src_model, dst_model)
>>> trans_matrix = model_trans.train(data)
>>>
>>> result = model_trans.infer_vector(dst_model.dv[data[3].tags])
"""
def __init__(self, source_lang_vec, target_lang_vec, tagged_docs=None, random_state=None):
"""
Parameters
----------
source_lang_vec : :class:`~gensim.models.doc2vec.Doc2Vec`
Source Doc2Vec model.
target_lang_vec : :class:`~gensim.models.doc2vec.Doc2Vec`
Target Doc2Vec model.
tagged_docs : list of :class:`~gensim.models.doc2vec.TaggedDocument`, optional
Documents used for training; both the source and the target model must contain document
vectors for these tagged documents.
random_state : {None, int, array_like}, optional
Seed for random state.
"""
self.tagged_docs = tagged_docs
self.source_lang_vec = source_lang_vec
self.target_lang_vec = target_lang_vec
self.random_state = utils.get_random_state(random_state)
self.translation_matrix = None
if tagged_docs is not None:
self.train(tagged_docs)
def train(self, tagged_docs):
"""Build the translation matrix to map from the source model's vectors to target model's vectors
Parameters
----------
tagged_docs : list of :class:`~gensim.models.doc2vec.TaggedDocument`
Documents used for training; both the source and the target model must contain document
vectors for these tagged documents.
Returns
-------
numpy.ndarray
Translation matrix that maps the target model's vectors to the source model's vectors.
"""
m1 = [self.source_lang_vec.dv[item.tags].flatten() for item in tagged_docs]
m2 = [self.target_lang_vec.dv[item.tags].flatten() for item in tagged_docs]
self.translation_matrix = np.linalg.lstsq(m2, m1, -1)[0]
return self.translation_matrix
def infer_vector(self, target_doc_vec):
"""Translate the target model's document vector to the source model's document vector
Parameters
----------
target_doc_vec : numpy.ndarray
Document vector from the target model, for a document that is not present in the source model.
Returns
-------
numpy.ndarray
Vector `target_doc_vec` in the source model.
"""
return np.dot(target_doc_vec, self.translation_matrix)
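# Hedged sketch (illustrative only) of the back-mapping round trip implemented above: learn
# W from paired document vectors with least squares (target space -> source space), then map
# an unseen target-space vector back with a single dot product. The helper name is ours.
def _backmapping_sketch(source_doc_vecs, target_doc_vecs, new_target_vec):
    import numpy as np
    W = np.linalg.lstsq(np.asarray(target_doc_vecs), np.asarray(source_doc_vecs), rcond=None)[0]
    return np.dot(new_target_vec, W)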
| size: 18,050 | language: Python | extension: .py | total_lines: 351 | avg_line_length: 42.752137 | max_line_length: 118 | alphanum_fraction: 0.635402 | repo_name: piskvorky/gensim | repo_stars: 15,546 | repo_forks: 4,374 | repo_open_issues: 408 | repo_license: LGPL-2.1 | repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| id: 7,112 | file_name: ldaseqmodel.py | file_path: piskvorky_gensim/gensim/models/ldaseqmodel.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# Based on Copyright (C) 2016 Radim Rehurek <radimrehurek@seznam.cz>
"""Lda Sequence model, inspired by
`David M. Blei, John D. Lafferty: "Dynamic Topic Models"
<https://mimno.infosci.cornell.edu/info6150/readings/dynamic_topic_models.pdf>`_.
The original C/C++ implementation can be found on `blei-lab/dtm <https://github.com/blei-lab/dtm>`_.
TODO: The next steps to take this forward would be:
#. Include DIM mode. Most of the infrastructure for this is in place.
#. See if LdaPost can be replaced by LdaModel completely without breaking anything.
#. Heavy lifting going on in the Sslm class - efforts can be made to cythonise mathematical methods, in particular,
update_obs and the optimization takes a lot time.
#. Try and make it distributed, especially around the E and M step.
#. Remove all C/C++ coding style/syntax.
Examples
--------
Set up a model using 9 documents, with 2 in the first time-slice, 4 in the second, and 3 in the third
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus
>>> from gensim.models import LdaSeqModel
>>>
>>> ldaseq = LdaSeqModel(corpus=common_corpus, time_slice=[2, 4, 3], num_topics=2, chunksize=1)
Persist a model to disk and reload it later
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> temp_file = datapath("model")
>>> ldaseq.save(temp_file)
>>>
>>> # Load a potentially pre-trained model from disk.
>>> ldaseq = LdaSeqModel.load(temp_file)
Access the document embeddings generated from the DTM
.. sourcecode:: pycon
>>> doc = common_corpus[1]
>>>
>>> embedding = ldaseq[doc]
"""
import logging
import numpy as np
from scipy.special import digamma, gammaln
from scipy import optimize
from gensim import utils, matutils
from gensim.models import ldamodel
logger = logging.getLogger(__name__)
class LdaSeqModel(utils.SaveLoad):
"""Estimate Dynamic Topic Model parameters based on a training corpus."""
def __init__(
self, corpus=None, time_slice=None, id2word=None, alphas=0.01, num_topics=10,
initialize='gensim', sstats=None, lda_model=None, obs_variance=0.5, chain_variance=0.005, passes=10,
random_state=None, lda_inference_max_iter=25, em_min_iter=6, em_max_iter=20, chunksize=100,
):
"""
Parameters
----------
corpus : {iterable of list of (int, float), scipy.sparse.csc}, optional
Stream of document vectors or sparse matrix of shape (`num_documents`, `num_terms`).
If not given, the model is left untrained (presumably because you want to call
:meth:`~gensim.models.ldamodel.LdaSeqModel.update` manually).
time_slice : list of int, optional
Number of documents in each time-slice. Each time slice could for example represent a year's published
papers, in case the corpus comes from a journal publishing over multiple years.
It is assumed that `sum(time_slice) == num_documents`.
id2word : dict of (int, str), optional
Mapping from word IDs to words. It is used to determine the vocabulary size, as well as for
debugging and topic printing.
alphas : float, optional
The symmetric Dirichlet prior (alpha) over each document's topic distribution.
num_topics : int, optional
The number of requested latent topics to be extracted from the training corpus.
initialize : {'gensim', 'own', 'ldamodel'}, optional
Controls the initialization of the DTM model. Supports three different modes:
* 'gensim': Uses gensim's LDA initialization.
* 'own': Uses your own initialization matrix (sufficient statistics) of a previously trained LDA model, passed via `sstats`.
* 'ldamodel': Uses a previously trained LDA model, passed via the `lda_model` argument.
sstats : numpy.ndarray , optional
Sufficient statistics used for initializing the model if `initialize == 'own'`. Corresponds to matrix
beta in the linked paper for time slice 0, expected shape (`self.vocab_len`, `num_topics`).
lda_model : :class:`~gensim.models.ldamodel.LdaModel`
Model whose sufficient statistics will be used to initialize the current object if `initialize == 'ldamodel'`.
obs_variance : float, optional
Observed variance used to approximate the true and forward variance as shown in
`David M. Blei, John D. Lafferty: "Dynamic Topic Models"
<https://mimno.infosci.cornell.edu/info6150/readings/dynamic_topic_models.pdf>`_.
chain_variance : float, optional
Variance of the Gaussian random walk on the topic-word (beta) parameters, dictating how they evolve over time.
passes : int, optional
Number of passes over the corpus for the initial :class:`~gensim.models.ldamodel.LdaModel`
random_state : {numpy.random.RandomState, int}, optional
Can be a np.random.RandomState object, or the seed to generate one. Used for reproducibility of results.
lda_inference_max_iter : int, optional
Maximum number of iterations in the inference step of the LDA training.
em_min_iter : int, optional
Minimum number of iterations of the Expectation-Maximization algorithm.
em_max_iter : int, optional
Maximum number of iterations of the Expectation-Maximization algorithm.
chunksize : int, optional
Number of documents from the corpus to be processed in each chunk.
"""
self.id2word = id2word
if corpus is None and self.id2word is None:
raise ValueError(
'at least one of corpus/id2word must be specified, to establish input space dimensionality'
)
if self.id2word is None:
logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
self.id2word = utils.dict_from_corpus(corpus)
self.vocab_len = len(self.id2word)
elif self.id2word:
self.vocab_len = len(self.id2word)
else:
self.vocab_len = 0
if corpus is not None:
try:
self.corpus_len = len(corpus)
except TypeError:
logger.warning("input corpus stream has no len(); counting documents")
self.corpus_len = sum(1 for _ in corpus)
self.time_slice = time_slice
if self.time_slice is not None:
self.num_time_slices = len(time_slice)
self.num_topics = num_topics
self.num_time_slices = len(time_slice)
self.alphas = np.full(num_topics, alphas)
# topic_chains contains for each topic a 'state space language model' object
# which in turn has information about each topic
# the sslm class is described below and contains information
# on topic-word probabilities and doc-topic probabilities.
self.topic_chains = []
for topic in range(num_topics):
sslm_ = sslm(
num_time_slices=self.num_time_slices, vocab_len=self.vocab_len, num_topics=self.num_topics,
chain_variance=chain_variance, obs_variance=obs_variance
)
self.topic_chains.append(sslm_)
# the following are class variables which are to be integrated during Document Influence Model
self.top_doc_phis = None
self.influence = None
self.renormalized_influence = None
self.influence_sum_lgl = None
# if a corpus and time_slice is provided, depending on the user choice of initializing LDA, we start DTM.
if corpus is not None and time_slice is not None:
self.max_doc_len = max(len(line) for line in corpus)
if initialize == 'gensim':
lda_model = ldamodel.LdaModel(
corpus, id2word=self.id2word, num_topics=self.num_topics,
passes=passes, alpha=self.alphas, random_state=random_state,
dtype=np.float64
)
self.sstats = np.transpose(lda_model.state.sstats)
if initialize == 'ldamodel':
self.sstats = np.transpose(lda_model.state.sstats)
if initialize == 'own':
self.sstats = sstats
# initialize model from sstats
self.init_ldaseq_ss(chain_variance, obs_variance, self.alphas, self.sstats)
# fit DTM
self.fit_lda_seq(corpus, lda_inference_max_iter, em_min_iter, em_max_iter, chunksize)
def init_ldaseq_ss(self, topic_chain_variance, topic_obs_variance, alpha, init_suffstats):
"""Initialize State Space Language Model, topic-wise.
Parameters
----------
topic_chain_variance : float
Variance of the Gaussian random walk on the topic-word (beta) parameters, dictating how they evolve.
topic_obs_variance : float
Observed variance used to approximate the true and forward variance as shown in
`David M. Blei, John D. Lafferty: "Dynamic Topic Models"
<https://mimno.infosci.cornell.edu/info6150/readings/dynamic_topic_models.pdf>`_.
alpha : float
The Dirichlet prior (alpha) over each document's topic distribution.
init_suffstats : numpy.ndarray
Sufficient statistics used for initializing the model, expected shape (`self.vocab_len`, `num_topics`).
"""
self.alphas = alpha
for k, chain in enumerate(self.topic_chains):
sstats = init_suffstats[:, k]
sslm.sslm_counts_init(chain, topic_obs_variance, topic_chain_variance, sstats)
# initialize the below matrices only if running DIM
# ldaseq.topic_chains[k].w_phi_l = np.zeros((ldaseq.vocab_len, ldaseq.num_time_slices))
# ldaseq.topic_chains[k].w_phi_sum = np.zeros((ldaseq.vocab_len, ldaseq.num_time_slices))
# ldaseq.topic_chains[k].w_phi_sq = np.zeros((ldaseq.vocab_len, ldaseq.num_time_slices))
def fit_lda_seq(self, corpus, lda_inference_max_iter, em_min_iter, em_max_iter, chunksize):
"""Fit a LDA Sequence model (DTM).
This method will iteratively set up LDA models and perform EM steps until the sufficient statistics converge,
or until the maximum number of iterations is reached. Because the true posterior is intractable, an
appropriately tight lower bound must be used instead. This function optimizes that bound by minimizing
its Kullback-Leibler divergence from the true posterior.
Parameters
----------
corpus : {iterable of list of (int, float), scipy.sparse.csc}
Stream of document vectors or sparse matrix of shape (`num_documents`, `num_terms`).
lda_inference_max_iter : int
Maximum number of iterations for the inference step of LDA.
em_min_iter : int
Minimum number of iterations of the EM algorithm.
em_max_iter : int
Maximum number of iterations of the EM algorithm.
chunksize : int
Number of documents to be processed in each chunk.
Returns
-------
float
The highest lower bound for the true posterior produced after all iterations.
"""
LDASQE_EM_THRESHOLD = 1e-4
# if bound is low, then we increase iterations.
LOWER_ITER = 10
ITER_MULT_LOW = 2
MAX_ITER = 500
num_topics = self.num_topics
vocab_len = self.vocab_len
data_len = self.num_time_slices
corpus_len = self.corpus_len
bound = 0
convergence = LDASQE_EM_THRESHOLD + 1
iter_ = 0
while iter_ < em_min_iter or ((convergence > LDASQE_EM_THRESHOLD) and iter_ <= em_max_iter):
logger.info(" EM iter %i", iter_)
logger.info("E Step")
# TODO: bound is initialized to 0
old_bound = bound
# initiate sufficient statistics
topic_suffstats = []
for topic in range(num_topics):
topic_suffstats.append(np.zeros((vocab_len, data_len)))
# set up variables
gammas = np.zeros((corpus_len, num_topics))
lhoods = np.zeros((corpus_len, num_topics + 1))
# compute the likelihood of a sequential corpus under an LDA
# seq model and find the evidence lower bound. This is the E - Step
bound, gammas = \
self.lda_seq_infer(corpus, topic_suffstats, gammas, lhoods, iter_, lda_inference_max_iter, chunksize)
self.gammas = gammas
logger.info("M Step")
# fit the variational distribution. This is the M - Step
topic_bound = self.fit_lda_seq_topics(topic_suffstats)
bound += topic_bound
if (bound - old_bound) < 0:
# if max_iter is too low, increase iterations.
if lda_inference_max_iter < LOWER_ITER:
lda_inference_max_iter *= ITER_MULT_LOW
logger.info("Bound went down, increasing iterations to %i", lda_inference_max_iter)
# check for convergence
convergence = np.fabs((bound - old_bound) / old_bound)
if convergence < LDASQE_EM_THRESHOLD:
lda_inference_max_iter = MAX_ITER
logger.info("Starting final iterations, max iter is %i", lda_inference_max_iter)
convergence = 1.0
logger.info("iteration %i iteration lda seq bound is %f convergence is %f", iter_, bound, convergence)
iter_ += 1
return bound
def lda_seq_infer(self, corpus, topic_suffstats, gammas, lhoods,
iter_, lda_inference_max_iter, chunksize):
"""Inference (or E-step) for the lower bound EM optimization.
This is used to set up the gensim :class:`~gensim.models.ldamodel.LdaModel` to be used for each time-slice.
It also allows for Document Influence Model code to be written in.
Parameters
----------
corpus : {iterable of list of (int, float), scipy.sparse.csc}
Stream of document vectors or sparse matrix of shape (`num_documents`, `num_terms`).
topic_suffstats : list of numpy.ndarray
Sufficient statistics to be accumulated during inference, one matrix of expected shape
(`self.vocab_len`, `self.num_time_slices`) per topic.
gammas : numpy.ndarray
Topic weight variational parameters for each document. If not supplied, it will be inferred from the model.
lhoods : list of float
The total log probability lower bound for each topic. Corresponds to the phi variational parameters in the
linked paper.
iter_ : int
Current iteration.
lda_inference_max_iter : int
Maximum number of iterations for the inference step of LDA.
chunksize : int
Number of documents to be processed in each chunk.
Returns
-------
(float, list of float)
The first value is the highest lower bound for the true posterior.
The second value is the list of optimized dirichlet variational parameters for the approximation of
the posterior.
"""
num_topics = self.num_topics
vocab_len = self.vocab_len
bound = 0.0
lda = ldamodel.LdaModel(num_topics=num_topics, alpha=self.alphas, id2word=self.id2word, dtype=np.float64)
lda.topics = np.zeros((vocab_len, num_topics))
ldapost = LdaPost(max_doc_len=self.max_doc_len, num_topics=num_topics, lda=lda)
model = "DTM"
if model == "DTM":
bound, gammas = self.inferDTMseq(
corpus, topic_suffstats, gammas, lhoods, lda,
ldapost, iter_, bound, lda_inference_max_iter, chunksize
)
elif model == "DIM":
self.InfluenceTotalFixed(corpus)
bound, gammas = self.inferDIMseq(
corpus, topic_suffstats, gammas, lhoods, lda,
ldapost, iter_, bound, lda_inference_max_iter, chunksize
)
return bound, gammas
def inferDTMseq(self, corpus, topic_suffstats, gammas, lhoods, lda,
ldapost, iter_, bound, lda_inference_max_iter, chunksize):
"""Compute the likelihood of a sequential corpus under an LDA seq model, and reports the likelihood bound.
Parameters
----------
corpus : {iterable of list of (int, float), scipy.sparse.csc}
Stream of document vectors or sparse matrix of shape (`num_documents`, `num_terms`).
topic_suffstats : numpy.ndarray
Sufficient statistics of the current model, expected shape (`self.vocab_len`, `num_topics`).
gammas : numpy.ndarray
Topic weight variational parameters for each document. If not supplied, it will be inferred from the model.
lhoods : list of float of length `self.num_topics`
The total log probability bound for each topic. Corresponds to phi from the linked paper.
lda : :class:`~gensim.models.ldamodel.LdaModel`
The trained LDA model of the previous iteration.
ldapost : :class:`~gensim.models.ldaseqmodel.LdaPost`
Posterior probability variables for the given LDA model. This will be used as the true (but intractable)
posterior.
iter_ : int
The current iteration.
bound : float
The LDA bound produced after all iterations.
lda_inference_max_iter : int
Maximum number of iterations for the inference step of LDA.
chunksize : int
Number of documents to be processed in each chunk.
Returns
-------
(float, list of float)
The first value is the highest lower bound for the true posterior.
The second value is the list of optimized dirichlet variational parameters for the approximation of
the posterior.
"""
doc_index = 0 # overall doc_index in corpus
time = 0 # current time-slice
doc_num = 0 # doc-index in current time-slice
lda = self.make_lda_seq_slice(lda, time) # create lda_seq slice
time_slice = np.cumsum(np.array(self.time_slice))
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
# iterates chunk size for constant memory footprint
for doc in chunk:
# this is used to update the time_slice and create a new lda_seq slice every new time_slice
if doc_index > time_slice[time]:
time += 1
lda = self.make_lda_seq_slice(lda, time) # create lda_seq slice
doc_num = 0
gam = gammas[doc_index]
lhood = lhoods[doc_index]
ldapost.gamma = gam
ldapost.lhood = lhood
ldapost.doc = doc
# TODO: replace fit_lda_post with appropriate ldamodel functions, if possible.
if iter_ == 0:
doc_lhood = LdaPost.fit_lda_post(
ldapost, doc_num, time, None, lda_inference_max_iter=lda_inference_max_iter
)
else:
doc_lhood = LdaPost.fit_lda_post(
ldapost, doc_num, time, self, lda_inference_max_iter=lda_inference_max_iter
)
if topic_suffstats is not None:
topic_suffstats = LdaPost.update_lda_seq_ss(ldapost, time, doc, topic_suffstats)
gammas[doc_index] = ldapost.gamma
bound += doc_lhood
doc_index += 1
doc_num += 1
return bound, gammas
def make_lda_seq_slice(self, lda, time):
"""Update the LDA model topic-word values using time slices.
Parameters
----------
lda : :class:`~gensim.models.ldamodel.LdaModel`
The stationary model to be updated
time : int
The time slice assigned to the stationary model.
Returns
-------
lda : :class:`~gensim.models.ldamodel.LdaModel`
The stationary model updated to reflect the passed time slice.
"""
for k in range(self.num_topics):
lda.topics[:, k] = self.topic_chains[k].e_log_prob[:, time]
lda.alpha = np.copy(self.alphas)
return lda
def fit_lda_seq_topics(self, topic_suffstats):
"""Fit the sequential model topic-wise.
Parameters
----------
topic_suffstats : numpy.ndarray
Sufficient statistics of the current model, expected shape (`self.vocab_len`, `num_topics`).
Returns
-------
float
The sum of the optimized lower bounds for all topics.
"""
lhood = 0
for k, chain in enumerate(self.topic_chains):
logger.info("Fitting topic number %i", k)
lhood_term = sslm.fit_sslm(chain, topic_suffstats[k])
lhood += lhood_term
return lhood
def print_topic_times(self, topic, top_terms=20):
"""Get the most relevant words for a topic, for each timeslice. This can be used to inspect the evolution of a
topic through time.
Parameters
----------
topic : int
The index of the topic.
top_terms : int, optional
Number of most relevant words associated with the topic to be returned.
Returns
-------
list of list of str
Top `top_terms` relevant terms for the topic for each time slice.
"""
topics = []
for time in range(self.num_time_slices):
topics.append(self.print_topic(topic, time, top_terms))
return topics
def print_topics(self, time=0, top_terms=20):
"""Get the most relevant words for every topic.
Parameters
----------
time : int, optional
The time slice we are interested in (since topics evolve over time, it is expected that the most
relevant words will also gradually change).
top_terms : int, optional
Number of most relevant words to be returned for each topic.
Returns
-------
list of list of (str, float)
Representation of all topics. Each of them is represented by a list of pairs of words and their assigned
probability.
"""
return [self.print_topic(topic, time, top_terms) for topic in range(self.num_topics)]
def print_topic(self, topic, time=0, top_terms=20):
"""Get the list of words most relevant to the given topic.
Parameters
----------
topic : int
The index of the topic to be inspected.
time : int, optional
The time slice we are interested in (since topics evolve over time, it is expected that the most
relevant words will also gradually change).
top_terms : int, optional
Number of words associated with the topic to be returned.
Returns
-------
list of (str, float)
The representation of this topic. Each element in the list includes the word itself, along with the
probability assigned to it by the topic.
"""
topic = self.topic_chains[topic].e_log_prob
topic = np.transpose(topic)
topic = np.exp(topic[time])
topic = topic / topic.sum()
bestn = matutils.argsort(topic, top_terms, reverse=True)
beststr = [(self.id2word[id_], topic[id_]) for id_ in bestn]
return beststr
def doc_topics(self, doc_number):
"""Get the topic mixture for a document.
Uses the priors for the dirichlet distribution that approximates the true posterior with the optimal
lower bound, and therefore requires the model to be already trained.
Parameters
----------
doc_number : int
Index of the document for which the mixture is returned.
Returns
-------
list of length `self.num_topics`
Probability for each topic in the mixture (essentially a point in the `self.num_topics - 1` simplex).
"""
doc_topic = self.gammas / self.gammas.sum(axis=1)[:, np.newaxis]
return doc_topic[doc_number]
def dtm_vis(self, time, corpus):
"""Get the information needed to visualize the corpus model at a given time slice, using the pyLDAvis format.
Parameters
----------
time : int
The time slice we are interested in.
corpus : {iterable of list of (int, float), scipy.sparse.csc}, optional
The corpus we want to visualize at the given time slice.
Returns
-------
doc_topics : list of length `self.num_topics`
Probability for each topic in the mixture (essentially a point in the `self.num_topics - 1` simplex).
topic_term : numpy.ndarray
The representation of each topic as a multinomial over words in the vocabulary,
expected shape (`num_topics`, vocabulary length).
doc_lengths : list of int
The number of words in each document. These could be fixed, or drawn from a Poisson distribution.
term_frequency : numpy.ndarray
The term frequency matrix (denoted as beta in the original Blei paper). This could also be the TF-IDF
representation of the corpus, expected shape (number of documents, length of vocabulary).
vocab : list of str
The set of unique terms that exist in the corpus vocabulary.
"""
doc_topic = self.gammas / self.gammas.sum(axis=1)[:, np.newaxis]
def normalize(x):
return x / x.sum()
topic_term = [
normalize(np.exp(chain.e_log_prob.T[time]))
for k, chain in enumerate(self.topic_chains)
]
doc_lengths = []
term_frequency = np.zeros(self.vocab_len)
for doc_no, doc in enumerate(corpus):
doc_lengths.append(len(doc))
for term, freq in doc:
term_frequency[term] += freq
vocab = [self.id2word[i] for i in range(len(self.id2word))]
return doc_topic, np.array(topic_term), doc_lengths, term_frequency, vocab
def dtm_coherence(self, time):
"""Get the coherence for each topic.
Can be used to measure the quality of the model, or to inspect the convergence through training via a callback.
Parameters
----------
time : int
The time slice.
Returns
-------
list of list of str
The word representation for each topic, for each time slice. This can be used to check the time coherence
of topics as time evolves: if the most relevant words remain the same, the topic has converged or is
relatively static; if they change rapidly, the topic is still evolving.
"""
coherence_topics = []
for topics in self.print_topics(time):
coherence_topic = []
for word, dist in topics:
coherence_topic.append(word)
coherence_topics.append(coherence_topic)
return coherence_topics
def __getitem__(self, doc):
"""Get the topic mixture for the given document, using the inferred approximation of the true posterior.
Parameters
----------
doc : list of (int, float)
The doc in BOW format. Can be an unseen document.
Returns
-------
list of float
Probabilities for each topic in the mixture. This is essentially a point in the `num_topics - 1` simplex.
"""
lda_model = ldamodel.LdaModel(
num_topics=self.num_topics, alpha=self.alphas, id2word=self.id2word, dtype=np.float64)
lda_model.topics = np.zeros((self.vocab_len, self.num_topics))
ldapost = LdaPost(num_topics=self.num_topics, max_doc_len=len(doc), lda=lda_model, doc=doc)
time_lhoods = []
for time in range(self.num_time_slices):
lda_model = self.make_lda_seq_slice(lda_model, time) # create lda_seq slice
lhood = LdaPost.fit_lda_post(ldapost, 0, time, self)
time_lhoods.append(lhood)
doc_topic = ldapost.gamma / ldapost.gamma.sum()
# should even the likelihoods be returned?
return doc_topic
class sslm(utils.SaveLoad):
"""Encapsulate the inner State Space Language Model for DTM.
Some important attributes of this class:
* `obs` is a matrix of variational observations, with one value per word and time slice.
* `e_log_prob` is a matrix of expected log probabilities, with one value per word and time slice.
* `mean` contains the mean values to be used for inference for each word for a time slice.
* `variance` contains the variance values to be used for inference of word in a time slice.
* `fwd_mean` and `fwd_variance` are the forward posterior values for the mean and the variance.
* `zeta` is an extra variational parameter with a value for each time slice.
"""
def __init__(self, vocab_len=None, num_time_slices=None, num_topics=None, obs_variance=0.5, chain_variance=0.005):
self.vocab_len = vocab_len
self.num_time_slices = num_time_slices
self.obs_variance = obs_variance
self.chain_variance = chain_variance
self.num_topics = num_topics
# setting up matrices
self.obs = np.zeros((vocab_len, num_time_slices))
self.e_log_prob = np.zeros((vocab_len, num_time_slices))
self.mean = np.zeros((vocab_len, num_time_slices + 1))
self.fwd_mean = np.zeros((vocab_len, num_time_slices + 1))
self.fwd_variance = np.zeros((vocab_len, num_time_slices + 1))
self.variance = np.zeros((vocab_len, num_time_slices + 1))
self.zeta = np.zeros(num_time_slices)
# the following are class variables which are to be integrated during Document Influence Model
self.m_update_coeff = None
self.mean_t = None
self.variance_t = None
self.influence_sum_lgl = None
self.w_phi_l = None
self.w_phi_sum = None
self.w_phi_l_sq = None
self.m_update_coeff_g = None
def update_zeta(self):
"""Update the Zeta variational parameter.
Zeta is described in the appendix; for each time slice it equals the sum, over all words, of
exp(mean[word] + variance[word] / 2). It is the value of the variational parameter zeta which maximizes
the lower bound.
Returns
-------
list of float
The updated zeta values for each time slice.
"""
for j, val in enumerate(self.zeta):
self.zeta[j] = np.sum(np.exp(self.mean[:, j + 1] + self.variance[:, j + 1] / 2))
return self.zeta
def compute_post_variance(self, word, chain_variance):
r"""Get the variance, based on the
`Variational Kalman Filtering approach for Approximate Inference (section 3.1)
<https://mimno.infosci.cornell.edu/info6150/readings/dynamic_topic_models.pdf>`_.
This method accepts the ID of the word whose variance is to be computed, and returns both the smoothed
`variance` and the forward posterior approximation `fwd_variance`.
Notes
-----
This function essentially computes Var[\beta_{t,w}] for t = 1:T.
.. math::
    fwd\_variance[t] \equiv E((\beta_{t,w} - fwd\_mean_{t,w})^2 | \beta_{1:t}) =
    (obs\_variance / (fwd\_variance[t - 1] + chain\_variance + obs\_variance)) *
    (fwd\_variance[t - 1] + chain\_variance)
.. math::
    variance[t] \equiv E((\beta_{t,w} - mean_{t,w})^2 | \beta_{1:T}) =
    (fwd\_variance[t] / (fwd\_variance[t] + chain\_variance))^2 * (variance[t + 1] - chain\_variance) +
    (1 - (fwd\_variance[t] / (fwd\_variance[t] + chain\_variance))^2) * fwd\_variance[t]
Parameters
----------
word: int
The word's ID.
chain_variance : float
Gaussian parameter defined in the beta distribution to dictate how the beta values evolve over time.
Returns
-------
(numpy.ndarray, numpy.ndarray)
The first returned value is the smoothed variance of the given word for each time slice, the second value is
the forward (filtered) variance for the same slices.
"""
INIT_VARIANCE_CONST = 1000
T = self.num_time_slices
variance = self.variance[word]
fwd_variance = self.fwd_variance[word]
# forward pass. Set initial variance very high
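# c is the fraction of the propagated variance retained after conditioning on the observation at slice t;
# a smaller obs_variance means the observation is trusted more, shrinking the filtered variance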
fwd_variance[0] = chain_variance * INIT_VARIANCE_CONST
for t in range(1, T + 1):
if self.obs_variance:
c = self.obs_variance / (fwd_variance[t - 1] + chain_variance + self.obs_variance)
else:
c = 0
fwd_variance[t] = c * (fwd_variance[t - 1] + chain_variance)
# backward pass
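# backward (smoothing) pass: blend the smoothed variance from slice t + 1 with the filtered variance at t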
variance[T] = fwd_variance[T]
for t in range(T - 1, -1, -1):
if fwd_variance[t] > 0.0:
c = np.power((fwd_variance[t] / (fwd_variance[t] + chain_variance)), 2)
else:
c = 0
variance[t] = (c * (variance[t + 1] - chain_variance)) + ((1 - c) * fwd_variance[t])
return variance, fwd_variance
def compute_post_mean(self, word, chain_variance):
"""Get the mean, based on the `Variational Kalman Filtering approach for Approximate Inference (section 3.1)
<https://mimno.infosci.cornell.edu/info6150/readings/dynamic_topic_models.pdf>`_.
Notes
-----
This function essentially computes E[\beta_{t,w}] for t = 1:T.
.. math::
    fwd\_mean[t] \equiv E(\beta_{t,w} | \beta_{1:t}) =
    (obs\_variance / (fwd\_variance[t - 1] + chain\_variance + obs\_variance)) * fwd\_mean[t - 1] +
    (1 - obs\_variance / (fwd\_variance[t - 1] + chain\_variance + obs\_variance)) * obs[t - 1]
.. math::
    mean[t] \equiv E(\beta_{t,w} | \beta_{1:T}) =
    (chain\_variance / (fwd\_variance[t] + chain\_variance)) * fwd\_mean[t] +
    (1 - chain\_variance / (fwd\_variance[t] + chain\_variance)) * mean[t + 1]
Parameters
----------
word: int
The word's ID.
chain_variance : float
Gaussian parameter defined in the beta distribution to dictate how the beta values evolve over time.
Returns
-------
(numpy.ndarray, numpy.ndarray)
The first returned value is the smoothed mean of the given word for each time slice, the second value is
the forward (filtered) mean for the same slices.
"""
T = self.num_time_slices
obs = self.obs[word]
fwd_variance = self.fwd_variance[word]
mean = self.mean[word]
fwd_mean = self.fwd_mean[word]
# forward
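# filtered mean: a convex combination of the previous filtered mean and the observation obs[t - 1]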
fwd_mean[0] = 0
for t in range(1, T + 1):
c = self.obs_variance / (fwd_variance[t - 1] + chain_variance + self.obs_variance)
fwd_mean[t] = c * fwd_mean[t - 1] + (1 - c) * obs[t - 1]
# backward pass
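# smoothed mean: blend the filtered mean at t with the smoothed mean propagated back from t + 1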
mean[T] = fwd_mean[T]
for t in range(T - 1, -1, -1):
if chain_variance == 0.0:
c = 0.0
else:
c = chain_variance / (fwd_variance[t] + chain_variance)
mean[t] = c * fwd_mean[t] + (1 - c) * mean[t + 1]
return mean, fwd_mean
def compute_expected_log_prob(self):
"""Compute the expected log probability given values of m.
The appendix describes the Expectation of log-probabilities in equation 5 of the DTM paper;
The implementation below is the result of solving that equation and follows the original
Blei DTM code.
Returns
-------
numpy.ndarray of float
The expected value for the log probabilities for each word and time slice.
"""
for (w, t), val in np.ndenumerate(self.e_log_prob):
self.e_log_prob[w][t] = self.mean[w][t + 1] - np.log(self.zeta[t])
return self.e_log_prob
def sslm_counts_init(self, obs_variance, chain_variance, sstats):
"""Initialize the State Space Language Model with LDA sufficient statistics.
Called for each topic-chain and initializes initial mean, variance and Topic-Word probabilities
for the first time-slice.
Parameters
----------
obs_variance : float
Observed variance used to approximate the true and forward variance.
chain_variance : float
Gaussian parameter defined in the beta distribution to dictate how the beta values evolve over time.
sstats : numpy.ndarray
Sufficient statistics of the LDA model. Corresponds to matrix beta in the linked paper for time slice 0,
expected shape (`self.vocab_len`, `num_topics`).
"""
W = self.vocab_len
T = self.num_time_slices
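# smooth the sufficient statistics: normalize, add a uniform 1 / W pseudo-count, renormalize, then move to log space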
log_norm_counts = np.copy(sstats)
log_norm_counts /= sum(log_norm_counts)
log_norm_counts += 1.0 / W
log_norm_counts /= sum(log_norm_counts)
log_norm_counts = np.log(log_norm_counts)
# setting variational observations to transformed counts
self.obs = (np.repeat(log_norm_counts, T, axis=0)).reshape(W, T)
# set variational parameters
self.obs_variance = obs_variance
self.chain_variance = chain_variance
# compute post variance, mean
for w in range(W):
self.variance[w], self.fwd_variance[w] = self.compute_post_variance(w, self.chain_variance)
self.mean[w], self.fwd_mean[w] = self.compute_post_mean(w, self.chain_variance)
self.zeta = self.update_zeta()
self.e_log_prob = self.compute_expected_log_prob()
def fit_sslm(self, sstats):
"""Fits variational distribution.
This is essentially the m-step.
Maximizes the approximation of the true posterior for a particular topic using the provided sufficient
statistics. Updates the values using :meth:`~gensim.models.ldaseqmodel.sslm.update_obs` and
:meth:`~gensim.models.ldaseqmodel.sslm.compute_expected_log_prob`.
Parameters
----------
sstats : numpy.ndarray
Sufficient statistics for a particular topic. Corresponds to matrix beta in the linked paper for the
current time slice, expected shape (`self.vocab_len`, `num_topics`).
Returns
-------
float
The lower bound for the true posterior achieved using the fitted approximate distribution.
"""
W = self.vocab_len
bound = 0
old_bound = 0
sslm_fit_threshold = 1e-6
sslm_max_iter = 2
converged = sslm_fit_threshold + 1
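# coordinate ascent: alternately re-optimize obs (update_obs) and recompute the bound,
# stopping once the relative change in the bound drops below sslm_fit_threshold or sslm_max_iter is reached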
# computing variance, fwd_variance
self.variance, self.fwd_variance = \
(np.array(x) for x in zip(*(self.compute_post_variance(w, self.chain_variance) for w in range(W))))
# column sum of sstats
totals = sstats.sum(axis=0)
iter_ = 0
model = "DTM"
if model == "DTM":
bound = self.compute_bound(sstats, totals)
if model == "DIM":
bound = self.compute_bound_fixed(sstats, totals)
logger.info("initial sslm bound is %f", bound)
while converged > sslm_fit_threshold and iter_ < sslm_max_iter:
iter_ += 1
old_bound = bound
self.obs, self.zeta = self.update_obs(sstats, totals)
if model == "DTM":
bound = self.compute_bound(sstats, totals)
if model == "DIM":
bound = self.compute_bound_fixed(sstats, totals)
converged = np.fabs((bound - old_bound) / old_bound)
logger.info("iteration %i iteration lda seq bound is %f convergence is %f", iter_, bound, converged)
self.e_log_prob = self.compute_expected_log_prob()
return bound
def compute_bound(self, sstats, totals):
"""Compute the maximized lower bound achieved for the log probability of the true posterior.
Uses the formula presented in the appendix of the DTM paper (formula no. 5).
Parameters
----------
sstats : numpy.ndarray
Sufficient statistics for a particular topic. Corresponds to matrix beta in the linked paper for the first
time slice, expected shape (`self.vocab_len`, `num_topics`).
totals : list of int of length `len(self.time_slice)`
The totals for each time slice.
Returns
-------
float
The maximized lower bound.
"""
w = self.vocab_len
t = self.num_time_slices
term_1 = 0
term_2 = 0
term_3 = 0
val = 0
ent = 0
chain_variance = self.chain_variance
# computing mean, fwd_mean
self.mean, self.fwd_mean = \
(np.array(x) for x in zip(*(self.compute_post_mean(w, self.chain_variance) for w in range(w))))
self.zeta = self.update_zeta()
val = sum(self.variance[w][0] - self.variance[w][t] for w in range(w)) / 2 * chain_variance
logger.info("Computing bound, all times")
for t in range(1, t + 1):
term_1 = 0.0
term_2 = 0.0
ent = 0.0
for w in range(w):
m = self.mean[w][t]
prev_m = self.mean[w][t - 1]
v = self.variance[w][t]
# w_phi_l is only used in Document Influence Model; the values are always zero in this case
# w_phi_l = sslm.w_phi_l[w][t - 1]
# exp_i = np.exp(-prev_m)
# term_1 += (np.power(m - prev_m - (w_phi_l * exp_i), 2) / (2 * chain_variance)) -
# (v / chain_variance) - np.log(chain_variance)
term_1 += \
(np.power(m - prev_m, 2) / (2 * chain_variance)) - (v / chain_variance) - np.log(chain_variance)
term_2 += sstats[w][t - 1] * m
ent += np.log(v) / 2 # note the 2pi's cancel with term1 (see doc)
term_3 = -totals[t - 1] * np.log(self.zeta[t - 1])
val += term_2 + term_3 + ent - term_1
return val
def update_obs(self, sstats, totals):
"""Optimize the bound with respect to the observed variables.
TODO:
This is by far the slowest function in the whole algorithm.
Replacing or improving the performance of this would greatly speed things up.
Parameters
----------
sstats : numpy.ndarray
Sufficient statistics for a particular topic. Corresponds to matrix beta in the linked paper for the first
time slice, expected shape (`self.vocab_len`, `num_topics`).
totals : list of int of length `len(self.time_slice)`
The totals for each time slice.
Returns
-------
(numpy.ndarray of float, numpy.ndarray of float)
The updated optimized values for obs and the zeta variational parameter.
"""
OBS_NORM_CUTOFF = 2
STEP_SIZE = 0.01
TOL = 1e-3
W = self.vocab_len
T = self.num_time_slices
runs = 0
mean_deriv_mtx = np.zeros((T, T + 1))
norm_cutoff_obs = None
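# optimize each word's obs trajectory separately with conjugate gradients; words whose count vector has a
# very small L2 norm skip the expensive optimization once a low-norm result has been cached (norm_cutoff_obs)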
for w in range(W):
w_counts = sstats[w]
counts_norm = 0
# now we find L2 norm of w_counts
for i in range(len(w_counts)):
counts_norm += w_counts[i] * w_counts[i]
counts_norm = np.sqrt(counts_norm)
if counts_norm < OBS_NORM_CUTOFF and norm_cutoff_obs is not None:
obs = self.obs[w]
norm_cutoff_obs = np.copy(obs)
else:
if counts_norm < OBS_NORM_CUTOFF:
w_counts = np.zeros(len(w_counts))
# TODO: apply lambda function
for t in range(T):
mean_deriv_mtx[t] = self.compute_mean_deriv(w, t, mean_deriv_mtx[t])
deriv = np.zeros(T)
args = self, w_counts, totals, mean_deriv_mtx, w, deriv
obs = self.obs[w]
model = "DTM"
if model == "DTM":
# slowest part of method
obs = optimize.fmin_cg(
f=f_obs, fprime=df_obs, x0=obs, gtol=TOL, args=args, epsilon=STEP_SIZE, disp=0
)
if model == "DIM":
pass
runs += 1
if counts_norm < OBS_NORM_CUTOFF:
norm_cutoff_obs = obs
self.obs[w] = obs
self.zeta = self.update_zeta()
return self.obs, self.zeta
def compute_mean_deriv(self, word, time, deriv):
"""Helper functions for optimizing a function.
Compute the derivative of:
.. :math::
E[\beta_{t,w}]/d obs_{s,w} for t = 1:T.
Parameters
----------
word : int
The word's ID.
time : int
The time slice.
deriv : list of float
Derivative for each time slice.
Returns
-------
list of float
Mean derivative for each time slice.
"""
T = self.num_time_slices
fwd_variance = self.variance[word]
deriv[0] = 0
# forward pass
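# propagate the derivative through the same filtering recursion used for the mean;
# the (1 - w) term only contributes at the slice whose observation we are differentiating with respect to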
for t in range(1, T + 1):
if self.obs_variance > 0.0:
w = self.obs_variance / (fwd_variance[t - 1] + self.chain_variance + self.obs_variance)
else:
w = 0.0
val = w * deriv[t - 1]
if time == t - 1:
val += (1 - w)
deriv[t] = val
for t in range(T - 1, -1, -1):
if self.chain_variance == 0.0:
w = 0.0
else:
w = self.chain_variance / (fwd_variance[t] + self.chain_variance)
deriv[t] = w * deriv[t] + (1 - w) * deriv[t + 1]
return deriv
def compute_obs_deriv(self, word, word_counts, totals, mean_deriv_mtx, deriv):
"""Derivation of obs which is used in derivative function `df_obs` while optimizing.
Parameters
----------
word : int
The word's ID.
word_counts : list of int
Total word counts for each time slice.
totals : list of int of length `len(self.time_slice)`
The totals for each time slice.
mean_deriv_mtx : numpy.ndarray
Matrix of mean derivatives, one row per time slice (as produced by `compute_mean_deriv`).
deriv : list of float
Output buffer holding the derivative for each time slice.
Returns
-------
list of float
The derivative of the bound with respect to obs, for each time slice.
"""
# flag
init_mult = 1000
T = self.num_time_slices
mean = self.mean[word]
variance = self.variance[word]
# only used for DIM mode
# w_phi_l = self.w_phi_l[word]
# m_update_coeff = self.m_update_coeff[word]
# temp_vector holds temporary zeta values
self.temp_vect = np.zeros(T)
for u in range(T):
self.temp_vect[u] = np.exp(mean[u + 1] + variance[u + 1] / 2)
for t in range(T):
mean_deriv = mean_deriv_mtx[t]
term1 = 0
term2 = 0
term3 = 0
term4 = 0
for u in range(1, T + 1):
mean_u = mean[u]
mean_u_prev = mean[u - 1]
dmean_u = mean_deriv[u]
dmean_u_prev = mean_deriv[u - 1]
term1 += (mean_u - mean_u_prev) * (dmean_u - dmean_u_prev)
term2 += (word_counts[u - 1] - (totals[u - 1] * self.temp_vect[u - 1] / self.zeta[u - 1])) * dmean_u
model = "DTM"
if model == "DIM":
# do some stuff
pass
if self.chain_variance:
term1 = - (term1 / self.chain_variance)
term1 = term1 - (mean[0] * mean_deriv[0]) / (init_mult * self.chain_variance)
else:
term1 = 0.0
deriv[t] = term1 + term2 + term3 + term4
return deriv
class LdaPost(utils.SaveLoad):
"""Posterior values associated with each set of documents.
TODO: use **Hoffman, Blei, Bach: Online Learning for Latent Dirichlet Allocation, NIPS 2010.**
to update phi, gamma. End game would be to somehow replace LdaPost entirely with LdaModel.
"""
def __init__(self, doc=None, lda=None, max_doc_len=None, num_topics=None, gamma=None, lhood=None):
"""Initialize the posterior value structure for the given LDA model.
Parameters
----------
doc : list of (int, int)
A BOW representation of the document. Each element in the list is a pair of a word's ID and its number
of occurrences in the document.
lda : :class:`~gensim.models.ldamodel.LdaModel`, optional
The underlying LDA model.
max_doc_len : int, optional
The maximum number of words in a document.
num_topics : int, optional
Number of topics discovered by the LDA model.
gamma : numpy.ndarray, optional
Topic weight variational parameters for each document. If not supplied, it will be initialized to zeros.
lhood : float, optional
The log likelihood lower bound.
"""
self.doc = doc
self.lda = lda
self.gamma = gamma
self.lhood = lhood
if self.gamma is None:
self.gamma = np.zeros(num_topics)
if self.lhood is None:
self.lhood = np.zeros(num_topics + 1)
if max_doc_len is not None and num_topics is not None:
self.phi = np.zeros((max_doc_len, num_topics))
self.log_phi = np.zeros((max_doc_len, num_topics))
# the following are class variables which are to be integrated during Document Influence Model
self.doc_weight = None
self.renormalized_doc_weight = None
def update_phi(self, doc_number, time):
"""Update variational multinomial parameters, based on a document and a time-slice.
This is done based on the original Blei-LDA paper, where:
phi := beta * exp(Ψ(gamma)), normalized over topics, for every word (computed in log space as log_phi).
TODO: incorporate lee-sueng trick used in
**Lee, Seung: Algorithms for non-negative matrix factorization, NIPS 2001**.
Parameters
----------
doc_number : int
Document number. Unused.
time : int
Time slice. Unused.
Returns
-------
(list of float, list of float)
Multinomial parameters, and their logarithm, for each word in the document.
"""
num_topics = self.lda.num_topics
# digamma values
dig = np.zeros(num_topics)
for k in range(num_topics):
dig[k] = digamma(self.gamma[k])
n = 0 # keep track of iterations for phi, log_phi
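# phi[n][k] is proportional to exp(digamma(gamma[k])) * beta[k, word]; lda.topics holds log-probabilities,
# so the product is formed in log space and normalized per word with logaddexp for numerical stability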
for word_id, count in self.doc:
for k in range(num_topics):
self.log_phi[n][k] = dig[k] + self.lda.topics[word_id][k]
log_phi_row = self.log_phi[n]
phi_row = self.phi[n]
# log normalize
v = log_phi_row[0]
for i in range(1, len(log_phi_row)):
v = np.logaddexp(v, log_phi_row[i])
# subtract every element by v
log_phi_row = log_phi_row - v
phi_row = np.exp(log_phi_row)
self.log_phi[n] = log_phi_row
self.phi[n] = phi_row
n += 1 # increase iteration
return self.phi, self.log_phi
def update_gamma(self):
"""Update variational dirichlet parameters.
This operation is described in the original Blei LDA paper:
gamma = alpha + sum(count * phi), summed over every word in the document, for each topic.
Returns
-------
list of float
The updated gamma parameters for each topic.
"""
self.gamma = np.copy(self.lda.alpha)
n = 0 # keep track of number of iterations for phi, log_phi
for word_id, count in self.doc:
phi_row = self.phi[n]
for k in range(self.lda.num_topics):
self.gamma[k] += phi_row[k] * count
n += 1
return self.gamma
def init_lda_post(self):
"""Initialize variational posterior. """
total = sum(count for word_id, count in self.doc)
self.gamma.fill(self.lda.alpha[0] + float(total) / self.lda.num_topics)
self.phi[:len(self.doc), :] = 1.0 / self.lda.num_topics
# doc_weight used during DIM
# ldapost.doc_weight = None
def compute_lda_lhood(self):
"""Compute the log likelihood bound.
Returns
-------
float
The optimal lower bound for the true posterior using the approximate distribution.
"""
num_topics = self.lda.num_topics
gamma_sum = np.sum(self.gamma)
# to be used in DIM
# sigma_l = 0
# sigma_d = 0
lhood = gammaln(np.sum(self.lda.alpha)) - gammaln(gamma_sum)
self.lhood[num_topics] = lhood
# influence_term = 0
digsum = digamma(gamma_sum)
model = "DTM" # noqa:F841
for k in range(num_topics):
# below code only to be used in DIM mode
# if ldapost.doc_weight is not None and (model == "DIM" or model == "fixed"):
# influence_topic = ldapost.doc_weight[k]
# influence_term = \
# - ((influence_topic * influence_topic + sigma_l * sigma_l) / 2.0 / (sigma_d * sigma_d))
e_log_theta_k = digamma(self.gamma[k]) - digsum
lhood_term = \
(self.lda.alpha[k] - self.gamma[k]) * e_log_theta_k + \
gammaln(self.gamma[k]) - gammaln(self.lda.alpha[k])
# TODO: check why there's an IF
n = 0
for word_id, count in self.doc:
if self.phi[n][k] > 0:
lhood_term += \
count * self.phi[n][k] * (e_log_theta_k + self.lda.topics[word_id][k] - self.log_phi[n][k])
n += 1
self.lhood[k] = lhood_term
lhood += lhood_term
# in case of DIM add influence term
# lhood += influence_term
return lhood
def fit_lda_post(self, doc_number, time, ldaseq, LDA_INFERENCE_CONVERGED=1e-8,
lda_inference_max_iter=25, g=None, g3_matrix=None, g4_matrix=None, g5_matrix=None):
"""Posterior inference for lda.
Parameters
----------
doc_number : int
The document's number.
time : int
Time slice.
ldaseq : object
Unused.
LDA_INFERENCE_CONVERGED : float
Epsilon value used to check whether the inference step has sufficiently converged.
lda_inference_max_iter : int
Maximum number of iterations in the inference step.
g : object
Unused. Will be useful when the DIM model is implemented.
g3_matrix: object
Unused. Will be useful when the DIM model is implemented.
g4_matrix: object
Unused. Will be useful when the DIM model is implemented.
g5_matrix: object
Unused. Will be useful when the DIM model is implemented.
Returns
-------
float
The optimal lower bound for the true posterior using the approximate distribution.
"""
self.init_lda_post()
# sum of counts in a doc
total = sum(count for word_id, count in self.doc)
model = "DTM"
if model == "DIM":
# if in DIM then we initialise some variables here
pass
lhood = self.compute_lda_lhood()
lhood_old = 0
converged = 0
iter_ = 0
# first iteration starts here
iter_ += 1
lhood_old = lhood
self.gamma = self.update_gamma()
model = "DTM"
if model == "DTM" or sslm is None:
self.phi, self.log_phi = self.update_phi(doc_number, time)
elif model == "DIM" and sslm is not None:
self.phi, self.log_phi = self.update_phi_fixed(doc_number, time, sslm, g3_matrix, g4_matrix, g5_matrix)
lhood = self.compute_lda_lhood()
converged = np.fabs((lhood_old - lhood) / (lhood_old * total))
while converged > LDA_INFERENCE_CONVERGED and iter_ <= lda_inference_max_iter:
iter_ += 1
lhood_old = lhood
self.gamma = self.update_gamma()
model = "DTM"
if model == "DTM" or sslm is None:
self.phi, self.log_phi = self.update_phi(doc_number, time)
elif model == "DIM" and sslm is not None:
self.phi, self.log_phi = self.update_phi_fixed(doc_number, time, sslm, g3_matrix, g4_matrix, g5_matrix)
lhood = self.compute_lda_lhood()
converged = np.fabs((lhood_old - lhood) / (lhood_old * total))
return lhood
def update_lda_seq_ss(self, time, doc, topic_suffstats):
"""Update lda sequence sufficient statistics from an lda posterior.
This is very similar to the :meth:`~gensim.models.ldaseqmodel.LdaPost.update_gamma` method and uses
the same formula.
Parameters
----------
time : int
The time slice.
doc : list of (int, float)
Unused but kept here for backwards compatibility. The document set in the constructor (`self.doc`) is used
instead.
topic_suffstats : list of float
Sufficient statistics for each topic.
Returns
-------
list of float
The updated sufficient statistics for each topic.
"""
num_topics = self.lda.num_topics
for k in range(num_topics):
topic_ss = topic_suffstats[k]
n = 0
for word_id, count in self.doc:
topic_ss[word_id][time] += count * self.phi[n][k]
n += 1
topic_suffstats[k] = topic_ss
return topic_suffstats
# the following functions are used in update_obs as the objective function.
def f_obs(x, *args):
"""Function which we are optimising for minimizing obs.
Parameters
----------
x : list of float
The obs values for this word.
sslm : :class:`~gensim.models.ldaseqmodel.sslm`
The State Space Language Model for DTM.
word_counts : list of int
Total word counts for each time slice.
totals : list of int of length `len(self.time_slice)`
The totals for each time slice.
mean_deriv_mtx : list of float
Mean derivative for each time slice.
word : int
The word's ID.
deriv : list of float
Mean derivative for each time slice.
Returns
-------
float
The value of the objective function evaluated at point `x`.
"""
sslm, word_counts, totals, mean_deriv_mtx, word, deriv = args
# flag
init_mult = 1000
T = len(x)
val = 0
term1 = 0
term2 = 0
# term 3 and 4 for DIM
term3 = 0
term4 = 0
sslm.obs[word] = x
sslm.mean[word], sslm.fwd_mean[word] = sslm.compute_post_mean(word, sslm.chain_variance)
mean = sslm.mean[word]
variance = sslm.variance[word]
# only used for DIM mode
# w_phi_l = sslm.w_phi_l[word]
# m_update_coeff = sslm.m_update_coeff[word]
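# term1 penalizes large jumps of the posterior mean between consecutive slices (the chain / random-walk prior);
# term2 is the expected log-likelihood of the observed counts under the bound, with zeta as the normalizer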
for t in range(1, T + 1):
mean_t = mean[t]
mean_t_prev = mean[t - 1]
val = mean_t - mean_t_prev
term1 += val * val
term2 += word_counts[t - 1] * mean_t - totals[t - 1] * np.exp(mean_t + variance[t] / 2) / sslm.zeta[t - 1]
model = "DTM"
if model == "DIM":
# stuff happens
pass
if sslm.chain_variance > 0.0:
term1 = - (term1 / (2 * sslm.chain_variance))
term1 = term1 - mean[0] * mean[0] / (2 * init_mult * sslm.chain_variance)
else:
term1 = 0.0
final = -(term1 + term2 + term3 + term4)
return final
def df_obs(x, *args):
"""Derivative of the objective function which optimises obs.
Parameters
----------
x : list of float
The obs values for this word.
sslm : :class:`~gensim.models.ldaseqmodel.sslm`
The State Space Language Model for DTM.
word_counts : list of int
Total word counts for each time slice.
totals : list of int of length `len(self.time_slice)`
The totals for each time slice.
mean_deriv_mtx : list of float
Mean derivative for each time slice.
word : int
The word's ID.
deriv : list of float
Mean derivative for each time slice.
Returns
-------
list of float
The derivative of the objective function evaluated at point `x`.
"""
sslm, word_counts, totals, mean_deriv_mtx, word, deriv = args
sslm.obs[word] = x
sslm.mean[word], sslm.fwd_mean[word] = sslm.compute_post_mean(word, sslm.chain_variance)
model = "DTM"
if model == "DTM":
deriv = sslm.compute_obs_deriv(word, word_counts, totals, mean_deriv_mtx, deriv)
elif model == "DIM":
deriv = sslm.compute_obs_deriv_fixed(
p.word, p.word_counts, p.totals, p.sslm, p.mean_deriv_mtx, deriv) # noqa:F821
return np.negative(deriv)
| 62,174 | Python | .py | 1,307 | 36.844682 | 120 | 0.597555 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,113 | fasttext.py | piskvorky_gensim/gensim/models/fasttext.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Authors: Gensim Contributors
# Copyright (C) 2018 RaRe Technologies s.r.o.
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""
Introduction
------------
Learn word representations via fastText: `Enriching Word Vectors with Subword Information
<https://arxiv.org/abs/1607.04606>`_.
This module allows training word embeddings from a training corpus with the additional ability to obtain word vectors
for out-of-vocabulary words.
This module contains a fast native C implementation of fastText with Python interfaces. It is **not** only a wrapper
around Facebook's implementation.
This module supports loading models trained with Facebook's fastText implementation.
It also supports continuing training from such models.
For a tutorial see :ref:`sphx_glr_auto_examples_tutorials_run_fasttext.py`.
Usage examples
--------------
Initialize and train a model:
.. sourcecode:: pycon
>>> from gensim.models import FastText
>>> from gensim.test.utils import common_texts # some example sentences
>>>
>>> print(common_texts[0])
['human', 'interface', 'computer']
>>> print(len(common_texts))
9
>>> model = FastText(vector_size=4, window=3, min_count=1) # instantiate
>>> model.build_vocab(corpus_iterable=common_texts)
>>> model.train(corpus_iterable=common_texts, total_examples=len(common_texts), epochs=10) # train
Once you have a model, you can access its keyed vectors via the `model.wv` attribute.
The keyed vectors instance is quite powerful: it can perform a wide range of NLP tasks.
For a full list of examples, see :class:`~gensim.models.keyedvectors.KeyedVectors`.
You can also pass all the above parameters to the constructor to do everything
in a single line:
.. sourcecode:: pycon
>>> model2 = FastText(vector_size=4, window=3, min_count=1, sentences=common_texts, epochs=10)
The two models above are instantiated differently, but behave identically.
For example, we can compare the embeddings they've calculated for the word "computer":
.. sourcecode:: pycon
>>> import numpy as np
>>>
>>> np.allclose(model.wv['computer'], model2.wv['computer'])
True
In the above examples, we trained the model from sentences (lists of words) loaded into memory.
This is OK for smaller datasets, but for larger datasets, we recommend streaming the file,
for example from disk or the network.
In Gensim, we refer to such datasets as "corpora" (singular "corpus"), and keep them
in the format described in :class:`~gensim.models.word2vec.LineSentence`.
Passing a corpus is simple:
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> corpus_file = datapath('lee_background.cor') # absolute path to corpus
>>> model3 = FastText(vector_size=4, window=3, min_count=1)
>>> model3.build_vocab(corpus_file=corpus_file) # scan over corpus to build the vocabulary
>>>
>>> total_words = model3.corpus_total_words # number of words in the corpus
>>> model3.train(corpus_file=corpus_file, total_words=total_words, epochs=5)
The model needs the `total_words` parameter in order to
manage the training rate (alpha) correctly, and to give accurate progress estimates.
The above example relies on an implementation detail: the
:meth:`~gensim.models.fasttext.FastText.build_vocab` method
sets the `corpus_total_words` (and also `corpus_count`) model attributes.
You may calculate them by scanning over the corpus yourself, too.
If you have a corpus in a different format, then you can use it by wrapping it
in an `iterator <https://wiki.python.org/moin/Iterator>`_.
Your iterator should yield a list of strings each time, where each string should be a separate word.
Gensim will take care of the rest:
.. sourcecode:: pycon
>>> from gensim.utils import tokenize
>>> from gensim import utils
>>>
>>>
>>> class MyIter:
... def __iter__(self):
... path = datapath('crime-and-punishment.txt')
... with utils.open(path, 'r', encoding='utf-8') as fin:
... for line in fin:
... yield list(tokenize(line))
>>>
>>>
>>> model4 = FastText(vector_size=4, window=3, min_count=1)
>>> model4.build_vocab(corpus_iterable=MyIter())
>>> total_examples = model4.corpus_count
>>> model4.train(corpus_iterable=MyIter(), total_examples=total_examples, epochs=5)
Persist a model to disk with:
.. sourcecode:: pycon
>>> from gensim.test.utils import get_tmpfile
>>>
>>> fname = get_tmpfile("fasttext.model")
>>>
>>> model.save(fname)
>>> model = FastText.load(fname)
Once loaded, such models behave identically to those created from scratch.
For example, you can continue training the loaded model:
.. sourcecode:: pycon
>>> import numpy as np
>>>
>>> 'computation' in model.wv.key_to_index # New word, currently out of vocab
False
>>> old_vector = np.copy(model.wv['computation']) # Grab the existing vector
>>> new_sentences = [
... ['computer', 'aided', 'design'],
... ['computer', 'science'],
... ['computational', 'complexity'],
... ['military', 'supercomputer'],
... ['central', 'processing', 'unit'],
... ['onboard', 'car', 'computer'],
... ]
>>>
>>> model.build_vocab(new_sentences, update=True) # Update the vocabulary
>>> model.train(new_sentences, total_examples=len(new_sentences), epochs=model.epochs)
>>>
>>> new_vector = model.wv['computation']
>>> np.allclose(old_vector, new_vector, atol=1e-4) # Vector has changed, model has learnt something
False
>>> 'computation' in model.wv.key_to_index # Word is still out of vocab
False
.. Important::
Be sure to call the :meth:`~gensim.models.fasttext.FastText.build_vocab`
method with `update=True` before the :meth:`~gensim.models.fasttext.FastText.train` method
when continuing training. Without this call, previously unseen terms
will not be added to the vocabulary.
You can also load models trained with Facebook's fastText implementation:
.. sourcecode:: pycon
>>> cap_path = datapath("crime-and-punishment.bin")
>>> fb_model = load_facebook_model(cap_path)
Once loaded, such models behave identically to those trained from scratch.
You may continue training them on new data:
.. sourcecode:: pycon
>>> 'computer' in fb_model.wv.key_to_index # New word, currently out of vocab
False
>>> old_computer = np.copy(fb_model.wv['computer']) # Calculate current vectors
>>> fb_model.build_vocab(new_sentences, update=True)
>>> fb_model.train(new_sentences, total_examples=len(new_sentences), epochs=model.epochs)
>>> new_computer = fb_model.wv['computer']
>>> np.allclose(old_computer, new_computer, atol=1e-4) # Vector has changed, model has learnt something
False
>>> 'computer' in fb_model.wv.key_to_index # New word is now in the vocabulary
True
If you do not intend to continue training the model, consider using the
:func:`gensim.models.fasttext.load_facebook_vectors` function instead.
That function only loads the word embeddings (keyed vectors), consuming much less CPU and RAM:
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> cap_path = datapath("crime-and-punishment.bin")
>>> wv = load_facebook_vectors(cap_path)
>>>
>>> 'landlord' in wv.key_to_index # Word is out of vocabulary
False
>>> oov_vector = wv['landlord'] # Even OOV words have vectors in FastText
>>>
>>> 'landlady' in wv.key_to_index # Word is in the vocabulary
True
>>> iv_vector = wv['landlady']
Retrieve word vectors for an in-vocabulary and an out-of-vocabulary word:
.. sourcecode:: pycon
>>> existent_word = "computer"
>>> existent_word in model.wv.key_to_index
True
>>> computer_vec = model.wv[existent_word] # numpy vector of a word
>>>
>>> oov_word = "graph-out-of-vocab"
>>> oov_word in model.wv.key_to_index
False
>>> oov_vec = model.wv[oov_word] # numpy vector for OOV word
You can perform various NLP word tasks with the model, some of them are already built-in:
.. sourcecode:: pycon
>>> similarities = model.wv.most_similar(positive=['computer', 'human'], negative=['interface'])
>>> most_similar = similarities[0]
>>>
>>> similarities = model.wv.most_similar_cosmul(positive=['computer', 'human'], negative=['interface'])
>>> most_similar = similarities[0]
>>>
>>> not_matching = model.wv.doesnt_match("human computer interface tree".split())
>>>
>>> sim_score = model.wv.similarity('computer', 'human')
Correlation with human opinion on word similarity:
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> similarities = model.wv.evaluate_word_pairs(datapath('wordsim353.tsv'))
And on word analogies:
.. sourcecode:: pycon
>>> analogies_result = model.wv.evaluate_word_analogies(datapath('questions-words.txt'))
"""
import logging
import numpy as np
from numpy import ones, vstack, float32 as REAL
import gensim.models._fasttext_bin
from gensim.models.word2vec import Word2Vec
from gensim.models.keyedvectors import KeyedVectors, prep_vectors
from gensim import utils
from gensim.utils import deprecated
try:
from gensim.models.fasttext_inner import ( # noqa: F401
train_batch_any,
MAX_WORDS_IN_BATCH,
compute_ngrams,
compute_ngrams_bytes,
ft_hash_bytes,
)
from gensim.models.fasttext_corpusfile import train_epoch_sg, train_epoch_cbow
except ImportError:
raise utils.NO_CYTHON
logger = logging.getLogger(__name__)
class FastText(Word2Vec):
def __init__(self, sentences=None, corpus_file=None, sg=0, hs=0, vector_size=100, alpha=0.025,
window=5, min_count=5,
max_vocab_size=None, word_ngrams=1, sample=1e-3, seed=1, workers=3, min_alpha=0.0001,
negative=5, ns_exponent=0.75, cbow_mean=1, hashfxn=hash, epochs=5, null_word=0, min_n=3, max_n=6,
sorted_vocab=1, bucket=2000000, trim_rule=None, batch_words=MAX_WORDS_IN_BATCH, callbacks=(),
max_final_vocab=None, shrink_windows=True,):
"""Train, use and evaluate word representations learned using the method
described in `Enriching Word Vectors with Subword Information <https://arxiv.org/abs/1607.04606>`_,
aka FastText.
The model can be stored/loaded via its :meth:`~gensim.models.fasttext.FastText.save` and
:meth:`~gensim.models.fasttext.FastText.load` methods, or loaded from a format compatible with the
original Fasttext implementation via :func:`~gensim.models.fasttext.load_facebook_model`.
Parameters
----------
sentences : iterable of list of str, optional
Can be simply a list of lists of tokens, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such
examples. If you don't supply `sentences`, the model is left uninitialized -- use if you plan to
initialize it in some other way.
corpus_file : str, optional
Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
You may use this argument instead of `sentences` to get performance boost. Only one of `sentences` or
`corpus_file` arguments need to be passed (or none of them, in that case, the model is left
uninitialized).
min_count : int, optional
The model ignores all words with total frequency lower than this.
vector_size : int, optional
Dimensionality of the word vectors.
window : int, optional
The maximum distance between the current and predicted word within a sentence.
workers : int, optional
Use this many worker threads to train the model (=faster training with multicore machines).
alpha : float, optional
The initial learning rate.
min_alpha : float, optional
Learning rate will linearly drop to `min_alpha` as training progresses.
sg : {1, 0}, optional
Training algorithm: skip-gram if `sg=1`, otherwise CBOW.
hs : {1,0}, optional
If 1, hierarchical softmax will be used for model training.
If set to 0, and `negative` is non-zero, negative sampling will be used.
seed : int, optional
Seed for the random number generator. Initial vectors for each word are seeded with a hash of
the concatenation of word + `str(seed)`. Note that for a fully deterministically-reproducible run,
you must also limit the model to a single worker thread (`workers=1`), to eliminate ordering jitter
from OS thread scheduling. (In Python 3, reproducibility between interpreter launches also requires
use of the `PYTHONHASHSEED` environment variable to control hash randomization).
max_vocab_size : int, optional
Limits the RAM during vocabulary building; if there are more unique
words than this, then prune the infrequent ones. Every 10 million word types need about 1GB of RAM.
Set to `None` for no limit.
sample : float, optional
The threshold for configuring which higher-frequency words are randomly downsampled,
useful range is (0, 1e-5).
negative : int, optional
If > 0, negative sampling will be used, the int for negative specifies how many "noise words"
should be drawn (usually between 5-20).
If set to 0, no negative sampling is used.
ns_exponent : float, optional
The exponent used to shape the negative sampling distribution. A value of 1.0 samples exactly in proportion
to the frequencies, 0.0 samples all words equally, while a negative value samples low-frequency words more
than high-frequency words. The popular default value of 0.75 was chosen by the original Word2Vec paper.
More recently, in https://arxiv.org/abs/1804.04212, Caselles-Dupré, Lesaint, & Royo-Letelier suggest that
other values may perform better for recommendation applications.
cbow_mean : {1,0}, optional
If 0, use the sum of the context word vectors. If 1, use the mean, only applies when cbow is used.
hashfxn : function, optional
Hash function to use to randomly initialize weights, for increased training reproducibility.
epochs : int, optional
Number of iterations (epochs) over the corpus.
trim_rule : function, optional
Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
or a callable that accepts parameters (word, count, min_count) and returns either
:attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
The rule, if given, is only used to prune vocabulary during
:meth:`~gensim.models.fasttext.FastText.build_vocab` and is not stored as part of the model.
The input parameters are of the following types:
* `word` (str) - the word we are examining
* `count` (int) - the word's frequency count in the corpus
* `min_count` (int) - the minimum count threshold.
sorted_vocab : {1,0}, optional
If 1, sort the vocabulary by descending frequency before assigning word indices.
batch_words : int, optional
Target size (in words) for batches of examples passed to worker threads (and
thus cython routines). (Larger batches will be passed if individual
texts are longer than 10000 words, but the standard cython code truncates to that maximum.)
min_n : int, optional
Minimum length of char n-grams to be used for training word representations.
max_n : int, optional
Max length of char ngrams to be used for training word representations. Set `max_n` to be
less than `min_n` to avoid char ngrams being used.
word_ngrams : int, optional
In Facebook's FastText, "max length of word ngram" - but gensim only supports the
default of 1 (regular unigram word handling).
bucket : int, optional
Character ngrams are hashed into a fixed number of buckets, in order to limit the
memory usage of the model. This option specifies the number of buckets used by the model.
The default value of 2000000 consumes as much memory as having 2000000 more in-vocabulary
words in your model.
callbacks : :obj:`list` of :obj:`~gensim.models.callbacks.CallbackAny2Vec`, optional
List of callbacks that need to be executed/run at specific stages during training.
max_final_vocab : int, optional
Limits the vocab to a target vocab size by automatically selecting
``min_count``. If the specified ``min_count`` is more than the
automatically calculated ``min_count``, the former will be used.
Set to ``None`` if not required.
shrink_windows : bool, optional
New in 4.1. Experimental.
If True, the effective window size is uniformly sampled from [1, `window`]
for each target word during training, to match the original word2vec algorithm's
approximate weighting of context words by distance. Otherwise, the effective
window size is always fixed to `window` words to either side.
Examples
--------
Initialize and train a `FastText` model:
.. sourcecode:: pycon
>>> from gensim.models import FastText
>>> sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
>>>
>>> model = FastText(sentences, min_count=1)
>>> say_vector = model.wv['say'] # get vector for word
>>> of_vector = model.wv['of'] # get vector for out-of-vocab word
Attributes
----------
wv : :class:`~gensim.models.fasttext.FastTextKeyedVectors`
This object essentially contains the mapping between words and embeddings. These are similar to
the embedding computed in the :class:`~gensim.models.word2vec.Word2Vec`, however here we also
include vectors for n-grams. This allows the model to compute embeddings even for **unseen**
words (that do not exist in the vocabulary), as the aggregate of the n-grams included in the word.
After training the model, this attribute can be used directly to query those embeddings in various
ways. Check the module level docstring for some examples.
"""
self.load = utils.call_on_class_only
self.load_fasttext_format = utils.call_on_class_only
self.callbacks = callbacks
if word_ngrams != 1:
raise NotImplementedError("Gensim's FastText implementation does not yet support word_ngrams != 1.")
self.word_ngrams = word_ngrams
if max_n < min_n:
# with no eligible char-ngram lengths, no buckets need be allocated
bucket = 0
self.wv = FastTextKeyedVectors(vector_size, min_n, max_n, bucket)
# EXPERIMENTAL lockf feature; create minimal no-op lockf arrays (1 element of 1.0)
# advanced users should directly resize/adjust as desired after any vocab growth
self.wv.vectors_vocab_lockf = ones(1, dtype=REAL)
self.wv.vectors_ngrams_lockf = ones(1, dtype=REAL)
super(FastText, self).__init__(
sentences=sentences, corpus_file=corpus_file, workers=workers, vector_size=vector_size, epochs=epochs,
callbacks=callbacks, batch_words=batch_words, trim_rule=trim_rule, sg=sg, alpha=alpha, window=window,
max_vocab_size=max_vocab_size, max_final_vocab=max_final_vocab,
min_count=min_count, sample=sample, sorted_vocab=sorted_vocab,
null_word=null_word, ns_exponent=ns_exponent, hashfxn=hashfxn,
seed=seed, hs=hs, negative=negative, cbow_mean=cbow_mean,
min_alpha=min_alpha, shrink_windows=shrink_windows)
def _init_post_load(self, hidden_output):
num_vectors = len(self.wv.vectors)
vocab_size = len(self.wv)
vector_size = self.wv.vector_size
assert num_vectors > 0, 'expected num_vectors to be initialized already'
assert vocab_size > 0, 'expected vocab_size to be initialized already'
# EXPERIMENTAL lockf feature; create minimal no-op lockf arrays (1 element of 1.0)
# advanced users should directly resize/adjust as necessary
self.wv.vectors_ngrams_lockf = ones(1, dtype=REAL)
self.wv.vectors_vocab_lockf = ones(1, dtype=REAL)
if self.hs:
self.syn1 = hidden_output
if self.negative:
self.syn1neg = hidden_output
self.layer1_size = vector_size
def _clear_post_train(self):
"""Clear any cached values that training may have invalidated."""
super(FastText, self)._clear_post_train()
self.wv.adjust_vectors() # ensure composite-word vecs reflect latest training
def estimate_memory(self, vocab_size=None, report=None):
"""Estimate memory that will be needed to train a model, and print the estimates to log."""
vocab_size = vocab_size or len(self.wv)
vec_size = self.vector_size * np.dtype(np.float32).itemsize
l1_size = self.layer1_size * np.dtype(np.float32).itemsize
report = report or {}
report['vocab'] = len(self.wv) * (700 if self.hs else 500)
report['syn0_vocab'] = len(self.wv) * vec_size
num_buckets = self.wv.bucket
if self.hs:
report['syn1'] = len(self.wv) * l1_size
if self.negative:
report['syn1neg'] = len(self.wv) * l1_size
if self.wv.bucket:
report['syn0_ngrams'] = self.wv.bucket * vec_size
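# count the char n-gram hashes of every vocabulary word; this sizes the temporary buckets_word structure below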
num_ngrams = 0
for word in self.wv.key_to_index:
hashes = ft_ngram_hashes(word, self.wv.min_n, self.wv.max_n, self.wv.bucket)
num_ngrams += len(hashes)
# A list (64 bytes) with one np.array (100 bytes) per key, with a total of
# num_ngrams uint32s (4 bytes) amongst them.
# Only used during training, not stored with the model.
report['buckets_word'] = 64 + (100 * len(self.wv)) + (4 * num_ngrams) # TODO: caching & calc sensible?
report['total'] = sum(report.values())
logger.info(
"estimated required memory for %i words, %i buckets and %i dimensions: %i bytes",
len(self.wv), num_buckets, self.vector_size, report['total'],
)
return report
def _do_train_epoch(
self, corpus_file, thread_id, offset, cython_vocab, thread_private_mem, cur_epoch,
total_examples=None, total_words=None, **kwargs,
):
work, neu1 = thread_private_mem
if self.sg:
examples, tally, raw_tally = train_epoch_sg(
self, corpus_file, offset, cython_vocab, cur_epoch, total_examples, total_words, work, neu1,
)
else:
examples, tally, raw_tally = train_epoch_cbow(
self, corpus_file, offset, cython_vocab, cur_epoch, total_examples, total_words, work, neu1,
)
return examples, tally, raw_tally
def _do_train_job(self, sentences, alpha, inits):
"""Train a single batch of sentences. Return 2-tuple `(effective word count after
ignoring unknown words and sentence length trimming, total word count)`.
Parameters
----------
sentences : iterable of list of str
Can be simply a list of lists of tokens, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
alpha : float
The current learning rate.
inits : tuple of (:class:`numpy.ndarray`, :class:`numpy.ndarray`)
Each worker's private work memory.
Returns
-------
(int, int)
Tuple of (effective word count after ignoring unknown words and sentence length trimming, total word count)
"""
work, neu1 = inits
tally = train_batch_any(self, sentences, alpha, work, neu1)
return tally, self._raw_word_count(sentences)
@deprecated(
"Gensim 4.0.0 implemented internal optimizations that make calls to init_sims() unnecessary. "
"init_sims() is now obsoleted and will be completely removed in future versions. "
"See https://github.com/RaRe-Technologies/gensim/wiki/Migrating-from-Gensim-3.x-to-4"
)
def init_sims(self, replace=False):
"""
Precompute L2-normalized vectors. Obsoleted.
If you need a single unit-normalized vector for some key, call
:meth:`~gensim.models.keyedvectors.KeyedVectors.get_vector` instead:
``fasttext_model.wv.get_vector(key, norm=True)``.
To refresh norms after you performed some atypical out-of-band vector tampering,
call :meth:`~gensim.models.keyedvectors.KeyedVectors.fill_norms` instead.
Parameters
----------
replace : bool
If True, forget the original trained vectors and only keep the normalized ones.
You lose information if you do this.
"""
self.wv.init_sims(replace=replace)
@classmethod
@utils.deprecated(
'use load_facebook_vectors (to use pretrained embeddings) or load_facebook_model '
'(to continue training with the loaded full model, more RAM) instead'
)
def load_fasttext_format(cls, model_file, encoding='utf8'):
"""Deprecated.
Use :func:`gensim.models.fasttext.load_facebook_model` or
:func:`gensim.models.fasttext.load_facebook_vectors` instead.
"""
return load_facebook_model(model_file, encoding=encoding)
@utils.deprecated(
'use load_facebook_vectors (to use pretrained embeddings) or load_facebook_model '
'(to continue training with the loaded full model, more RAM) instead'
)
def load_binary_data(self, encoding='utf8'):
"""Load data from a binary file created by Facebook's native FastText.
Parameters
----------
encoding : str, optional
Specifies the encoding.
"""
m = _load_fasttext_format(self.file_name, encoding=encoding)
for attr, val in m.__dict__.items():
setattr(self, attr, val)
def save(self, *args, **kwargs):
"""Save the Fasttext model. This saved model can be loaded again using
:meth:`~gensim.models.fasttext.FastText.load`, which supports incremental training
and getting vectors for out-of-vocabulary words.
Parameters
----------
fname : str
Store the model to this file.
See Also
--------
:meth:`~gensim.models.fasttext.FastText.load`
Load :class:`~gensim.models.fasttext.FastText` model.
"""
super(FastText, self).save(*args, **kwargs)
@classmethod
def load(cls, *args, **kwargs):
"""Load a previously saved `FastText` model.
Parameters
----------
fname : str
Path to the saved file.
Returns
-------
:class:`~gensim.models.fasttext.FastText`
Loaded model.
See Also
--------
:meth:`~gensim.models.fasttext.FastText.save`
Save :class:`~gensim.models.fasttext.FastText` model.
"""
return super(FastText, cls).load(*args, rethrow=True, **kwargs)
def _load_specials(self, *args, **kwargs):
"""Handle special requirements of `.load()` protocol, usually up-converting older versions."""
super(FastText, self)._load_specials(*args, **kwargs)
if hasattr(self, 'bucket'):
# should only exist in one place: the wv subcomponent
self.wv.bucket = self.bucket
del self.bucket
class FastTextVocab(utils.SaveLoad):
"""This is a redundant class. It exists only to maintain backwards compatibility
with older gensim versions."""
class FastTextTrainables(utils.SaveLoad):
"""Obsolete class retained for backward-compatible load()s"""
def _pad_ones(m, new_len):
"""Pad array with additional entries filled with ones."""
if len(m) > new_len:
raise ValueError('the new number of rows %i must be greater than or equal to the old %i' % (new_len, len(m)))
new_arr = np.ones(new_len, dtype=REAL)
new_arr[:len(m)] = m
return new_arr
def load_facebook_model(path, encoding='utf-8'):
"""Load the model from Facebook's native fasttext `.bin` output file.
Notes
------
Facebook provides both `.vec` and `.bin` files with their modules.
The former contains human-readable vectors.
The latter contains machine-readable vectors along with other model parameters.
This function requires you to **provide the full path to the .bin file**.
It effectively ignores the `.vec` output file, since it is redundant.
This function uses the smart_open library to open the path.
The path may be on a remote host (e.g. HTTP, S3, etc).
It may also be gzip or bz2 compressed (i.e. end in `.bin.gz` or `.bin.bz2`).
For details, see `<https://github.com/RaRe-Technologies/smart_open>`__.
Parameters
----------
model_file : str
Path to the FastText output files.
FastText outputs two model files - `/path/to/model.vec` and `/path/to/model.bin`
Expected value for this example: `/path/to/model` or `/path/to/model.bin`,
as Gensim requires only the `.bin` file to load the entire fastText model.
encoding : str, optional
Specifies the file encoding.
Examples
--------
Load, infer, continue training:
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> cap_path = datapath("crime-and-punishment.bin")
>>> fb_model = load_facebook_model(cap_path)
>>>
>>> 'landlord' in fb_model.wv.key_to_index # Word is out of vocabulary
False
>>> oov_term = fb_model.wv['landlord']
>>>
>>> 'landlady' in fb_model.wv.key_to_index # Word is in the vocabulary
True
>>> iv_term = fb_model.wv['landlady']
>>>
>>> new_sent = [['lord', 'of', 'the', 'rings'], ['lord', 'of', 'the', 'flies']]
>>> fb_model.build_vocab(new_sent, update=True)
>>> fb_model.train(sentences=new_sent, total_examples=len(new_sent), epochs=5)
Returns
-------
gensim.models.fasttext.FastText
The loaded model.
See Also
--------
:func:`~gensim.models.fasttext.load_facebook_vectors` loads
the word embeddings only. It's faster, but does not enable you to continue
training.
"""
return _load_fasttext_format(path, encoding=encoding, full_model=True)
def load_facebook_vectors(path, encoding='utf-8'):
"""Load word embeddings from a model saved in Facebook's native fasttext `.bin` format.
Notes
------
Facebook provides both `.vec` and `.bin` files with their modules.
The former contains human-readable vectors.
The latter contains machine-readable vectors along with other model parameters.
This function requires you to **provide the full path to the .bin file**.
It effectively ignores the `.vec` output file, since it is redundant.
This function uses the smart_open library to open the path.
The path may be on a remote host (e.g. HTTP, S3, etc).
It may also be gzip or bz2 compressed.
For details, see `<https://github.com/RaRe-Technologies/smart_open>`__.
Parameters
----------
path : str
The location of the model file.
encoding : str, optional
Specifies the file encoding.
Returns
-------
gensim.models.fasttext.FastTextKeyedVectors
The word embeddings.
Examples
--------
Load and infer:
>>> from gensim.test.utils import datapath
>>>
>>> cap_path = datapath("crime-and-punishment.bin")
>>> fbkv = load_facebook_vectors(cap_path)
>>>
>>> 'landlord' in fbkv.key_to_index # Word is out of vocabulary
False
>>> oov_vector = fbkv['landlord']
>>>
>>> 'landlady' in fbkv.key_to_index # Word is in the vocabulary
True
>>> iv_vector = fbkv['landlady']
See Also
--------
:func:`~gensim.models.fasttext.load_facebook_model` loads
the full model, not just word embeddings, and enables you to continue
model training.
"""
full_model = _load_fasttext_format(path, encoding=encoding, full_model=False)
return full_model.wv
def _load_fasttext_format(model_file, encoding='utf-8', full_model=True):
"""Load the input-hidden weight matrix from Facebook's native fasttext `.bin` output files.
Parameters
----------
model_file : str
Full path to the FastText model file.
encoding : str, optional
Specifies the file encoding.
full_model : boolean, optional
If False, skips loading the hidden output matrix. This saves a fair bit
of CPU time and RAM, but prevents training continuation.
Returns
-------
:class:`~gensim.models.fasttext.FastText`
The loaded model.
"""
with utils.open(model_file, 'rb') as fin:
m = gensim.models._fasttext_bin.load(fin, encoding=encoding, full_model=full_model)
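# translate the native model's hyperparameters into the equivalent gensim FastText constructor arguments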
model = FastText(
vector_size=m.dim,
window=m.ws,
epochs=m.epoch,
negative=m.neg,
hs=int(m.loss == 1),
sg=int(m.model == 2),
bucket=m.bucket,
min_count=m.min_count,
sample=m.t,
min_n=m.minn,
max_n=m.maxn,
)
model.corpus_total_words = m.ntokens
model.raw_vocab = m.raw_vocab
model.nwords = m.nwords
model.vocab_size = m.vocab_size
#
# This is here to fix https://github.com/RaRe-Technologies/gensim/pull/2373.
#
# We explicitly set min_count=1 regardless of the model's parameters to
# ignore the trim rule when building the vocabulary. We do this in order
# to support loading native models that were trained with pretrained vectors.
# Such models will contain vectors for _all_ encountered words, not only
# those occurring more frequently than min_count.
#
# Native models trained _without_ pretrained vectors already contain the
# trimmed raw_vocab, so this change does not affect them.
#
model.prepare_vocab(update=True, min_count=1)
model.num_original_vectors = m.vectors_ngrams.shape[0]
model.wv.init_post_load(m.vectors_ngrams)
model._init_post_load(m.hidden_output)
_check_model(model)
model.add_lifecycle_event(
"load_fasttext_format",
msg=f"loaded {m.vectors_ngrams.shape} weight matrix for fastText model from {fin.name}",
)
return model
def _check_model(m):
"""Model sanity checks. Run after everything has been completely initialized."""
if m.wv.vector_size != m.wv.vectors_ngrams.shape[1]:
raise ValueError(
'mismatch between vector size in model params (%s) and model vectors (%s)' % (
m.wv.vector_size, m.wv.vectors_ngrams.shape,
)
)
if hasattr(m, 'syn1neg') and m.syn1neg is not None:
if m.wv.vector_size != m.syn1neg.shape[1]:
raise ValueError(
'mismatch between vector size in model params (%s) and trainables (%s)' % (
m.wv.vector_size, m.syn1neg.shape,
)
)
if len(m.wv) != m.nwords:
raise ValueError(
'mismatch between final vocab size (%s words), and expected number of words (%s words)' % (
len(m.wv), m.nwords,
)
)
if len(m.wv) != m.vocab_size:
# expecting to log this warning only for pretrained french vector, wiki.fr
logger.warning(
"mismatch between final vocab size (%s words), and expected vocab size (%s words)",
len(m.wv), m.vocab_size,
)
def save_facebook_model(model, path, encoding="utf-8", lr_update_rate=100, word_ngrams=1):
"""Saves word embeddings to the Facebook's native fasttext `.bin` format.
Notes
------
Facebook provides both `.vec` and `.bin` files with their modules.
The former contains human-readable vectors.
The latter contains machine-readable vectors along with other model parameters.
**This function saves only the .bin file**.
Parameters
----------
model : gensim.models.fasttext.FastText
FastText model to be saved.
path : str
Output path and filename (including `.bin` extension)
encoding : str, optional
Specifies the file encoding. Defaults to utf-8.
lr_update_rate : int
This parameter is used by the Facebook fasttext tool, but unused by Gensim.
It defaults to the Facebook fasttext default value `100`.
In very rare circumstances you might wish to fiddle with it.
word_ngrams : int
This parameter is used by the Facebook fasttext tool, but unused by Gensim.
It defaults to the Facebook fasttext default value `1`.
In very rare circumstances you might wish to fiddle with it.
Returns
-------
None
"""
fb_fasttext_parameters = {"lr_update_rate": lr_update_rate, "word_ngrams": word_ngrams}
gensim.models._fasttext_bin.save(model, path, fb_fasttext_parameters, encoding)
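# Illustrative sketch (not part of the original module): saving a trained model with
# save_facebook_model() produces a .bin file that load_facebook_model() can read back,
# so the two functions round-trip. The temporary output path below is only an example.
def _example_save_and_reload(model, path="/tmp/saved_fasttext.bin"):
    save_facebook_model(model, path)
    reloaded = load_facebook_model(path)
    # the reloaded model keeps the original dimensionality and can continue training
    assert reloaded.wv.vector_size == model.wv.vector_size
    return reloaded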
class FastTextKeyedVectors(KeyedVectors):
def __init__(self, vector_size, min_n, max_n, bucket, count=0, dtype=REAL):
"""Vectors and vocab for :class:`~gensim.models.fasttext.FastText`.
Implements significant parts of the FastText algorithm. For example,
the :func:`word_vec` calculates vectors for out-of-vocabulary (OOV)
entities. FastText achieves this by keeping vectors for ngrams:
adding the vectors for the ngrams of an entity yields the vector for the
entity.
Similar to a hashmap, this class keeps a fixed number of buckets, and
maps all ngrams to buckets using a hash function.
Parameters
----------
vector_size : int
The dimensionality of all vectors.
min_n : int
The minimum number of characters in an ngram
max_n : int
The maximum number of characters in an ngram
bucket : int
The number of buckets.
count : int, optional
If provided, vectors will be pre-allocated for at least this many vectors. (Otherwise
they can be added later.)
dtype : type, optional
Vector dimensions will default to `np.float32` (AKA `REAL` in some Gensim code) unless
another type is provided here.
Attributes
----------
vectors_vocab : np.array
Each row corresponds to a vector for an entity in the vocabulary.
Columns correspond to vector dimensions. When embedded in a full
FastText model, these are the full-word-token vectors updated
by training, whereas the inherited vectors are the actual per-word
vectors synthesized from the full-word-token and all subword (ngram)
vectors.
vectors_ngrams : np.array
A vector for each ngram across all entities in the vocabulary.
Each row is a vector that corresponds to a bucket.
Columns correspond to vector dimensions.
buckets_word : list of np.array
For each key (by its index), the bucket slots that its subword ngrams map to.
"""
super(FastTextKeyedVectors, self).__init__(vector_size=vector_size, count=count, dtype=dtype)
self.min_n = min_n
self.max_n = max_n
self.bucket = bucket # count of buckets, fka num_ngram_vectors
self.buckets_word = None # precalculated cache of buckets for each word's ngrams
self.vectors_vocab = np.zeros((count, vector_size), dtype=dtype) # fka (formerly known as) syn0_vocab
self.vectors_ngrams = None # must be initialized later
self.compatible_hash = True
@classmethod
def load(cls, fname_or_handle, **kwargs):
"""Load a previously saved `FastTextKeyedVectors` model.
Parameters
----------
fname : str
Path to the saved file.
Returns
-------
:class:`~gensim.models.fasttext.FastTextKeyedVectors`
Loaded model.
See Also
--------
:meth:`~gensim.models.fasttext.FastTextKeyedVectors.save`
Save :class:`~gensim.models.fasttext.FastTextKeyedVectors` model.
"""
return super(FastTextKeyedVectors, cls).load(fname_or_handle, **kwargs)
def _load_specials(self, *args, **kwargs):
"""Handle special requirements of `.load()` protocol, usually up-converting older versions."""
super(FastTextKeyedVectors, self)._load_specials(*args, **kwargs)
if not isinstance(self, FastTextKeyedVectors):
raise TypeError("Loaded object of type %s, not expected FastTextKeyedVectors" % type(self))
if not hasattr(self, 'compatible_hash') or self.compatible_hash is False:
raise TypeError(
"Pre-gensim-3.8.x fastText models with nonstandard hashing are no longer compatible. "
"Loading your old model into gensim-3.8.3 & re-saving may create a model compatible with gensim 4.x."
)
if not hasattr(self, 'vectors_vocab_lockf') and hasattr(self, 'vectors_vocab'):
self.vectors_vocab_lockf = ones(1, dtype=REAL)
if not hasattr(self, 'vectors_ngrams_lockf') and hasattr(self, 'vectors_ngrams'):
self.vectors_ngrams_lockf = ones(1, dtype=REAL)
# fixup mistakenly overdimensioned gensim-3.x lockf arrays
if len(self.vectors_vocab_lockf.shape) > 1:
self.vectors_vocab_lockf = ones(1, dtype=REAL)
if len(self.vectors_ngrams_lockf.shape) > 1:
self.vectors_ngrams_lockf = ones(1, dtype=REAL)
if not hasattr(self, 'buckets_word') or not self.buckets_word:
self.recalc_char_ngram_buckets()
if not hasattr(self, 'vectors') or self.vectors is None:
self.adjust_vectors() # recompose full-word vectors
def __contains__(self, word):
"""Check if `word` or any character ngrams in `word` are present in the vocabulary.
A vector for the word is guaranteed to exist if this method returns True.
Parameters
----------
word : str
Input word.
Returns
-------
bool
True if `word` or any character ngrams in `word` are present in the vocabulary, False otherwise.
Note
----
This method **always** returns True with char ngrams, because of the way FastText works.
If you want to check if a word is an in-vocabulary term, use this instead:
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> cap_path = datapath("crime-and-punishment.bin")
>>> model = load_facebook_model(cap_path)
>>> 'steamtrain' in model.wv.key_to_index # If False, is an OOV term
False
"""
if self.bucket == 0: # check for the case when char ngrams not used
return word in self.key_to_index
else:
return True
def save(self, *args, **kwargs):
"""Save object.
Parameters
----------
fname : str
Path to the output file.
See Also
--------
:meth:`~gensim.models.fasttext.FastTextKeyedVectors.load`
Load object.
"""
super(FastTextKeyedVectors, self).save(*args, **kwargs)
def _save_specials(self, fname, separately, sep_limit, ignore, pickle_protocol, compress, subname):
"""Arrange any special handling for the gensim.utils.SaveLoad protocol"""
# don't save properties that are merely calculated from others
ignore = set(ignore).union(['buckets_word', 'vectors', ])
return super(FastTextKeyedVectors, self)._save_specials(
fname, separately, sep_limit, ignore, pickle_protocol, compress, subname)
def get_vector(self, word, norm=False):
"""Get `word` representations in vector space, as a 1D numpy array.
Parameters
----------
word : str
Input word.
norm : bool, optional
If True, resulting vector will be L2-normalized (unit Euclidean length).
Returns
-------
numpy.ndarray
Vector representation of `word`.
Raises
------
KeyError
If the word and all of its ngrams are not in the vocabulary.
"""
if word in self.key_to_index:
return super(FastTextKeyedVectors, self).get_vector(word, norm=norm)
elif self.bucket == 0:
raise KeyError('cannot calculate vector for OOV word without ngrams')
else:
word_vec = np.zeros(self.vectors_ngrams.shape[1], dtype=np.float32)
ngram_weights = self.vectors_ngrams
ngram_hashes = ft_ngram_hashes(word, self.min_n, self.max_n, self.bucket)
if len(ngram_hashes) == 0:
#
# If it is impossible to extract _any_ ngrams from the input
# word, then the best we can do is return a vector that points
# to the origin. The reference FB implementation does this,
# too.
#
# https://github.com/RaRe-Technologies/gensim/issues/2402
#
logger.warning('could not extract any ngrams from %r, returning origin vector', word)
return word_vec
for nh in ngram_hashes:
word_vec += ngram_weights[nh]
if norm:
return word_vec / np.linalg.norm(word_vec)
else:
return word_vec / len(ngram_hashes)
def get_sentence_vector(self, sentence):
"""Get a single 1-D vector representation for a given `sentence`.
This function is a workalike of the official fastText `get_sentence_vector()`.
Parameters
----------
sentence : list of (str or int)
list of words specified by string or int ids.
Returns
-------
numpy.ndarray
1-D numpy array representation of the `sentence`.
"""
return super(FastTextKeyedVectors, self).get_mean_vector(sentence)
def resize_vectors(self, seed=0):
"""Make underlying vectors match 'index_to_key' size; random-initialize any new rows."""
vocab_shape = (len(self.index_to_key), self.vector_size)
# Unlike in superclass, 'vectors_vocab' array is primary with 'vectors' derived from it & ngrams
self.vectors_vocab = prep_vectors(vocab_shape, prior_vectors=self.vectors_vocab, seed=seed)
ngrams_shape = (self.bucket, self.vector_size)
self.vectors_ngrams = prep_vectors(ngrams_shape, prior_vectors=self.vectors_ngrams, seed=seed + 1)
self.allocate_vecattrs()
self.norms = None
self.recalc_char_ngram_buckets() # ensure new words have precalc buckets
self.adjust_vectors() # ensure `vectors` filled as well (though may be nonsense pre-training)
def init_post_load(self, fb_vectors):
"""Perform initialization after loading a native Facebook model.
Expects that the vocabulary (self.key_to_index) has already been initialized.
Parameters
----------
fb_vectors : np.array
A matrix containing vectors for all the entities, including words
and ngrams. This comes directly from the binary model.
The order of the vectors must correspond to the indices in
the vocabulary.
"""
vocab_words = len(self)
assert fb_vectors.shape[0] == vocab_words + self.bucket, 'unexpected number of vectors'
assert fb_vectors.shape[1] == self.vector_size, 'unexpected vector dimensionality'
#
# The incoming vectors contain vectors for both words AND
# ngrams. We split them into two separate matrices, because our
# implementation treats them differently.
#
self.vectors_vocab = np.array(fb_vectors[:vocab_words, :])
self.vectors_ngrams = np.array(fb_vectors[vocab_words:, :])
self.recalc_char_ngram_buckets()
self.adjust_vectors() # calculate composite full-word vectors
def adjust_vectors(self):
"""Adjust the vectors for words in the vocabulary.
The adjustment composes the trained full-word-token vectors with
the vectors of the subword ngrams, matching the Facebook reference
implementation behavior.
"""
if self.bucket == 0:
self.vectors = self.vectors_vocab # no ngrams influence
return
self.vectors = self.vectors_vocab[:].copy()
for i, _ in enumerate(self.index_to_key):
ngram_buckets = self.buckets_word[i]
for nh in ngram_buckets:
self.vectors[i] += self.vectors_ngrams[nh]
self.vectors[i] /= len(ngram_buckets) + 1
def recalc_char_ngram_buckets(self):
"""
Scan the vocabulary, calculate ngrams and their hashes, and cache the list of ngrams for each known word.
"""
# TODO: evaluate if precaching even necessary, compared to recalculating as needed.
if self.bucket == 0:
self.buckets_word = [np.array([], dtype=np.uint32)] * len(self.index_to_key)
return
self.buckets_word = [None] * len(self.index_to_key)
for i, word in enumerate(self.index_to_key):
self.buckets_word[i] = np.array(
ft_ngram_hashes(word, self.min_n, self.max_n, self.bucket),
dtype=np.uint32,
)
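# Illustrative sketch (not part of the original module): how a vector is synthesized
# for an out-of-vocabulary word. The word is broken into character ngrams, each ngram
# is hashed into one of `bucket` slots, and the corresponding rows of vectors_ngrams
# are averaged. This mirrors the OOV branch of get_vector() above (with norm=False)
# and is shown here only for clarity.
def _example_compose_oov_vector(kv, word):
    hashes = ft_ngram_hashes(word, kv.min_n, kv.max_n, kv.bucket)
    if not hashes:
        # no ngrams could be extracted; get_vector() returns the origin vector here too
        return np.zeros(kv.vector_size, dtype=REAL)
    vec = np.sum(kv.vectors_ngrams[hashes], axis=0)
    return vec / len(hashes)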
def _pad_random(m, new_rows, rand):
"""Pad a matrix with additional rows filled with random values."""
_, columns = m.shape
low, high = -1.0 / columns, 1.0 / columns
suffix = rand.uniform(low, high, (new_rows, columns)).astype(REAL)
return vstack([m, suffix])
def _unpack(m, num_rows, hash2index, seed=1, fill=None):
"""Restore the array to its natural shape, undoing the optimization.
A packed matrix contains contiguous vectors for ngrams, as well as a hashmap.
The hash map maps the ngram hash to its index in the packed matrix.
To unpack the matrix, we need to do several things:
1. Restore the matrix to its "natural" shape, where the number of rows
equals the number of buckets.
2. Rearrange the existing rows such that the hashmap becomes the identity
function and is thus redundant.
3. Fill the new rows with random values.
Parameters
----------
m : np.ndarray
The matrix to restore.
num_rows : int
The number of rows that this array should have.
hash2index : dict
The mapping from an ngram hash to its row index in the packed matrix, i.e. the product of the optimization we are undoing.
seed : float, optional
The seed for the PRNG. Will be used to initialize new rows.
fill : float or array or None, optional
Value for new rows. If None (the default), randomly initialize.
Returns
-------
np.array
The unpacked matrix.
Notes
-----
The unpacked matrix will reference some rows in the input matrix to save memory.
Throw away the old matrix after calling this function, or use np.copy.
"""
orig_rows, *more_dims = m.shape
if orig_rows == num_rows:
#
# Nothing to do.
#
return m
assert num_rows > orig_rows
if fill is None:
rand_obj = np.random
rand_obj.seed(seed)
#
# Rows at the top of the matrix (the first orig_rows) will contain "packed" learned vectors.
# Rows at the bottom of the matrix will be "free": initialized to random values.
#
m = _pad_random(m, num_rows - orig_rows, rand_obj)
else:
m = np.concatenate([m, [fill] * (num_rows - orig_rows)])
#
# Swap rows to transform hash2index into the identity function.
# There are two kinds of swaps.
# First, rearrange the rows that belong entirely within the original matrix dimensions.
# Second, swap out rows from the original matrix dimensions, replacing them with
# randomly initialized values.
#
# N.B. We only do the swap in one direction, because doing it in both directions
# nullifies the effect.
#
swap = {h: i for (h, i) in hash2index.items() if h < i < orig_rows}
swap.update({h: i for (h, i) in hash2index.items() if h >= orig_rows})
for h, i in swap.items():
assert h != i
m[[h, i]] = m[[i, h]] # swap rows i and h
return m
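# Illustrative sketch (not part of the original module): a minimal example of the
# unpacking above. Suppose 3 rows were packed and hash2index records that the ngram
# with hash 3 was stored at row 1. After unpacking to 4 rows, row 3 of the result
# holds that learned vector, so the hash can be used directly as the row index and
# the hashmap becomes redundant. The toy matrix below is arbitrary.
def _example_unpack():
    packed = np.arange(6, dtype=REAL).reshape(3, 2)  # 3 packed rows, 2 dimensions
    unpacked = _unpack(packed.copy(), num_rows=4, hash2index={3: 1})
    assert np.allclose(unpacked[3], packed[1])  # learned vector moved to its hash slot
    return unpacked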
#
# UTF-8 bytes that begin with 10 are subsequent bytes of a multi-byte sequence,
# as opposed to a new character.
#
_MB_MASK = 0xC0
_MB_START = 0x80
def _is_utf8_continue(b):
return b & _MB_MASK == _MB_START
def ft_ngram_hashes(word, minn, maxn, num_buckets):
"""Calculate the ngrams of the word and hash them.
Parameters
----------
word : str
The word to calculate ngram hashes for.
minn : int
Minimum ngram length
maxn : int
Maximum ngram length
num_buckets : int
The number of buckets
Returns
-------
A list of hashes (integers), one per each detected ngram.
"""
encoded_ngrams = compute_ngrams_bytes(word, minn, maxn)
hashes = [ft_hash_bytes(n) % num_buckets for n in encoded_ngrams]
return hashes
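# Illustrative sketch (not part of the original module): ft_ngram_hashes() first
# extracts the character ngrams of the word (via compute_ngrams_bytes) and then maps
# each ngram into one of `num_buckets` slots. The exact hash values depend on the
# hash function used by fastText; only the list length and the 0..num_buckets-1
# range are guaranteed. The word and bucket count below are arbitrary examples.
def _example_ngram_hashes(word="night", minn=3, maxn=5, num_buckets=2000000):
    hashes = ft_ngram_hashes(word, minn, maxn, num_buckets)
    assert all(0 <= h < num_buckets for h in hashes)
    return hashes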
# BACKWARD COMPATIBILITY FOR OLDER PICKLES
from gensim.models import keyedvectors # noqa: E402
keyedvectors.FastTextKeyedVectors = FastTextKeyedVectors
| 55,109 | Python | .py | 1,114 | 41.248654 | 120 | 0.652901 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,114 | lsimodel.py | piskvorky_gensim/gensim/models/lsimodel.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Module for `Latent Semantic Analysis (aka Latent Semantic Indexing)
<https://en.wikipedia.org/wiki/Latent_semantic_analysis#Latent_semantic_indexing>`_.
Implements fast truncated SVD (Singular Value Decomposition). The SVD decomposition can be updated with new observations
at any time, allowing online, incremental, memory-efficient training.
This module actually contains several algorithms for decomposition of large corpora, a
combination of which effectively and transparently allows building LSI models for:
* corpora much larger than RAM: only constant memory is needed, independent of
the corpus size
* corpora that are streamed: documents are only accessed sequentially, no
random access
* corpora that cannot be even temporarily stored: each document can only be
seen once and must be processed immediately (one-pass algorithm)
* distributed computing for very large corpora, making use of a cluster of
machines
Wall-clock `performance on the English Wikipedia <https://radimrehurek.com/gensim/wiki.html>`_
(2G corpus positions, 3.2M documents, 100K features, 0.5G non-zero entries in the final TF-IDF matrix),
requesting the top 400 LSI factors:
====================================================== ============ ==================
algorithm serial distributed
====================================================== ============ ==================
one-pass merge algorithm 5h14m 1h41m
multi-pass stochastic algo (with 2 power iterations) 5h39m N/A [1]_
====================================================== ============ ==================
*serial* = Core 2 Duo MacBook Pro 2.53GHz, 4GB RAM, vecLib
*distributed* = cluster of four logical nodes on three physical machines, each
with dual core Xeon 2.0GHz, 4GB RAM, ATLAS
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import common_dictionary, common_corpus
>>> from gensim.models import LsiModel
>>>
>>> model = LsiModel(common_corpus, id2word=common_dictionary)
>>> vectorized_corpus = model[common_corpus] # vectorize input corpus in BoW format
.. [1] The stochastic algo could be distributed too, but most time is already spent
reading/decompressing the input from disk in its 4 passes. The extra network
traffic due to data distribution across cluster nodes would likely make it
*slower*.
"""
import logging
import sys
import time
import numpy as np
import scipy.linalg
import scipy.sparse
from scipy.sparse import sparsetools
from gensim import interfaces, matutils, utils
from gensim.models import basemodel
from gensim.utils import is_empty
logger = logging.getLogger(__name__)
# accuracy defaults for the multi-pass stochastic algo
P2_EXTRA_DIMS = 100 # set to `None` for dynamic P2_EXTRA_DIMS=k
P2_EXTRA_ITERS = 2
def clip_spectrum(s, k, discard=0.001):
"""Find how many factors should be kept to avoid storing spurious (tiny, numerically unstable) values.
Parameters
----------
s : list of float
Eigenvalues of the original matrix.
k : int
Maximum desired rank (number of factors)
discard: float
Percentage of the spectrum's energy to be discarded.
Returns
-------
int
Rank (number of factors) of the reduced matrix.
"""
# compute relative contribution of eigenvalues towards the energy spectrum
rel_spectrum = np.abs(1.0 - np.cumsum(s / np.sum(s)))
# ignore the last `discard` mass (or 1/k, whichever is smaller) of the spectrum
small = 1 + len(np.where(rel_spectrum > min(discard, 1.0 / k))[0])
k = min(k, small) # clip against k
logger.info("keeping %i factors (discarding %.3f%% of energy spectrum)", k, 100 * rel_spectrum[k - 1])
return k
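# Illustrative sketch (not part of the original module): a worked example of the
# clipping above. With eigenvalues [5.0, 3.0, 1.0, 1e-9] and k=4, the last value
# carries a negligible share of the total energy, so it falls below the
# min(discard, 1/k) threshold and only 3 factors are kept.
def _example_clip_spectrum():
    s = np.array([5.0, 3.0, 1.0, 1e-9])
    kept = clip_spectrum(s, k=4)
    assert kept == 3
    return kept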
def asfarray(a, name=''):
"""Get an array laid out in Fortran order in memory.
Parameters
----------
a : numpy.ndarray
Input array.
name : str, optional
Array name, used only for logging purposes.
Returns
-------
np.ndarray
The input `a` in Fortran, or column-major order.
"""
if not a.flags.f_contiguous:
logger.debug("converting %s array %s to FORTRAN order", a.shape, name)
a = np.asfortranarray(a)
return a
def ascarray(a, name=''):
"""Return a contiguous array in memory (C order).
Parameters
----------
a : numpy.ndarray
Input array.
name : str, optional
Array name, used for logging purposes.
Returns
-------
np.ndarray
Contiguous array (row-major order) of same shape and content as `a`.
"""
if not a.flags.contiguous:
logger.debug("converting %s array %s to C order", a.shape, name)
a = np.ascontiguousarray(a)
return a
class Projection(utils.SaveLoad):
"""Low dimensional projection of a term-document matrix.
This is the class taking care of the 'core math': interfacing with corpora, splitting large corpora into chunks
and merging them, etc. This is done through the higher-level :class:`~gensim.models.lsimodel.LsiModel` class.
Notes
-----
The projection can be later updated by merging it with another :class:`~gensim.models.lsimodel.Projection`
via :meth:`~gensim.models.lsimodel.Projection.merge`. This is how incremental training actually happens.
"""
def __init__(
self, m, k, docs=None, use_svdlibc=False, power_iters=P2_EXTRA_ITERS,
extra_dims=P2_EXTRA_DIMS, dtype=np.float64, random_seed=None,
):
"""Construct the (U, S) projection from a corpus.
Parameters
----------
m : int
Number of features (terms) in the corpus.
k : int
Desired rank of the decomposed matrix.
docs : {iterable of list of (int, float), scipy.sparse.csc}
Corpus in BoW format or as sparse matrix.
use_svdlibc : bool, optional
If True - will use `sparsesvd library <https://pypi.org/project/sparsesvd/>`_,
otherwise - our own version will be used.
power_iters: int, optional
Number of power iteration steps to be used. Tune to improve accuracy.
extra_dims : int, optional
Extra samples to be used besides the rank `k`. Tune to improve accuracy.
dtype : numpy.dtype, optional
Enforces a type for elements of the decomposed matrix.
random_seed: {None, int}, optional
Random seed used to initialize the pseudo-random number generator,
a local instance of numpy.random.RandomState instance.
"""
self.m, self.k = m, k
self.power_iters = power_iters
self.extra_dims = extra_dims
self.random_seed = random_seed
if docs is not None:
# base case decomposition: given a job `docs`, compute its decomposition,
# *in-core*.
if not use_svdlibc:
u, s = stochastic_svd(
docs, k, chunksize=sys.maxsize,
num_terms=m, power_iters=self.power_iters,
extra_dims=self.extra_dims, dtype=dtype, random_seed=self.random_seed)
else:
try:
import sparsesvd
except ImportError:
raise ImportError("`sparsesvd` module requested but not found; run `pip install sparsesvd`")
logger.info("computing sparse SVD of %s matrix", str(docs.shape))
if not scipy.sparse.issparse(docs):
docs = matutils.corpus2csc(docs)
# ask for extra factors, because for some reason SVDLIBC sometimes returns fewer factors than requested
ut, s, vt = sparsesvd.sparsesvd(docs, k + 30)
u = ut.T
del ut, vt
k = clip_spectrum(s ** 2, self.k)
self.u = u[:, :k].copy()
self.s = s[:k].copy()
else:
self.u, self.s = None, None
def empty_like(self):
"""Get an empty Projection with the same parameters as the current object.
Returns
-------
:class:`~gensim.models.lsimodel.Projection`
An empty copy (without corpus) of the current projection.
"""
return Projection(
self.m, self.k, power_iters=self.power_iters,
extra_dims=self.extra_dims, random_seed=self.random_seed,
)
def merge(self, other, decay=1.0):
"""Merge current :class:`~gensim.models.lsimodel.Projection` instance with another.
Warnings
--------
The content of `other` is destroyed in the process, so pass this function a copy of `other`
if you need it further. The `other` :class:`~gensim.models.lsimodel.Projection` is expected to contain
the same number of features.
Parameters
----------
other : :class:`~gensim.models.lsimodel.Projection`
The Projection object to be merged into the current one. It will be destroyed after merging.
decay : float, optional
Weight of existing observations relatively to new ones.
Setting `decay` < 1.0 causes re-orientation towards new data trends in the input document stream,
by giving less emphasis to old observations. This allows LSA to gradually "forget" old observations
(documents) and give more preference to new ones.
"""
if other.u is None:
# the other projection is empty => do nothing
return
if self.u is None:
# we are empty => result of merge is the other projection, whatever it is
self.u = other.u.copy()
self.s = other.s.copy()
return
if self.m != other.m:
raise ValueError(
"vector space mismatch: update is using %s features, expected %s" % (other.m, self.m)
)
logger.info("merging projections: %s + %s", str(self.u.shape), str(other.u.shape))
m, n1, n2 = self.u.shape[0], self.u.shape[1], other.u.shape[1]
# TODO Maybe keep the bases as elementary reflectors, without
# forming explicit matrices with ORGQR.
# The only operation we ever need is basis^T*basis and basis*component.
# But how to do that in scipy? And is it fast(er)?
# find component of u2 orthogonal to u1
logger.debug("constructing orthogonal component")
self.u = asfarray(self.u, 'self.u')
c = np.dot(self.u.T, other.u)
self.u = ascarray(self.u, 'self.u')
other.u -= np.dot(self.u, c)
other.u = [other.u] # do some reference magic and call qr_destroy, to save RAM
q, r = matutils.qr_destroy(other.u) # q, r = QR(component)
assert not other.u
# find the rotation that diagonalizes r
k = np.bmat([
[np.diag(decay * self.s), np.multiply(c, other.s)],
[matutils.pad(np.array([]).reshape(0, 0), min(m, n2), n1), np.multiply(r, other.s)]
])
logger.debug("computing SVD of %s dense matrix", k.shape)
try:
# in np < 1.1.0, running SVD sometimes results in "LinAlgError: SVD did not converge".
# for these early versions of np, catch the error and try to compute
# SVD again, but over k*k^T.
# see http://www.mail-archive.com/np-discussion@scipy.org/msg07224.html and
# bug ticket http://projects.scipy.org/np/ticket/706
# sdoering: replaced np's linalg.svd with scipy's linalg.svd:
# TODO *ugly overkill*!! only need first self.k SVD factors... but there is no LAPACK wrapper
# for partial svd/eigendecomp in np :( //sdoering: maybe there is one in scipy?
u_k, s_k, _ = scipy.linalg.svd(k, full_matrices=False)
except scipy.linalg.LinAlgError:
logger.error("SVD(A) failed; trying SVD(A * A^T)")
# if this fails too, give up with an exception
u_k, s_k, _ = scipy.linalg.svd(np.dot(k, k.T), full_matrices=False)
s_k = np.sqrt(s_k) # go back from eigenvalues to singular values
k = clip_spectrum(s_k ** 2, self.k)
u1_k, u2_k, s_k = np.array(u_k[:n1, :k]), np.array(u_k[n1:, :k]), s_k[:k]
# update & rotate current basis U = [U, U']*[U1_k, U2_k]
logger.debug("updating orthonormal basis U")
self.s = s_k
self.u = ascarray(self.u, 'self.u')
self.u = np.dot(self.u, u1_k)
q = ascarray(q, 'q')
q = np.dot(q, u2_k)
self.u += q
# make each column of U start with a non-negative number (to force canonical decomposition)
if self.u.shape[0] > 0:
for i in range(self.u.shape[1]):
if self.u[0, i] < 0.0:
self.u[:, i] *= -1.0
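# Illustrative sketch (not part of the original module): incremental training boils
# down to decomposing each chunk into its own Projection and folding it into the
# running one with merge(). The common_corpus fixture and the split point below are
# arbitrary, chosen only to keep the example tiny.
def _example_merge_projections():
    from gensim.test.utils import common_corpus, common_dictionary

    num_terms = len(common_dictionary)
    p1 = Projection(num_terms, k=2, docs=common_corpus[:5])
    p2 = Projection(num_terms, k=2, docs=common_corpus[5:])
    p1.merge(p2, decay=1.0)  # note: p2 is destroyed in the process
    return p1.u.shape, p1.s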
class LsiModel(interfaces.TransformationABC, basemodel.BaseTopicModel):
"""Model for `Latent Semantic Indexing
<https://en.wikipedia.org/wiki/Latent_semantic_analysis#Latent_semantic_indexing>`_.
The decomposition algorithm is described in `"Fast and Faster: A Comparison of Two Streamed
Matrix Decomposition Algorithms" <https://arxiv.org/pdf/1102.5597.pdf>`_.
Notes
-----
* :attr:`gensim.models.lsimodel.LsiModel.projection.u` - left singular vectors,
* :attr:`gensim.models.lsimodel.LsiModel.projection.s` - singular values,
* ``model[training_corpus]`` - right singular vectors (can be reconstructed if needed).
See Also
--------
`FAQ about LSI matrices
<https://github.com/RaRe-Technologies/gensim/wiki/Recipes-&-FAQ#q4-how-do-you-output-the-u-s-vt-matrices-of-lsi>`_.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus, common_dictionary, get_tmpfile
>>> from gensim.models import LsiModel
>>>
>>> model = LsiModel(common_corpus[:3], id2word=common_dictionary) # train model
>>> vector = model[common_corpus[4]] # apply model to BoW document
>>> model.add_documents(common_corpus[4:]) # update model with new documents
>>> tmp_fname = get_tmpfile("lsi.model")
>>> model.save(tmp_fname) # save model
>>> loaded_model = LsiModel.load(tmp_fname) # load model
"""
def __init__(
self, corpus=None, num_topics=200, id2word=None, chunksize=20000,
decay=1.0, distributed=False, onepass=True, power_iters=P2_EXTRA_ITERS,
extra_samples=P2_EXTRA_DIMS, dtype=np.float64, random_seed=None,
):
"""Build an LSI model.
Parameters
----------
corpus : {iterable of list of (int, float), scipy.sparse.csc}, optional
Stream of document vectors or a sparse matrix of shape (`num_documents`, `num_terms`).
num_topics : int, optional
Number of requested factors (latent dimensions)
id2word : dict of {int: str}, optional
ID to word mapping, optional.
chunksize : int, optional
Number of documents to be used in each training chunk.
decay : float, optional
Weight of existing observations relatively to new ones.
distributed : bool, optional
If True - distributed mode (parallel execution on several machines) will be used.
onepass : bool, optional
Whether the one-pass algorithm should be used for training.
Pass `False` to force a multi-pass stochastic algorithm.
power_iters: int, optional
Number of power iteration steps to be used.
Increasing the number of power iterations improves accuracy, but lowers performance
extra_samples : int, optional
Extra samples to be used besides the rank `k`. Can improve accuracy.
dtype : type, optional
Enforces a type for elements of the decomposed matrix.
random_seed: {None, int}, optional
Random seed used to initialize the pseudo-random number generator,
a local instance of numpy.random.RandomState instance.
"""
self.id2word = id2word
self.num_topics = int(num_topics)
self.chunksize = int(chunksize)
self.decay = float(decay)
if distributed:
if not onepass:
logger.warning("forcing the one-pass algorithm for distributed LSA")
onepass = True
self.onepass = onepass
self.extra_samples, self.power_iters = extra_samples, power_iters
self.dtype = dtype
self.random_seed = random_seed
if corpus is None and self.id2word is None:
raise ValueError(
'at least one of corpus/id2word must be specified, to establish input space dimensionality'
)
if self.id2word is None:
logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
else:
self.num_terms = 1 + (max(self.id2word.keys()) if self.id2word else -1)
self.docs_processed = 0
self.projection = Projection(
self.num_terms, self.num_topics, power_iters=self.power_iters,
extra_dims=self.extra_samples, dtype=dtype, random_seed=self.random_seed
)
self.numworkers = 1
if not distributed:
logger.info("using serial LSI version on this node")
self.dispatcher = None
else:
if not onepass:
raise NotImplementedError(
"distributed stochastic LSA not implemented yet; "
"run either distributed one-pass, or serial randomized."
)
try:
import Pyro4
dispatcher = Pyro4.Proxy('PYRONAME:gensim.lsi_dispatcher')
logger.debug("looking for dispatcher at %s", str(dispatcher._pyroUri))
dispatcher.initialize(
id2word=self.id2word, num_topics=num_topics, chunksize=chunksize, decay=decay,
power_iters=self.power_iters, extra_samples=self.extra_samples, distributed=False, onepass=onepass
)
self.dispatcher = dispatcher
self.numworkers = len(dispatcher.getworkers())
logger.info("using distributed version with %i workers", self.numworkers)
except Exception as err:
# distributed version was specifically requested, so this is an error state
logger.error("failed to initialize distributed LSI (%s)", err)
raise RuntimeError("failed to initialize distributed LSI (%s)" % err)
if corpus is not None:
start = time.time()
self.add_documents(corpus)
self.add_lifecycle_event(
"created",
msg=f"trained {self} in {time.time() - start:.2f}s",
)
def add_documents(self, corpus, chunksize=None, decay=None):
"""Update model with new `corpus`.
Parameters
----------
corpus : {iterable of list of (int, float), scipy.sparse.csc}
Stream of document vectors or sparse matrix of shape (`num_terms`, num_documents).
chunksize : int, optional
Number of documents to be used in each training chunk, will use `self.chunksize` if not specified.
decay : float, optional
Weight of existing observations relatively to new ones, will use `self.decay` if not specified.
Notes
-----
Training proceeds in chunks of `chunksize` documents at a time. The size of `chunksize` is a tradeoff
between increased speed (bigger `chunksize`) vs. lower memory footprint (smaller `chunksize`).
If the distributed mode is on, each chunk is sent to a different worker/computer.
"""
logger.info("updating model with new documents")
# get computation parameters; if not specified, use the ones from constructor
if chunksize is None:
chunksize = self.chunksize
if decay is None:
decay = self.decay
if is_empty(corpus):
logger.warning('LsiModel.add_documents() called but no documents provided, is this intended?')
if not scipy.sparse.issparse(corpus):
if not self.onepass:
# we are allowed multiple passes over the input => use a faster, randomized two-pass algo
update = Projection(
self.num_terms, self.num_topics, None,
dtype=self.dtype, random_seed=self.random_seed,
)
update.u, update.s = stochastic_svd(
corpus, self.num_topics,
num_terms=self.num_terms, chunksize=chunksize,
extra_dims=self.extra_samples, power_iters=self.power_iters, dtype=self.dtype,
random_seed=self.random_seed,
)
self.projection.merge(update, decay=decay)
self.docs_processed += len(corpus) if hasattr(corpus, '__len__') else 0
else:
# the one-pass algo
doc_no = 0
if self.dispatcher:
logger.info('initializing %s workers', self.numworkers)
self.dispatcher.reset()
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info("preparing a new chunk of documents")
nnz = sum(len(doc) for doc in chunk)
# construct the job as a sparse matrix, to minimize memory overhead
# definitely avoid materializing it as a dense matrix!
logger.debug("converting corpus to csc format")
job = matutils.corpus2csc(
chunk, num_docs=len(chunk), num_terms=self.num_terms,
num_nnz=nnz, dtype=self.dtype,
)
del chunk
doc_no += job.shape[1]
if self.dispatcher:
# distributed version: add this job to the job queue, so workers can work on it
logger.debug("creating job #%i", chunk_no)
# put job into queue; this will eventually block, because the queue has a small finite size
self.dispatcher.putjob(job)
del job
logger.info("dispatched documents up to #%s", doc_no)
else:
# serial version, there is only one "worker" (myself) => process the job directly
update = Projection(
self.num_terms, self.num_topics, job, extra_dims=self.extra_samples,
power_iters=self.power_iters, dtype=self.dtype, random_seed=self.random_seed,
)
del job
self.projection.merge(update, decay=decay)
del update
logger.info("processed documents up to #%s", doc_no)
self.print_topics(5)
# wait for all workers to finish (distributed version only)
if self.dispatcher:
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
self.projection = self.dispatcher.getstate()
self.docs_processed += doc_no
else:
assert not self.dispatcher, "must be in serial mode to receive jobs"
update = Projection(
self.num_terms, self.num_topics, corpus.tocsc(), extra_dims=self.extra_samples,
power_iters=self.power_iters, dtype=self.dtype,
)
self.projection.merge(update, decay=decay)
logger.info("processed sparse job of %i documents", corpus.shape[1])
self.docs_processed += corpus.shape[1]
def __str__(self):
"""Get a human readable representation of model.
Returns
-------
str
A human readable string of the current object's parameters.
"""
return "%s<num_terms=%s, num_topics=%s, decay=%s, chunksize=%s>" % (
self.__class__.__name__, self.num_terms, self.num_topics, self.decay, self.chunksize
)
def __getitem__(self, bow, scaled=False, chunksize=512):
"""Get the latent representation for `bow`.
Parameters
----------
bow : {list of (int, int), iterable of list of (int, int)}
Document or corpus in BoW representation.
scaled : bool, optional
If True - topics will be scaled by the inverse of singular values.
chunksize : int, optional
Number of documents to be used in each applying chunk.
Returns
-------
list of (int, float)
Latent representation of topics in BoW format for document **OR**
:class:`gensim.matutils.Dense2Corpus`
Latent representation of corpus in BoW format if `bow` is corpus.
"""
if self.projection.u is None:
raise ValueError('No training data provided - LSI model not initialized yet')
# if the input vector is in fact a corpus, return a transformed corpus as a result
is_corpus, bow = utils.is_corpus(bow)
if is_corpus and chunksize:
# by default, transform `chunksize` documents at once, when called as `lsi[corpus]`.
# this chunking is completely transparent to the user, but it speeds
# up internal computations (one mat * mat multiplication, instead of
# `chunksize` smaller mat * vec multiplications).
return self._apply(bow, chunksize=chunksize)
if not is_corpus:
bow = [bow]
# convert input to scipy.sparse CSC, then do "sparse * dense = dense" multiplication
vec = matutils.corpus2csc(bow, num_terms=self.num_terms, dtype=self.projection.u.dtype)
topic_dist = (vec.T * self.projection.u[:, :self.num_topics]).T # (x^T * u).T = u^-1 * x
# # convert input to dense, then do dense * dense multiplication
# # ± same performance as above (BLAS dense * dense is better optimized than scipy.sparse),
# but consumes more memory
# vec = matutils.corpus2dense(bow, num_terms=self.num_terms, num_docs=len(bow))
# topic_dist = np.dot(self.projection.u[:, :self.num_topics].T, vec)
# # use np's advanced indexing to simulate sparse * dense
# # ± same speed again
# u = self.projection.u[:, :self.num_topics]
# topic_dist = np.empty((u.shape[1], len(bow)), dtype=u.dtype)
# for vecno, vec in enumerate(bow):
# indices, data = zip(*vec) if vec else ([], [])
# topic_dist[:, vecno] = np.dot(u.take(indices, axis=0).T, np.array(data, dtype=u.dtype))
if not is_corpus:
# convert back from matrix into a 1d vec
topic_dist = topic_dist.reshape(-1)
if scaled:
topic_dist = (1.0 / self.projection.s[:self.num_topics]) * topic_dist # s^-1 * u^-1 * x
# convert a np array to gensim sparse vector = tuples of (feature_id, feature_weight),
# with no zero weights.
if not is_corpus:
# lsi[single_document]
result = matutils.full2sparse(topic_dist)
else:
# lsi[chunk of documents]
result = matutils.Dense2Corpus(topic_dist)
return result
def get_topics(self):
"""Get the topic vectors.
Notes
-----
The number of topics can actually be smaller than `self.num_topics`, if there were not enough factors
in the matrix (real rank of input matrix smaller than `self.num_topics`).
Returns
-------
np.ndarray
The term topic matrix with shape (`num_topics`, `vocabulary_size`)
"""
projections = self.projection.u.T
num_topics = len(projections)
topics = []
for i in range(num_topics):
c = np.asarray(projections[i, :]).flatten()
norm = np.sqrt(np.sum(np.dot(c, c)))
topics.append(1.0 * c / norm)
return np.array(topics)
def show_topic(self, topicno, topn=10):
"""Get the words that define a topic along with their contribution.
This is actually the left singular vector of the specified topic.
The most important words in defining the topic (greatest absolute value) are included
in the output, along with their contribution to the topic.
Parameters
----------
topicno : int
The topic's id number.
topn : int
Number of words to be included in the result.
Returns
-------
list of (str, float)
Topic representation in BoW format.
"""
# size of the projection matrix can actually be smaller than `self.num_topics`,
# if there were not enough factors (real rank of input matrix smaller than
# `self.num_topics`). in that case, return an empty string
if topicno >= len(self.projection.u.T):
return ''
c = np.asarray(self.projection.u.T[topicno, :]).flatten()
norm = np.sqrt(np.sum(np.dot(c, c)))
most = matutils.argsort(np.abs(c), topn, reverse=True)
# Output only (word, score) pairs for `val`s that are within `self.id2word`. See #3090 for details.
return [(self.id2word[val], 1.0 * c[val] / norm) for val in most if val in self.id2word]
def show_topics(self, num_topics=-1, num_words=10, log=False, formatted=True):
"""Get the most significant topics.
Parameters
----------
num_topics : int, optional
The number of topics to be selected, if -1 - all topics will be in result (ordered by significance).
num_words : int, optional
The number of words to be included per topics (ordered by significance).
log : bool, optional
If True - log topics with logger.
formatted : bool, optional
If True - each topic represented as string, otherwise - in BoW format.
Returns
-------
list of (int, str)
If `formatted=True`, return sequence with (topic_id, string representation of topics) **OR**
list of (int, list of (str, float))
Otherwise, return sequence with (topic_id, [(word, value), ... ]).
"""
shown = []
if num_topics < 0:
num_topics = self.num_topics
for i in range(min(num_topics, self.num_topics)):
if i < len(self.projection.s):
if formatted:
topic = self.print_topic(i, topn=num_words)
else:
topic = self.show_topic(i, topn=num_words)
shown.append((i, topic))
if log:
logger.info("topic #%i(%.3f): %s", i, self.projection.s[i], topic)
return shown
def print_debug(self, num_topics=5, num_words=10):
"""Print (to log) the most salient words of the first `num_topics` topics.
Unlike :meth:`~gensim.models.lsimodel.LsiModel.print_topics`, this looks for words that are significant for
a particular topic *and* not for others. This *should* result in a
more human-interpretable description of topics.
Alias for :func:`~gensim.models.lsimodel.print_debug`.
Parameters
----------
num_topics : int, optional
The number of topics to be selected (ordered by significance).
num_words : int, optional
The number of words to be included per topics (ordered by significance).
"""
# only wrap the module-level fnc
print_debug(
self.id2word, self.projection.u, self.projection.s,
range(min(num_topics, len(self.projection.u.T))),
num_words=num_words,
)
def save(self, fname, *args, **kwargs):
"""Save the model to a file.
Notes
-----
Large internal arrays may be stored into separate files, with `fname` as prefix.
Warnings
--------
Do not save as a compressed file if you intend to load the file back with `mmap`.
Parameters
----------
fname : str
Path to output file.
*args
Variable length argument list, see :meth:`gensim.utils.SaveLoad.save`.
**kwargs
Arbitrary keyword arguments, see :meth:`gensim.utils.SaveLoad.save`.
See Also
--------
:meth:`~gensim.models.lsimodel.LsiModel.load`
"""
if self.projection is not None:
self.projection.save(utils.smart_extension(fname, '.projection'), *args, **kwargs)
super(LsiModel, self).save(fname, *args, ignore=['projection', 'dispatcher'], **kwargs)
@classmethod
def load(cls, fname, *args, **kwargs):
"""Load a previously saved object using :meth:`~gensim.models.lsimodel.LsiModel.save` from file.
Notes
-----
Large arrays can be memmap'ed back as read-only (shared memory) by setting the `mmap='r'` parameter.
Parameters
----------
fname : str
Path to file that contains LsiModel.
*args
Variable length argument list, see :meth:`gensim.utils.SaveLoad.load`.
**kwargs
Arbitrary keyword arguments, see :meth:`gensim.utils.SaveLoad.load`.
See Also
--------
:meth:`~gensim.models.lsimodel.LsiModel.save`
Returns
-------
:class:`~gensim.models.lsimodel.LsiModel`
Loaded instance.
Raises
------
IOError
If this method is called on an instance rather than on the class.
"""
kwargs['mmap'] = kwargs.get('mmap', None)
result = super(LsiModel, cls).load(fname, *args, **kwargs)
projection_fname = utils.smart_extension(fname, '.projection')
try:
result.projection = super(LsiModel, cls).load(projection_fname, *args, **kwargs)
except Exception as e:
logger.warning("failed to load projection from %s: %s", projection_fname, e)
return result
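# Illustrative sketch (not part of the original module): applying a trained model to
# a single BoW document returns its coordinates in the latent space. The `scaled`
# option of __getitem__ additionally divides by the singular values; it is not
# reachable through the [] syntax, hence the explicit call below. The fixtures used
# are the standard gensim test corpus.
def _example_project_document():
    from gensim.test.utils import common_corpus, common_dictionary

    model = LsiModel(common_corpus, id2word=common_dictionary, num_topics=2)
    plain = model[common_corpus[0]]  # list of (topic_id, weight)
    scaled = model.__getitem__(common_corpus[0], scaled=True)
    return plain, scaled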
def print_debug(id2token, u, s, topics, num_words=10, num_neg=None):
"""Log the most salient words per topic.
Parameters
----------
id2token : :class:`~gensim.corpora.dictionary.Dictionary`
Mapping from ID to word in the Dictionary.
u : np.ndarray
The 2D U decomposition matrix.
s : np.ndarray
The 1D reduced array of eigenvalues used for decomposition.
topics : list of int
Sequence of topic IDs to be printed
num_words : int, optional
Number of words to be included for each topic.
num_neg : int, optional
Number of words with a negative contribution to a topic that should be included.
"""
if num_neg is None:
# by default, print half as many salient negative words as positive
num_neg = num_words / 2
logger.info('computing word-topic salience for %i topics', len(topics))
topics, result = set(topics), {}
# TODO speed up by block computation
for uvecno, uvec in enumerate(u):
uvec = np.abs(np.asarray(uvec).flatten())
udiff = uvec / np.sqrt(np.sum(np.dot(uvec, uvec)))
for topic in topics:
result.setdefault(topic, []).append((udiff[topic], uvecno))
logger.debug("printing %i+%i salient words", num_words, num_neg)
for topic in sorted(result.keys()):
weights = sorted(result[topic], key=lambda x: -abs(x[0]))
_, most = weights[0]
if u[most, topic] < 0.0: # the most significant word has a negative sign => flip sign of u[most]
normalize = -1.0
else:
normalize = 1.0
# order features according to salience; ignore near-zero entries in u
pos, neg = [], []
for weight, uvecno in weights:
if normalize * u[uvecno, topic] > 0.0001:
pos.append('%s(%.3f)' % (id2token[uvecno], u[uvecno, topic]))
if len(pos) >= num_words:
break
for weight, uvecno in weights:
if normalize * u[uvecno, topic] < -0.0001:
neg.append('%s(%.3f)' % (id2token[uvecno], u[uvecno, topic]))
if len(neg) >= num_neg:
break
logger.info('topic #%s(%.3f): %s, ..., %s', topic, s[topic], ', '.join(pos), ', '.join(neg))
def stochastic_svd(
corpus, rank, num_terms, chunksize=20000, extra_dims=None,
power_iters=0, dtype=np.float64, eps=1e-6, random_seed=None,
):
"""Run truncated Singular Value Decomposition (SVD) on a sparse input.
Parameters
----------
corpus : {iterable of list of (int, float), scipy.sparse}
Input corpus as a stream (does not have to fit in RAM)
or a sparse matrix of shape (`num_terms`, num_documents).
rank : int
Desired number of factors to be retained after decomposition.
num_terms : int
The number of features (terms) in `corpus`.
chunksize : int, optional
Number of documents to be used in each training chunk.
extra_dims : int, optional
Extra samples to be used besides the rank `k`. Can improve accuracy.
power_iters: int, optional
Number of power iteration steps to be used. Increasing the number of power iterations improves accuracy,
but lowers performance.
dtype : numpy.dtype, optional
Enforces a type for elements of the decomposed matrix.
eps: float, optional
Percentage of the spectrum's energy to be discarded.
random_seed: {None, int}, optional
Random seed used to initialize the pseudo-random number generator,
a local instance of numpy.random.RandomState instance.
Notes
-----
The corpus may be larger than RAM (iterator of vectors), if `corpus` is a `scipy.sparse.csc` instead,
it is assumed the whole corpus fits into core memory and a different (more efficient) code path is chosen.
This may return less than the requested number of top `rank` factors, in case the input itself is of lower rank.
The `extra_dims` (oversampling) and especially `power_iters` (power iterations) parameters affect accuracy of the
decomposition.
This algorithm uses `2 + power_iters` passes over the input data. In case you can only afford a single pass,
set `onepass=True` in :class:`~gensim.models.lsimodel.LsiModel` and avoid using this function directly.
The decomposition algorithm is based on `"Finding structure with randomness:
Probabilistic algorithms for constructing approximate matrix decompositions" <https://arxiv.org/abs/0909.4061>`_.
Returns
-------
(np.ndarray 2D, np.ndarray 1D)
The left singular vectors and the singular values of the `corpus`.
"""
rank = int(rank)
if extra_dims is None:
samples = max(10, 2 * rank) # use more samples than requested factors, to improve accuracy
else:
samples = rank + int(extra_dims)
logger.info("using %i extra samples and %i power iterations", samples - rank, power_iters)
num_terms = int(num_terms)
# first phase: construct the orthonormal action matrix Q = orth(Y) = orth((A * A.T)^q * A * O)
# build Y in blocks of `chunksize` documents (much faster than going one-by-one
# and more memory friendly than processing all documents at once)
y = np.zeros(dtype=dtype, shape=(num_terms, samples))
logger.info("1st phase: constructing %s action matrix", str(y.shape))
random_state = np.random.RandomState(random_seed)
if scipy.sparse.issparse(corpus):
m, n = corpus.shape
assert num_terms == m, f"mismatch in number of features: {m} in sparse matrix vs. {num_terms} parameter"
o = random_state.normal(0.0, 1.0, (n, samples)).astype(y.dtype) # draw a random gaussian matrix
sparsetools.csc_matvecs(
m, n, samples, corpus.indptr, corpus.indices,
corpus.data, o.ravel(), y.ravel(),
) # y = corpus * o
del o
# unlike np, scipy.sparse `astype()` copies everything, even if there is no change to dtype!
# so check for equal dtype explicitly, to avoid the extra memory footprint if possible
if y.dtype != dtype:
y = y.astype(dtype)
logger.info("orthonormalizing %s action matrix", str(y.shape))
y = [y]
q, _ = matutils.qr_destroy(y) # orthonormalize the range
logger.debug("running %i power iterations", power_iters)
for _ in range(power_iters):
q = corpus.T * q
q = [corpus * q]
q, _ = matutils.qr_destroy(q) # orthonormalize the range after each power iteration step
else:
num_docs = 0
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info('PROGRESS: at document #%i', (chunk_no * chunksize))
# construct the chunk as a sparse matrix, to minimize memory overhead
# definitely avoid materializing it as a dense (num_terms x chunksize) matrix!
s = sum(len(doc) for doc in chunk)
chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype) # documents = columns of sparse CSC
m, n = chunk.shape
assert m == num_terms
assert n <= chunksize # the very last chunk of A is allowed to be smaller in size
num_docs += n
logger.debug("multiplying chunk * gauss")
o = random_state.normal(0.0, 1.0, (n, samples), ).astype(dtype) # draw a random gaussian matrix
sparsetools.csc_matvecs(
m, n, samples, chunk.indptr, chunk.indices, # y = y + chunk * o
chunk.data, o.ravel(), y.ravel(),
)
del chunk, o
y = [y]
q, _ = matutils.qr_destroy(y) # orthonormalize the range
for power_iter in range(power_iters):
logger.info("running power iteration #%i", power_iter + 1)
yold = q.copy()
q[:] = 0.0
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info('PROGRESS: at document #%i/%i', chunk_no * chunksize, num_docs)
# documents = columns of sparse CSC
chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype)
tmp = chunk.T * yold
tmp = chunk * tmp
del chunk
q += tmp
del yold
q = [q]
q, _ = matutils.qr_destroy(q) # orthonormalize the range
qt = q[:, :samples].T.copy()
del q
if scipy.sparse.issparse(corpus):
b = qt * corpus
logger.info("2nd phase: running dense svd on %s matrix", str(b.shape))
u, s, vt = scipy.linalg.svd(b, full_matrices=False)
del b, vt
else:
# second phase: construct the covariance matrix X = B * B.T, where B = Q.T * A
# again, construct X incrementally, in chunks of `chunksize` documents from the streaming
# input corpus A, to avoid using O(number of documents) memory
x = np.zeros(shape=(qt.shape[0], qt.shape[0]), dtype=dtype)
logger.info("2nd phase: constructing %s covariance matrix", str(x.shape))
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info('PROGRESS: at document #%i/%i', chunk_no * chunksize, num_docs)
chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=qt.dtype)
b = qt * chunk # dense * sparse matrix multiply
del chunk
x += np.dot(b, b.T) # TODO should call the BLAS routine SYRK, but there is no SYRK wrapper in scipy :(
del b
# now we're ready to compute decomposition of the small matrix X
logger.info("running dense decomposition on %s covariance matrix", str(x.shape))
# could use linalg.eigh, but who cares... and svd returns the factors already sorted :)
u, s, vt = scipy.linalg.svd(x)
# sqrt to go back from singular values of X to singular values of B = singular values of the corpus
s = np.sqrt(s)
q = qt.T.copy()
del qt
logger.info("computing the final decomposition")
keep = clip_spectrum(s ** 2, rank, discard=eps)
u = u[:, :keep].copy()
s = s[:keep]
u = np.dot(q, u)
return u.astype(dtype), s.astype(dtype)
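# Illustrative sketch (not part of the original module): calling the truncated SVD
# directly on a streamed corpus. The rank and test fixtures below are arbitrary; the
# returned factors are the left singular vectors and singular values that Projection
# uses internally.
def _example_stochastic_svd():
    from gensim.test.utils import common_corpus, common_dictionary

    u, s = stochastic_svd(common_corpus, rank=3, num_terms=len(common_dictionary))
    assert u.shape[0] == len(common_dictionary)  # one row per term
    return u, s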
| 45,136 | Python | .py | 896 | 40.15625 | 120 | 0.61118 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,115 | nmf_pgd.pyx | piskvorky_gensim/gensim/models/nmf_pgd.pyx |
# Author: Timofey Yefimov
# cython: language_level=3
# cython: cdivision=True
# cython: boundscheck=False
# cython: wraparound=False
# cython: nonecheck=False
# cython: embedsignature=True
from libc.math cimport sqrt
from cython.parallel import prange
cdef double fmin(double x, double y) nogil:
return x if x < y else y
cdef double fmax(double x, double y) nogil:
return x if x > y else y
def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa):
"""Find optimal dense vector representation for current W and r matrices.
Parameters
----------
h : matrix
Dense representation of documents in current batch.
Wtv : matrix
WtW : matrix
Returns
-------
float
Cumulative difference between previous and current h vectors.
"""
cdef Py_ssize_t n_components = h.shape[0]
cdef Py_ssize_t n_samples = h.shape[1]
cdef double violation = 0
cdef double grad, projected_grad, hessian
cdef Py_ssize_t sample_idx = 0
cdef Py_ssize_t component_idx_1 = 0
cdef Py_ssize_t component_idx_2 = 0
for sample_idx in prange(n_samples, nogil=True):
for component_idx_1 in range(n_components):
component_idx_1 = permutation[component_idx_1]
grad = -Wtv[component_idx_1, sample_idx]
for component_idx_2 in range(n_components):
grad += WtW[component_idx_1, component_idx_2] * h[component_idx_2, sample_idx]
hessian = WtW[component_idx_1, component_idx_1]
grad = grad * kappa / hessian
projected_grad = fmin(0, grad) if h[component_idx_1, sample_idx] == 0 else grad
violation += projected_grad * projected_grad
h[component_idx_1, sample_idx] = fmax(h[component_idx_1, sample_idx] - grad, 0.)
return sqrt(violation)
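# Illustrative sketch (not part of the original module): a pure-NumPy re-statement of
# the update above, assuming plain NumPy arrays instead of typed memoryviews. It is
# useful only for checking the Cython kernel. For each component r (in the given
# permutation) it takes one projected gradient step on h, clipping at zero; `kappa`
# scales the step relative to 1/WtW[r, r].
def _solve_h_numpy(h, Wtv, WtW, permutation, kappa):
    import numpy as np

    violation = 0.0
    for r in permutation:
        grad = WtW[r].dot(h) - Wtv[r]  # gradient of 0.5*||v - W h||^2 w.r.t. h[r, :]
        step = grad * kappa / WtW[r, r]
        projected = np.where(h[r] == 0, np.minimum(step, 0), step)
        violation += float((projected ** 2).sum())
        h[r] = np.maximum(h[r] - step, 0.0)
    return np.sqrt(violation)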
| 1,869 | Python | .py | 45 | 35.244444 | 104 | 0.661683 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,116 | lda_dispatcher.py | piskvorky_gensim/gensim/models/lda_dispatcher.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Dispatcher process which orchestrates distributed Latent Dirichlet Allocation
(LDA, :class:`~gensim.models.ldamodel.LdaModel`) computations.
Run this script only once, on any node in your cluster.
Notes
-----
The dispatcher expects to find worker scripts already running. Make sure you run as many workers as you like on
your machines **before** launching the dispatcher.
How to use distributed :class:`~gensim.models.ldamodel.LdaModel`
----------------------------------------------------------------
#. Install needed dependencies (Pyro4) ::
pip install gensim[distributed]
#. Setup serialization (on each machine) ::
export PYRO_SERIALIZERS_ACCEPTED=pickle
export PYRO_SERIALIZER=pickle
#. Run nameserver ::
python -m Pyro4.naming -n 0.0.0.0 &
#. Run workers (on each machine) ::
python -m gensim.models.lda_worker &
#. Run dispatcher ::
python -m gensim.models.lda_dispatcher &
#. Run :class:`~gensim.models.ldamodel.LdaModel` in distributed mode :
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.models import LdaModel
>>>
>>> model = LdaModel(common_corpus, id2word=common_dictionary, distributed=True)
Command line arguments
----------------------
.. program-output:: python -m gensim.models.lda_dispatcher --help
:ellipsis: 0, -7
"""
import argparse
import os
import sys
import logging
import threading
import time
from queue import Queue
import Pyro4
from gensim import utils
from gensim.models.lda_worker import LDA_WORKER_PREFIX
logger = logging.getLogger("gensim.models.lda_dispatcher")
# How many jobs (=chunks of N documents) to keep "pre-fetched" in a queue?
# A small number is usually enough, unless iteration over the corpus is very very
# slow (slower than the actual computation of LDA), in which case you can override
# this value from the command line, i.e. run "python ./lda_dispatcher.py 100"
MAX_JOBS_QUEUE = 10
# timeout for the Queue object put/get blocking methods.
# it should theoretically be infinity, but then keyboard interrupts don't work.
# so this is really just a hack, see http://bugs.python.org/issue1360
HUGE_TIMEOUT = 365 * 24 * 60 * 60 # one year
LDA_DISPATCHER_PREFIX = 'gensim.lda_dispatcher'
class Dispatcher:
"""Dispatcher object that communicates and coordinates individual workers.
Warnings
--------
There should never be more than one dispatcher running at any one time.
"""
def __init__(self, maxsize=MAX_JOBS_QUEUE, ns_conf=None):
"""Partly initializes the dispatcher.
A full initialization (including initialization of the workers) requires a call to
:meth:`~gensim.models.lda_dispatcher.Dispatcher.initialize`
Parameters
----------
maxsize : int, optional
Maximum number of jobs to be kept pre-fetched in the queue.
ns_conf : dict of (str, object)
Sets up the name server configuration for the pyro daemon server of dispatcher.
This also helps to keep track of your objects in your network by using logical object names
            instead of the exact object name (or id) and its location.
"""
self.maxsize = maxsize
self.callback = None
self.ns_conf = ns_conf if ns_conf is not None else {}
@Pyro4.expose
def initialize(self, **model_params):
"""Fully initialize the dispatcher and all its workers.
Parameters
----------
**model_params
Keyword parameters used to initialize individual workers, see :class:`~gensim.models.ldamodel.LdaModel`.
Raises
------
RuntimeError
            When no workers are found (the :mod:`gensim.models.lda_worker` script must be run beforehand).
"""
self.jobs = Queue(maxsize=self.maxsize)
self.lock_update = threading.Lock()
self._jobsdone = 0
self._jobsreceived = 0
self.workers = {}
with utils.getNS(**self.ns_conf) as ns:
self.callback = Pyro4.Proxy(ns.list(prefix=LDA_DISPATCHER_PREFIX)[LDA_DISPATCHER_PREFIX])
for name, uri in ns.list(prefix=LDA_WORKER_PREFIX).items():
try:
worker = Pyro4.Proxy(uri)
workerid = len(self.workers)
# make time consuming methods work asynchronously
logger.info("registering worker #%i at %s", workerid, uri)
worker.initialize(workerid, dispatcher=self.callback, **model_params)
self.workers[workerid] = worker
except Pyro4.errors.PyroError:
logger.warning("unresponsive worker at %s,deleting it from the name server", uri)
ns.remove(name)
if not self.workers:
raise RuntimeError('no workers found; run some lda_worker scripts on your machines first!')
@Pyro4.expose
def getworkers(self):
"""Return pyro URIs of all registered workers.
Returns
-------
list of URIs
The pyro URIs for each worker.
"""
return [worker._pyroUri for worker in self.workers.values()]
@Pyro4.expose
def getjob(self, worker_id):
"""Atomically pop a job from the queue.
Parameters
----------
worker_id : int
The worker that requested the job.
Returns
-------
iterable of list of (int, float)
The corpus in BoW format.
"""
logger.info("worker #%i requesting a new job", worker_id)
job = self.jobs.get(block=True, timeout=1)
logger.info("worker #%i got a new job (%i left)", worker_id, self.jobs.qsize())
return job
@Pyro4.expose
def putjob(self, job):
"""Atomically add a job to the queue.
Parameters
----------
job : iterable of list of (int, float)
The corpus in BoW format.
"""
self._jobsreceived += 1
self.jobs.put(job, block=True, timeout=HUGE_TIMEOUT)
logger.info("added a new job (len(queue)=%i items)", self.jobs.qsize())
@Pyro4.expose
def getstate(self):
"""Merge states from across all workers and return the result.
Returns
-------
:class:`~gensim.models.ldamodel.LdaState`
Merged resultant state
"""
logger.info("end of input, assigning all remaining jobs")
logger.debug("jobs done: %s, jobs received: %s", self._jobsdone, self._jobsreceived)
i = 0
count = 10
while self._jobsdone < self._jobsreceived:
time.sleep(0.5) # check every half a second
i += 1
if i > count:
i = 0
for workerid, worker in self.workers.items():
logger.info("checking aliveness for worker %s", workerid)
worker.ping()
logger.info("merging states from %i workers", len(self.workers))
workers = list(self.workers.values())
result = workers[0].getstate()
for worker in workers[1:]:
result.merge(worker.getstate())
logger.info("sending out merged state")
return result
@Pyro4.expose
def reset(self, state):
"""Reinitialize all workers for a new EM iteration.
Parameters
----------
state : :class:`~gensim.models.ldamodel.LdaState`
State of :class:`~gensim.models.lda.LdaModel`.
"""
for workerid, worker in self.workers.items():
logger.info("resetting worker %s", workerid)
worker.reset(state)
worker.requestjob()
self._jobsdone = 0
self._jobsreceived = 0
@Pyro4.expose
@Pyro4.oneway
@utils.synchronous('lock_update')
def jobdone(self, workerid):
"""A worker has finished its job. Log this event and then asynchronously transfer control back to the worker.
Callback used by workers to notify when their job is done.
        The job done event is logged and then control is asynchronously transferred back to the worker
(who can then request another job). In this way, control flow basically oscillates between
:meth:`gensim.models.lda_dispatcher.Dispatcher.jobdone` and :meth:`gensim.models.lda_worker.Worker.requestjob`.
Parameters
----------
workerid : int
The ID of the worker that finished the job (used for logging).
"""
self._jobsdone += 1
logger.info("worker #%s finished job #%i", workerid, self._jobsdone)
self.workers[workerid].requestjob() # tell the worker to ask for another job, asynchronously (one-way)
def jobsdone(self):
"""Wrap :attr:`~gensim.models.lda_dispatcher.Dispatcher._jobsdone` needed for remote access through proxies.
Returns
-------
int
Number of jobs already completed.
"""
return self._jobsdone
@Pyro4.oneway
def exit(self):
"""Terminate all registered workers and then the dispatcher."""
for workerid, worker in self.workers.items():
logger.info("terminating worker %s", workerid)
worker.exit()
logger.info("terminating dispatcher")
os._exit(0) # exit the whole process (not just this thread ala sys.exit())
def main():
parser = argparse.ArgumentParser(description=__doc__[:-135], formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--maxsize",
help="How many jobs (=chunks of N documents) to keep 'pre-fetched' in a queue (default: %(default)s)",
type=int, default=MAX_JOBS_QUEUE
)
parser.add_argument("--host", help="Nameserver hostname (default: %(default)s)", default=None)
parser.add_argument("--port", help="Nameserver port (default: %(default)s)", default=None, type=int)
parser.add_argument("--no-broadcast", help="Disable broadcast (default: %(default)s)",
action='store_const', default=True, const=False)
parser.add_argument("--hmac", help="Nameserver hmac key (default: %(default)s)", default=None)
parser.add_argument(
'-v', '--verbose',
help='Verbose flag',
action='store_const', dest="loglevel", const=logging.INFO, default=logging.WARNING
)
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=args.loglevel)
logger.info("running %s", " ".join(sys.argv))
ns_conf = {
"broadcast": args.no_broadcast,
"host": args.host,
"port": args.port,
"hmac_key": args.hmac
}
utils.pyro_daemon(LDA_DISPATCHER_PREFIX, Dispatcher(maxsize=args.maxsize, ns_conf=ns_conf), ns_conf=ns_conf)
logger.info("finished running %s", " ".join(sys.argv))
if __name__ == '__main__':
main()
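# Hypothetical client-side sketch: assuming the name server, workers and dispatcher from the
# docstring steps above are already running, look up the dispatcher by its registered prefix and
# list its workers. `_example_query_dispatcher` is an illustrative helper, not part of the
# dispatcher protocol.
def _example_query_dispatcher():
    with Pyro4.locateNS() as ns:
        uri = ns.list(prefix=LDA_DISPATCHER_PREFIX)[LDA_DISPATCHER_PREFIX]
    with Pyro4.Proxy(uri) as dispatcher:
        print("registered workers:", dispatcher.getworkers())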
| 11,131 | Python | .py | 251 | 36.350598 | 119 | 0.639822 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,117 | poincare.py | piskvorky_gensim/gensim/models/poincare.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Jayant Jain <jayantjain1992@gmail.com>
# Copyright (C) 2017 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Python implementation of Poincaré Embeddings.
These embeddings are better at capturing latent hierarchical information than traditional Euclidean embeddings.
The method is described in detail in `Maximilian Nickel, Douwe Kiela -
"Poincaré Embeddings for Learning Hierarchical Representations" <https://arxiv.org/abs/1705.08039>`_.
The main use-case is to automatically learn hierarchical representations of nodes from a tree-like structure,
such as a Directed Acyclic Graph (DAG), using a transitive closure of the relations. Representations of nodes in a
symmetric graph can also be learned.
This module allows training Poincaré Embeddings from a training file containing relations of graph in a
csv-like format, or from a Python iterable of relations.
Examples
--------
Initialize and train a model from a list
.. sourcecode:: pycon
>>> from gensim.models.poincare import PoincareModel
>>> relations = [('kangaroo', 'marsupial'), ('kangaroo', 'mammal'), ('gib', 'cat')]
>>> model = PoincareModel(relations, negative=2)
>>> model.train(epochs=50)
Initialize and train a model from a file containing one relation per line
.. sourcecode:: pycon
>>> from gensim.models.poincare import PoincareModel, PoincareRelations
>>> from gensim.test.utils import datapath
>>> file_path = datapath('poincare_hypernyms.tsv')
>>> model = PoincareModel(PoincareRelations(file_path), negative=2)
>>> model.train(epochs=50)
"""
import csv
import logging
from numbers import Integral
import sys
import time
from collections import defaultdict, Counter
import numpy as np
from numpy import random as np_random, float32 as REAL
from scipy.stats import spearmanr
try:
from autograd import grad # Only required for optionally verifying gradients while training
from autograd import numpy as grad_np
AUTOGRAD_PRESENT = True
except ImportError:
AUTOGRAD_PRESENT = False
from gensim import utils, matutils
from gensim.models.keyedvectors import KeyedVectors
logger = logging.getLogger(__name__)
class PoincareModel(utils.SaveLoad):
"""Train, use and evaluate Poincare Embeddings.
The model can be stored/loaded via its :meth:`~gensim.models.poincare.PoincareModel.save`
and :meth:`~gensim.models.poincare.PoincareModel.load` methods, or stored/loaded in the word2vec format
via `model.kv.save_word2vec_format` and :meth:`~gensim.models.poincare.PoincareKeyedVectors.load_word2vec_format`.
Notes
-----
    Training cannot be resumed from a model loaded via `load_word2vec_format`; if you wish to train further,
    use the :meth:`~gensim.models.poincare.PoincareModel.save` and :meth:`~gensim.models.poincare.PoincareModel.load`
    methods instead.
    An important attribute, which provides a lot of additional functionality when accessed directly, is the
    keyed-vectors object:
    self.kv : :class:`~gensim.models.poincare.PoincareKeyedVectors`
        This object contains the mapping between nodes and embeddings, as well as the vocabulary of the model
(set of unique nodes seen by the model). After training, it can be used to perform operations on the vectors
such as vector lookup, distance and similarity calculations etc.
See the documentation of its class for usage examples.
"""
def __init__(self, train_data, size=50, alpha=0.1, negative=10, workers=1, epsilon=1e-5, regularization_coeff=1.0,
burn_in=10, burn_in_alpha=0.01, init_range=(-0.001, 0.001), dtype=np.float64, seed=0):
"""Initialize and train a Poincare embedding model from an iterable of relations.
Parameters
----------
train_data : {iterable of (str, str), :class:`gensim.models.poincare.PoincareRelations`}
Iterable of relations, e.g. a list of tuples, or a :class:`gensim.models.poincare.PoincareRelations`
instance streaming from a file. Note that the relations are treated as ordered pairs,
i.e. a relation (a, b) does not imply the opposite relation (b, a). In case the relations are symmetric,
the data should contain both relations (a, b) and (b, a).
size : int, optional
Number of dimensions of the trained model.
alpha : float, optional
Learning rate for training.
negative : int, optional
Number of negative samples to use.
workers : int, optional
Number of threads to use for training the model.
epsilon : float, optional
Constant used for clipping embeddings below a norm of one.
regularization_coeff : float, optional
Coefficient used for l2-regularization while training (0 effectively disables regularization).
burn_in : int, optional
Number of epochs to use for burn-in initialization (0 means no burn-in).
burn_in_alpha : float, optional
Learning rate for burn-in initialization, ignored if `burn_in` is 0.
init_range : 2-tuple (float, float)
Range within which the vectors are randomly initialized.
dtype : numpy.dtype
The numpy dtype to use for the vectors in the model (numpy.float64, numpy.float32 etc).
Using lower precision floats may be useful in increasing training speed and reducing memory usage.
seed : int, optional
Seed for random to ensure reproducibility.
Examples
--------
Initialize a model from a list:
.. sourcecode:: pycon
>>> from gensim.models.poincare import PoincareModel
>>> relations = [('kangaroo', 'marsupial'), ('kangaroo', 'mammal'), ('gib', 'cat')]
>>> model = PoincareModel(relations, negative=2)
Initialize a model from a file containing one relation per line:
.. sourcecode:: pycon
>>> from gensim.models.poincare import PoincareModel, PoincareRelations
>>> from gensim.test.utils import datapath
>>> file_path = datapath('poincare_hypernyms.tsv')
>>> model = PoincareModel(PoincareRelations(file_path), negative=2)
See :class:`~gensim.models.poincare.PoincareRelations` for more options.
"""
self.train_data = train_data
self.kv = PoincareKeyedVectors(size, 0)
self.all_relations = []
self.node_relations = defaultdict(set)
self._negatives_buffer = NegativesBuffer([])
self._negatives_buffer_size = 2000
self.size = size
self.train_alpha = alpha # Learning rate for training
self.burn_in_alpha = burn_in_alpha # Learning rate for burn-in
self.alpha = alpha # Current learning rate
self.negative = negative
self.workers = workers
self.epsilon = epsilon
self.regularization_coeff = regularization_coeff
self.burn_in = burn_in
self._burn_in_done = False
self.dtype = dtype
self.seed = seed
self._np_random = np_random.RandomState(seed)
self.init_range = init_range
self._loss_grad = None
self.build_vocab(train_data)
def build_vocab(self, relations, update=False):
"""Build the model's vocabulary from known relations.
Parameters
----------
relations : {iterable of (str, str), :class:`gensim.models.poincare.PoincareRelations`}
Iterable of relations, e.g. a list of tuples, or a :class:`gensim.models.poincare.PoincareRelations`
instance streaming from a file. Note that the relations are treated as ordered pairs,
i.e. a relation (a, b) does not imply the opposite relation (b, a). In case the relations are symmetric,
the data should contain both relations (a, b) and (b, a).
update : bool, optional
            If True, only the new nodes' embeddings are initialized.
            Use this when the model already has an existing vocabulary and you want to update it.
            If False, all nodes' embeddings are initialized.
Use this when you're creating a new vocabulary from scratch.
Examples
--------
Train a model and update vocab for online training:
.. sourcecode:: pycon
>>> from gensim.models.poincare import PoincareModel
>>>
>>> # train a new model from initial data
>>> initial_relations = [('kangaroo', 'marsupial'), ('kangaroo', 'mammal')]
>>> model = PoincareModel(initial_relations, negative=1)
>>> model.train(epochs=50)
>>>
>>> # online training: update the vocabulary and continue training
>>> online_relations = [('striped_skunk', 'mammal')]
>>> model.build_vocab(online_relations, update=True)
>>> model.train(epochs=50)
"""
old_index_to_key_len = len(self.kv.index_to_key)
logger.info("loading relations from train data..")
for relation in relations:
if len(relation) != 2:
raise ValueError('Relation pair "%s" should have exactly two items' % repr(relation))
for item in relation:
if item in self.kv.key_to_index:
self.kv.set_vecattr(item, 'count', self.kv.get_vecattr(item, 'count') + 1)
else:
self.kv.key_to_index[item] = len(self.kv.index_to_key)
self.kv.index_to_key.append(item)
self.kv.set_vecattr(item, 'count', 1)
node_1, node_2 = relation
node_1_index, node_2_index = self.kv.key_to_index[node_1], self.kv.key_to_index[node_2]
self.node_relations[node_1_index].add(node_2_index)
relation = (node_1_index, node_2_index)
self.all_relations.append(relation)
logger.info("loaded %d relations from train data, %d nodes", len(self.all_relations), len(self.kv))
self.indices_set = set(range(len(self.kv.index_to_key))) # Set of all node indices
self.indices_array = np.fromiter(range(len(self.kv.index_to_key)), dtype=int) # Numpy array of all node indices
self._init_node_probabilities()
if not update:
self._init_embeddings()
else:
self._update_embeddings(old_index_to_key_len)
def _init_embeddings(self):
"""Randomly initialize vectors for the items in the vocab."""
shape = (len(self.kv.index_to_key), self.size)
self.kv.vectors = self._np_random.uniform(self.init_range[0], self.init_range[1], shape).astype(self.dtype)
def _update_embeddings(self, old_index_to_key_len):
"""Randomly initialize vectors for the items in the additional vocab."""
shape = (len(self.kv.index_to_key) - old_index_to_key_len, self.size)
v = self._np_random.uniform(self.init_range[0], self.init_range[1], shape).astype(self.dtype)
self.kv.vectors = np.concatenate([self.kv.vectors, v])
def _init_node_probabilities(self):
"""Initialize a-priori probabilities."""
counts = self.kv.expandos['count'].astype(np.float64)
self._node_counts_cumsum = np.cumsum(counts)
self._node_probabilities = counts / counts.sum()
def _get_candidate_negatives(self):
"""Get candidate negatives of size `self.negative` from the negative examples buffer.
Returns
-------
numpy.array
Array of shape (`self.negative`,) containing indices of negative nodes.
"""
if self._negatives_buffer.num_items() < self.negative:
# cumsum table of counts used instead of the standard approach of a probability cumsum table
# this is to avoid floating point errors that result when the number of nodes is very high
# for reference: https://github.com/RaRe-Technologies/gensim/issues/1917
max_cumsum_value = self._node_counts_cumsum[-1]
uniform_numbers = self._np_random.randint(1, max_cumsum_value + 1, self._negatives_buffer_size)
cumsum_table_indices = np.searchsorted(self._node_counts_cumsum, uniform_numbers)
self._negatives_buffer = NegativesBuffer(cumsum_table_indices)
return self._negatives_buffer.get_items(self.negative)
def _sample_negatives(self, node_index):
"""Get a sample of negatives for the given node.
Parameters
----------
node_index : int
Index of the positive node for which negative samples are to be returned.
Returns
-------
numpy.array
Array of shape (self.negative,) containing indices of negative nodes for the given node index.
"""
node_relations = self.node_relations[node_index]
num_remaining_nodes = len(self.kv) - len(node_relations)
if num_remaining_nodes < self.negative:
raise ValueError(
'Cannot sample %d negative nodes from a set of %d negative nodes for %s' %
(self.negative, num_remaining_nodes, self.kv.index_to_key[node_index])
)
positive_fraction = float(len(node_relations)) / len(self.kv)
if positive_fraction < 0.01:
# If number of positive relations is a small fraction of total nodes
# re-sample till no positively connected nodes are chosen
indices = self._get_candidate_negatives()
unique_indices = set(indices)
times_sampled = 1
while (len(indices) != len(unique_indices)) or (unique_indices & node_relations):
times_sampled += 1
indices = self._get_candidate_negatives()
unique_indices = set(indices)
if times_sampled > 1:
logger.debug('sampled %d times, positive fraction %.5f', times_sampled, positive_fraction)
else:
# If number of positive relations is a significant fraction of total nodes
# subtract positively connected nodes from set of choices and sample from the remaining
valid_negatives = np.array(list(self.indices_set - node_relations))
probs = self._node_probabilities[valid_negatives]
probs /= probs.sum()
indices = self._np_random.choice(valid_negatives, size=self.negative, p=probs, replace=False)
return list(indices)
@staticmethod
def _loss_fn(matrix, regularization_coeff=1.0):
"""Computes loss value.
Parameters
----------
matrix : numpy.array
Array containing vectors for u, v and negative samples, of shape (2 + negative_size, dim).
regularization_coeff : float, optional
Coefficient to use for l2-regularization
Returns
-------
float
Computed loss value.
Warnings
--------
Only used for autograd gradients, since autograd requires a specific function signature.
"""
vector_u = matrix[0]
vectors_v = matrix[1:]
euclidean_dists = grad_np.linalg.norm(vector_u - vectors_v, axis=1)
norm = grad_np.linalg.norm(vector_u)
all_norms = grad_np.linalg.norm(vectors_v, axis=1)
poincare_dists = grad_np.arccosh(
1 + 2 * (
(euclidean_dists ** 2) / ((1 - norm ** 2) * (1 - all_norms ** 2))
)
)
exp_negative_distances = grad_np.exp(-poincare_dists)
regularization_term = regularization_coeff * grad_np.linalg.norm(vectors_v[0]) ** 2
return -grad_np.log(exp_negative_distances[0] / (exp_negative_distances.sum())) + regularization_term
@staticmethod
def _clip_vectors(vectors, epsilon):
"""Clip vectors to have a norm of less than one.
Parameters
----------
vectors : numpy.array
Can be 1-D, or 2-D (in which case the norm for each row is checked).
epsilon : float
Parameter for numerical stability, each dimension of the vector is reduced by `epsilon`
if the norm of the vector is greater than or equal to 1.
Returns
-------
numpy.array
Array with norms clipped below 1.
"""
one_d = len(vectors.shape) == 1
threshold = 1 - epsilon
if one_d:
norm = np.linalg.norm(vectors)
if norm < threshold:
return vectors
else:
return vectors / norm - (np.sign(vectors) * epsilon)
else:
norms = np.linalg.norm(vectors, axis=1)
if (norms < threshold).all():
return vectors
else:
vectors[norms >= threshold] *= (threshold / norms[norms >= threshold])[:, np.newaxis]
vectors[norms >= threshold] -= np.sign(vectors[norms >= threshold]) * epsilon
return vectors
def save(self, *args, **kwargs):
"""Save complete model to disk, inherited from :class:`~gensim.utils.SaveLoad`.
See also
--------
:meth:`~gensim.models.poincare.PoincareModel.load`
Parameters
----------
*args
Positional arguments passed to :meth:`~gensim.utils.SaveLoad.save`.
**kwargs
Keyword arguments passed to :meth:`~gensim.utils.SaveLoad.save`.
"""
self._loss_grad = None # Can't pickle autograd fn to disk
attrs_to_ignore = ['_node_probabilities', '_node_counts_cumsum']
kwargs['ignore'] = set(list(kwargs.get('ignore', [])) + attrs_to_ignore)
super(PoincareModel, self).save(*args, **kwargs)
@classmethod
def load(cls, *args, **kwargs):
"""Load model from disk, inherited from :class:`~gensim.utils.SaveLoad`.
See also
--------
:meth:`~gensim.models.poincare.PoincareModel.save`
Parameters
----------
*args
Positional arguments passed to :meth:`~gensim.utils.SaveLoad.load`.
**kwargs
Keyword arguments passed to :meth:`~gensim.utils.SaveLoad.load`.
Returns
-------
:class:`~gensim.models.poincare.PoincareModel`
The loaded model.
"""
model = super(PoincareModel, cls).load(*args, **kwargs)
model._init_node_probabilities()
return model
def _prepare_training_batch(self, relations, all_negatives, check_gradients=False):
"""Create a training batch and compute gradients and loss for the batch.
Parameters
----------
relations : list of tuples
List of tuples of positive examples of the form (node_1_index, node_2_index).
all_negatives : list of lists
List of lists of negative samples for each node_1 in the positive examples.
check_gradients : bool, optional
Whether to compare the computed gradients to autograd gradients for this batch.
Returns
-------
:class:`~gensim.models.poincare.PoincareBatch`
Node indices, computed gradients and loss for the batch.
"""
batch_size = len(relations)
indices_u, indices_v = [], []
for relation, negatives in zip(relations, all_negatives):
u, v = relation
indices_u.append(u)
indices_v.append(v)
indices_v.extend(negatives)
vectors_u = self.kv.vectors[indices_u]
vectors_v = self.kv.vectors[indices_v].reshape((batch_size, 1 + self.negative, self.size))
vectors_v = vectors_v.swapaxes(0, 1).swapaxes(1, 2)
batch = PoincareBatch(vectors_u, vectors_v, indices_u, indices_v, self.regularization_coeff)
batch.compute_all()
if check_gradients:
self._check_gradients(relations, all_negatives, batch)
return batch
def _check_gradients(self, relations, all_negatives, batch, tol=1e-8):
"""Compare computed gradients for batch to autograd gradients.
Parameters
----------
relations : list of tuples
List of tuples of positive examples of the form (node_1_index, node_2_index).
all_negatives : list of lists
List of lists of negative samples for each node_1 in the positive examples.
batch : :class:`~gensim.models.poincare.PoincareBatch`
Batch for which computed gradients are to be checked.
tol : float, optional
The maximum error between our computed gradients and the reference ones from autograd.
"""
if not AUTOGRAD_PRESENT:
logger.warning('autograd could not be imported, cannot do gradient checking')
logger.warning('please install autograd to enable gradient checking')
return
if self._loss_grad is None:
self._loss_grad = grad(PoincareModel._loss_fn)
max_diff = 0.0
for i, (relation, negatives) in enumerate(zip(relations, all_negatives)):
u, v = relation
auto_gradients = self._loss_grad(
np.vstack((self.kv.vectors[u], self.kv.vectors[[v] + negatives])), self.regularization_coeff)
computed_gradients = np.vstack((batch.gradients_u[:, i], batch.gradients_v[:, :, i]))
diff = np.abs(auto_gradients - computed_gradients).max()
if diff > max_diff:
max_diff = diff
logger.info('max difference between computed gradients and autograd gradients: %.10f', max_diff)
assert max_diff < tol, (
'Max difference between computed gradients and autograd gradients %.10f, '
'greater than tolerance %.10f' % (max_diff, tol))
def _sample_negatives_batch(self, nodes):
"""Get negative examples for each node.
Parameters
----------
nodes : iterable of int
Iterable of node indices for which negative samples are to be returned.
Returns
-------
list of lists
Each inner list is a list of negative samples for a single node in the input list.
"""
all_indices = [self._sample_negatives(node) for node in nodes]
return all_indices
def _train_on_batch(self, relations, check_gradients=False):
"""Perform training for a single training batch.
Parameters
----------
relations : list of tuples of (int, int)
List of tuples of positive examples of the form (node_1_index, node_2_index).
check_gradients : bool, optional
Whether to compare the computed gradients to autograd gradients for this batch.
Returns
-------
:class:`~gensim.models.poincare.PoincareBatch`
The batch that was just trained on, contains computed loss for the batch.
"""
all_negatives = self._sample_negatives_batch(relation[0] for relation in relations)
batch = self._prepare_training_batch(relations, all_negatives, check_gradients)
self._update_vectors_batch(batch)
return batch
@staticmethod
def _handle_duplicates(vector_updates, node_indices):
"""Handle occurrences of multiple updates to the same node in a batch of vector updates.
Parameters
----------
vector_updates : numpy.array
Array with each row containing updates to be performed on a certain node.
node_indices : list of int
Node indices on which the above updates are to be performed on.
Notes
-----
Mutates the `vector_updates` array.
Required because vectors[[2, 1, 2]] += np.array([-0.5, 1.0, 0.5]) performs only the last update
on the row at index 2.
"""
counts = Counter(node_indices)
node_dict = defaultdict(list)
for i, node_index in enumerate(node_indices):
node_dict[node_index].append(i)
for node_index, count in counts.items():
if count == 1:
continue
positions = node_dict[node_index]
# Move all updates to the same node to the last such update, zeroing all the others
vector_updates[positions[-1]] = vector_updates[positions].sum(axis=0)
vector_updates[positions[:-1]] = 0
def _update_vectors_batch(self, batch):
"""Update vectors for nodes in the given batch.
Parameters
----------
batch : :class:`~gensim.models.poincare.PoincareBatch`
Batch containing computed gradients and node indices of the batch for which updates are to be done.
"""
grad_u, grad_v = batch.gradients_u, batch.gradients_v
indices_u, indices_v = batch.indices_u, batch.indices_v
batch_size = len(indices_u)
u_updates = (self.alpha * (batch.alpha ** 2) / 4 * grad_u).T
self._handle_duplicates(u_updates, indices_u)
self.kv.vectors[indices_u] -= u_updates
self.kv.vectors[indices_u] = self._clip_vectors(self.kv.vectors[indices_u], self.epsilon)
v_updates = self.alpha * (batch.beta ** 2)[:, np.newaxis] / 4 * grad_v
v_updates = v_updates.swapaxes(1, 2).swapaxes(0, 1)
v_updates = v_updates.reshape(((1 + self.negative) * batch_size, self.size))
self._handle_duplicates(v_updates, indices_v)
self.kv.vectors[indices_v] -= v_updates
self.kv.vectors[indices_v] = self._clip_vectors(self.kv.vectors[indices_v], self.epsilon)
def train(self, epochs, batch_size=10, print_every=1000, check_gradients_every=None):
"""Train Poincare embeddings using loaded data and model parameters.
Parameters
----------
epochs : int
Number of iterations (epochs) over the corpus.
batch_size : int, optional
Number of examples to train on in a single batch.
print_every : int, optional
Prints progress and average loss after every `print_every` batches.
check_gradients_every : int or None, optional
Compares computed gradients and autograd gradients after every `check_gradients_every` batches.
Useful for debugging, doesn't compare by default.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.models.poincare import PoincareModel
>>> relations = [('kangaroo', 'marsupial'), ('kangaroo', 'mammal'), ('gib', 'cat')]
>>> model = PoincareModel(relations, negative=2)
>>> model.train(epochs=50)
"""
if self.workers > 1:
raise NotImplementedError("Multi-threaded version not implemented yet")
# Some divide-by-zero results are handled explicitly
old_settings = np.seterr(divide='ignore', invalid='ignore')
logger.info(
"training model of size %d with %d workers on %d relations for %d epochs and %d burn-in epochs, "
"using lr=%.5f burn-in lr=%.5f negative=%d",
self.size, self.workers, len(self.all_relations), epochs, self.burn_in,
self.alpha, self.burn_in_alpha, self.negative
)
if self.burn_in > 0 and not self._burn_in_done:
logger.info("starting burn-in (%d epochs)----------------------------------------", self.burn_in)
self.alpha = self.burn_in_alpha
self._train_batchwise(
epochs=self.burn_in, batch_size=batch_size, print_every=print_every,
check_gradients_every=check_gradients_every)
self._burn_in_done = True
logger.info("burn-in finished")
self.alpha = self.train_alpha
logger.info("starting training (%d epochs)----------------------------------------", epochs)
self._train_batchwise(
epochs=epochs, batch_size=batch_size, print_every=print_every,
check_gradients_every=check_gradients_every)
logger.info("training finished")
np.seterr(**old_settings)
def _train_batchwise(self, epochs, batch_size=10, print_every=1000, check_gradients_every=None):
"""Train Poincare embeddings using specified parameters.
Parameters
----------
epochs : int
Number of iterations (epochs) over the corpus.
batch_size : int, optional
Number of examples to train on in a single batch.
print_every : int, optional
Prints progress and average loss after every `print_every` batches.
check_gradients_every : int or None, optional
Compares computed gradients and autograd gradients after every `check_gradients_every` batches.
Useful for debugging, doesn't compare by default.
"""
if self.workers > 1:
raise NotImplementedError("Multi-threaded version not implemented yet")
for epoch in range(1, epochs + 1):
indices = list(range(len(self.all_relations)))
self._np_random.shuffle(indices)
avg_loss = 0.0
last_time = time.time()
for batch_num, i in enumerate(range(0, len(indices), batch_size), start=1):
should_print = not (batch_num % print_every)
check_gradients = bool(check_gradients_every) and (batch_num % check_gradients_every) == 0
batch_indices = indices[i:i + batch_size]
relations = [self.all_relations[idx] for idx in batch_indices]
result = self._train_on_batch(relations, check_gradients=check_gradients)
avg_loss += result.loss
if should_print:
avg_loss /= print_every
time_taken = time.time() - last_time
speed = print_every * batch_size / time_taken
logger.info(
'training on epoch %d, examples #%d-#%d, loss: %.2f'
% (epoch, i, i + batch_size, avg_loss))
logger.info(
'time taken for %d examples: %.2f s, %.2f examples / s'
% (print_every * batch_size, time_taken, speed))
last_time = time.time()
avg_loss = 0.0
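# Worked example of the NumPy fancy-indexing pitfall that PoincareModel._handle_duplicates works
# around: with repeated indices, `+=` applies only the last update to a repeated row, whereas
# np.add.at (or pre-summing duplicates, as the method does) applies them all. The tiny arrays
# below are synthetic, for illustration only.
_v = np.zeros(3)
_v[[2, 1, 2]] += np.array([-0.5, 1.0, 0.5])
assert np.allclose(_v, [0.0, 1.0, 0.5])      # the -0.5 update to row 2 was silently lost
_v = np.zeros(3)
np.add.at(_v, [2, 1, 2], [-0.5, 1.0, 0.5])
assert np.allclose(_v, [0.0, 1.0, 0.0])      # both updates to row 2 were applied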
class PoincareBatch:
"""Compute Poincare distances, gradients and loss for a training batch.
Store intermediate state to avoid recomputing multiple times.
"""
def __init__(self, vectors_u, vectors_v, indices_u, indices_v, regularization_coeff=1.0):
"""
Initialize instance with sets of vectors for which distances are to be computed.
Parameters
----------
vectors_u : numpy.array
Vectors of all nodes `u` in the batch. Expected shape (batch_size, dim).
vectors_v : numpy.array
Vectors of all positively related nodes `v` and negatively sampled nodes `v'`,
for each node `u` in the batch. Expected shape (1 + neg_size, dim, batch_size).
indices_u : list of int
List of node indices for each of the vectors in `vectors_u`.
indices_v : list of lists of int
Nested list of lists, each of which is a list of node indices
for each of the vectors in `vectors_v` for a specific node `u`.
regularization_coeff : float, optional
Coefficient to use for l2-regularization
"""
self.vectors_u = vectors_u.T[np.newaxis, :, :] # (1, dim, batch_size)
self.vectors_v = vectors_v # (1 + neg_size, dim, batch_size)
self.indices_u = indices_u
self.indices_v = indices_v
self.regularization_coeff = regularization_coeff
self.poincare_dists = None
self.euclidean_dists = None
self.norms_u = None
self.norms_v = None
self.alpha = None
self.beta = None
self.gamma = None
self.gradients_u = None
self.distance_gradients_u = None
self.gradients_v = None
self.distance_gradients_v = None
self.loss = None
self._distances_computed = False
self._gradients_computed = False
self._distance_gradients_computed = False
self._loss_computed = False
def compute_all(self):
"""Convenience method to perform all computations."""
self.compute_distances()
self.compute_distance_gradients()
self.compute_gradients()
self.compute_loss()
def compute_distances(self):
"""Compute and store norms, euclidean distances and poincare distances between input vectors."""
if self._distances_computed:
return
euclidean_dists = np.linalg.norm(self.vectors_u - self.vectors_v, axis=1) # (1 + neg_size, batch_size)
norms_u = np.linalg.norm(self.vectors_u, axis=1) # (1, batch_size)
norms_v = np.linalg.norm(self.vectors_v, axis=1) # (1 + neg_size, batch_size)
alpha = 1 - norms_u ** 2 # (1, batch_size)
beta = 1 - norms_v ** 2 # (1 + neg_size, batch_size)
gamma = 1 + 2 * (
(euclidean_dists ** 2) / (alpha * beta)
) # (1 + neg_size, batch_size)
poincare_dists = np.arccosh(gamma) # (1 + neg_size, batch_size)
exp_negative_distances = np.exp(-poincare_dists) # (1 + neg_size, batch_size)
Z = exp_negative_distances.sum(axis=0) # (batch_size)
self.euclidean_dists = euclidean_dists
self.poincare_dists = poincare_dists
self.exp_negative_distances = exp_negative_distances
self.Z = Z
self.gamma = gamma
self.norms_u = norms_u
self.norms_v = norms_v
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self._distances_computed = True
def compute_gradients(self):
"""Compute and store gradients of loss function for all input vectors."""
if self._gradients_computed:
return
self.compute_distances()
self.compute_distance_gradients()
# (1 + neg_size, dim, batch_size)
gradients_v = -self.exp_negative_distances[:, np.newaxis, :] * self.distance_gradients_v
gradients_v /= self.Z # (1 + neg_size, dim, batch_size)
gradients_v[0] += self.distance_gradients_v[0]
gradients_v[0] += self.regularization_coeff * 2 * self.vectors_v[0]
# (1 + neg_size, dim, batch_size)
gradients_u = -self.exp_negative_distances[:, np.newaxis, :] * self.distance_gradients_u
gradients_u /= self.Z # (1 + neg_size, dim, batch_size)
gradients_u = gradients_u.sum(axis=0) # (dim, batch_size)
gradients_u += self.distance_gradients_u[0]
assert not np.isnan(gradients_u).any()
assert not np.isnan(gradients_v).any()
self.gradients_u = gradients_u
self.gradients_v = gradients_v
self._gradients_computed = True
def compute_distance_gradients(self):
"""Compute and store partial derivatives of poincare distance d(u, v) w.r.t all u and all v."""
if self._distance_gradients_computed:
return
self.compute_distances()
euclidean_dists_squared = self.euclidean_dists ** 2 # (1 + neg_size, batch_size)
# (1 + neg_size, 1, batch_size)
c_ = (4 / (self.alpha * self.beta * np.sqrt(self.gamma ** 2 - 1)))[:, np.newaxis, :]
# (1 + neg_size, 1, batch_size)
u_coeffs = ((euclidean_dists_squared + self.alpha) / self.alpha)[:, np.newaxis, :]
distance_gradients_u = u_coeffs * self.vectors_u - self.vectors_v # (1 + neg_size, dim, batch_size)
distance_gradients_u *= c_ # (1 + neg_size, dim, batch_size)
nan_gradients = self.gamma == 1 # (1 + neg_size, batch_size)
if nan_gradients.any():
distance_gradients_u.swapaxes(1, 2)[nan_gradients] = 0
self.distance_gradients_u = distance_gradients_u
# (1 + neg_size, 1, batch_size)
v_coeffs = ((euclidean_dists_squared + self.beta) / self.beta)[:, np.newaxis, :]
distance_gradients_v = v_coeffs * self.vectors_v - self.vectors_u # (1 + neg_size, dim, batch_size)
distance_gradients_v *= c_ # (1 + neg_size, dim, batch_size)
if nan_gradients.any():
distance_gradients_v.swapaxes(1, 2)[nan_gradients] = 0
self.distance_gradients_v = distance_gradients_v
self._distance_gradients_computed = True
def compute_loss(self):
"""Compute and store loss value for the given batch of examples."""
if self._loss_computed:
return
self.compute_distances()
self.loss = -np.log(self.exp_negative_distances[0] / self.Z).sum() # scalar
self._loss_computed = True
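# A quick numerical sanity check (illustrative only) of the closed-form distance gradient used in
# PoincareBatch.compute_distance_gradients(): compare the analytic gradient of the Poincare
# distance w.r.t. `u` against a central finite difference. `_poincare_dist` and `_grad_u` are
# hypothetical helper names; the sample points are random and well inside the unit ball.
def _poincare_dist(u, v):
    alpha, beta = 1 - u.dot(u), 1 - v.dot(v)
    gamma = 1 + 2 * np.sum((u - v) ** 2) / (alpha * beta)
    return np.arccosh(gamma)
def _grad_u(u, v):
    alpha, beta = 1 - u.dot(u), 1 - v.dot(v)
    sq_dist = np.sum((u - v) ** 2)
    gamma = 1 + 2 * sq_dist / (alpha * beta)
    c = 4 / (alpha * beta * np.sqrt(gamma ** 2 - 1))
    return c * (((sq_dist + alpha) / alpha) * u - v)
_rng = np.random.RandomState(0)
_u, _v = _rng.uniform(-0.3, 0.3, 3), _rng.uniform(-0.3, 0.3, 3)
_eps, _numeric = 1e-6, np.zeros(3)
for _i in range(3):
    _d = np.zeros(3)
    _d[_i] = _eps
    _numeric[_i] = (_poincare_dist(_u + _d, _v) - _poincare_dist(_u - _d, _v)) / (2 * _eps)
assert np.allclose(_numeric, _grad_u(_u, _v), atol=1e-5)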
class PoincareKeyedVectors(KeyedVectors):
"""Vectors and vocab for the :class:`~gensim.models.poincare.PoincareModel` training class.
Used to perform operations on the vectors such as vector lookup, distance calculations etc.
(May be used to save/load final vectors in the plain word2vec format, via the inherited
methods save_word2vec_format() and load_word2vec_format().)
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> # Read the sample relations file and train the model
>>> relations = PoincareRelations(file_path=datapath('poincare_hypernyms_large.tsv'))
>>> model = PoincareModel(train_data=relations)
>>> model.train(epochs=50)
>>>
>>> # Query the trained model.
>>> wv = model.kv.get_vector('kangaroo.n.01')
"""
def __init__(self, vector_size, vector_count, dtype=REAL):
super(PoincareKeyedVectors, self).__init__(vector_size, vector_count, dtype=dtype)
self.max_distance = 0
def _load_specials(self, *args, **kwargs):
super(PoincareKeyedVectors, self)._load_specials(*args, **kwargs)
# fixup rename of syn0
if not hasattr(self, 'vectors'):
self.vectors = self.__dict__.pop('syn0')
@staticmethod
def vector_distance(vector_1, vector_2):
"""Compute poincare distance between two input vectors. Convenience method over `vector_distance_batch`.
Parameters
----------
vector_1 : numpy.array
Input vector.
vector_2 : numpy.array
Input vector.
Returns
-------
numpy.float
Poincare distance between `vector_1` and `vector_2`.
"""
return PoincareKeyedVectors.vector_distance_batch(vector_1, vector_2[np.newaxis, :])[0]
@staticmethod
def vector_distance_batch(vector_1, vectors_all):
"""Compute poincare distances between one vector and a set of other vectors.
Parameters
----------
vector_1 : numpy.array
vector from which Poincare distances are to be computed, expected shape (dim,).
vectors_all : numpy.array
for each row in vectors_all, distance from vector_1 is computed, expected shape (num_vectors, dim).
Returns
-------
numpy.array
Poincare distance between `vector_1` and each row in `vectors_all`, shape (num_vectors,).
"""
euclidean_dists = np.linalg.norm(vector_1 - vectors_all, axis=1)
norm = np.linalg.norm(vector_1)
all_norms = np.linalg.norm(vectors_all, axis=1)
return np.arccosh(
1 + 2 * (
(euclidean_dists ** 2) / ((1 - norm ** 2) * (1 - all_norms ** 2))
)
)
def closest_child(self, node):
"""Get the node closest to `node` that is lower in the hierarchy than `node`.
Parameters
----------
node : {str, int}
Key for node for which closest child is to be found.
Returns
-------
{str, None}
Node closest to `node` that is lower in the hierarchy than `node`.
If there are no nodes lower in the hierarchy, None is returned.
"""
all_distances = self.distances(node)
all_norms = np.linalg.norm(self.vectors, axis=1)
node_norm = all_norms[self.get_index(node)]
mask = node_norm >= all_norms
if mask.all(): # No nodes lower in the hierarchy
return None
all_distances = np.ma.array(all_distances, mask=mask)
closest_child_index = np.ma.argmin(all_distances)
return self.index_to_key[closest_child_index]
def closest_parent(self, node):
"""Get the node closest to `node` that is higher in the hierarchy than `node`.
Parameters
----------
node : {str, int}
Key for node for which closest parent is to be found.
Returns
-------
{str, None}
Node closest to `node` that is higher in the hierarchy than `node`.
If there are no nodes higher in the hierarchy, None is returned.
"""
all_distances = self.distances(node)
all_norms = np.linalg.norm(self.vectors, axis=1)
node_norm = all_norms[self.get_index(node)]
mask = node_norm <= all_norms
if mask.all(): # No nodes higher in the hierarchy
return None
all_distances = np.ma.array(all_distances, mask=mask)
closest_child_index = np.ma.argmin(all_distances)
return self.index_to_key[closest_child_index]
def descendants(self, node, max_depth=5):
"""Get the list of recursively closest children from the given node, up to a max depth of `max_depth`.
Parameters
----------
node : {str, int}
Key for node for which descendants are to be found.
max_depth : int
Maximum number of descendants to return.
Returns
-------
list of str
Descendant nodes from the node `node`.
"""
depth = 0
descendants = []
current_node = node
while depth < max_depth:
descendants.append(self.closest_child(current_node))
current_node = descendants[-1]
depth += 1
return descendants
def ancestors(self, node):
"""Get the list of recursively closest parents from the given node.
Parameters
----------
node : {str, int}
Key for node for which ancestors are to be found.
Returns
-------
list of str
Ancestor nodes of the node `node`.
"""
ancestors = []
current_node = node
ancestor = self.closest_parent(current_node)
while ancestor is not None:
ancestors.append(ancestor)
ancestor = self.closest_parent(ancestors[-1])
return ancestors
def distance(self, w1, w2):
"""Calculate Poincare distance between vectors for nodes `w1` and `w2`.
Parameters
----------
w1 : {str, int}
Key for first node.
w2 : {str, int}
Key for second node.
Returns
-------
float
Poincare distance between the vectors for nodes `w1` and `w2`.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> # Read the sample relations file and train the model
>>> relations = PoincareRelations(file_path=datapath('poincare_hypernyms_large.tsv'))
>>> model = PoincareModel(train_data=relations)
>>> model.train(epochs=50)
>>>
>>> # What is the distance between the words 'mammal' and 'carnivore'?
>>> model.kv.distance('mammal.n.01', 'carnivore.n.01')
2.9742298803339304
Raises
------
KeyError
If either of `w1` and `w2` is absent from vocab.
"""
vector_1 = self.get_vector(w1)
vector_2 = self.get_vector(w2)
return self.vector_distance(vector_1, vector_2)
def similarity(self, w1, w2):
"""Compute similarity based on Poincare distance between vectors for nodes `w1` and `w2`.
Parameters
----------
w1 : {str, int}
Key for first node.
w2 : {str, int}
Key for second node.
Returns
-------
float
            Similarity between the vectors for nodes `w1` and `w2` (between 0 and 1).
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> # Read the sample relations file and train the model
>>> relations = PoincareRelations(file_path=datapath('poincare_hypernyms_large.tsv'))
>>> model = PoincareModel(train_data=relations)
>>> model.train(epochs=50)
>>>
>>> # What is the similarity between the words 'mammal' and 'carnivore'?
>>> model.kv.similarity('mammal.n.01', 'carnivore.n.01')
0.25162107631176484
Raises
------
KeyError
If either of `w1` and `w2` is absent from vocab.
"""
return 1 / (1 + self.distance(w1, w2))
def most_similar(self, node_or_vector, topn=10, restrict_vocab=None):
"""Find the top-N most similar nodes to the given node or vector, sorted in increasing order of distance.
Parameters
----------
node_or_vector : {str, int, numpy.array}
node key or vector for which similar nodes are to be found.
topn : int or None, optional
            Number of top-N similar nodes to return when `topn` is an int. When `topn` is None,
            distances to all nodes are returned instead.
restrict_vocab : int or None, optional
Optional integer which limits the range of vectors which are searched for most-similar values.
For example, restrict_vocab=10000 would only check the first 10000 node vectors in the vocabulary order.
This may be meaningful if vocabulary is sorted by descending frequency.
Returns
--------
list of (str, float) or numpy.array
When `topn` is int, a sequence of (node, distance) is returned in increasing order of distance.
When `topn` is None, then similarities for all words are returned as a one-dimensional numpy array with the
size of the vocabulary.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> # Read the sample relations file and train the model
>>> relations = PoincareRelations(file_path=datapath('poincare_hypernyms_large.tsv'))
>>> model = PoincareModel(train_data=relations)
>>> model.train(epochs=50)
>>>
>>> # Which words are most similar to 'kangaroo'?
>>> model.kv.most_similar('kangaroo.n.01', topn=2)
[(u'kangaroo.n.01', 0.0), (u'marsupial.n.01', 0.26524229460827725)]
"""
if isinstance(topn, Integral) and topn < 1:
return []
if not restrict_vocab:
all_distances = self.distances(node_or_vector)
else:
nodes_to_use = self.index_to_key[:restrict_vocab]
all_distances = self.distances(node_or_vector, nodes_to_use)
if isinstance(node_or_vector, (str, int,)):
node_index = self.get_index(node_or_vector)
else:
node_index = None
if not topn:
closest_indices = matutils.argsort(all_distances)
else:
closest_indices = matutils.argsort(all_distances, topn=1 + topn)
result = [
(self.index_to_key[index], float(all_distances[index]))
            for index in closest_indices if (node_index is None or index != node_index) # ignore the input node
]
if topn:
result = result[:topn]
return result
def distances(self, node_or_vector, other_nodes=()):
"""Compute Poincare distances from given `node_or_vector` to all nodes in `other_nodes`.
If `other_nodes` is empty, return distance between `node_or_vector` and all nodes in vocab.
Parameters
----------
node_or_vector : {str, int, numpy.array}
Node key or vector from which distances are to be computed.
other_nodes : {iterable of str, iterable of int, None}, optional
For each node in `other_nodes` distance from `node_or_vector` is computed.
If None or empty, distance of `node_or_vector` from all nodes in vocab is computed (including itself).
Returns
-------
numpy.array
Array containing distances to all nodes in `other_nodes` from input `node_or_vector`,
in the same order as `other_nodes`.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> # Read the sample relations file and train the model
>>> relations = PoincareRelations(file_path=datapath('poincare_hypernyms_large.tsv'))
>>> model = PoincareModel(train_data=relations)
>>> model.train(epochs=50)
>>>
>>> # Check the distances between a word and a list of other words.
>>> model.kv.distances('mammal.n.01', ['carnivore.n.01', 'dog.n.01'])
array([2.97422988, 2.83007402])
>>> # Check the distances between a word and every other word in the vocab.
>>> all_distances = model.kv.distances('mammal.n.01')
Raises
------
KeyError
If either `node_or_vector` or any node in `other_nodes` is absent from vocab.
"""
if isinstance(node_or_vector, str):
input_vector = self.get_vector(node_or_vector)
else:
input_vector = node_or_vector
if not other_nodes:
other_vectors = self.vectors
else:
other_indices = [self.get_index(node) for node in other_nodes]
other_vectors = self.vectors[other_indices]
return self.vector_distance_batch(input_vector, other_vectors)
def norm(self, node_or_vector):
"""Compute absolute position in hierarchy of input node or vector.
Values range between 0 and 1. A lower value indicates the input node or vector is higher in the hierarchy.
Parameters
----------
node_or_vector : {str, int, numpy.array}
Input node key or vector for which position in hierarchy is to be returned.
Returns
-------
float
Absolute position in the hierarchy of the input vector or node.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> # Read the sample relations file and train the model
>>> relations = PoincareRelations(file_path=datapath('poincare_hypernyms_large.tsv'))
>>> model = PoincareModel(train_data=relations)
>>> model.train(epochs=50)
>>>
>>> # Get the norm of the embedding of the word `mammal`.
>>> model.kv.norm('mammal.n.01')
0.6423008703542398
Notes
-----
The position in hierarchy is based on the norm of the vector for the node.
"""
if isinstance(node_or_vector, str):
input_vector = self.get_vector(node_or_vector)
else:
input_vector = node_or_vector
return np.linalg.norm(input_vector)
def difference_in_hierarchy(self, node_or_vector_1, node_or_vector_2):
"""Compute relative position in hierarchy of `node_or_vector_1` relative to `node_or_vector_2`.
A positive value indicates `node_or_vector_1` is higher in the hierarchy than `node_or_vector_2`.
Parameters
----------
node_or_vector_1 : {str, int, numpy.array}
Input node key or vector.
node_or_vector_2 : {str, int, numpy.array}
Input node key or vector.
Returns
-------
float
Relative position in hierarchy of `node_or_vector_1` relative to `node_or_vector_2`.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> # Read the sample relations file and train the model
>>> relations = PoincareRelations(file_path=datapath('poincare_hypernyms_large.tsv'))
>>> model = PoincareModel(train_data=relations)
>>> model.train(epochs=50)
>>>
>>> model.kv.difference_in_hierarchy('mammal.n.01', 'dog.n.01')
0.05382517902410999
>>> model.kv.difference_in_hierarchy('dog.n.01', 'mammal.n.01')
-0.05382517902410999
Notes
-----
The returned value can be positive or negative, depending on whether `node_or_vector_1` is higher
or lower in the hierarchy than `node_or_vector_2`.
"""
return self.norm(node_or_vector_2) - self.norm(node_or_vector_1)
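# Small usage sketch of the helpers above with hand-crafted vectors (no training). The keys
# 'root' and 'leaf' and their coordinates are made up for illustration, and the inherited
# KeyedVectors.add_vectors is assumed to be available for populating the vocabulary by hand.
_kv = PoincareKeyedVectors(vector_size=2, vector_count=0)
_kv.add_vectors(['root', 'leaf'], [np.array([0.05, 0.0]), np.array([0.7, 0.1])])
_d = _kv.distance('root', 'leaf')
assert np.isclose(_kv.similarity('root', 'leaf'), 1.0 / (1.0 + _d))
assert _kv.norm('root') < _kv.norm('leaf')        # 'root' sits higher in the hierarchy
assert _kv.difference_in_hierarchy('root', 'leaf') > 0
assert _kv.closest_parent('leaf') == 'root'
assert _kv.closest_parent('root') is None         # nothing sits above the smallest-norm node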
class PoincareRelations:
"""Stream relations for `PoincareModel` from a tsv-like file."""
def __init__(self, file_path, encoding='utf8', delimiter='\t'):
"""Initialize instance from file containing a pair of nodes (a relation) per line.
Parameters
----------
file_path : str
Path to file containing a pair of nodes (a relation) per line, separated by `delimiter`.
Since the relations are asymmetric, the order of `u` and `v` nodes in each pair matters.
            To express a "u is v" relation, the lines should take the form `u delimiter v`.
e.g: `kangaroo mammal` is a tab-delimited line expressing a "`kangaroo is a mammal`" relation.
For a full input file example, see `gensim/test/test_data/poincare_hypernyms.tsv
<https://github.com/RaRe-Technologies/gensim/blob/master/gensim/test/test_data/poincare_hypernyms.tsv>`_.
encoding : str, optional
Character encoding of the input file.
delimiter : str, optional
Delimiter character for each relation.
"""
self.file_path = file_path
self.encoding = encoding
self.delimiter = delimiter
def __iter__(self):
"""Stream relations from self.file_path decoded into unicode strings.
Yields
-------
(unicode, unicode)
Relation from input file.
"""
with utils.open(self.file_path, 'rb') as file_obj:
if sys.version_info[0] < 3:
lines = file_obj
else:
lines = (line.decode(self.encoding) for line in file_obj)
# csv.reader requires bytestring input in python2, unicode input in python3
reader = csv.reader(lines, delimiter=self.delimiter)
for row in reader:
if sys.version_info[0] < 3:
row = [value.decode(self.encoding) for value in row]
yield tuple(row)
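# Usage sketch: stream relations from a small hand-written TSV file. The temporary file and its
# contents are made up for illustration; `_example_stream_relations` is a hypothetical helper.
def _example_stream_relations():
    import os
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.tsv', delete=False) as tmp:
        tmp.write('kangaroo\tmarsupial\nkangaroo\tmammal\n')
    try:
        assert list(PoincareRelations(tmp.name)) == [('kangaroo', 'marsupial'), ('kangaroo', 'mammal')]
    finally:
        os.remove(tmp.name)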
class NegativesBuffer:
"""Buffer and return negative samples."""
def __init__(self, items):
"""Initialize instance from list or numpy array of samples.
Parameters
----------
items : list/numpy.array
List or array containing negative samples.
"""
self._items = items
self._current_index = 0
def num_items(self):
"""Get the number of items remaining in the buffer.
Returns
-------
int
Number of items in the buffer that haven't been consumed yet.
"""
return len(self._items) - self._current_index
def get_items(self, num_items):
"""Get the next `num_items` from buffer.
Parameters
----------
num_items : int
Number of items to fetch.
Returns
-------
numpy.array or list
Slice containing `num_items` items from the original data.
Notes
-----
        No error is raised if fewer than `num_items` items remain;
        in that case, all of the remaining items are returned.
"""
start_index = self._current_index
end_index = start_index + num_items
self._current_index += num_items
return self._items[start_index:end_index]
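# Usage sketch for NegativesBuffer: items are handed out in order and the buffer simply runs dry
# once exhausted (the caller is expected to create a fresh buffer, as
# PoincareModel._get_candidate_negatives does). The sample values are arbitrary.
_buf = NegativesBuffer([4, 7, 1, 3, 9])
assert list(_buf.get_items(2)) == [4, 7]
assert _buf.num_items() == 3
assert list(_buf.get_items(10)) == [1, 3, 9]   # fewer than requested, no error raised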
class ReconstructionEvaluation:
"""Evaluate reconstruction on given network for given embedding."""
def __init__(self, file_path, embedding):
"""Initialize evaluation instance with tsv file containing relation pairs and embedding to be evaluated.
Parameters
----------
file_path : str
Path to tsv file containing relation pairs.
embedding : :class:`~gensim.models.poincare.PoincareKeyedVectors`
Embedding to be evaluated.
"""
items = set()
relations = defaultdict(set)
with utils.open(file_path, 'r') as f:
reader = csv.reader(f, delimiter='\t')
for row in reader:
assert len(row) == 2, 'Hypernym pair has more than two items'
item_1_index = embedding.get_index(row[0])
item_2_index = embedding.get_index(row[1])
relations[item_1_index].add(item_2_index)
items.update([item_1_index, item_2_index])
self.items = items
self.relations = relations
self.embedding = embedding
@staticmethod
def get_positive_relation_ranks_and_avg_prec(all_distances, positive_relations):
"""Compute ranks and Average Precision of positive relations.
Parameters
----------
all_distances : numpy.array of float
Array of all distances (floats) for a specific item.
positive_relations : list
List of indices of positive relations for the item.
Returns
-------
(list of int, float)
The list contains ranks of positive relations in the same order as `positive_relations`.
The float is the Average Precision of the ranking, e.g. ([1, 2, 3, 20], 0.610).
"""
positive_relation_distances = all_distances[positive_relations]
negative_relation_distances = np.ma.array(all_distances, mask=False)
negative_relation_distances.mask[positive_relations] = True
# Compute how many negative relation distances are less than each positive relation distance, plus 1 for rank
ranks = (negative_relation_distances < positive_relation_distances[:, np.newaxis]).sum(axis=1) + 1
map_ranks = np.sort(ranks) + np.arange(len(ranks))
avg_precision = ((np.arange(1, len(map_ranks) + 1) / np.sort(map_ranks)).mean())
return list(ranks), avg_precision
def evaluate(self, max_n=None):
"""Evaluate all defined metrics for the reconstruction task.
Parameters
----------
max_n : int, optional
Maximum number of positive relations to evaluate, all if `max_n` is None.
Returns
-------
dict of (str, float)
(metric_name, metric_value) pairs, e.g. {'mean_rank': 50.3, 'MAP': 0.31}.
"""
mean_rank, map_ = self.evaluate_mean_rank_and_map(max_n)
return {'mean_rank': mean_rank, 'MAP': map_}
def evaluate_mean_rank_and_map(self, max_n=None):
"""Evaluate mean rank and MAP for reconstruction.
Parameters
----------
max_n : int, optional
Maximum number of positive relations to evaluate, all if `max_n` is None.
Returns
-------
(float, float)
            (mean_rank, MAP), e.g. (50.3, 0.31).
"""
ranks = []
avg_precision_scores = []
for i, item in enumerate(self.items, start=1):
if item not in self.relations:
continue
item_relations = list(self.relations[item])
item_term = self.embedding.index_to_key[item]
item_distances = self.embedding.distances(item_term)
positive_relation_ranks, avg_precision = \
self.get_positive_relation_ranks_and_avg_prec(item_distances, item_relations)
ranks += positive_relation_ranks
avg_precision_scores.append(avg_precision)
if max_n is not None and i > max_n:
break
return np.mean(ranks), np.mean(avg_precision_scores)
class LinkPredictionEvaluation:
    """Evaluate link prediction for given network and embedding."""
def __init__(self, train_path, test_path, embedding):
"""Initialize evaluation instance with tsv file containing relation pairs and embedding to be evaluated.
Parameters
----------
train_path : str
Path to tsv file containing relation pairs used for training.
test_path : str
Path to tsv file containing relation pairs to evaluate.
embedding : :class:`~gensim.models.poincare.PoincareKeyedVectors`
Embedding to be evaluated.
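        Examples
        --------
        A minimal usage sketch; the train/test file names are illustrative (tab-separated hypernym pairs,
        with the test pairs held out from training but only containing terms seen during training):
        .. sourcecode:: pycon
            >>> from gensim.models.poincare import PoincareModel, PoincareRelations, LinkPredictionEvaluation
            >>>
            >>> model = PoincareModel(PoincareRelations('hypernyms_train.tsv'), size=50)
            >>> model.train(epochs=10)
            >>>
            >>> evaluation = LinkPredictionEvaluation('hypernyms_train.tsv', 'hypernyms_test.tsv', model.kv)
            >>> results = evaluation.evaluate(max_n=100)  # {'mean_rank': ..., 'MAP': ...}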
"""
items = set()
relations = {'known': defaultdict(set), 'unknown': defaultdict(set)}
data_files = {'known': train_path, 'unknown': test_path}
for relation_type, data_file in data_files.items():
with utils.open(data_file, 'r') as f:
reader = csv.reader(f, delimiter='\t')
for row in reader:
assert len(row) == 2, 'Hypernym pair has more than two items'
item_1_index = embedding.get_index(row[0])
item_2_index = embedding.get_index(row[1])
relations[relation_type][item_1_index].add(item_2_index)
items.update([item_1_index, item_2_index])
self.items = items
self.relations = relations
self.embedding = embedding
@staticmethod
def get_unknown_relation_ranks_and_avg_prec(all_distances, unknown_relations, known_relations):
"""Compute ranks and Average Precision of unknown positive relations.
Parameters
----------
all_distances : numpy.array of float
Array of all distances for a specific item.
unknown_relations : list of int
List of indices of unknown positive relations.
known_relations : list of int
List of indices of known positive relations.
Returns
-------
tuple (list of int, float)
            The list contains ranks of positive relations in the same order as `unknown_relations`.
The float is the Average Precision of the ranking, e.g. ([1, 2, 3, 20], 0.610).
"""
unknown_relation_distances = all_distances[unknown_relations]
negative_relation_distances = np.ma.array(all_distances, mask=False)
negative_relation_distances.mask[unknown_relations] = True
negative_relation_distances.mask[known_relations] = True
# Compute how many negative relation distances are less than each unknown relation distance, plus 1 for rank
ranks = (negative_relation_distances < unknown_relation_distances[:, np.newaxis]).sum(axis=1) + 1
map_ranks = np.sort(ranks) + np.arange(len(ranks))
avg_precision = ((np.arange(1, len(map_ranks) + 1) / np.sort(map_ranks)).mean())
return list(ranks), avg_precision
def evaluate(self, max_n=None):
"""Evaluate all defined metrics for the link prediction task.
Parameters
----------
max_n : int, optional
Maximum number of positive relations to evaluate, all if `max_n` is None.
Returns
-------
dict of (str, float)
(metric_name, metric_value) pairs, e.g. {'mean_rank': 50.3, 'MAP': 0.31}.
"""
mean_rank, map_ = self.evaluate_mean_rank_and_map(max_n)
return {'mean_rank': mean_rank, 'MAP': map_}
def evaluate_mean_rank_and_map(self, max_n=None):
"""Evaluate mean rank and MAP for link prediction.
Parameters
----------
max_n : int, optional
Maximum number of positive relations to evaluate, all if `max_n` is None.
Returns
-------
tuple (float, float)
            (mean_rank, MAP), e.g. (50.3, 0.31).
"""
ranks = []
avg_precision_scores = []
for i, item in enumerate(self.items, start=1):
if item not in self.relations['unknown']: # No positive relations to predict for this node
continue
unknown_relations = list(self.relations['unknown'][item])
known_relations = list(self.relations['known'][item])
item_term = self.embedding.index_to_key[item]
item_distances = self.embedding.distances(item_term)
unknown_relation_ranks, avg_precision = \
self.get_unknown_relation_ranks_and_avg_prec(item_distances, unknown_relations, known_relations)
ranks += unknown_relation_ranks
avg_precision_scores.append(avg_precision)
if max_n is not None and i > max_n:
break
return np.mean(ranks), np.mean(avg_precision_scores)
class LexicalEntailmentEvaluation:
    """Evaluate lexical entailment on the HyperLex dataset for any embedding."""
def __init__(self, filepath):
"""Initialize evaluation instance with HyperLex text file containing relation pairs.
Parameters
----------
filepath : str
Path to HyperLex text file.
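        Examples
        --------
        A minimal usage sketch; ``hyperlex-all.txt`` stands in for a locally downloaded copy of the HyperLex
        dataset and ``model`` for an already trained :class:`~gensim.models.poincare.PoincareModel`:
        .. sourcecode:: pycon
            >>> evaluation = LexicalEntailmentEvaluation('hyperlex-all.txt')
            >>> spearman = evaluation.evaluate_spearman(model.kv)  # requires the optional `pygtrie` package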
"""
expected_scores = {}
with utils.open(filepath, 'r') as f:
reader = csv.DictReader(f, delimiter=' ')
for row in reader:
word_1, word_2 = row['WORD1'], row['WORD2']
expected_scores[(word_1, word_2)] = float(row['AVG_SCORE'])
self.scores = expected_scores
self.alpha = 1000
def score_function(self, embedding, trie, term_1, term_2):
"""Compute predicted score - extent to which `term_1` is a type of `term_2`.
Parameters
----------
embedding : :class:`~gensim.models.poincare.PoincareKeyedVectors`
Embedding to use for computing predicted score.
trie : :class:`pygtrie.Trie`
Trie to use for finding matching vocab terms for input terms.
term_1 : str
Input term.
term_2 : str
Input term.
Returns
-------
float
Predicted score (the extent to which `term_1` is a type of `term_2`).
"""
try:
word_1_terms = self.find_matching_terms(trie, term_1)
word_2_terms = self.find_matching_terms(trie, term_2)
except KeyError:
raise ValueError("No matching terms found for either %s or %s" % (term_1, term_2))
min_distance = np.inf
min_term_1, min_term_2 = None, None
for term_1 in word_1_terms:
for term_2 in word_2_terms:
distance = embedding.distance(term_1, term_2)
if distance < min_distance:
min_term_1, min_term_2 = term_1, term_2
min_distance = distance
assert min_term_1 is not None and min_term_2 is not None
vector_1, vector_2 = embedding.get_vector(min_term_1), embedding.get_vector(min_term_2)
norm_1, norm_2 = np.linalg.norm(vector_1), np.linalg.norm(vector_2)
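        # Entailment score from the Poincare embeddings paper: more general terms tend to lie closer to the
        # origin (smaller norm), so the norm difference modulates the negated distance between the two terms.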
return -1 * (1 + self.alpha * (norm_2 - norm_1)) * min_distance
@staticmethod
def find_matching_terms(trie, word):
"""Find terms in the `trie` beginning with the `word`.
Parameters
----------
trie : :class:`pygtrie.Trie`
Trie to use for finding matching terms.
word : str
Input word to use for prefix search.
Returns
-------
list of str
List of matching terms.
"""
matches = trie.items('%s.' % word)
matching_terms = [''.join(key_chars) for key_chars, value in matches]
return matching_terms
@staticmethod
def create_vocab_trie(embedding):
"""Create trie with vocab terms of the given embedding to enable quick prefix searches.
Parameters
----------
embedding : :class:`~gensim.models.poincare.PoincareKeyedVectors`
Embedding for which trie is to be created.
Returns
-------
:class:`pygtrie.Trie`
Trie containing vocab terms of the input embedding.
"""
try:
from pygtrie import Trie
except ImportError:
raise ImportError(
'pygtrie could not be imported, please install pygtrie in order to use LexicalEntailmentEvaluation')
vocab_trie = Trie()
for key in embedding.key_to_index:
vocab_trie[key] = True
return vocab_trie
    def evaluate_spearman(self, embedding):
        """Evaluate the Spearman correlation score for lexical entailment for the given embedding.
Parameters
----------
embedding : :class:`~gensim.models.poincare.PoincareKeyedVectors`
Embedding for which evaluation is to be done.
Returns
-------
float
Spearman correlation score for the task for input embedding.
"""
predicted_scores = []
expected_scores = []
skipped = 0
count = 0
vocab_trie = self.create_vocab_trie(embedding)
for (word_1, word_2), expected_score in self.scores.items():
try:
predicted_score = self.score_function(embedding, vocab_trie, word_1, word_2)
except ValueError:
skipped += 1
continue
count += 1
predicted_scores.append(predicted_score)
expected_scores.append(expected_score)
        logger.info('skipped pairs: %d out of %d', skipped, len(self.scores))
spearman = spearmanr(expected_scores, predicted_scores)
return spearman.correlation
| 70,370 | Python | .py | 1,457 | 38.0151 | 120 | 0.610969 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) | 7,118 | normmodel.py | piskvorky_gensim/gensim/models/normmodel.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
import logging
from gensim import interfaces, matutils
logger = logging.getLogger(__name__)
class NormModel(interfaces.TransformationABC):
"""Objects of this class realize the explicit normalization of vectors (l1 and l2)."""
def __init__(self, corpus=None, norm='l2'):
r"""Compute the l1 or l2 normalization by normalizing separately for each document in a corpus.
If :math:`v_{i,j}` is the 'i'th component of the vector representing document 'j', the l1 normalization is
.. math:: l1_{i, j} = \frac{v_{i,j}}{\sum_k |v_{k,j}|}
the l2 normalization is
.. math:: l2_{i, j} = \frac{v_{i,j}}{\sqrt{\sum_k v_{k,j}^2}}
Parameters
----------
corpus : iterable of iterable of (int, number), optional
Input corpus.
norm : {'l1', 'l2'}, optional
Norm used to normalize.
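        Examples
        --------
        A minimal sketch using gensim's bundled test corpus:
        .. sourcecode:: pycon
            >>> from gensim.test.utils import common_corpus
            >>> from gensim.models import NormModel
            >>>
            >>> norm_model = NormModel(common_corpus, norm='l2')
            >>> normalized_doc = norm_model[common_corpus[0]]  # list of (id, weight) with unit l2 norm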
"""
self.norm = norm
if corpus is not None:
self.calc_norm(corpus)
else:
pass
def __str__(self):
return "%s<num_docs=%s, num_nnz=%s, norm=%s>" % (
self.__class__.__name__, self.num_docs, self.num_nnz, self.norm
)
def calc_norm(self, corpus):
"""Calculate the norm by calling :func:`~gensim.matutils.unitvec` with the norm parameter.
Parameters
----------
corpus : iterable of iterable of (int, number)
Input corpus.
"""
logger.info("Performing %s normalization...", self.norm)
norms = []
numnnz = 0
docno = 0
for bow in corpus:
docno += 1
numnnz += len(bow)
norms.append(matutils.unitvec(bow, self.norm))
self.num_docs = docno
self.num_nnz = numnnz
self.norms = norms
def normalize(self, bow):
"""Normalize a simple count representation.
Parameters
----------
bow : list of (int, number)
Document in BoW format.
Returns
-------
list of (int, number)
Normalized document.
"""
vector = matutils.unitvec(bow, self.norm)
return vector
def __getitem__(self, bow):
"""Call the :func:`~gensim.models.normmodel.NormModel.normalize`.
Parameters
----------
bow : list of (int, number)
Document in BoW format.
Returns
-------
list of (int, number)
Normalized document.
"""
return self.normalize(bow)
| 2,736 | Python | .py | 75 | 27.72 | 114 | 0.564706 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) | 7,119 | basemodel.py | piskvorky_gensim/gensim/models/basemodel.py |
class BaseTopicModel:
def print_topic(self, topicno, topn=10):
"""Get a single topic as a formatted string.
Parameters
----------
topicno : int
Topic id.
topn : int
Number of words from topic that will be used.
Returns
-------
str
            String representation of topic, like '-0.340*"category" + 0.298*"$M$" + 0.183*"algebra" + ... '.
"""
return ' + '.join('%.3f*"%s"' % (v, k) for k, v in self.show_topic(topicno, topn))
def print_topics(self, num_topics=20, num_words=10):
"""Get the most significant topics (alias for `show_topics()` method).
Parameters
----------
        num_topics : int, optional
            The number of topics to be selected; if -1, all topics are returned (ordered by significance).
        num_words : int, optional
            The number of words to be included per topic (ordered by significance).
Returns
-------
list of (int, list of (str, float))
Sequence with (topic_id, [(word, value), ... ]).
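        Examples
        --------
        A minimal sketch with a small LDA model (the learned topics, and hence the exact output, vary between runs):
        .. sourcecode:: pycon
            >>> from gensim.test.utils import common_corpus, common_dictionary
            >>> from gensim.models import LdaModel
            >>>
            >>> lda = LdaModel(common_corpus, id2word=common_dictionary, num_topics=2)
            >>> topics = lda.print_topics(num_topics=2, num_words=3)  # one (topic_id, representation) pair per topic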
"""
return self.show_topics(num_topics=num_topics, num_words=num_words, log=True)
def get_topics(self):
"""Get words X topics matrix.
Returns
--------
numpy.ndarray:
The term topic matrix learned during inference, shape (`num_topics`, `vocabulary_size`).
Raises
------
NotImplementedError
"""
raise NotImplementedError
| 1,554 | Python | .py | 40 | 29.35 | 114 | 0.553928 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) | 7,120 | lda_worker.py | piskvorky_gensim/gensim/models/lda_worker.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Worker ("slave") process used in computing distributed Latent Dirichlet Allocation
(LDA, :class:`~gensim.models.ldamodel.LdaModel`).
Run this script on every node in your cluster. If you wish, you may even run it multiple times on a single machine,
to make better use of multiple cores (just beware that memory footprint increases accordingly).
How to use distributed :class:`~gensim.models.ldamodel.LdaModel`
----------------------------------------------------------------
#. Install needed dependencies (Pyro4) ::
pip install gensim[distributed]
#. Setup serialization (on each machine) ::
export PYRO_SERIALIZERS_ACCEPTED=pickle
export PYRO_SERIALIZER=pickle
#. Run nameserver ::
python -m Pyro4.naming -n 0.0.0.0 &
#. Run workers (on each machine) ::
python -m gensim.models.lda_worker &
#. Run dispatcher ::
python -m gensim.models.lda_dispatcher &
#. Run :class:`~gensim.models.ldamodel.LdaModel` in distributed mode :
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.models import LdaModel
>>>
>>> model = LdaModel(common_corpus, id2word=common_dictionary, distributed=True)
Command line arguments
----------------------
.. program-output:: python -m gensim.models.lda_worker --help
:ellipsis: 0, -7
"""
from __future__ import with_statement
import os
import sys
import logging
import threading
import tempfile
import argparse
try:
import Queue
except ImportError:
import queue as Queue
import Pyro4
from gensim.models import ldamodel
from gensim import utils
logger = logging.getLogger('gensim.models.lda_worker')
# periodically save intermediate models after every SAVE_DEBUG updates (0 for never)
SAVE_DEBUG = 0
LDA_WORKER_PREFIX = 'gensim.lda_worker'
class Worker:
"""Used as a Pyro4 class with exposed methods.
Exposes every non-private method and property of the class automatically to be available for remote access.
"""
def __init__(self):
"""Partly initialize the model."""
self.model = None
@Pyro4.expose
def initialize(self, myid, dispatcher, **model_params):
"""Fully initialize the worker.
Parameters
----------
myid : int
An ID number used to identify this worker in the dispatcher object.
dispatcher : :class:`~gensim.models.lda_dispatcher.Dispatcher`
The dispatcher responsible for scheduling this worker.
**model_params
            Keyword parameters to initialize the inner LDA model, see :class:`~gensim.models.ldamodel.LdaModel`.
"""
self.lock_update = threading.Lock()
        self.jobsdone = 0  # how many jobs this worker has completed
# id of this worker in the dispatcher; just a convenience var for easy access/logging TODO remove?
self.myid = myid
self.dispatcher = dispatcher
self.finished = False
logger.info("initializing worker #%s", myid)
self.model = ldamodel.LdaModel(**model_params)
@Pyro4.expose
@Pyro4.oneway
def requestjob(self):
"""Request jobs from the dispatcher, in a perpetual loop until :meth:`gensim.models.lda_worker.Worker.getstate`
is called.
Raises
------
RuntimeError
            If `self.model` is None (i.e. the worker is not initialized).
"""
if self.model is None:
raise RuntimeError("worker must be initialized before receiving jobs")
job = None
while job is None and not self.finished:
try:
job = self.dispatcher.getjob(self.myid)
except Queue.Empty:
# no new job: try again, unless we're finished with all work
continue
if job is not None:
logger.info("worker #%s received job #%i", self.myid, self.jobsdone)
self.processjob(job)
self.dispatcher.jobdone(self.myid)
else:
logger.info("worker #%i stopping asking for jobs", self.myid)
@utils.synchronous('lock_update')
    def processjob(self, job):
        """Incrementally process the job and potentially log progress.
Parameters
----------
job : iterable of list of (int, float)
Corpus in BoW format.
"""
logger.debug("starting to process job #%i", self.jobsdone)
self.model.do_estep(job)
self.jobsdone += 1
if SAVE_DEBUG and self.jobsdone % SAVE_DEBUG == 0:
fname = os.path.join(tempfile.gettempdir(), 'lda_worker.pkl')
self.model.save(fname)
logger.info("finished processing job #%i", self.jobsdone - 1)
@Pyro4.expose
def ping(self):
"""Test the connectivity with Worker."""
return True
@Pyro4.expose
@utils.synchronous('lock_update')
def getstate(self):
"""Log and get the LDA model's current state.
Returns
-------
result : :class:`~gensim.models.ldamodel.LdaState`
The current state.
"""
logger.info("worker #%i returning its state after %s jobs", self.myid, self.jobsdone)
result = self.model.state
assert isinstance(result, ldamodel.LdaState)
self.model.clear() # free up mem in-between two EM cycles
self.finished = True
return result
@Pyro4.expose
@utils.synchronous('lock_update')
def reset(self, state):
"""Reset the worker by setting sufficient stats to 0.
Parameters
----------
state : :class:`~gensim.models.ldamodel.LdaState`
Encapsulates information for distributed computation of LdaModel objects.
"""
assert state is not None
logger.info("resetting worker #%i", self.myid)
self.model.state = state
self.model.sync_state()
self.model.state.reset()
self.finished = False
@Pyro4.oneway
def exit(self):
"""Terminate the worker."""
logger.info("terminating worker #%i", self.myid)
os._exit(0)
def main():
parser = argparse.ArgumentParser(description=__doc__[:-130], formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--host", help="Nameserver hostname (default: %(default)s)", default=None)
parser.add_argument("--port", help="Nameserver port (default: %(default)s)", default=None, type=int)
parser.add_argument(
"--no-broadcast", help="Disable broadcast (default: %(default)s)", action='store_const',
default=True, const=False
)
parser.add_argument("--hmac", help="Nameserver hmac key (default: %(default)s)", default=None)
parser.add_argument(
'-v', '--verbose', help='Verbose flag', action='store_const', dest="loglevel",
const=logging.INFO, default=logging.WARNING
)
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=args.loglevel)
logger.info("running %s", " ".join(sys.argv))
ns_conf = {
"broadcast": args.no_broadcast,
"host": args.host,
"port": args.port,
"hmac_key": args.hmac
}
utils.pyro_daemon(LDA_WORKER_PREFIX, Worker(), random_suffix=True, ns_conf=ns_conf)
logger.info("finished running %s", " ".join(sys.argv))
if __name__ == '__main__':
main()
| 7,555 | Python | .py | 183 | 34.387978 | 119 | 0.651176 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) | 7,121 | fasttext_corpusfile.pyx | piskvorky_gensim/gensim/models/fasttext_corpusfile.pyx |
#!/usr/bin/env cython
# distutils: language = c++
# cython: language_level=3
# cython: boundscheck=False
# cython: wraparound=False
# cython: cdivision=True
# cython: embedsignature=True
# coding: utf-8
#
# Copyright (C) 2018 Dmitry Persiyanov <dmitry.persiyanov@gmail.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Optimized cython functions for file-based training :class:`~gensim.models.fasttext.FastText` model."""
import numpy as np
cimport numpy as np
from libcpp.string cimport string
from libcpp.vector cimport vector
from gensim.models.fasttext_inner cimport (
fasttext_fast_sentence_sg_hs,
fasttext_fast_sentence_sg_neg,
fasttext_fast_sentence_cbow_hs,
fasttext_fast_sentence_cbow_neg,
init_ft_config,
FastTextConfig
)
from gensim.models.word2vec_inner cimport random_int32
from gensim.models.word2vec_corpusfile cimport (
VocabItem,
CythonVocab,
CythonLineSentence,
get_alpha,
get_next_alpha,
cvocab_t
)
ctypedef np.float32_t REAL_t
DEF MAX_SENTENCE_LEN = 10000
DEF MAX_SUBWORDS = 1000
cdef void prepare_c_structures_for_batch(
vector[vector[string]] &sentences, int sample, int hs, int window, long long *total_words,
int *effective_words, int *effective_sentences, unsigned long long *next_random, cvocab_t *vocab,
int *sentence_idx, np.uint32_t *indexes, int *codelens, np.uint8_t **codes, np.uint32_t **points,
np.uint32_t *reduced_windows, int *subwords_idx_len, np.uint32_t **subwords_idx, int shrink_windows,
) nogil:
cdef VocabItem word
cdef string token
cdef vector[string] sent
sentence_idx[0] = 0 # indices of the first sentence always start at 0
for sent in sentences:
if sent.empty():
continue # ignore empty sentences; leave effective_sentences unchanged
total_words[0] += sent.size()
for token in sent:
# leaving `effective_words` unchanged = shortening the sentence = expanding the window
if vocab[0].find(token) == vocab[0].end():
continue
word = vocab[0][token]
if sample and word.sample_int < random_int32(next_random):
continue
indexes[effective_words[0]] = word.index
subwords_idx_len[effective_words[0]] = word.subword_idx_len
subwords_idx[effective_words[0]] = word.subword_idx
if hs:
codelens[effective_words[0]] = word.code_len
codes[effective_words[0]] = word.code
points[effective_words[0]] = word.point
effective_words[0] += 1
if effective_words[0] == MAX_SENTENCE_LEN:
break
# keep track of which words go into which sentence, so we don't train
# across sentence boundaries.
        # indices of sentence number X are between <sentence_idx[X], sentence_idx[X + 1])
effective_sentences[0] += 1
sentence_idx[effective_sentences[0]] = effective_words[0]
if effective_words[0] == MAX_SENTENCE_LEN:
break
# precompute "reduced window" offsets in a single randint() call
if shrink_windows:
for i in range(effective_words[0]):
reduced_windows[i] = random_int32(next_random) % window
else:
for i in range(effective_words[0]):
reduced_windows[i] = 0
def train_epoch_sg(
model, corpus_file, offset, _cython_vocab, _cur_epoch, _expected_examples, _expected_words, _work, _l1):
"""Train Skipgram model for one epoch by training on an input stream. This function is used only in multistream mode.
Called internally from :meth:`~gensim.models.fasttext.FastText.train`.
Parameters
----------
model : :class:`~gensim.models.fasttext.FastText`
The FastText model instance to train.
corpus_file : str
Path to corpus file.
_cur_epoch : int
Current epoch number. Used for calculating and decaying learning rate.
_work : np.ndarray
Private working memory for each worker.
_l1 : np.ndarray
Private working memory for each worker.
Returns
-------
    (int, int, int)
        Number of sentences, number of effective words (words that were present in the vocabulary and not
        dropped by the `sample` downsampling), and total number of raw words processed in this epoch.
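    Examples
    --------
    This low-level routine is normally reached indirectly through the ``corpus_file`` training path; a minimal
    sketch of that path, where ``corpus.txt`` is an illustrative plain-text file with one sentence per line:
    .. sourcecode:: pycon
        >>> from gensim.models import FastText
        >>>
        >>> model = FastText(vector_size=32, window=3, min_count=1, sg=1)
        >>> model.build_vocab(corpus_file='corpus.txt')
        >>> model.train(corpus_file='corpus.txt', total_words=model.corpus_total_words, epochs=5)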
"""
cdef FastTextConfig c
# For learning rate updates
cdef int cur_epoch = _cur_epoch
cdef int num_epochs = model.epochs
cdef long long expected_examples = (-1 if _expected_examples is None else _expected_examples)
cdef long long expected_words = (-1 if _expected_words is None else _expected_words)
cdef REAL_t start_alpha = model.alpha
cdef REAL_t end_alpha = model.min_alpha
cdef REAL_t _alpha = get_alpha(model.alpha, end_alpha, cur_epoch, num_epochs)
cdef CythonLineSentence input_stream = CythonLineSentence(corpus_file, offset)
cdef CythonVocab vocab = _cython_vocab
cdef int i, j, k
cdef int effective_words = 0, effective_sentences = 0
cdef long long total_sentences = 0
cdef long long total_effective_words = 0, total_words = 0
cdef int sent_idx, idx_start, idx_end
cdef int shrink_windows = int(model.shrink_windows)
init_ft_config(&c, model, _alpha, _work, _l1)
# for preparing batches & training
cdef vector[vector[string]] sentences
with nogil:
input_stream.reset()
while not (input_stream.is_eof() or total_words > expected_words / c.workers):
effective_sentences = 0
effective_words = 0
sentences = input_stream.next_batch()
prepare_c_structures_for_batch(
sentences, c.sample, c.hs, c.window, &total_words, &effective_words, &effective_sentences,
&c.next_random, vocab.get_vocab_ptr(), c.sentence_idx, c.indexes, c.codelens,
c.codes, c.points, c.reduced_windows, c.subwords_idx_len, c.subwords_idx, shrink_windows)
for sent_idx in range(effective_sentences):
idx_start = c.sentence_idx[sent_idx]
idx_end = c.sentence_idx[sent_idx + 1]
for i in range(idx_start, idx_end):
j = i - c.window + c.reduced_windows[i]
if j < idx_start:
j = idx_start
k = i + c.window + 1 - c.reduced_windows[i]
if k > idx_end:
k = idx_end
for j in range(j, k):
if j == i:
continue
if c.hs:
fasttext_fast_sentence_sg_hs(&c, i, j)
if c.negative:
fasttext_fast_sentence_sg_neg(&c, i, j)
total_sentences += sentences.size()
total_effective_words += effective_words
c.alpha = get_next_alpha(start_alpha, end_alpha, total_sentences, total_words,
expected_examples, expected_words, cur_epoch, num_epochs)
return total_sentences, total_effective_words, total_words
def train_epoch_cbow(model, corpus_file, offset, _cython_vocab, _cur_epoch, _expected_examples, _expected_words, _work,
_neu1):
"""Train CBOW model for one epoch by training on an input stream. This function is used only in multistream mode.
Called internally from :meth:`~gensim.models.fasttext.FastText.train`.
Parameters
----------
model : :class:`~gensim.models.fasttext.FastText`
The FastText model instance to train.
corpus_file : str
Path to a corpus file.
_cur_epoch : int
Current epoch number. Used for calculating and decaying learning rate.
_work : np.ndarray
Private working memory for each worker.
_neu1 : np.ndarray
Private working memory for each worker.
Returns
-------
    (int, int, int)
        Number of sentences, number of effective words (words that were present in the vocabulary and not
        dropped by the `sample` downsampling), and total number of raw words processed in this epoch.
"""
cdef FastTextConfig c
# For learning rate updates
cdef int cur_epoch = _cur_epoch
cdef int num_epochs = model.epochs
cdef long long expected_examples = (-1 if _expected_examples is None else _expected_examples)
cdef long long expected_words = (-1 if _expected_words is None else _expected_words)
cdef REAL_t start_alpha = model.alpha
cdef REAL_t end_alpha = model.min_alpha
cdef REAL_t _alpha = get_alpha(model.alpha, end_alpha, cur_epoch, num_epochs)
cdef CythonLineSentence input_stream = CythonLineSentence(corpus_file, offset)
cdef CythonVocab vocab = _cython_vocab
cdef int i, j, k
cdef int effective_words = 0, effective_sentences = 0
cdef long long total_sentences = 0
cdef long long total_effective_words = 0, total_words = 0
cdef int sent_idx, idx_start, idx_end
cdef int shrink_windows = int(model.shrink_windows)
init_ft_config(&c, model, _alpha, _work, _neu1)
# for preparing batches & training
cdef vector[vector[string]] sentences
with nogil:
input_stream.reset()
while not (input_stream.is_eof() or total_words > expected_words / c.workers):
effective_sentences = 0
effective_words = 0
sentences = input_stream.next_batch()
prepare_c_structures_for_batch(
sentences, c.sample, c.hs, c.window, &total_words, &effective_words, &effective_sentences,
&c.next_random, vocab.get_vocab_ptr(), c.sentence_idx, c.indexes, c.codelens,
c.codes, c.points, c.reduced_windows, c.subwords_idx_len, c.subwords_idx, shrink_windows)
for sent_idx in range(effective_sentences):
idx_start = c.sentence_idx[sent_idx]
idx_end = c.sentence_idx[sent_idx + 1]
for i in range(idx_start, idx_end):
j = i - c.window + c.reduced_windows[i]
if j < idx_start:
j = idx_start
k = i + c.window + 1 - c.reduced_windows[i]
if k > idx_end:
k = idx_end
if c.hs:
fasttext_fast_sentence_cbow_hs(&c, i, j, k)
if c.negative:
fasttext_fast_sentence_cbow_neg(&c, i, j, k)
total_sentences += sentences.size()
total_effective_words += effective_words
c.alpha = get_next_alpha(start_alpha, end_alpha, total_sentences, total_words,
expected_examples, expected_words, cur_epoch, num_epochs)
return total_sentences, total_effective_words, total_words
CORPUSFILE_VERSION = 1
| 10,844 | Python | .py | 227 | 38.167401 | 121 | 0.6341 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) | 7,122 | coherencemodel.py | piskvorky_gensim/gensim/models/coherencemodel.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Calculate topic coherence for topic models. This is the implementation of the four stage topic coherence pipeline
from the paper `Michael Roeder, Andreas Both and Alexander Hinneburg: "Exploring the space of topic coherence measures"
<http://svn.aksw.org/papers/2015/WSDM_Topic_Evaluation/public.pdf>`_.
Typically, :class:`~gensim.models.coherencemodel.CoherenceModel` is used for evaluation of topic models.
The four-stage pipeline is:
* Segmentation
* Probability Estimation
* Confirmation Measure
* Aggregation
This pipeline lets the user compose a coherence measure of their choice
by choosing a method for each of the four stages.
See Also
--------
:mod:`gensim.topic_coherence`
Internal functions for pipelines.
"""
import logging
import multiprocessing as mp
from collections import namedtuple
import numpy as np
from gensim import interfaces, matutils
from gensim import utils
from gensim.topic_coherence import (
segmentation, probability_estimation,
direct_confirmation_measure, indirect_confirmation_measure,
aggregation,
)
from gensim.topic_coherence.probability_estimation import unique_ids_from_segments
logger = logging.getLogger(__name__)
BOOLEAN_DOCUMENT_BASED = {'u_mass'}
SLIDING_WINDOW_BASED = {'c_v', 'c_uci', 'c_npmi', 'c_w2v'}
_make_pipeline = namedtuple('Coherence_Measure', 'seg, prob, conf, aggr')
COHERENCE_MEASURES = {
'u_mass': _make_pipeline(
segmentation.s_one_pre,
probability_estimation.p_boolean_document,
direct_confirmation_measure.log_conditional_probability,
aggregation.arithmetic_mean
),
'c_v': _make_pipeline(
segmentation.s_one_set,
probability_estimation.p_boolean_sliding_window,
indirect_confirmation_measure.cosine_similarity,
aggregation.arithmetic_mean
),
'c_w2v': _make_pipeline(
segmentation.s_one_set,
probability_estimation.p_word2vec,
indirect_confirmation_measure.word2vec_similarity,
aggregation.arithmetic_mean
),
'c_uci': _make_pipeline(
segmentation.s_one_one,
probability_estimation.p_boolean_sliding_window,
direct_confirmation_measure.log_ratio_measure,
aggregation.arithmetic_mean
),
'c_npmi': _make_pipeline(
segmentation.s_one_one,
probability_estimation.p_boolean_sliding_window,
direct_confirmation_measure.log_ratio_measure,
aggregation.arithmetic_mean
),
}
SLIDING_WINDOW_SIZES = {
'c_v': 110,
'c_w2v': 5,
'c_uci': 10,
'c_npmi': 10,
'u_mass': None
}
class CoherenceModel(interfaces.TransformationABC):
"""Objects of this class allow for building and maintaining a model for topic coherence.
Examples
---------
    One way of using this feature is through providing a trained topic model. A dictionary has to be explicitly provided
    if the model does not contain a dictionary already.
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.models.ldamodel import LdaModel
>>> from gensim.models.coherencemodel import CoherenceModel
>>>
>>> model = LdaModel(common_corpus, 5, common_dictionary)
>>>
>>> cm = CoherenceModel(model=model, corpus=common_corpus, coherence='u_mass')
>>> coherence = cm.get_coherence() # get coherence value
Another way of using this feature is through providing tokenized topics such as:
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.models.coherencemodel import CoherenceModel
>>> topics = [
... ['human', 'computer', 'system', 'interface'],
... ['graph', 'minors', 'trees', 'eps']
... ]
>>>
>>> cm = CoherenceModel(topics=topics, corpus=common_corpus, dictionary=common_dictionary, coherence='u_mass')
>>> coherence = cm.get_coherence() # get coherence value
"""
def __init__(self, model=None, topics=None, texts=None, corpus=None, dictionary=None,
window_size=None, keyed_vectors=None, coherence='c_v', topn=20, processes=-1):
"""
Parameters
----------
model : :class:`~gensim.models.basemodel.BaseTopicModel`, optional
Pre-trained topic model, should be provided if topics is not provided.
Currently supports :class:`~gensim.models.ldamodel.LdaModel`,
:class:`~gensim.models.ldamulticore.LdaMulticore`.
Use `topics` parameter to plug in an as yet unsupported model.
topics : list of list of str, optional
            List of tokenized topics; if this is used instead of `model`, a `dictionary` must be provided.
texts : list of list of str, optional
            Tokenized texts, needed for coherence models that use a sliding window based probability estimator
            (i.e. coherence=`c_something`).
corpus : iterable of list of (int, number), optional
Corpus in BoW format.
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`, optional
            Gensim dictionary mapping word ids to words, used to create the corpus.
            If `model.id2word` is present, this is not needed. If both are provided, the passed `dictionary` is used.
window_size : int, optional
            The size of the window to be used for coherence measures that use a boolean sliding window as their
            probability estimator. For 'u_mass' this doesn't matter.
            If None, the default window sizes are used: 'c_v' - 110, 'c_uci' - 10, 'c_npmi' - 10.
coherence : {'u_mass', 'c_v', 'c_uci', 'c_npmi'}, optional
Coherence measure to be used.
            The fastest method is 'u_mass'; 'c_uci' is also known as `c_pmi`.
            For 'u_mass' a corpus should be provided; if texts is provided, it will be converted to a corpus
            using the dictionary. For 'c_v', 'c_uci' and 'c_npmi' `texts` should be provided (`corpus` isn't needed).
topn : int, optional
Integer corresponding to the number of top words to be extracted from each topic.
processes : int, optional
Number of processes to use for probability estimation phase, any value less than 1 will be interpreted as
num_cpus - 1.
"""
if model is None and topics is None:
raise ValueError("One of model or topics has to be provided.")
elif topics is not None and dictionary is None:
raise ValueError("dictionary has to be provided if topics are to be used.")
self.keyed_vectors = keyed_vectors
if keyed_vectors is None and texts is None and corpus is None:
raise ValueError("One of texts or corpus has to be provided.")
# Check if associated dictionary is provided.
if dictionary is None:
if isinstance(model.id2word, utils.FakeDict):
raise ValueError(
"The associated dictionary should be provided with the corpus or 'id2word'"
" for topic model should be set as the associated dictionary.")
else:
self.dictionary = model.id2word
else:
self.dictionary = dictionary
# Check for correct inputs for u_mass coherence measure.
self.coherence = coherence
self.window_size = window_size
if self.window_size is None:
self.window_size = SLIDING_WINDOW_SIZES[self.coherence]
self.texts = texts
self.corpus = corpus
if coherence in BOOLEAN_DOCUMENT_BASED:
if utils.is_corpus(corpus)[0]:
self.corpus = corpus
elif self.texts is not None:
self.corpus = [self.dictionary.doc2bow(text) for text in self.texts]
else:
                raise ValueError(
                    "Either 'corpus' with 'dictionary' or 'texts' should "
                    "be provided for %s coherence." % coherence)
# Check for correct inputs for sliding window coherence measure.
elif coherence == 'c_w2v' and keyed_vectors is not None:
pass
elif coherence in SLIDING_WINDOW_BASED:
if self.texts is None:
                raise ValueError("'texts' should be provided for %s coherence." % coherence)
else:
            raise ValueError("%s coherence is not currently supported." % coherence)
self._topn = topn
self._model = model
self._accumulator = None
self._topics = None
self.topics = topics
self.processes = processes if processes >= 1 else max(1, mp.cpu_count() - 1)
@classmethod
def for_models(cls, models, dictionary, topn=20, **kwargs):
"""Initialize a CoherenceModel with estimated probabilities for all of the given models.
Use :meth:`~gensim.models.coherencemodel.CoherenceModel.for_topics` method.
Parameters
----------
models : list of :class:`~gensim.models.basemodel.BaseTopicModel`
            List of models to evaluate the coherence of; each of them should implement the
            :meth:`~gensim.models.basemodel.BaseTopicModel.get_topics` method.
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
Gensim dictionary mapping of id word.
topn : int, optional
Integer corresponding to the number of top words to be extracted from each topic.
        kwargs : object
            Additional keyword arguments, see :meth:`~gensim.models.coherencemodel.CoherenceModel.for_topics`.
Return
------
:class:`~gensim.models.coherencemodel.CoherenceModel`
CoherenceModel with estimated probabilities for all of the given models.
Example
-------
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.models.ldamodel import LdaModel
>>> from gensim.models.coherencemodel import CoherenceModel
>>>
>>> m1 = LdaModel(common_corpus, 3, common_dictionary)
>>> m2 = LdaModel(common_corpus, 5, common_dictionary)
>>>
>>> cm = CoherenceModel.for_models([m1, m2], common_dictionary, corpus=common_corpus, coherence='u_mass')
"""
topics = [cls.top_topics_as_word_lists(model, dictionary, topn) for model in models]
kwargs['dictionary'] = dictionary
kwargs['topn'] = topn
return cls.for_topics(topics, **kwargs)
@staticmethod
def top_topics_as_word_lists(model, dictionary, topn=20):
"""Get `topn` topics as list of words.
Parameters
----------
model : :class:`~gensim.models.basemodel.BaseTopicModel`
Pre-trained topic model.
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
Gensim dictionary mapping of id word.
topn : int, optional
Integer corresponding to the number of top words to be extracted from each topic.
Return
------
list of list of str
Top topics in list-of-list-of-words format.
"""
if not dictionary.id2token:
dictionary.id2token = {v: k for k, v in dictionary.token2id.items()}
str_topics = []
for topic in model.get_topics():
bestn = matutils.argsort(topic, topn=topn, reverse=True)
beststr = [dictionary.id2token[_id] for _id in bestn]
str_topics.append(beststr)
return str_topics
@classmethod
def for_topics(cls, topics_as_topn_terms, **kwargs):
"""Initialize a CoherenceModel with estimated probabilities for all of the given topics.
Parameters
----------
topics_as_topn_terms : list of list of str
Each element in the top-level list should be the list of topics for a model.
The topics for the model should be a list of top-N words, one per topic.
Return
------
:class:`~gensim.models.coherencemodel.CoherenceModel`
CoherenceModel with estimated probabilities for all of the given models.
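        Examples
        --------
        A minimal sketch with hand-picked topic term lists standing in for the topics of two models:
        .. sourcecode:: pycon
            >>> from gensim.test.utils import common_corpus, common_dictionary
            >>> from gensim.models.coherencemodel import CoherenceModel
            >>>
            >>> topics_per_model = [
            ...     [['human', 'computer', 'system', 'interface']],
            ...     [['graph', 'minors', 'trees', 'eps']],
            ... ]
            >>> cm = CoherenceModel.for_topics(
            ...     topics_per_model, dictionary=common_dictionary, corpus=common_corpus, coherence='u_mass')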
"""
if not topics_as_topn_terms:
raise ValueError("len(topics) must be > 0.")
if any(len(topic_lists) == 0 for topic_lists in topics_as_topn_terms):
raise ValueError("found empty topic listing in `topics`")
topn = 0
for topic_list in topics_as_topn_terms:
for topic in topic_list:
topn = max(topn, len(topic))
topn = min(kwargs.pop('topn', topn), topn)
super_topic = utils.flatten(topics_as_topn_terms)
logging.info(
"Number of relevant terms for all %d models: %d",
len(topics_as_topn_terms), len(super_topic))
cm = CoherenceModel(topics=[super_topic], topn=len(super_topic), **kwargs)
cm.estimate_probabilities()
cm.topn = topn
return cm
def __str__(self):
return str(self.measure)
@property
def model(self):
"""Get `self._model` field.
Return
------
:class:`~gensim.models.basemodel.BaseTopicModel`
Used model.
"""
return self._model
@model.setter
def model(self, model):
"""Set `self._model` field.
Parameters
----------
model : :class:`~gensim.models.basemodel.BaseTopicModel`
Input model.
"""
self._model = model
if model is not None:
new_topics = self._get_topics()
self._update_accumulator(new_topics)
self._topics = new_topics
@property
def topn(self):
"""Get number of top words `self._topn`.
Return
------
int
Integer corresponding to the number of top words.
"""
return self._topn
@topn.setter
def topn(self, topn):
"""Set number of top words `self._topn`.
Parameters
----------
topn : int
Number of top words.
"""
current_topic_length = len(self._topics[0])
requires_expansion = current_topic_length < topn
if self.model is not None:
self._topn = topn
if requires_expansion:
self.model = self._model # trigger topic expansion from model
else:
if requires_expansion:
raise ValueError("Model unavailable and topic sizes are less than topn=%d" % topn)
self._topn = topn # topics will be truncated in getter
@property
def measure(self):
"""Make pipeline, according to `coherence` parameter value.
Return
------
namedtuple
            Pipeline that contains the functions/methods needed to calculate coherence.
"""
return COHERENCE_MEASURES[self.coherence]
@property
def topics(self):
"""Get topics `self._topics`.
Return
------
list of list of str
Topics as list of tokens.
"""
if len(self._topics[0]) > self._topn:
return [topic[:self._topn] for topic in self._topics]
else:
return self._topics
@topics.setter
def topics(self, topics):
"""Set topics `self._topics`.
Parameters
----------
topics : list of list of str
Topics.
"""
if topics is not None:
new_topics = []
for topic in topics:
topic_token_ids = self._ensure_elements_are_ids(topic)
new_topics.append(topic_token_ids)
if self.model is not None:
logger.warning(
"The currently set model '%s' may be inconsistent with the newly set topics",
self.model)
elif self.model is not None:
new_topics = self._get_topics()
logger.debug("Setting topics to those of the model: %s", self.model)
else:
new_topics = None
self._update_accumulator(new_topics)
self._topics = new_topics
def _ensure_elements_are_ids(self, topic):
ids_from_tokens = [self.dictionary.token2id[t] for t in topic if t in self.dictionary.token2id]
ids_from_ids = [i for i in topic if i in self.dictionary]
if len(ids_from_tokens) > len(ids_from_ids):
return np.array(ids_from_tokens)
elif len(ids_from_ids) > len(ids_from_tokens):
return np.array(ids_from_ids)
else:
raise ValueError('unable to interpret topic as either a list of tokens or a list of ids')
def _update_accumulator(self, new_topics):
if self._relevant_ids_will_differ(new_topics):
logger.debug("Wiping cached accumulator since it does not contain all relevant ids.")
self._accumulator = None
def _relevant_ids_will_differ(self, new_topics):
if self._accumulator is None or not self._topics_differ(new_topics):
return False
new_set = unique_ids_from_segments(self.measure.seg(new_topics))
return not self._accumulator.relevant_ids.issuperset(new_set)
def _topics_differ(self, new_topics):
return (new_topics is not None
and self._topics is not None
and not np.array_equal(new_topics, self._topics))
def _get_topics(self):
"""Internal helper function to return topics from a trained topic model."""
return self._get_topics_from_model(self.model, self.topn)
@staticmethod
def _get_topics_from_model(model, topn):
"""Internal helper function to return topics from a trained topic model.
Parameters
----------
model : :class:`~gensim.models.basemodel.BaseTopicModel`
Pre-trained topic model.
topn : int
Integer corresponding to the number of top words.
Return
------
list of :class:`numpy.ndarray`
Topics matrix
"""
try:
return [
matutils.argsort(topic, topn=topn, reverse=True) for topic in
model.get_topics()
]
except AttributeError:
raise ValueError(
"This topic model is not currently supported. Supported topic models"
" should implement the `get_topics` method.")
def segment_topics(self):
"""Segment topic, alias for `self.measure.seg(self.topics)`.
Return
------
list of list of pair
Segmented topics.
"""
return self.measure.seg(self.topics)
def estimate_probabilities(self, segmented_topics=None):
"""Accumulate word occurrences and co-occurrences from texts or corpus using the optimal method for the chosen
coherence metric.
Notes
-----
This operation may take quite some time for the sliding window based coherence methods.
Parameters
----------
segmented_topics : list of list of pair, optional
Segmented topics, typically produced by :meth:`~gensim.models.coherencemodel.CoherenceModel.segment_topics`.
Return
------
:class:`~gensim.topic_coherence.text_analysis.CorpusAccumulator`
Corpus accumulator.
"""
if segmented_topics is None:
segmented_topics = self.segment_topics()
if self.coherence in BOOLEAN_DOCUMENT_BASED:
self._accumulator = self.measure.prob(self.corpus, segmented_topics)
else:
kwargs = dict(
texts=self.texts, segmented_topics=segmented_topics,
dictionary=self.dictionary, window_size=self.window_size,
processes=self.processes)
if self.coherence == 'c_w2v':
kwargs['model'] = self.keyed_vectors
self._accumulator = self.measure.prob(**kwargs)
return self._accumulator
def get_coherence_per_topic(self, segmented_topics=None, with_std=False, with_support=False):
"""Get list of coherence values for each topic based on pipeline parameters.
Parameters
----------
segmented_topics : list of list of (int, number)
Topics.
with_std : bool, optional
True to also include standard deviation across topic segment sets in addition to the mean coherence
for each topic.
with_support : bool, optional
            True to also include support across topic segments. The support is defined as the number of pairwise
            similarity comparisons that were used to compute the overall topic coherence.
Return
------
list of float
Sequence of similarity measure for each topic.
"""
measure = self.measure
if segmented_topics is None:
segmented_topics = measure.seg(self.topics)
if self._accumulator is None:
self.estimate_probabilities(segmented_topics)
kwargs = dict(with_std=with_std, with_support=with_support)
if self.coherence in BOOLEAN_DOCUMENT_BASED or self.coherence == 'c_w2v':
pass
elif self.coherence == 'c_v':
kwargs['topics'] = self.topics
kwargs['measure'] = 'nlr'
kwargs['gamma'] = 1
else:
kwargs['normalize'] = (self.coherence == 'c_npmi')
return measure.conf(segmented_topics, self._accumulator, **kwargs)
def aggregate_measures(self, topic_coherences):
"""Aggregate the individual topic coherence measures using the pipeline's aggregation function.
Use `self.measure.aggr(topic_coherences)`.
Parameters
----------
topic_coherences : list of float
List of calculated confirmation measure on each set in the segmented topics.
Returns
-------
float
Arithmetic mean of all the values contained in confirmation measures.
"""
return self.measure.aggr(topic_coherences)
def get_coherence(self):
"""Get coherence value based on pipeline parameters.
Returns
-------
float
Value of coherence.
"""
confirmed_measures = self.get_coherence_per_topic()
return self.aggregate_measures(confirmed_measures)
def compare_models(self, models):
"""Compare topic models by coherence value.
Parameters
----------
models : :class:`~gensim.models.basemodel.BaseTopicModel`
Sequence of topic models.
Returns
-------
list of (float, float)
Sequence of pairs of average topic coherence and average coherence.
"""
model_topics = [self._get_topics_from_model(model, self.topn) for model in models]
return self.compare_model_topics(model_topics)
def compare_model_topics(self, model_topics):
"""Perform the coherence evaluation for each of the models.
Parameters
----------
        model_topics : list of list of str
            For each model, a list of its topics, each topic given as a list of its top words.
Returns
-------
list of (float, float)
Sequence of pairs of average topic coherence and average coherence.
Notes
-----
This first precomputes the probabilities once, then evaluates coherence for each model.
Since we have already precomputed the probabilities, this simply involves using the accumulated stats in the
:class:`~gensim.models.coherencemodel.CoherenceModel` to perform the evaluations, which should be pretty quick.
"""
orig_topics = self._topics
orig_topn = self.topn
try:
coherences = self._compare_model_topics(model_topics)
finally:
self.topics = orig_topics
self.topn = orig_topn
return coherences
def _compare_model_topics(self, model_topics):
"""Get average topic and model coherences.
Parameters
----------
model_topics : list of list of str
Topics from the model.
Returns
-------
list of (float, float)
Sequence of pairs of average topic coherence and average coherence.
"""
coherences = []
last_topn_value = min(self.topn - 1, 4)
topn_grid = list(range(self.topn, last_topn_value, -5))
for model_num, topics in enumerate(model_topics):
self.topics = topics
            # We evaluate at various values of N and average them. This is more robust,
            # according to: http://people.eng.unimelb.edu.au/tbaldwin/pubs/naacl2016.pdf
coherence_at_n = {}
for n in topn_grid:
self.topn = n
topic_coherences = self.get_coherence_per_topic()
# Let's record the coherences for each topic, as well as the aggregated
# coherence across all of the topics.
# Some of them may be nan (if all words were OOV), so do mean value imputation.
filled_coherences = np.array(topic_coherences)
filled_coherences[np.isnan(filled_coherences)] = np.nanmean(filled_coherences)
coherence_at_n[n] = (topic_coherences, self.aggregate_measures(filled_coherences))
topic_coherences, avg_coherences = zip(*coherence_at_n.values())
avg_topic_coherences = np.vstack(topic_coherences).mean(0)
model_coherence = np.mean(avg_coherences)
logging.info("Avg coherence for model %d: %.5f" % (model_num, model_coherence))
coherences.append((avg_topic_coherences, model_coherence))
return coherences
| 26,161 | Python | .py | 579 | 35.386874 | 120 | 0.626896 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) | 7,123 | __init__.py | piskvorky_gensim/gensim/models/__init__.py |
"""
This package contains algorithms for extracting document representations from their raw
bag-of-word counts.
"""
# bring model classes directly into package namespace, to save some typing
from .coherencemodel import CoherenceModel # noqa:F401
from .hdpmodel import HdpModel # noqa:F401
from .ldamodel import LdaModel # noqa:F401
from .lsimodel import LsiModel # noqa:F401
from .tfidfmodel import TfidfModel # noqa:F401
from .bm25model import OkapiBM25Model, LuceneBM25Model, AtireBM25Model # noqa:F401
from .rpmodel import RpModel # noqa:F401
from .logentropy_model import LogEntropyModel # noqa:F401
from .word2vec import Word2Vec, FAST_VERSION # noqa:F401
from .doc2vec import Doc2Vec # noqa:F401
from .keyedvectors import KeyedVectors # noqa:F401
from .ldamulticore import LdaMulticore # noqa:F401
from .phrases import Phrases # noqa:F401
from .normmodel import NormModel # noqa:F401
from .atmodel import AuthorTopicModel # noqa:F401
from .ldaseqmodel import LdaSeqModel # noqa:F401
from .fasttext import FastText # noqa:F401
from .translation_matrix import TranslationMatrix, BackMappingTranslationMatrix # noqa:F401
from .ensemblelda import EnsembleLda # noqa:F401
from .nmf import Nmf # noqa:F401
from gensim import interfaces, utils
class VocabTransform(interfaces.TransformationABC):
"""
Remap feature ids to new values.
Given a mapping between old ids and new ids (some old ids may be missing = these
features are to be discarded), this will wrap a corpus so that iterating over
`VocabTransform[corpus]` returns the same vectors but with the new ids.
Old features that have no counterpart in the new ids are discarded. This
can be used to filter vocabulary of a corpus "online":
.. sourcecode:: pycon
>>> old2new = {oldid: newid for newid, oldid in enumerate(ids_you_want_to_keep)}
>>> vt = VocabTransform(old2new)
        >>> for vec_with_new_ids in vt[corpus_with_old_ids]:
        ...     pass
"""
def __init__(self, old2new, id2token=None):
self.old2new = old2new
self.id2token = id2token
def __getitem__(self, bow):
"""
Return representation with the ids transformed.
"""
# if the input vector is in fact a corpus, return a transformed corpus as a result
is_corpus, bow = utils.is_corpus(bow)
if is_corpus:
return self._apply(bow)
return sorted((self.old2new[oldid], weight) for oldid, weight in bow if oldid in self.old2new)
| 2,519 | Python | .py | 52 | 44.134615 | 102 | 0.739308 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) | 7,124 | atmodel.py | piskvorky_gensim/gensim/models/atmodel.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <radimrehurek@seznam.cz>
# Copyright (C) 2016 Olavur Mortensen <olavurmortensen@gmail.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Author-topic model.
This module trains the author-topic model on documents and corresponding author-document dictionaries.
The training is online and is constant in memory w.r.t. the number of documents.
The model is *not* constant in memory w.r.t. the number of authors.
The model can be updated with additional documents after training has been completed. It is
also possible to continue training on the existing data.
The model is closely related to :class:`~gensim.models.ldamodel.LdaModel`.
The :class:`~gensim.models.atmodel.AuthorTopicModel` class inherits :class:`~gensim.models.ldamodel.LdaModel`,
and its usage is thus similar.
The model was introduced by `Rosen-Zvi and co-authors: "The Author-Topic Model for Authors and Documents"
<https://arxiv.org/abs/1207.4169>`_. The model correlates the authorship information with the topics to give a better
insight on the subject knowledge of an author.
.. _'Online Learning for LDA' by Hoffman et al.: online-lda_
.. _online-lda: https://papers.neurips.cc/paper/2010/file/71f6278d140af599e06ad9bf1ba03cb0-Paper.pdf
Example
-------
.. sourcecode:: pycon
>>> from gensim.models import AuthorTopicModel
>>> from gensim.corpora import mmcorpus
>>> from gensim.test.utils import common_dictionary, datapath, temporary_file
>>> author2doc = {
... 'john': [0, 1, 2, 3, 4, 5, 6],
... 'jane': [2, 3, 4, 5, 6, 7, 8],
... 'jack': [0, 2, 4, 6, 8]
... }
>>>
>>> corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
>>>
>>> with temporary_file("serialized") as s_path:
... model = AuthorTopicModel(
... corpus, author2doc=author2doc, id2word=common_dictionary, num_topics=4,
... serialized=True, serialization_path=s_path
... )
...
... model.update(corpus, author2doc) # update the author-topic model with additional documents
>>>
>>> # construct vectors for authors
>>> author_vecs = [model.get_author_topics(author) for author in model.id2author.values()]
"""
# TODO: this class inherits LdaModel and overwrites some methods. There is some code
# duplication still, and a refactor could be made to avoid this. Comments with "TODOs"
# are included in the code where this is the case, for example in the log_perplexity
# and do_estep methods.
import logging
from itertools import chain
from copy import deepcopy
from shutil import copyfile
from os.path import isfile
from os import remove
import numpy as np # for arrays, array broadcasting etc.
from scipy.special import gammaln # gamma function utils
from gensim import utils
from gensim.models import LdaModel
from gensim.models.ldamodel import LdaState
from gensim.matutils import dirichlet_expectation, mean_absolute_difference
from gensim.corpora import MmCorpus
logger = logging.getLogger(__name__)
class AuthorTopicState(LdaState):
"""Encapsulate information for computation of :class:`~gensim.models.atmodel.AuthorTopicModel`."""
def __init__(self, eta, lambda_shape, gamma_shape):
"""
Parameters
----------
eta: numpy.ndarray
Dirichlet topic parameter for sparsity.
        lambda_shape: (int, int)
            Shape of the topic-word sufficient statistics matrix, i.e. (num_topics, num_terms).
        gamma_shape: int
            Shape of the gamma (author-topic) variational parameter matrix.
"""
self.eta = eta
self.sstats = np.zeros(lambda_shape)
self.gamma = np.zeros(gamma_shape)
self.numdocs = 0
self.dtype = np.float64 # To be compatible with LdaState
def construct_doc2author(corpus, author2doc):
"""Create a mapping from document IDs to author IDs.
Parameters
----------
corpus: iterable of list of (int, float)
Corpus in BoW format.
author2doc: dict of (str, list of int)
Mapping of authors to documents.
Returns
-------
dict of (int, list of str)
Document to Author mapping.
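    Examples
    --------
    A small illustrative sketch (only the length of `corpus` matters here, its contents are ignored):
    .. sourcecode:: pycon
        >>> corpus = [[(0, 1)], [(1, 1)], [(0, 2)]]
        >>> author2doc = {'alice': [0, 2], 'bob': [1]}
        >>> construct_doc2author(corpus, author2doc)
        {0: ['alice'], 1: ['bob'], 2: ['alice']}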
"""
doc2author = {}
for d, _ in enumerate(corpus):
author_ids = []
for a, a_doc_ids in author2doc.items():
if d in a_doc_ids:
author_ids.append(a)
doc2author[d] = author_ids
return doc2author
def construct_author2doc(doc2author):
"""Make a mapping from author IDs to document IDs.
Parameters
----------
doc2author: dict of (int, list of str)
Mapping of document id to authors.
Returns
-------
dict of (str, list of int)
Mapping of authors to document ids.
"""
# First get a set of all authors.
authors_ids = set()
for d, a_doc_ids in doc2author.items():
for a in a_doc_ids:
authors_ids.add(a)
# Now construct the dictionary.
author2doc = {}
for a in authors_ids:
author2doc[a] = []
for d, a_ids in doc2author.items():
if a in a_ids:
author2doc[a].append(d)
return author2doc
class AuthorTopicModel(LdaModel):
"""The constructor estimates the author-topic model parameters based on a training corpus."""
def __init__(self, corpus=None, num_topics=100, id2word=None, author2doc=None, doc2author=None,
chunksize=2000, passes=1, iterations=50, decay=0.5, offset=1.0,
alpha='symmetric', eta='symmetric', update_every=1, eval_every=10,
gamma_threshold=0.001, serialized=False, serialization_path=None,
minimum_probability=0.01, random_state=None):
"""
Parameters
----------
corpus : iterable of list of (int, float), optional
Corpus in BoW format
num_topics : int, optional
Number of topics to be extracted from the training corpus.
id2word : :class:`~gensim.corpora.dictionary.Dictionary`, optional
A mapping from word ids (integers) to words (strings).
author2doc : dict of (str, list of int), optional
A dictionary where keys are the names of authors and values are lists of document IDs that the author
contributes to.
doc2author : dict of (int, list of str), optional
A dictionary where the keys are document IDs and the values are lists of author names.
chunksize : int, optional
Controls the size of the mini-batches.
passes : int, optional
Number of times the model makes a pass over the entire training data.
iterations : int, optional
Maximum number of times the model loops over each document.
decay : float, optional
A number between (0.5, 1] to weight what percentage of the previous lambda value is forgotten
when each new document is examined. Corresponds to :math:`\\kappa` from
`'Online Learning for LDA' by Hoffman et al.`_
offset : float, optional
Hyper-parameter that controls how much we slow down the first few iterations of training.
Corresponds to :math:`\\tau_0` from `'Online Learning for LDA' by Hoffman et al.`_
alpha : {float, numpy.ndarray of float, list of float, str}, optional
A-priori belief on document-topic distribution, this can be:
* scalar for a symmetric prior over document-topic distribution,
* 1D array of length equal to num_topics to denote an asymmetric user defined prior for each topic.
Alternatively default prior selecting strategies can be employed by supplying a string:
* 'symmetric': (default) Uses a fixed symmetric prior of `1.0 / num_topics`,
* 'asymmetric': Uses a fixed normalized asymmetric prior of `1.0 / (topic_index + sqrt(num_topics))`,
* 'auto': Learns an asymmetric prior from the corpus (not available if `distributed==True`).
eta : {float, numpy.ndarray of float, list of float, str}, optional
A-priori belief on topic-word distribution, this can be:
* scalar for a symmetric prior over topic-word distribution,
* 1D array of length equal to num_words to denote an asymmetric user defined prior for each word,
* matrix of shape (num_topics, num_words) to assign a probability for each word-topic combination.
Alternatively default prior selecting strategies can be employed by supplying a string:
* 'symmetric': (default) Uses a fixed symmetric prior of `1.0 / num_topics`,
* 'auto': Learns an asymmetric prior from the corpus.
update_every : int, optional
Number of chunks to process before each M-step update of the model (0 for batch learning).
eval_every : int, optional
Number of chunks to process between each estimation of log perplexity.
gamma_threshold : float, optional
Minimum change in the gamma parameters required to keep iterating; per-document inference
stops once the mean change drops below this threshold.
serialized : bool, optional
Indicates whether the input corpora to the model are simple lists
or saved to the hard-drive.
serialization_path : str, optional
Must be set to a filepath, if `serialized = True` is used.
minimum_probability : float, optional
Controls filtering the topics returned for a document (bow).
random_state : {int, numpy.random.RandomState}, optional
Set the state of the random number generator inside the author-topic model.
"""
# NOTE: this doesn't call constructor of a base class, but duplicates most of this code
# so we have to set dtype to float64 default here
self.dtype = np.float64
# NOTE: as distributed version of this model is not implemented, "distributed" is set to false. Some of the
# infrastructure to implement a distributed author-topic model is already in place,
# such as the AuthorTopicState.
distributed = False
self.dispatcher = None
self.numworkers = 1
self.id2word = id2word
if corpus is None and self.id2word is None:
raise ValueError(
"at least one of corpus/id2word must be specified, to establish input space dimensionality"
)
if self.id2word is None:
logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
elif len(self.id2word) > 0:
self.num_terms = 1 + max(self.id2word.keys())
else:
self.num_terms = 0
if self.num_terms == 0:
raise ValueError("cannot compute the author-topic model over an empty collection (no terms)")
logger.info('Vocabulary consists of %d words.', self.num_terms)
self.author2doc = {}
self.doc2author = {}
self.distributed = distributed
self.num_topics = num_topics
self.num_authors = 0
self.chunksize = chunksize
self.decay = decay
self.offset = offset
self.minimum_probability = minimum_probability
self.num_updates = 0
self.total_docs = 0
self.passes = passes
self.update_every = update_every
self.eval_every = eval_every
self.author2id = {}
self.id2author = {}
self.serialized = serialized
if serialized and not serialization_path:
raise ValueError(
"If serialized corpora are used, a the path to a folder "
"where the corpus should be saved must be provided (serialized_path)."
)
if serialized and serialization_path:
assert not isfile(serialization_path), \
"A file already exists at the serialization_path path; " \
"choose a different serialization_path, or delete the file."
self.serialization_path = serialization_path
# Initialize an empty self.corpus.
self.init_empty_corpus()
self.alpha, self.optimize_alpha = self.init_dir_prior(alpha, 'alpha')
assert self.alpha.shape == (self.num_topics,), \
"Invalid alpha shape. Got shape %s, but expected (%d, )" % (str(self.alpha.shape), self.num_topics)
self.eta, self.optimize_eta = self.init_dir_prior(eta, 'eta')
assert (self.eta.shape == (self.num_terms,) or self.eta.shape == (self.num_topics, self.num_terms)), (
"Invalid eta shape. Got shape %s, but expected (%d, 1) or (%d, %d)" %
(str(self.eta.shape), self.num_terms, self.num_topics, self.num_terms)
)
self.random_state = utils.get_random_state(random_state)
# VB constants
self.iterations = iterations
self.gamma_threshold = gamma_threshold
# Initialize the variational distributions q(beta|lambda) and q(theta|gamma)
self.state = AuthorTopicState(self.eta, (self.num_topics, self.num_terms), (self.num_authors, self.num_topics))
self.state.sstats = self.random_state.gamma(100., 1. / 100., (self.num_topics, self.num_terms))
self.expElogbeta = np.exp(dirichlet_expectation(self.state.sstats))
# if a training corpus was provided, start estimating the model right away
if corpus is not None and (author2doc is not None or doc2author is not None):
use_numpy = self.dispatcher is not None
self.update(corpus, author2doc, doc2author, chunks_as_numpy=use_numpy)
def __str__(self):
"""Get a string representation of object.
Returns
-------
str
String representation of current instance.
"""
return "%s<num_terms=%s, num_topics=%s, num_authors=%s, decay=%s, chunksize=%s>" % \
(self.__class__.__name__, self.num_terms, self.num_topics, self.num_authors, self.decay, self.chunksize)
def init_empty_corpus(self):
"""Initialize an empty corpus.
If the corpora are to be treated as lists, simply initialize an empty list.
If serialization is used, initialize an empty corpus using :class:`~gensim.corpora.mmcorpus.MmCorpus`.
"""
if self.serialized:
# Initialize the corpus as a serialized empty list.
# This corpus will be extended in self.update.
MmCorpus.serialize(self.serialization_path, []) # Serialize empty corpus.
self.corpus = MmCorpus(self.serialization_path) # Store serialized corpus object in self.corpus.
else:
# All input corpora are assumed to just be lists.
self.corpus = []
def extend_corpus(self, corpus):
"""Add new documents from `corpus` to `self.corpus`.
If serialization is used, then the entire corpus (`self.corpus`) is re-serialized and the new documents
are added in the process. If serialization is not used, the corpus, as a list of documents, is simply extended.
Parameters
----------
corpus : iterable of list of (int, float)
Corpus in BoW format
Raises
------
AssertionError
If `serialized == False` and the corpus isn't a list.
"""
if self.serialized:
# Re-serialize the entire corpus while appending the new documents.
if isinstance(corpus, MmCorpus):
# Check that we are not attempting to overwrite the serialized corpus.
assert self.corpus.input != corpus.input, \
'Input corpus cannot have the same file path as the model corpus (serialization_path).'
corpus_chain = chain(self.corpus, corpus) # A generator with the old and new documents.
# Make a temporary copy of the file where the corpus is serialized.
copyfile(self.serialization_path, self.serialization_path + '.tmp')
self.corpus.input = self.serialization_path + '.tmp' # Point the old corpus at this temporary file.
# Re-serialize the old corpus, and extend it with the new corpus.
MmCorpus.serialize(self.serialization_path, corpus_chain)
self.corpus = MmCorpus(self.serialization_path) # Store the new serialized corpus object in self.corpus.
remove(self.serialization_path + '.tmp') # Remove the temporary file again.
else:
# self.corpus and corpus are just lists, just extend the list.
# First check that corpus is actually a list.
assert isinstance(corpus, list), "If serialized == False, all input corpora must be lists."
self.corpus.extend(corpus)
def compute_phinorm(self, expElogthetad, expElogbetad):
r"""Efficiently computes the normalizing factor in phi.
Parameters
----------
expElogthetad: numpy.ndarray
Value of variational distribution :math:`q(\theta|\gamma)`.
expElogbetad: numpy.ndarray
Value of variational distribution :math:`q(\beta|\lambda)`.
Returns
-------
numpy.ndarray
The normalizing factor phi, one value per word id in the document.
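Example
-------
A toy sketch of the same computation on made-up values (2 authors, 2 topics, 3 word ids), shown inline rather than through a trained model.
.. sourcecode:: pycon
>>> import numpy as np
>>> expElogthetad = np.array([[0.1, 0.2], [0.3, 0.4]])  # authors x topics
>>> expElogbetad = np.array([[0.5, 0.6, 0.7], [0.8, 0.9, 1.0]])  # topics x word ids
>>> phinorm = expElogthetad.sum(axis=0).dot(expElogbetad) + 1e-100  # one normalizer per word id
>>> phinorm.shape
(3,)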
"""
expElogtheta_sum = expElogthetad.sum(axis=0)
phinorm = expElogtheta_sum.dot(expElogbetad) + 1e-100
return phinorm
def inference(self, chunk, author2doc, doc2author, rhot, collect_sstats=False, chunk_doc_idx=None):
"""Give a `chunk` of sparse document vectors, update gamma for each author corresponding to the `chuck`.
Warnings
--------
The whole input chunk of documents is assumed to fit in RAM; chunking of a large corpus must be done earlier
in the pipeline.
Avoids computing the `phi` variational parameter directly using the
optimization presented in `Lee, Seung: "Algorithms for non-negative matrix factorization", NIPS 2001
<https://papers.nips.cc/paper/1861-algorithms-for-non-negative-matrix-factorization.pdf>`_.
Parameters
----------
chunk : iterable of list of (int, float)
Corpus in BoW format.
author2doc : dict of (str, list of int), optional
A dictionary where keys are the names of authors and values are lists of document IDs that the author
contributes to.
doc2author : dict of (int, list of str), optional
A dictionary where the keys are document IDs and the values are lists of author names.
rhot : float
Value of rho for conducting inference on documents.
collect_sstats : boolean, optional
If True - collect sufficient statistics needed to update the model's topic-word distributions, and return
`(gamma_chunk, sstats)`. Otherwise, return `(gamma_chunk, None)`. `gamma_chunk` is of shape
`len(chunk_authors) x self.num_topics`, where `chunk_authors` is the set of authors appearing in the documents of
the current chunk.
chunk_doc_idx : numpy.ndarray, optional
Indices of the chunk's documents within the full training corpus.
Returns
-------
(numpy.ndarray, numpy.ndarray)
gamma_chunk and sstats (if `collect_sstats == True`, otherwise - None)
"""
try:
len(chunk)
except TypeError:
# convert iterators/generators to plain list, so we have len() etc.
chunk = list(chunk)
if len(chunk) > 1:
logger.debug("performing inference on a chunk of %i documents", len(chunk))
# Initialize the variational distribution q(theta|gamma) for the chunk
if collect_sstats:
sstats = np.zeros_like(self.expElogbeta)
else:
sstats = None
converged = 0
# Stack all the computed gammas into this output array.
gamma_chunk = np.zeros((0, self.num_topics))
# Now, for each document d update gamma and phi w.r.t. all authors in those documents.
for d, doc in enumerate(chunk):
if chunk_doc_idx is not None:
doc_no = chunk_doc_idx[d]
else:
doc_no = d
# Get the IDs and counts of all the words in the current document.
# TODO: this is duplication of code in LdaModel. Refactor.
if doc and not isinstance(doc[0][0], (int, np.integer,)):
# make sure the term IDs are ints, otherwise np will get upset
ids = [int(idx) for idx, _ in doc]
else:
ids = [idx for idx, _ in doc]
ids = np.array(ids, dtype=int)
cts = np.fromiter((cnt for _, cnt in doc), dtype=int, count=len(doc))
# Get all authors in current document, and convert the author names to integer IDs.
authors_d = np.fromiter((self.author2id[a] for a in self.doc2author[doc_no]), dtype=int)
gammad = self.state.gamma[authors_d, :] # gamma of document d before update.
tilde_gamma = gammad.copy() # gamma that will be updated.
# Compute the expectation of the log of the Dirichlet parameters theta and beta.
Elogthetad = dirichlet_expectation(tilde_gamma)
expElogthetad = np.exp(Elogthetad)
expElogbetad = self.expElogbeta[:, ids]
# Compute the normalizing constant of phi for the current document.
phinorm = self.compute_phinorm(expElogthetad, expElogbetad)
# Iterate between gamma and phi until convergence
for _ in range(self.iterations):
lastgamma = tilde_gamma.copy()
# Update gamma.
# phi is computed implicitly below,
dot = np.dot(cts / phinorm, expElogbetad.T)
for ai, a in enumerate(authors_d):
tilde_gamma[ai, :] = (
self.alpha
+ len(self.author2doc[self.id2author[a]]) * expElogthetad[ai, :] * dot
)
# Update gamma.
# Interpolation between document d's "local" gamma (tilde_gamma),
# and "global" gamma (gammad).
tilde_gamma = (1 - rhot) * gammad + rhot * tilde_gamma
# Update Elogtheta and Elogbeta, since gamma and lambda have been updated.
Elogthetad = dirichlet_expectation(tilde_gamma)
expElogthetad = np.exp(Elogthetad)
# Update the normalizing constant in phi.
phinorm = self.compute_phinorm(expElogthetad, expElogbetad)
# Check for convergence.
# Criterion is mean change in "local" gamma.
meanchange_gamma = mean_absolute_difference(tilde_gamma.ravel(), lastgamma.ravel())
gamma_condition = meanchange_gamma < self.gamma_threshold
if gamma_condition:
converged += 1
break
# End of iterations loop.
# Store the updated gammas in the model state.
self.state.gamma[authors_d, :] = tilde_gamma
# Stack the new gammas into the output array.
gamma_chunk = np.vstack([gamma_chunk, tilde_gamma])
if collect_sstats:
# Contribution of document d to the expected sufficient
# statistics for the M step.
expElogtheta_sum_a = expElogthetad.sum(axis=0)
sstats[:, ids] += np.outer(expElogtheta_sum_a.T, cts / phinorm)
if len(chunk) > 1:
logger.debug(
"%i/%i documents converged within %i iterations",
converged, len(chunk), self.iterations
)
if collect_sstats:
# This step finishes computing the sufficient statistics for the
# M step, so that
# sstats[k, w] = \sum_d n_{dw} * \sum_a phi_{dwak}
# = \sum_d n_{dw} * exp{Elogtheta_{ak} + Elogbeta_{kw}} / phinorm_{dw}.
sstats *= self.expElogbeta
return gamma_chunk, sstats
def do_estep(self, chunk, author2doc, doc2author, rhot, state=None, chunk_doc_idx=None):
"""Performs inference (E-step) on a chunk of documents, and accumulate the collected sufficient statistics.
Parameters
----------
chunk : iterable of list of (int, float)
Corpus in BoW format.
author2doc : dict of (str, list of int), optional
A dictionary where keys are the names of authors and values are lists of document IDs that the author
contributes to.
doc2author : dict of (int, list of str), optional
A dictionary where the keys are document IDs and the values are lists of author names.
rhot : float
Value of rho for conducting inference on documents.
state : :class:`~gensim.models.atmodel.AuthorTopicState`, optional
The state into which the collected sufficient statistics are accumulated; defaults to `self.state`.
chunk_doc_idx : numpy.ndarray, optional
Indices of the chunk's documents within the full training corpus.
Returns
-------
numpy.ndarray
Gamma parameters (author-topic distributions) inferred for the chunk.
"""
# TODO: this method is somewhat similar to the one in LdaModel. Refactor if possible.
if state is None:
state = self.state
gamma, sstats = self.inference(
chunk, author2doc, doc2author, rhot,
collect_sstats=True, chunk_doc_idx=chunk_doc_idx
)
state.sstats += sstats
state.numdocs += len(chunk)
return gamma
def log_perplexity(self, chunk, chunk_doc_idx=None, total_docs=None):
"""Calculate per-word likelihood bound, using the `chunk` of documents as evaluation corpus.
Parameters
----------
chunk : iterable of list of (int, float)
Corpus in BoW format.
chunk_doc_idx : numpy.ndarray, optional
Indices of the chunk's documents within the full training corpus.
total_docs : int, optional
Initializes the value for total number of documents.
Returns
-------
float
Value of per-word likelihood bound.
"""
# TODO: This method is very similar to the one in LdaModel. Refactor.
if total_docs is None:
total_docs = len(chunk)
corpus_words = sum(cnt for document in chunk for _, cnt in document)
subsample_ratio = 1.0 * total_docs / len(chunk)
perwordbound = self.bound(chunk, chunk_doc_idx, subsample_ratio=subsample_ratio) / \
(subsample_ratio * corpus_words)
logger.info(
"%.3f per-word bound, %.1f perplexity estimate based on a corpus of %i documents with %i words",
perwordbound, np.exp2(-perwordbound), len(chunk), corpus_words
)
return perwordbound
def update(self, corpus=None, author2doc=None, doc2author=None, chunksize=None, decay=None, offset=None,
passes=None, update_every=None, eval_every=None, iterations=None,
gamma_threshold=None, chunks_as_numpy=False):
"""Train the model with new documents, by EM-iterating over `corpus` until the topics converge (or until the
maximum number of allowed iterations is reached).
Notes
-----
This update also supports updating an already trained model (`self`) with new documents from `corpus`;
the two models are then merged in proportion to the number of old vs. new documents.
This feature is still experimental for non-stationary input streams.
For stationary input (no topic drift in new documents), on the other hand, this equals the
online update of `'Online Learning for LDA' by Hoffman et al.`_
and is guaranteed to converge for any `decay` in (0.5, 1]. Additionally, for smaller corpus sizes, an
increasing `offset` may be beneficial (see Table 1 in the same paper).
If update is called with authors that already exist in the model, it will resume training on not only new
documents for that author, but also the previously seen documents. This is necessary for those authors' topic
distributions to converge.
Every time `update(corpus, author2doc)` is called, the new documents are appended to all the previously seen
documents, and author2doc is combined with the previously seen authors.
To resume training on all the data seen by the model, simply call
:meth:`~gensim.models.atmodel.AuthorTopicModel.update`.
It is not possible to add new authors to existing documents, as all documents in `corpus` are assumed to be
new documents.
Parameters
----------
corpus : iterable of list of (int, float)
The corpus in BoW format.
author2doc : dict of (str, list of int), optional
A dictionary where keys are the names of authors and values are lists of document IDs that the author
contributes to.
doc2author : dict of (int, list of str), optional
A dictionary where the keys are document IDs and the values are lists of author names.
chunksize : int, optional
Controls the size of the mini-batches.
decay : float, optional
A number between (0.5, 1] to weight what percentage of the previous lambda value is forgotten
when each new document is examined. Corresponds to :math:`\\kappa` from
`'Online Learning for LDA' by Hoffman et al.`_
offset : float, optional
Hyper-parameter that controls how much we slow down the first few iterations of training.
Corresponds to :math:`\\tau_0` from `'Online Learning for LDA' by Hoffman et al.`_
passes : int, optional
Number of times the model makes a pass over the entire training data.
update_every : int, optional
Number of chunks to process before each M-step update of the model (0 for batch learning).
eval_every : int, optional
Number of chunks to process between each estimation of log perplexity.
iterations : int, optional
Maximum number of times the model loops over each document
gamma_threshold : float, optional
Minimum change in the gamma parameters required to keep iterating; per-document inference
stops once the mean change drops below this threshold.
chunks_as_numpy : bool, optional
Whether each chunk passed to :meth:`~gensim.models.atmodel.AuthorTopicModel.inference` should be a numpy
array or not. Numpy can in some settings turn the term IDs into floats; these will be converted back into
integers in inference, which incurs a performance hit. For distributed computing (not currently supported)
it may be desirable to keep the chunks as numpy arrays.
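Example
-------
A hedged sketch, assuming `model` is an :class:`AuthorTopicModel` trained as in the class-level example above; it appends one new document by a new author 'jill' and resumes training.
.. sourcecode:: pycon
>>> new_corpus = [[(0, 1), (3, 2)]]  # one new document in BoW format
>>> model.update(new_corpus, author2doc={'jill': [0]})  # document index 0 refers to `new_corpus`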
"""
# use parameters given in constructor, unless user explicitly overrode them
if decay is None:
decay = self.decay
if offset is None:
offset = self.offset
if passes is None:
passes = self.passes
if update_every is None:
update_every = self.update_every
if eval_every is None:
eval_every = self.eval_every
if iterations is None:
iterations = self.iterations
if gamma_threshold is None:
gamma_threshold = self.gamma_threshold
# TODO: if deepcopy is not used here, something goes wrong. When unit tests are run (specifically "testPasses"),
# the process simply gets killed.
author2doc = deepcopy(author2doc)
doc2author = deepcopy(doc2author)
# TODO: it is not possible to add new authors to an existing document (all input documents are treated
# as completely new documents). Perhaps this functionality could be implemented.
# If it's absolutely necessary, the user can delete the documents that have new authors, and call update
# on them with the new and old authors.
if corpus is None:
# Just keep training on the already available data.
# Assumes self.update() has been called before with input documents and corresponding authors.
assert self.total_docs > 0, 'update() was called with no documents to train on.'
train_corpus_idx = [d for d in range(self.total_docs)]
num_input_authors = len(self.author2doc)
else:
if doc2author is None and author2doc is None:
raise ValueError(
'at least one of author2doc/doc2author must be specified, to establish input space dimensionality'
)
# If either doc2author or author2doc is missing, construct them from the other.
if doc2author is None:
doc2author = construct_doc2author(corpus, author2doc)
elif author2doc is None:
author2doc = construct_author2doc(doc2author)
# Number of authors that need to be updated.
num_input_authors = len(author2doc)
try:
len_input_corpus = len(corpus)
except TypeError:
logger.warning("input corpus stream has no len(); counting documents")
len_input_corpus = sum(1 for _ in corpus)
if len_input_corpus == 0:
logger.warning("AuthorTopicModel.update() called with an empty corpus")
return
self.total_docs += len_input_corpus
# Add new documents in corpus to self.corpus.
self.extend_corpus(corpus)
# Obtain a list of new authors.
new_authors = []
# Sorting the author names makes the model more reproducible.
for a in sorted(author2doc.keys()):
if not self.author2doc.get(a):
new_authors.append(a)
num_new_authors = len(new_authors)
# Add new authors to the author2id/id2author dictionaries.
for a_id, a_name in enumerate(new_authors):
self.author2id[a_name] = a_id + self.num_authors
self.id2author[a_id + self.num_authors] = a_name
# Increment the number of total authors seen.
self.num_authors += num_new_authors
# Initialize the variational distributions q(theta|gamma)
gamma_new = self.random_state.gamma(100., 1. / 100., (num_new_authors, self.num_topics))
self.state.gamma = np.vstack([self.state.gamma, gamma_new])
# Combine author2doc with self.author2doc.
# First, increment the document IDs by the number of previously seen documents.
for a, doc_ids in author2doc.items():
doc_ids = [d + self.total_docs - len_input_corpus for d in doc_ids]
# For all authors in the input corpus, add the new documents.
for a, doc_ids in author2doc.items():
if self.author2doc.get(a):
# This is not a new author, append new documents.
self.author2doc[a].extend(doc_ids)
else:
# This is a new author, create index.
self.author2doc[a] = doc_ids
# Add all new documents to self.doc2author.
for d, a_list in doc2author.items():
self.doc2author[d] = a_list
# Train on all documents of authors in input_corpus.
train_corpus_idx = set()
# Collect all documents of authors.
for doc_ids in self.author2doc.values():
train_corpus_idx.update(doc_ids)
# Make the list of training documents unique.
train_corpus_idx = sorted(train_corpus_idx)
# train_corpus_idx is only a list of indexes, so "len" is valid.
lencorpus = len(train_corpus_idx)
if chunksize is None:
chunksize = min(lencorpus, self.chunksize)
self.state.numdocs += lencorpus
if update_every:
updatetype = "online"
updateafter = min(lencorpus, update_every * self.numworkers * chunksize)
else:
updatetype = "batch"
updateafter = lencorpus
evalafter = min(lencorpus, (eval_every or 0) * self.numworkers * chunksize)
updates_per_pass = max(1, lencorpus / updateafter)
logger.info(
"running %s author-topic training, %s topics, %s authors, "
"%i passes over the supplied corpus of %i documents, updating model once "
"every %i documents, evaluating perplexity every %i documents, "
"iterating %ix with a convergence threshold of %f",
updatetype, self.num_topics, num_input_authors, passes, lencorpus, updateafter,
evalafter, iterations, gamma_threshold
)
if updates_per_pass * passes < 10:
logger.warning(
"too few updates, training might not converge; "
"consider increasing the number of passes or iterations to improve accuracy"
)
# rho is the "speed" of updating; TODO try other fncs
# pass_ + num_updates handles increasing the starting t for each pass,
# while allowing it to "reset" on the first pass of each update
def rho():
return pow(offset + pass_ + (self.num_updates / chunksize), -decay)
for pass_ in range(passes):
if self.dispatcher:
logger.info('initializing %s workers', self.numworkers)
self.dispatcher.reset(self.state)
else:
# gamma is not needed in "other", thus its shape is (0, 0).
other = AuthorTopicState(self.eta, self.state.sstats.shape, (0, 0))
dirty = False
reallen = 0
for chunk_no, chunk_doc_idx in enumerate(
utils.grouper(train_corpus_idx, chunksize, as_numpy=chunks_as_numpy)):
chunk = [self.corpus[d] for d in chunk_doc_idx]
reallen += len(chunk) # keep track of how many documents we've processed so far
if eval_every and ((reallen == lencorpus) or ((chunk_no + 1) % (eval_every * self.numworkers) == 0)):
# log_perplexity requires the indexes of the documents being evaluated, to know what authors
# correspond to the documents.
self.log_perplexity(chunk, chunk_doc_idx, total_docs=lencorpus)
if self.dispatcher:
# add the chunk to dispatcher's job queue, so workers can munch on it
logger.info(
"PROGRESS: pass %i, dispatching documents up to #%i/%i",
pass_, chunk_no * chunksize + len(chunk), lencorpus
)
# this will eventually block until some jobs finish, because the queue has a small finite length
self.dispatcher.putjob(chunk)
else:
logger.info(
"PROGRESS: pass %i, at document #%i/%i",
pass_, chunk_no * chunksize + len(chunk), lencorpus
)
# do_estep requires the indexes of the documents being trained on, to know what authors
# correspond to the documents.
gammat = self.do_estep(chunk, self.author2doc, self.doc2author, rho(), other, chunk_doc_idx)
if self.optimize_alpha:
self.update_alpha(gammat, rho())
dirty = True
del chunk
# perform an M step. determine when based on update_every, don't do this after every chunk
if update_every and (chunk_no + 1) % (update_every * self.numworkers) == 0:
if self.dispatcher:
# distributed mode: wait for all workers to finish
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
other = self.dispatcher.getstate()
self.do_mstep(rho(), other, pass_ > 0)
del other # frees up memory
if self.dispatcher:
logger.info('initializing workers')
self.dispatcher.reset(self.state)
else:
other = AuthorTopicState(self.eta, self.state.sstats.shape, (0, 0))
dirty = False
# endfor single corpus iteration
if reallen != lencorpus:
raise RuntimeError("input corpus size changed during training (don't use generators as input)")
if dirty:
# finish any remaining updates
if self.dispatcher:
# distributed mode: wait for all workers to finish
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
other = self.dispatcher.getstate()
self.do_mstep(rho(), other, pass_ > 0)
del other
def bound(self, chunk, chunk_doc_idx=None, subsample_ratio=1.0, author2doc=None, doc2author=None):
r"""Estimate the variational bound of documents from `corpus`.
:math:`\mathbb{E_{q}}[\log p(corpus)] - \mathbb{E_{q}}[\log q(corpus)]`
Notes
-----
There are basically two use cases of this method:
#. `chunk` is a subset of the training corpus, and `chunk_doc_idx` is provided,
indicating the indexes of the documents in the training corpus.
#. `chunk` is a test set (held-out data), and `author2doc` and `doc2author` corresponding to this test set
are provided. There must not be any new authors passed to this method, `chunk_doc_idx` is not needed
in this case.
Parameters
----------
chunk : iterable of list of (int, float)
Corpus in BoW format.
chunk_doc_idx : numpy.ndarray, optional
Indices of the chunk's documents within the full training corpus.
subsample_ratio : float, optional
Used for calculation of word score for estimation of variational bound.
author2doc : dict of (str, list of int), optional
A dictionary where keys are the names of authors and values are lists of documents that the author
contributes to.
doc2author : dict of (int, list of str), optional
A dictionary where the keys are document IDs and the values are lists of author names.
Returns
-------
float
Value of variational bound score.
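Example
-------
A hedged sketch of use case #1, assuming `model` is an :class:`AuthorTopicModel` trained as in the class-level example above.
.. sourcecode:: pycon
>>> chunk_doc_idx = [0, 1, 2]  # indices into the training corpus
>>> chunk = [model.corpus[d] for d in chunk_doc_idx]
>>> score = model.bound(chunk, chunk_doc_idx=chunk_doc_idx)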
"""
# TODO: enable evaluation of documents with new authors. One could, for example, make it
# possible to pass a list of documents to self.inference with no author dictionaries,
# assuming all the documents correspond to one (unseen) author, learn the author's
# gamma, and return gamma (without adding it to self.state.gamma). Of course,
# collect_sstats should be set to false, so that the model is not updated w.r.t. these
# new documents.
_lambda = self.state.get_lambda()
Elogbeta = dirichlet_expectation(_lambda)
expElogbeta = np.exp(Elogbeta)
gamma = self.state.gamma
if author2doc is None and doc2author is None:
# Evaluating on training documents (chunk of self.corpus).
author2doc = self.author2doc
doc2author = self.doc2author
if not chunk_doc_idx:
# If author2doc and doc2author are not provided, chunk is assumed to be a subset of
# self.corpus, and chunk_doc_idx is thus required.
raise ValueError(
'Either author dictionaries or chunk_doc_idx must be provided. '
'Consult documentation of bound method.'
)
elif author2doc is not None and doc2author is not None:
# Training on held-out documents (documents not seen during training).
# All authors in dictionaries must still be seen during training.
for a in author2doc.keys():
if not self.author2doc.get(a):
raise ValueError('bound cannot be called with authors not seen during training.')
if chunk_doc_idx:
raise ValueError(
'Either author dictionaries or chunk_doc_idx must be provided, not both. '
'Consult documentation of bound method.'
)
else:
raise ValueError(
'Either both author2doc and doc2author should be provided, or neither. '
'Consult documentation of bound method.'
)
Elogtheta = dirichlet_expectation(gamma)
expElogtheta = np.exp(Elogtheta)
word_score = 0.0
theta_score = 0.0
for d, doc in enumerate(chunk):
if chunk_doc_idx:
doc_no = chunk_doc_idx[d]
else:
doc_no = d
# Get all authors in current document, and convert the author names to integer IDs.
authors_d = np.fromiter((self.author2id[a] for a in self.doc2author[doc_no]), dtype=int)
ids = np.fromiter((id for id, _ in doc), dtype=int, count=len(doc)) # Word IDs in doc.
cts = np.fromiter((cnt for _, cnt in doc), dtype=int, count=len(doc)) # Word counts.
if d % self.chunksize == 0:
logger.debug("bound: at document #%i in chunk", d)
# Computing the bound requires summing over expElogtheta[a, k] * expElogbeta[k, v], which
# is the same computation as in normalizing phi.
phinorm = self.compute_phinorm(expElogtheta[authors_d, :], expElogbeta[:, ids])
word_score += np.log(1.0 / len(authors_d)) * sum(cts) + cts.dot(np.log(phinorm))
# Compensate likelihood for when `chunk` above is only a sample of the whole corpus. This ensures
# that the likelihood is always roughly on the same scale.
word_score *= subsample_ratio
# E[log p(theta | alpha) - log q(theta | gamma)]
for a in author2doc.keys():
a = self.author2id[a]
theta_score += np.sum((self.alpha - gamma[a, :]) * Elogtheta[a, :])
theta_score += np.sum(gammaln(gamma[a, :]) - gammaln(self.alpha))
theta_score += gammaln(np.sum(self.alpha)) - gammaln(np.sum(gamma[a, :]))
# theta_score is rescaled in a similar fashion.
# TODO: treat this in a more general way, similar to how it is done with word_score.
theta_score *= self.num_authors / len(author2doc)
# E[log p(beta | eta) - log q (beta | lambda)]
beta_score = 0.0
beta_score += np.sum((self.eta - _lambda) * Elogbeta)
beta_score += np.sum(gammaln(_lambda) - gammaln(self.eta))
sum_eta = np.sum(self.eta)
beta_score += np.sum(gammaln(sum_eta) - gammaln(np.sum(_lambda, 1)))
total_score = word_score + theta_score + beta_score
return total_score
def get_document_topics(self, word_id, minimum_probability=None):
"""Override :meth:`~gensim.models.ldamodel.LdaModel.get_document_topics` and simply raises an exception.
Warnings
--------
This method is not valid for the author-topic model; use
:meth:`~gensim.models.atmodel.AuthorTopicModel.get_author_topics` or
:meth:`~gensim.models.atmodel.AuthorTopicModel.get_new_author_topics` instead.
Raises
------
NotImplementedError
Always.
"""
raise NotImplementedError(
'Method "get_document_topics" is not valid for the author-topic model. '
'Use the "get_author_topics" method.'
)
def get_new_author_topics(self, corpus, minimum_probability=None):
"""Infers topics for new author.
Infers a topic distribution for a new author over the passed corpus of docs,
assuming that all documents are from this single new author.
Parameters
----------
corpus : iterable of list of (int, float)
Corpus in BoW format.
minimum_probability : float, optional
Ignore topics with probability below this value, if None - 1e-8 is used.
Returns
-------
list of (int, float)
Topic distribution for the given `corpus`.
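Example
-------
A hedged sketch, assuming `model` is an :class:`AuthorTopicModel` trained as in the class-level example above.
.. sourcecode:: pycon
>>> held_out = [[(0, 1), (1, 1)]]  # a single unseen document in BoW format
>>> new_author_vec = model.get_new_author_topics(held_out)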
"""
def rho():
return pow(self.offset + 1 + 1, -self.decay)
def rollback_new_author_changes():
self.state.gamma = self.state.gamma[0:-1]
del self.author2doc[new_author_name]
a_id = self.author2id[new_author_name]
del self.id2author[a_id]
del self.author2id[new_author_name]
for new_doc_id in corpus_doc_idx:
del self.doc2author[new_doc_id]
try:
len_input_corpus = len(corpus)
except TypeError:
logger.warning("input corpus stream has no len(); counting documents")
len_input_corpus = sum(1 for _ in corpus)
if len_input_corpus == 0:
raise ValueError("AuthorTopicModel.get_new_author_topics() called with an empty corpus")
new_author_name = "placeholder_name"
# indexes representing the documents in the input corpus
corpus_doc_idx = list(range(self.total_docs, self.total_docs + len_input_corpus))
# Add the new placeholder author to author2id/id2author dictionaries.
num_new_authors = 1
author_id = self.num_authors
if new_author_name in self.author2id:
raise ValueError("self.author2id already has 'placeholder_name' author")
self.author2id[new_author_name] = author_id
self.id2author[author_id] = new_author_name
# Add new author in author2doc and doc into doc2author.
self.author2doc[new_author_name] = corpus_doc_idx
for new_doc_id in corpus_doc_idx:
self.doc2author[new_doc_id] = [new_author_name]
gamma_new = self.random_state.gamma(100., 1. / 100., (num_new_authors, self.num_topics))
self.state.gamma = np.vstack([self.state.gamma, gamma_new])
# Do not record the sstats, as we are going to delete the new author once inference is done.
try:
gammat, _ = self.inference(
corpus, self.author2doc, self.doc2author, rho(),
collect_sstats=False, chunk_doc_idx=corpus_doc_idx
)
new_author_topics = self.get_author_topics(new_author_name, minimum_probability)
finally:
rollback_new_author_changes()
return new_author_topics
def get_author_topics(self, author_name, minimum_probability=None):
"""Get topic distribution the given author.
Parameters
----------
author_name : str
Name of the author for which the topic distribution needs to be estimated.
minimum_probability : float, optional
Sets the minimum probability value for showing the topics of a given author, topics with probability <
`minimum_probability` will be ignored.
Returns
-------
list of (int, float)
Topic distribution of an author.
Example
-------
.. sourcecode:: pycon
>>> from gensim.models import AuthorTopicModel
>>> from gensim.corpora import mmcorpus
>>> from gensim.test.utils import common_dictionary, datapath, temporary_file
>>> author2doc = {
... 'john': [0, 1, 2, 3, 4, 5, 6],
... 'jane': [2, 3, 4, 5, 6, 7, 8],
... 'jack': [0, 2, 4, 6, 8]
... }
>>>
>>> corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
>>>
>>> with temporary_file("serialized") as s_path:
... model = AuthorTopicModel(
... corpus, author2doc=author2doc, id2word=common_dictionary, num_topics=4,
... serialized=True, serialization_path=s_path
... )
...
... model.update(corpus, author2doc) # update the author-topic model with additional documents
>>>
>>> # construct vectors for authors
>>> author_vecs = [model.get_author_topics(author) for author in model.id2author.values()]
"""
author_id = self.author2id[author_name]
if minimum_probability is None:
minimum_probability = self.minimum_probability
minimum_probability = max(minimum_probability, 1e-8) # never allow zero values in sparse output
topic_dist = self.state.gamma[author_id, :] / sum(self.state.gamma[author_id, :])
author_topics = [
(topicid, topicvalue) for topicid, topicvalue in enumerate(topic_dist)
if topicvalue >= minimum_probability
]
return author_topics
def __getitem__(self, author_names, eps=None):
"""Get topic distribution for input `author_names`.
Parameters
----------
author_names : {str, list of str}
Name(s) of the author for which the topic distribution needs to be estimated.
eps : float, optional
The minimum probability value for showing the topics of a given author, topics with probability < `eps`
will be ignored.
Returns
-------
list of (int, float) **or** list of list of (int, float)
Topic distribution for the author(s), type depends on type of `author_names`.
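Example
-------
A hedged sketch, assuming `model` is an :class:`AuthorTopicModel` trained as in the class-level example above.
.. sourcecode:: pycon
>>> jack_topics = model['jack']  # equivalent to model.get_author_topics('jack')
>>> pair = model[['jack', 'jane']]  # one topic distribution per author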
"""
if isinstance(author_names, list):
items = []
for a in author_names:
items.append(self.get_author_topics(a, minimum_probability=eps))
else:
items = self.get_author_topics(author_names, minimum_probability=eps)
return items
| 54,236 | Python | .py | 1,005 | 42.537313 | 120 | 0.621377 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,125 | hdpmodel.py | piskvorky_gensim/gensim/models/hdpmodel.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Jonathan Esterhazy <jonathan.esterhazy at gmail.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
#
# HDP inference code is adapted from the onlinehdp.py script by
# Chong Wang (chongw at cs.princeton.edu).
# http://www.cs.princeton.edu/~chongw/software/onlinehdp.tar.gz
#
"""Module for `online Hierarchical Dirichlet Processing
<http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
The core estimation code is directly adapted from the `blei-lab/online-hdp <https://github.com/blei-lab/online-hdp>`_
from `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical Dirichlet Process", JMLR (2011)
<http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
Examples
--------
Train :class:`~gensim.models.hdpmodel.HdpModel`
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.models import HdpModel
>>>
>>> hdp = HdpModel(common_corpus, common_dictionary)
You can then infer topic distributions on new, unseen documents, with
.. sourcecode:: pycon
>>> unseen_document = [(1, 3.), (2, 4)]
>>> doc_hdp = hdp[unseen_document]
To print 20 topics with the top 10 most probable words:
.. sourcecode:: pycon
>>> topic_info = hdp.print_topics(num_topics=20, num_words=10)
The model can be updated (trained) with new documents via
.. sourcecode:: pycon
>>> hdp.update([[(1, 2)], [(1, 1), (4, 5)]])
"""
import logging
import time
import warnings
import numpy as np
from scipy.special import gammaln, psi # gamma function utils
from gensim import interfaces, utils, matutils
from gensim.matutils import dirichlet_expectation, mean_absolute_difference
from gensim.models import basemodel, ldamodel
from gensim.utils import deprecated
logger = logging.getLogger(__name__)
meanchangethresh = 0.00001
rhot_bound = 0.0
def expect_log_sticks(sticks):
r"""For stick-breaking hdp, get the :math:`\mathbb{E}[log(sticks)]`.
Parameters
----------
sticks : numpy.ndarray
Array of values for stick.
Returns
-------
numpy.ndarray
Computed :math:`\mathbb{E}[log(sticks)]`.
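Example
-------
A toy sketch with made-up stick parameters, just to illustrate the output shape.
.. sourcecode:: pycon
>>> import numpy as np
>>> from gensim.models.hdpmodel import expect_log_sticks
>>> sticks = np.array([[1.0, 1.0], [2.0, 1.0]])  # a/b parameters for two stick-breaking weights
>>> expect_log_sticks(sticks).shape  # one entry per stick piece, plus the remainder
(3,)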
"""
dig_sum = psi(np.sum(sticks, 0))
ElogW = psi(sticks[0]) - dig_sum
Elog1_W = psi(sticks[1]) - dig_sum
n = len(sticks[0]) + 1
Elogsticks = np.zeros(n)
Elogsticks[0: n - 1] = ElogW
Elogsticks[1:] = Elogsticks[1:] + np.cumsum(Elog1_W)
return Elogsticks
def lda_e_step(doc_word_ids, doc_word_counts, alpha, beta, max_iter=100):
r"""Performs EM-iteration on a single document for calculation of likelihood for a maximum iteration of `max_iter`.
Parameters
----------
doc_word_ids : iterable of int
IDs of the words appearing in the document.
doc_word_counts : iterable of int
Counts of the corresponding words in the document.
alpha : numpy.ndarray
Lda equivalent value of alpha.
beta : numpy.ndarray
Lda equivalent value of beta.
max_iter : int, optional
Maximum number of E-step iterations.
Returns
-------
(numpy.ndarray, numpy.ndarray)
Computed (:math:`likelihood`, :math:`\gamma`).
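Example
-------
A toy sketch with made-up LDA parameters (2 topics, 2 words), just to illustrate the call signature.
.. sourcecode:: pycon
>>> import numpy as np
>>> from gensim.models.hdpmodel import lda_e_step
>>> alpha = np.full(2, 0.5)  # symmetric prior over 2 topics
>>> beta = np.array([[0.6, 0.4], [0.1, 0.9]])  # topic-word probabilities, rows sum to 1
>>> likelihood, gamma = lda_e_step(doc_word_ids=[0, 1], doc_word_counts=[2, 1], alpha=alpha, beta=beta)
>>> gamma.shape  # one pseudo-count per topic
(2,)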
"""
gamma = np.ones(len(alpha))
expElogtheta = np.exp(dirichlet_expectation(gamma))
betad = beta[:, doc_word_ids]
phinorm = np.dot(expElogtheta, betad) + 1e-100
counts = np.array(doc_word_counts)
for _ in range(max_iter):
lastgamma = gamma
gamma = alpha + expElogtheta * np.dot(counts / phinorm, betad.T)
Elogtheta = dirichlet_expectation(gamma)
expElogtheta = np.exp(Elogtheta)
phinorm = np.dot(expElogtheta, betad) + 1e-100
meanchange = mean_absolute_difference(gamma, lastgamma)
if meanchange < meanchangethresh:
break
likelihood = np.sum(counts * np.log(phinorm))
likelihood += np.sum((alpha - gamma) * Elogtheta)
likelihood += np.sum(gammaln(gamma) - gammaln(alpha))
likelihood += gammaln(np.sum(alpha)) - gammaln(np.sum(gamma))
return likelihood, gamma
class SuffStats:
"""Stores sufficient statistics for the current chunk of document(s) whenever Hdp model is updated with new corpus.
These stats are used when updating lambda and the top level sticks. The statistics include the number of documents
in the chunk, the number of unique words in those documents, and the top level truncation level.
"""
def __init__(self, T, Wt, Dt):
"""
Parameters
----------
T : int
Top level truncation level.
Wt : int
Number of unique words in the chunk's documents.
Dt : int
Chunk size.
"""
self.m_chunksize = Dt
self.m_var_sticks_ss = np.zeros(T)
self.m_var_beta_ss = np.zeros((T, Wt))
def set_zero(self):
"""Fill the sticks and beta array with 0 scalar value."""
self.m_var_sticks_ss.fill(0.0)
self.m_var_beta_ss.fill(0.0)
class HdpModel(interfaces.TransformationABC, basemodel.BaseTopicModel):
r"""`Hierarchical Dirichlet Process model <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_
Topic models promise to help summarize and organize large archives of texts that cannot be easily analyzed by hand.
Hierarchical Dirichlet process (HDP) is a powerful mixed-membership model for the unsupervised analysis of grouped
data. Unlike its finite counterpart, latent Dirichlet allocation, the HDP topic model infers the number of topics
from the data. Here we have used Online HDP, which provides the speed of online variational Bayes with the modeling
flexibility of the HDP. The idea behind Online variational Bayes in general is to optimize the variational
objective function with stochastic optimization. The challenge we face is that the existing coordinate ascent
variational Bayes algorithms for the HDP require complicated approximation methods or numerical optimization. This
model utilises stick breaking construction of Hdp which enables it to allow for coordinate-ascent variational Bayes
without numerical approximation.
**Stick breaking construction**
To understand the HDP model we need to understand how it is modelled using the stick breaking construction. A very
good analogy to understand the stick breaking construction is `chinese restaurant franchise
<https://www.cs.princeton.edu/courses/archive/fall07/cos597C/scribe/20070921.pdf>`_.
For this assume that there is a restaurant franchise (`corpus`) which has a large number of restaurants
(`documents`, `j`) under it. They have a global menu of dishes (`topics`, :math:`\Phi_{k}`) which they serve.
Also, a single dish (`topic`, :math:`\Phi_{k}`) is only served at a single table `t` for all the customers
(`words`, :math:`\theta_{j,i}`) who sit at that table.
So, when a customer enters the restaurant he/she has the choice of where to sit.
He/she can choose to sit at a table where some customers are already sitting, or he/she can choose to sit
at a new table. Here the probability of choosing each option is not the same.
Now, in this analogy, the global menu of dishes corresponds to the global atoms :math:`\Phi_{k}`, and each restaurant
corresponds to a single document `j`. So the number of dishes served in a particular restaurant corresponds to the
number of topics in a particular document, and the number of people sitting at each table corresponds to the number
of words belonging to each topic inside the document `j`.
Now, coming on to the stick breaking construction, the concept understood from the chinese restaurant franchise is
easily carried over to the stick breaking construction for hdp (`"Figure 1" from "Online Variational Inference
for the Hierarchical Dirichlet Process" <http://proceedings.mlr.press/v15/wang11a/wang11a.pdf>`_).
A two level hierarchical dirichlet process is a collection of dirichlet processes :math:`G_{j}` , one for each
group, which share a base distribution :math:`G_{0}`, which is also a dirichlet process. Also, all :math:`G_{j}`
share the same set of atoms, :math:`\Phi_{k}`, and only the atom weights :math:`\pi _{jt}` differs.
There will be multiple document-level atoms :math:`\psi_{jt}` which map to the same corpus-level atom
:math:`\Phi_{k}`. Here, the :math:`\beta` signify the weights given to each of the topics globally. Also, each
factor :math:`\theta_{j,i}` is distributed according to :math:`G_{j}`, i.e., it takes on the value of
:math:`\Phi_{k}` with probability :math:`\pi _{jt}`. :math:`C_{j,t}` is an indicator variable whose value `k`
signifies the index of :math:`\Phi`. This helps to map :math:`\psi_{jt}` to :math:`\Phi_{k}`.
The top level (`corpus` level) stick proportions correspond to the values of :math:`\beta`,
bottom level (`document` level) stick proportions correspond to the values of :math:`\pi`.
The truncation level for the corpus (`K`) and document (`T`) corresponds to the number of :math:`\beta`
and :math:`\pi` which are in existence.
Now, whenever coordinate ascent updates are to be performed, they happen at two levels: the document level as well
as the corpus level.
At document level, we update the following:
#. The parameters to the document level sticks, i.e, a and b parameters of :math:`\beta` distribution of the
variable :math:`\pi _{jt}`.
#. The parameters to per word topic indicators, :math:`Z_{j,n}`. Here :math:`Z_{j,n}` selects topic parameter
:math:`\psi_{jt}`.
#. The parameters to per document topic indices :math:`\Phi_{jtk}`.
At corpus level, we update the following:
#. The parameters to the top level sticks, i.e., the parameters of the :math:`\beta` distribution for the
corpus level :math:`\beta`, which signify the topic distribution at corpus level.
#. The parameters to the topics :math:`\Phi_{k}`.
Now, coming to the steps involved, the procedure for online variational inference for the HDP model is as follows:
1. We initialise the corpus level parameters and topic parameters randomly, and set the current time to 1.
2. Fetch a random document j from the corpus.
3. Compute all the parameters required for document level updates.
4. Compute natural gradients of corpus level parameters.
5. Initialise the learning rate as a function of kappa, tau and current time. Also, increment current time by 1
each time it reaches this step.
6. Update corpus level parameters.
Repeat steps 2 to 6 until a stopping condition is met (a usage sketch follows the list below).
Here the stopping condition corresponds to any of:
* time limit expired
* chunk limit reached
* whole corpus processed
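A rough usage sketch of these stopping conditions (both caps are optional; whichever is hit first ends training):
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.models import HdpModel
>>> # stop after at most 2 chunks or 60 seconds, whichever comes first
>>> hdp = HdpModel(common_corpus, common_dictionary, max_chunks=2, max_time=60)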
Attributes
----------
lda_alpha : numpy.ndarray
Same as :math:`\alpha` from :class:`gensim.models.ldamodel.LdaModel`.
lda_beta : numpy.ndarray
Same as :math:`\beta` from from :class:`gensim.models.ldamodel.LdaModel`.
m_D : int
Number of documents in the corpus.
m_Elogbeta : numpy.ndarray:
Stores value of dirichlet expectation, i.e., compute :math:`E[log \theta]` for a vector
:math:`\theta \sim Dir(\alpha)`.
m_lambda : {numpy.ndarray, float}
Drawn samples from the parameterized gamma distribution.
m_lambda_sum : {numpy.ndarray, float}
An array with the same shape as `m_lambda`, with the specified axis (1) removed.
m_num_docs_processed : int
Number of documents finished processing. This is incremented in steps of the chunk size.
m_r : list
Acts as normaliser in lazy updating of `m_lambda` attribute.
m_rhot : float
Assigns weight to the information obtained from the mini-chunk; its value is between 0 and 1.
m_status_up_to_date : bool
Flag indicating whether `lambda` and :math:`E[\log \theta]` are up to date (True) or not (False).
m_timestamp : numpy.ndarray
Helps to keep track and perform lazy updates on lambda.
m_updatect : int
Keeps track of current time and is incremented every time :meth:`~gensim.models.hdpmodel.HdpModel.update_lambda`
is called.
m_var_sticks : numpy.ndarray
Array of values for stick.
m_varphi_ss : numpy.ndarray
Used to update top level sticks.
m_W : int
Length of dictionary for the input corpus.
"""
def __init__(self, corpus, id2word, max_chunks=None, max_time=None,
chunksize=256, kappa=1.0, tau=64.0, K=15, T=150, alpha=1,
gamma=1, eta=0.01, scale=1.0, var_converge=0.0001,
outputdir=None, random_state=None):
"""
Parameters
----------
corpus : iterable of list of (int, float)
Corpus in BoW format.
id2word : :class:`~gensim.corpora.dictionary.Dictionary`
Dictionary for the input corpus.
max_chunks : int, optional
Upper bound on how many chunks to process. If the corpus does not contain enough chunks, training wraps
around to the beginning of the corpus for another pass.
max_time : int, optional
Upper bound on time (in seconds) for which model will be trained.
chunksize : int, optional
Number of documents in one chunk.
kappa : float, optional
Learning parameter which acts as exponential decay factor to influence extent of learning from each batch.
tau: float, optional
Learning parameter which down-weights early iterations of documents.
K : int, optional
Second level truncation level
T : int, optional
Top level truncation level
alpha : int, optional
Second level concentration
gamma : int, optional
First level concentration
eta : float, optional
The topic Dirichlet
scale : float, optional
Weights information from the mini-chunk of corpus to calculate rhot.
var_converge : float, optional
Lower bound on the right side of convergence. Used when updating variational parameters for a
single document.
outputdir : str, optional
Stores topic and options information in the specified directory.
random_state : {None, int, array_like, :class:`~np.random.RandomState`, optional}
Adds a little random jitter to randomize results around same alpha when trying to fetch a closest
corresponding lda model from :meth:`~gensim.models.hdpmodel.HdpModel.suggested_lda_model`
"""
self.corpus = corpus
self.id2word = id2word
self.chunksize = chunksize
self.max_chunks = max_chunks
self.max_time = max_time
self.outputdir = outputdir
self.random_state = utils.get_random_state(random_state)
self.lda_alpha = None
self.lda_beta = None
self.m_W = len(id2word)
self.m_D = 0
if corpus:
self.m_D = len(corpus)
self.m_T = T
self.m_K = K
self.m_alpha = alpha
self.m_gamma = gamma
self.m_var_sticks = np.zeros((2, T - 1))
self.m_var_sticks[0] = 1.0
self.m_var_sticks[1] = range(T - 1, 0, -1)
self.m_varphi_ss = np.zeros(T)
self.m_lambda = self.random_state.gamma(1.0, 1.0, (T, self.m_W)) * self.m_D * 100 / (T * self.m_W) - eta
self.m_eta = eta
self.m_Elogbeta = dirichlet_expectation(self.m_eta + self.m_lambda)
self.m_tau = tau + 1
self.m_kappa = kappa
self.m_scale = scale
self.m_updatect = 0
self.m_status_up_to_date = True
self.m_num_docs_processed = 0
self.m_timestamp = np.zeros(self.m_W, dtype=int)
self.m_r = [0]
self.m_lambda_sum = np.sum(self.m_lambda, axis=1)
self.m_var_converge = var_converge
if self.outputdir:
self.save_options()
# if a training corpus was provided, start estimating the model right away
if corpus is not None:
self.update(corpus)
def inference(self, chunk):
"""Infers the gamma value based for `chunk`.
Parameters
----------
chunk : iterable of list of (int, float)
Corpus in BoW format.
Returns
-------
numpy.ndarray
The gamma (document-topic) values inferred for each document in the chunk.
Raises
------
RuntimeError
If the model hasn't been trained yet.
"""
if self.lda_alpha is None or self.lda_beta is None:
raise RuntimeError("model must be trained to perform inference")
chunk = list(chunk)
if len(chunk) > 1:
logger.debug("performing inference on a chunk of %i documents", len(chunk))
gamma = np.zeros((len(chunk), self.lda_beta.shape[0]))
for d, doc in enumerate(chunk):
if not doc: # leave gamma at zero for empty documents
continue
ids, counts = zip(*doc)
_, gammad = lda_e_step(ids, counts, self.lda_alpha, self.lda_beta)
gamma[d, :] = gammad
return gamma
def __getitem__(self, bow, eps=0.01):
"""Accessor method for generating topic distribution of given document.
Parameters
----------
bow : {iterable of list of (int, float), list of (int, float)}
BoW representation of the document/corpus to get topics for.
eps : float, optional
Ignore topics with probability below `eps`.
Returns
-------
list of (int, float) **or** :class:`gensim.interfaces.TransformedCorpus`
Topic distribution for the given document/corpus `bow`, as a list of `(topic_id, topic_probability)` or
transformed corpus
"""
is_corpus, corpus = utils.is_corpus(bow)
if is_corpus:
return self._apply(corpus)
gamma = self.inference([bow])[0]
topic_dist = gamma / sum(gamma) if sum(gamma) != 0 else []
return [(topicid, topicvalue) for topicid, topicvalue in enumerate(topic_dist) if topicvalue >= eps]
def update(self, corpus):
"""Train the model with new documents, by EM-iterating over `corpus` until any of the conditions is satisfied.
* time limit expired
* chunk limit reached
* whole corpus processed
Parameters
----------
corpus : iterable of list of (int, float)
Corpus in BoW format.
"""
save_freq = max(1, int(10000 / self.chunksize)) # save every 10k docs, roughly
chunks_processed = 0
start_time = time.perf_counter()
while True:
for chunk in utils.grouper(corpus, self.chunksize):
self.update_chunk(chunk)
self.m_num_docs_processed += len(chunk)
chunks_processed += 1
if self.update_finished(start_time, chunks_processed, self.m_num_docs_processed):
self.update_expectations()
alpha, beta = self.hdp_to_lda()
self.lda_alpha = alpha
self.lda_beta = beta
self.print_topics(20)
if self.outputdir:
self.save_topics()
return
elif chunks_processed % save_freq == 0:
self.update_expectations()
# self.save_topics(self.m_num_docs_processed)
self.print_topics(20)
logger.info('PROGRESS: finished document %i of %i', self.m_num_docs_processed, self.m_D)
def update_finished(self, start_time, chunks_processed, docs_processed):
"""Flag to determine whether the model has been updated with the new corpus or not.
Parameters
----------
start_time : float
The value of :func:`time.perf_counter` recorded when the current update started, as a floating point
number expressed in seconds; used to enforce `max_time`.
chunks_processed : int
Indicates progress of the update in terms of the number of chunks processed.
docs_processed : int
Indicates the number of documents finished processing. This is incremented in steps of the chunk size.
Returns
-------
bool
True if a stopping condition has been reached and the update should finish, False otherwise.
"""
return (
# chunk limit reached
(self.max_chunks and chunks_processed == self.max_chunks)
# time limit reached
or (self.max_time and time.perf_counter() - start_time > self.max_time)
# no limits and whole corpus has been processed once
or (not self.max_chunks and not self.max_time and docs_processed >= self.m_D))
def update_chunk(self, chunk, update=True, opt_o=True):
"""Performs lazy update on necessary columns of lambda and variational inference for documents in the chunk.
Parameters
----------
chunk : iterable of list of (int, float)
Corpus in BoW format.
update : bool, optional
If True - call :meth:`~gensim.models.hdpmodel.HdpModel.update_lambda`.
opt_o : bool, optional
Passed as argument to :meth:`~gensim.models.hdpmodel.HdpModel.update_lambda`.
If True then the topics will be ordered, False otherwise.
Returns
-------
(float, int)
A tuple of likelihood and sum of all the word counts from each document in the corpus.
"""
# Find the unique words in this chunk...
unique_words = dict()
word_list = []
for doc in chunk:
for word_id, _ in doc:
if word_id not in unique_words:
unique_words[word_id] = len(unique_words)
word_list.append(word_id)
wt = len(word_list) # number of unique words in these documents
# ...and do the lazy updates on the necessary columns of lambda
rw = np.array([self.m_r[t] for t in self.m_timestamp[word_list]])
self.m_lambda[:, word_list] *= np.exp(self.m_r[-1] - rw)
self.m_Elogbeta[:, word_list] = \
psi(self.m_eta + self.m_lambda[:, word_list]) - \
psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])
ss = SuffStats(self.m_T, wt, len(chunk))
Elogsticks_1st = expect_log_sticks(self.m_var_sticks) # global sticks
# run variational inference on some new docs
score = 0.0
count = 0
for doc in chunk:
if len(doc) > 0:
doc_word_ids, doc_word_counts = zip(*doc)
doc_score = self.doc_e_step(
ss, Elogsticks_1st,
unique_words, doc_word_ids,
doc_word_counts, self.m_var_converge
)
count += sum(doc_word_counts)
score += doc_score
if update:
self.update_lambda(ss, word_list, opt_o)
return score, count
def doc_e_step(self, ss, Elogsticks_1st, unique_words, doc_word_ids, doc_word_counts, var_converge):
"""Performs E step for a single doc.
Parameters
----------
ss : :class:`~gensim.models.hdpmodel.SuffStats`
Stats for all document(s) in the chunk.
Elogsticks_1st : numpy.ndarray
Computed Elogsticks value by stick-breaking process.
unique_words : dict of (int, int)
Mapping from word id to its position among the unique words of the chunk.
doc_word_ids : iterable of int
Word ids for a single document.
doc_word_counts : iterable of int
Word counts of all words in a single document.
var_converge : float
Lower bound on the right side of convergence. Used when updating variational parameters for a single
document.
Returns
-------
float
Computed value of likelihood for a single document.
"""
chunkids = [unique_words[id] for id in doc_word_ids]
Elogbeta_doc = self.m_Elogbeta[:, doc_word_ids]
# very similar to the hdp equations
v = np.zeros((2, self.m_K - 1))
v[0] = 1.0
v[1] = self.m_alpha
# back to the uniform
phi = np.ones((len(doc_word_ids), self.m_K)) * 1.0 / self.m_K
likelihood = 0.0
old_likelihood = -1e200
converge = 1.0
iter = 0
max_iter = 100
# second-level optimization is not supported yet, to be done in the future
while iter < max_iter and (converge < 0.0 or converge > var_converge):
# update variational parameters
# var_phi
if iter < 3:
var_phi = np.dot(phi.T, (Elogbeta_doc * doc_word_counts).T)
(log_var_phi, log_norm) = matutils.ret_log_normalize_vec(var_phi)
var_phi = np.exp(log_var_phi)
else:
var_phi = np.dot(phi.T, (Elogbeta_doc * doc_word_counts).T) + Elogsticks_1st
(log_var_phi, log_norm) = matutils.ret_log_normalize_vec(var_phi)
var_phi = np.exp(log_var_phi)
# phi
if iter < 3:
phi = np.dot(var_phi, Elogbeta_doc).T
(log_phi, log_norm) = matutils.ret_log_normalize_vec(phi)
phi = np.exp(log_phi)
else:
phi = np.dot(var_phi, Elogbeta_doc).T + Elogsticks_2nd # noqa:F821
(log_phi, log_norm) = matutils.ret_log_normalize_vec(phi)
phi = np.exp(log_phi)
# v
phi_all = phi * np.array(doc_word_counts)[:, np.newaxis]
v[0] = 1.0 + np.sum(phi_all[:, :self.m_K - 1], 0)
phi_cum = np.flipud(np.sum(phi_all[:, 1:], 0))
v[1] = self.m_alpha + np.flipud(np.cumsum(phi_cum))
Elogsticks_2nd = expect_log_sticks(v)
likelihood = 0.0
# compute likelihood
# var_phi part/ C in john's notation
likelihood += np.sum((Elogsticks_1st - log_var_phi) * var_phi)
# v part/ v in john's notation, john's beta is alpha here
log_alpha = np.log(self.m_alpha)
likelihood += (self.m_K - 1) * log_alpha
dig_sum = psi(np.sum(v, 0))
likelihood += np.sum((np.array([1.0, self.m_alpha])[:, np.newaxis] - v) * (psi(v) - dig_sum))
likelihood -= np.sum(gammaln(np.sum(v, 0))) - np.sum(gammaln(v))
# Z part
likelihood += np.sum((Elogsticks_2nd - log_phi) * phi)
# X part, the data part
likelihood += np.sum(phi.T * np.dot(var_phi, Elogbeta_doc * doc_word_counts))
converge = (likelihood - old_likelihood) / abs(old_likelihood)
old_likelihood = likelihood
if converge < -0.000001:
logger.warning('likelihood is decreasing!')
iter += 1
# update the suff_stat ss
# this time it only contains information from one doc
ss.m_var_sticks_ss += np.sum(var_phi, 0)
ss.m_var_beta_ss[:, chunkids] += np.dot(var_phi.T, phi.T * doc_word_counts)
return likelihood
def update_lambda(self, sstats, word_list, opt_o):
"""Update appropriate columns of lambda and top level sticks based on documents.
Parameters
----------
sstats : :class:`~gensim.models.hdpmodel.SuffStats`
Statistic for all document(s) in the chunk.
word_list : list of int
Contains the word ids of all the unique words in the chunk of documents on which the update is being performed.
opt_o : bool, optional
If True - invokes a call to :meth:`~gensim.models.hdpmodel.HdpModel.optimal_ordering` to order the topics.
"""
self.m_status_up_to_date = False
# rhot will be between 0 and 1, and says how much to weight
# the information we got from this mini-chunk.
rhot = self.m_scale * pow(self.m_tau + self.m_updatect, -self.m_kappa)
if rhot < rhot_bound:
rhot = rhot_bound
self.m_rhot = rhot
# Update appropriate columns of lambda based on documents.
self.m_lambda[:, word_list] = \
self.m_lambda[:, word_list] * (1 - rhot) + rhot * self.m_D * sstats.m_var_beta_ss / sstats.m_chunksize
self.m_lambda_sum = (1 - rhot) * self.m_lambda_sum + \
rhot * self.m_D * np.sum(sstats.m_var_beta_ss, axis=1) / sstats.m_chunksize
self.m_updatect += 1
self.m_timestamp[word_list] = self.m_updatect
self.m_r.append(self.m_r[-1] + np.log(1 - rhot))
self.m_varphi_ss = \
(1.0 - rhot) * self.m_varphi_ss + rhot * sstats.m_var_sticks_ss * self.m_D / sstats.m_chunksize
if opt_o:
self.optimal_ordering()
# update top level sticks
self.m_var_sticks[0] = self.m_varphi_ss[:self.m_T - 1] + 1.0
var_phi_sum = np.flipud(self.m_varphi_ss[1:])
self.m_var_sticks[1] = np.flipud(np.cumsum(var_phi_sum)) + self.m_gamma
def optimal_ordering(self):
"""Performs ordering on the topics."""
idx = matutils.argsort(self.m_lambda_sum, reverse=True)
self.m_varphi_ss = self.m_varphi_ss[idx]
self.m_lambda = self.m_lambda[idx, :]
self.m_lambda_sum = self.m_lambda_sum[idx]
self.m_Elogbeta = self.m_Elogbeta[idx, :]
def update_expectations(self):
"""Since we're doing lazy updates on lambda, at any given moment the current state of lambda may not be
accurate. This function updates all of the elements of lambda and Elogbeta so that if (for example) we want to
print out the topics we've learned we'll get the correct behavior.
"""
for w in range(self.m_W):
self.m_lambda[:, w] *= np.exp(self.m_r[-1] - self.m_r[self.m_timestamp[w]])
self.m_Elogbeta = \
psi(self.m_eta + self.m_lambda) - psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])
self.m_timestamp[:] = self.m_updatect
self.m_status_up_to_date = True
def show_topic(self, topic_id, topn=20, log=False, formatted=False, num_words=None):
"""Print the `num_words` most probable words for topic `topic_id`.
Parameters
----------
topic_id : int
Acts as a representative index for a particular topic.
topn : int, optional
Number of most probable words to show from given `topic_id`.
log : bool, optional
If True - logs a message with level INFO on the logger object.
formatted : bool, optional
If True - get the topics as a list of strings, otherwise - get the topics as lists of (weight, word) pairs.
num_words : int, optional
DEPRECATED, USE `topn` INSTEAD.
Warnings
--------
The parameter `num_words` is deprecated, will be removed in 4.0.0, please use `topn` instead.
Returns
-------
list of (str, numpy.float) **or** list of str
The topic terms; the output format depends on the `formatted` parameter.
"""
if num_words is not None: # deprecated num_words is used
warnings.warn(
"The parameter `num_words` is deprecated, will be removed in 4.0.0, please use `topn` instead."
)
topn = num_words
if not self.m_status_up_to_date:
self.update_expectations()
betas = self.m_lambda + self.m_eta
hdp_formatter = HdpTopicFormatter(self.id2word, betas)
return hdp_formatter.show_topic(topic_id, topn, log, formatted)
def get_topics(self):
"""Get the term topic matrix learned during inference.
Returns
-------
np.ndarray
`num_topics` x `vocabulary_size` array of floats
"""
topics = self.m_lambda + self.m_eta
return topics / topics.sum(axis=1)[:, None]
def show_topics(self, num_topics=20, num_words=20, log=False, formatted=True):
"""Print the `num_words` most probable words for `num_topics` number of topics.
Parameters
----------
num_topics : int, optional
Number of topics for which most probable `num_words` words will be fetched, if -1 - print all topics.
num_words : int, optional
Number of most probable words to show from `num_topics` number of topics.
log : bool, optional
If True - log a message with level INFO on the logger object.
formatted : bool, optional
If True - get the topics as a list of strings, otherwise - get the topics as lists of (weight, word) pairs.
Returns
-------
list of (str, numpy.float) **or** list of str
Output format for topic terms depends on the value of `formatted` parameter.
"""
if not self.m_status_up_to_date:
self.update_expectations()
betas = self.m_lambda + self.m_eta
hdp_formatter = HdpTopicFormatter(self.id2word, betas)
return hdp_formatter.show_topics(num_topics, num_words, log, formatted)
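# For example (continuing the sketch above), the learned topics can be inspected with:
#
# >>> hdp.show_topics(num_topics=5, num_words=10) # formatted top words for five topics
# >>> hdp.show_topic(topic_id=0, topn=10) # (word, weight) pairs for a single topic
# >>> term_topic = hdp.get_topics() # ndarray of shape (num_topics, vocabulary_size)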
@deprecated("This method will be removed in 4.0.0, use `save` instead.")
def save_topics(self, doc_count=None):
"""Save discovered topics.
Warnings
--------
This method is deprecated, use :meth:`~gensim.models.hdpmodel.HdpModel.save` instead.
Parameters
----------
doc_count : int, optional
Number of documents processed so far; used to name the output file (`doc-<count>.topics`).
"""
if not self.outputdir:
logger.error("cannot store topics without having specified an output directory")
if doc_count is None:
fname = 'final'
else:
fname = 'doc-%i' % doc_count
fname = '%s/%s.topics' % (self.outputdir, fname)
logger.info("saving topics to %s", fname)
betas = self.m_lambda + self.m_eta
np.savetxt(fname, betas)
@deprecated("This method will be removed in 4.0.0, use `save` instead.")
def save_options(self):
"""Writes all the values of the attributes for the current model in "options.dat" file.
Warnings
--------
This method is deprecated, use :meth:`~gensim.models.hdpmodel.HdpModel.save` instead.
"""
if not self.outputdir:
logger.error("cannot store options without having specified an output directory")
return
fname = '%s/options.dat' % self.outputdir
with utils.open(fname, 'w') as fout: # text mode, since the values below are written as strings
fout.write('tau: %s\n' % str(self.m_tau - 1))
fout.write('chunksize: %s\n' % str(self.chunksize))
fout.write('var_converge: %s\n' % str(self.m_var_converge))
fout.write('D: %s\n' % str(self.m_D))
fout.write('K: %s\n' % str(self.m_K))
fout.write('T: %s\n' % str(self.m_T))
fout.write('W: %s\n' % str(self.m_W))
fout.write('alpha: %s\n' % str(self.m_alpha))
fout.write('kappa: %s\n' % str(self.m_kappa))
fout.write('eta: %s\n' % str(self.m_eta))
fout.write('gamma: %s\n' % str(self.m_gamma))
def hdp_to_lda(self):
"""Get corresponding alpha and beta values of a LDA almost equivalent to current HDP.
Returns
-------
(numpy.ndarray, numpy.ndarray)
Alpha and Beta arrays.
"""
# alpha
sticks = self.m_var_sticks[0] / (self.m_var_sticks[0] + self.m_var_sticks[1])
alpha = np.zeros(self.m_T)
left = 1.0
for i in range(0, self.m_T - 1):
alpha[i] = sticks[i] * left
left = left - alpha[i]
alpha[self.m_T - 1] = left
alpha *= self.m_alpha
# beta
beta = (self.m_lambda + self.m_eta) / (self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])
return alpha, beta
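# Sketch of what the conversion yields (shapes follow the code above; T is the top-level
# topic truncation and W the vocabulary size):
#
# >>> alpha, beta = hdp.hdp_to_lda()
# >>> alpha.shape # (T,) -- one Dirichlet prior weight per truncated topic
# >>> beta.shape # (T, W) -- row-normalized topic-word probabilities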
def suggested_lda_model(self):
"""Get a trained ldamodel object which is closest to the current hdp model.
Uses `num_topics=m_T`, so as to preserve the matrix shapes when alpha and beta are assigned.
Returns
-------
:class:`~gensim.models.ldamodel.LdaModel`
Closest corresponding LdaModel to current HdpModel.
"""
alpha, beta = self.hdp_to_lda()
ldam = ldamodel.LdaModel(
num_topics=self.m_T, alpha=alpha, id2word=self.id2word, random_state=self.random_state, dtype=np.float64
)
ldam.expElogbeta[:] = beta
return ldam
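# Sketch of converting to an LdaModel and querying it directly:
#
# >>> lda = hdp.suggested_lda_model()
# >>> lda.show_topics(num_topics=5, num_words=10)
# >>> lda[common_corpus[0]] # topic distribution via the (almost) equivalent LDA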
def evaluate_test_corpus(self, corpus):
"""Evaluates the model on test corpus.
Parameters
----------
corpus : iterable of list of (int, float)
Test corpus in BoW format.
Returns
-------
float
The value of total likelihood obtained by evaluating the model for all documents in the test corpus.
"""
logger.info('TEST: evaluating test corpus')
if self.lda_alpha is None or self.lda_beta is None:
self.lda_alpha, self.lda_beta = self.hdp_to_lda()
score = 0.0
total_words = 0
for i, doc in enumerate(corpus):
if len(doc) > 0:
doc_word_ids, doc_word_counts = zip(*doc)
likelihood, gamma = lda_e_step(doc_word_ids, doc_word_counts, self.lda_alpha, self.lda_beta)
theta = gamma / np.sum(gamma)
lda_betad = self.lda_beta[:, doc_word_ids]
log_predicts = np.log(np.dot(theta, lda_betad))
doc_score = sum(log_predicts) / len(doc)
logger.info('TEST: %6d %.5f', i, doc_score)
score += likelihood
total_words += sum(doc_word_counts)
logger.info(
"TEST: average score: %.5f, total score: %.5f, test docs: %d",
score / total_words, score, len(corpus)
)
return score
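# Sketch, assuming `test_corpus` is a held-out BoW corpus built with the same dictionary:
#
# >>> total_log_likelihood = hdp.evaluate_test_corpus(test_corpus)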
class HdpTopicFormatter:
"""Helper class for :class:`gensim.models.hdpmodel.HdpModel` to format the output of topics."""
(STYLE_GENSIM, STYLE_PRETTY) = (1, 2)
def __init__(self, dictionary=None, topic_data=None, topic_file=None, style=None):
"""Initialise the :class:`gensim.models.hdpmodel.HdpTopicFormatter` and store topic data in sorted order.
Parameters
----------
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`, optional
Dictionary for the input corpus.
topic_data : numpy.ndarray, optional
The term topic matrix.
topic_file : {file-like object, str, pathlib.Path}
File, filename, or generator to read. If the filename extension is .gz or .bz2, the file is first
decompressed. Note that generators should return byte strings for Python 3k.
style : int, optional
Formatting style, one of `STYLE_GENSIM` (topics as joined 'weight*word' strings) or `STYLE_PRETTY`
(topics as aligned word/weight lines). Defaults to `STYLE_GENSIM`.
Raises
------
ValueError
If `dictionary` is None, or if both `topic_data` and `topic_file` are None.
"""
if dictionary is None:
raise ValueError('no dictionary!')
if topic_data is not None:
topics = topic_data
elif topic_file is not None:
topics = np.loadtxt('%s' % topic_file)
else:
raise ValueError('no topic data!')
# sort topics
topics_sums = np.sum(topics, axis=1)
idx = matutils.argsort(topics_sums, reverse=True)
self.data = topics[idx]
self.dictionary = dictionary
if style is None:
style = self.STYLE_GENSIM
self.style = style
def print_topics(self, num_topics=10, num_words=10):
"""Give the most probable `num_words` words from `num_topics` topics.
Alias for :meth:`~gensim.models.hdpmodel.HdpTopicFormatter.show_topics`.
Parameters
----------
num_topics : int, optional
Top `num_topics` to be printed.
num_words : int, optional
Top `num_words` most probable words to be printed from each topic.
Returns
-------
list of (str, numpy.float) **or** list of str
Output format for `num_words` words from `num_topics` topics depends on the value of `self.style` attribute.
"""
return self.show_topics(num_topics, num_words, True)
def show_topics(self, num_topics=10, num_words=10, log=False, formatted=True):
"""Give the most probable `num_words` words from `num_topics` topics.
Parameters
----------
num_topics : int, optional
Top `num_topics` to be printed.
num_words : int, optional
Top `num_words` most probable words to be printed from each topic.
log : bool, optional
If True - log a message with level INFO on the logger object.
formatted : bool, optional
If True - get the topics as a list of strings, otherwise as lists of (word, weight) pairs.
Returns
-------
list of (int, list of (str, numpy.float) **or** list of str)
Output format for terms from `num_topics` topics depends on the value of `self.style` attribute.
"""
shown = []
# negative values (e.g. -1) mean "show all topics"
num_topics = len(self.data) if num_topics < 0 else min(num_topics, len(self.data))
for k in range(num_topics):
lambdak = self.data[k, :]
lambdak = lambdak / lambdak.sum()
temp = zip(lambdak, range(len(lambdak)))
temp = sorted(temp, key=lambda x: x[0], reverse=True)
topic_terms = self.show_topic_terms(temp, num_words)
if formatted:
topic = self.format_topic(k, topic_terms)
# assuming we only output formatted topics
if log:
logger.info(topic)
else:
topic = (k, topic_terms)
shown.append(topic)
return shown
def print_topic(self, topic_id, topn=None, num_words=None):
"""Print the `topn` most probable words from topic id `topic_id`.
Warnings
--------
The parameter `num_words` is deprecated, will be removed in 4.0.0, please use `topn` instead.
Parameters
----------
topic_id : int
Acts as a representative index for a particular topic.
topn : int, optional
Number of most probable words to show from given `topic_id`.
num_words : int, optional
DEPRECATED, USE `topn` INSTEAD.
Returns
-------
list of (str, numpy.float) **or** list of str
Output format for terms from a single topic depends on the value of `formatted` parameter.
"""
if num_words is not None: # deprecated num_words is used
warnings.warn(
"The parameter `num_words` is deprecated, will be removed in 4.0.0, please use `topn` instead."
)
topn = num_words
return self.show_topic(topic_id, topn, formatted=True)
def show_topic(self, topic_id, topn=20, log=False, formatted=False, num_words=None,):
"""Give the most probable `num_words` words for the id `topic_id`.
Warnings
--------
The parameter `num_words` is deprecated, will be removed in 4.0.0, please use `topn` instead.
Parameters
----------
topic_id : int
Acts as a representative index for a particular topic.
topn : int, optional
Number of most probable words to show from given `topic_id`.
log : bool, optional
If True logs a message with level INFO on the logger object, False otherwise.
formatted : bool, optional
If True return the topics as a list of strings, False as lists of
(word, weight) pairs.
num_words : int, optional
DEPRECATED, USE `topn` INSTEAD.
Returns
-------
list of (str, numpy.float) **or** list of str
Output format for terms from a single topic depends on the value of `self.style` attribute.
"""
if num_words is not None: # deprecated num_words is used
warnings.warn(
"The parameter `num_words` is deprecated, will be removed in 4.0.0, please use `topn` instead."
)
topn = num_words
lambdak = self.data[topic_id, :]
lambdak = lambdak / lambdak.sum()
temp = zip(lambdak, range(len(lambdak)))
temp = sorted(temp, key=lambda x: x[0], reverse=True)
topic_terms = self.show_topic_terms(temp, topn)
if formatted:
topic = self.format_topic(topic_id, topic_terms)
# assuming we only output formatted topics
if log:
logger.info(topic)
else:
topic = (topic_id, topic_terms)
# we only return the topic_terms
return topic[1]
def show_topic_terms(self, topic_data, num_words):
"""Give the topic terms along with their probabilities for a single topic data.
Parameters
----------
topic_data : list of (str, numpy.float)
Contains probabilities for each word id belonging to a single topic.
num_words : int
Number of words for which probabilities are to be extracted from the given single topic data.
Returns
-------
list of (str, numpy.float)
A sequence of topic terms and their probabilities.
"""
return [(self.dictionary[wid], weight) for (weight, wid) in topic_data[:num_words]]
def format_topic(self, topic_id, topic_terms):
"""Format the display for a single topic in two different ways.
Parameters
----------
topic_id : int
Acts as a representative index for a particular topic.
topic_terms : list of (str, numpy.float)
Contains the most probable words from a single topic.
Returns
-------
list of (str, numpy.float) **or** list of str
Output format for topic terms depends on the value of `self.style` attribute.
"""
if self.STYLE_GENSIM == self.style:
fmt = ' + '.join('%.3f*%s' % (weight, word) for (word, weight) in topic_terms)
else:
fmt = '\n'.join(' %20s %.8f' % (word, weight) for (word, weight) in topic_terms)
fmt = (topic_id, fmt)
return fmt
| 7,126 | keyedvectors.py | piskvorky_gensim/gensim/models/keyedvectors.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Gensim Contributors
# Copyright (C) 2018 RaRe Technologies s.r.o.
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""This module implements word vectors, and more generally sets of vectors keyed by lookup tokens/ints,
and various similarity look-ups.
Since trained word vectors are independent from the way they were trained (:class:`~gensim.models.word2vec.Word2Vec`,
:class:`~gensim.models.fasttext.FastText` etc), they can be represented by a standalone structure,
as implemented in this module.
The structure is called "KeyedVectors" and is essentially a mapping between *keys*
and *vectors*. Each vector is identified by its lookup key, most often a short string token, so this is usually
a mapping between {str => 1D numpy array}.
The key is, in the original motivating case, a word (so the mapping maps words to 1D vectors),
but for some models, the key can also correspond to a document, a graph node etc.
(Because some applications may maintain their own integral identifiers, compact and contiguous
starting at zero, this class also supports use of plain ints as keys – in that case using them as literal
pointers to the position of the desired vector in the underlying array, and saving the overhead of
a lookup map entry.)
Why use KeyedVectors instead of a full model?
=============================================
+---------------------------+--------------+------------+-------------------------------------------------------------+
| capability | KeyedVectors | full model | note |
+---------------------------+--------------+------------+-------------------------------------------------------------+
| continue training vectors | ❌ | ✅ | You need the full model to train or update vectors. |
+---------------------------+--------------+------------+-------------------------------------------------------------+
| smaller objects | ✅ | ❌ | KeyedVectors are smaller and need less RAM, because they |
| | | | don't need to store the model state that enables training. |
+---------------------------+--------------+------------+-------------------------------------------------------------+
| save/load from native | | | Vectors exported by the Facebook and Google tools |
| fasttext/word2vec format | ✅ | ❌ | do not support further training, but you can still load |
| | | | them into KeyedVectors. |
+---------------------------+--------------+------------+-------------------------------------------------------------+
| append new vectors | ✅ | ✅ | Add new-vector entries to the mapping dynamically. |
+---------------------------+--------------+------------+-------------------------------------------------------------+
| concurrency | ✅ | ✅ | Thread-safe, allows concurrent vector queries. |
+---------------------------+--------------+------------+-------------------------------------------------------------+
| shared RAM | ✅ | ✅ | Multiple processes can re-use the same data, keeping only |
| | | | a single copy in RAM using |
| | | | `mmap <https://en.wikipedia.org/wiki/Mmap>`_. |
+---------------------------+--------------+------------+-------------------------------------------------------------+
| fast load | ✅ | ✅ | Supports `mmap <https://en.wikipedia.org/wiki/Mmap>`_ |
| | | | to load data from disk instantaneously. |
+---------------------------+--------------+------------+-------------------------------------------------------------+
TL;DR: the main difference is that KeyedVectors do not support further training.
On the other hand, by shedding the internal data structures necessary for training, KeyedVectors offer a smaller RAM
footprint and a simpler interface.
How to obtain word vectors?
===========================
Train a full model, then access its `model.wv` property, which holds the standalone keyed vectors.
For example, using the Word2Vec algorithm to train the vectors
.. sourcecode:: pycon
>>> from gensim.test.utils import lee_corpus_list
>>> from gensim.models import Word2Vec
>>>
>>> model = Word2Vec(lee_corpus_list, vector_size=24, epochs=100)
>>> word_vectors = model.wv
Persist the word vectors to disk with
.. sourcecode:: pycon
>>> from gensim.models import KeyedVectors
>>>
>>> word_vectors.save('vectors.kv')
>>> reloaded_word_vectors = KeyedVectors.load('vectors.kv')
The vectors can also be instantiated from an existing file on disk
in the original Google's word2vec C format as a KeyedVectors instance
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> wv_from_text = KeyedVectors.load_word2vec_format(datapath('word2vec_pre_kv_c'), binary=False) # C text format
>>> wv_from_bin = KeyedVectors.load_word2vec_format(datapath("euclidean_vectors.bin"), binary=True) # C bin format
What can I do with word vectors?
================================
You can perform various syntactic/semantic NLP word tasks with the trained vectors.
Some of them are already built-in
.. sourcecode:: pycon
>>> import gensim.downloader as api
>>>
>>> word_vectors = api.load("glove-wiki-gigaword-100") # load pre-trained word-vectors from gensim-data
>>>
>>> # Check the "most similar words", using the default "cosine similarity" measure.
>>> result = word_vectors.most_similar(positive=['woman', 'king'], negative=['man'])
>>> most_similar_key, similarity = result[0] # look at the first match
>>> print(f"{most_similar_key}: {similarity:.4f}")
queen: 0.7699
>>>
>>> # Use a different similarity measure: "cosmul".
>>> result = word_vectors.most_similar_cosmul(positive=['woman', 'king'], negative=['man'])
>>> most_similar_key, similarity = result[0] # look at the first match
>>> print(f"{most_similar_key}: {similarity:.4f}")
queen: 0.8965
>>>
>>> print(word_vectors.doesnt_match("breakfast cereal dinner lunch".split()))
cereal
>>>
>>> similarity = word_vectors.similarity('woman', 'man')
>>> similarity > 0.8
True
>>>
>>> result = word_vectors.similar_by_word("cat")
>>> most_similar_key, similarity = result[0] # look at the first match
>>> print(f"{most_similar_key}: {similarity:.4f}")
dog: 0.8798
>>>
>>> sentence_obama = 'Obama speaks to the media in Illinois'.lower().split()
>>> sentence_president = 'The president greets the press in Chicago'.lower().split()
>>>
>>> similarity = word_vectors.wmdistance(sentence_obama, sentence_president)
>>> print(f"{similarity:.4f}")
3.4893
>>>
>>> distance = word_vectors.distance("media", "media")
>>> print(f"{distance:.1f}")
0.0
>>>
>>> similarity = word_vectors.n_similarity(['sushi', 'shop'], ['japanese', 'restaurant'])
>>> print(f"{similarity:.4f}")
0.7067
>>>
>>> vector = word_vectors['computer'] # numpy vector of a word
>>> vector.shape
(100,)
>>>
>>> vector = word_vectors.get_vector('office', norm=True)
>>> vector.shape
(100,)
Correlation with human opinion on word similarity
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> similarities = model.wv.evaluate_word_pairs(datapath('wordsim353.tsv'))
And on word analogies
.. sourcecode:: pycon
>>> analogy_scores = model.wv.evaluate_word_analogies(datapath('questions-words.txt'))
and so on.
"""
import logging
import sys
import itertools
import warnings
from numbers import Integral
from typing import Iterable
from numpy import (
dot, float32 as REAL, double, zeros, vstack, ndarray,
sum as np_sum, prod, argmax, dtype, ascontiguousarray, frombuffer,
)
import numpy as np
from scipy import stats
from scipy.spatial.distance import cdist
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from gensim.corpora.dictionary import Dictionary
from gensim.utils import deprecated
logger = logging.getLogger(__name__)
_KEY_TYPES = (str, int, np.integer)
_EXTENDED_KEY_TYPES = (str, int, np.integer, np.ndarray)
def _ensure_list(value):
"""Ensure that the specified value is wrapped in a list, for those supported cases
where we also accept a single key or vector."""
if value is None:
return []
if isinstance(value, _KEY_TYPES) or (isinstance(value, ndarray) and len(value.shape) == 1):
return [value]
if isinstance(value, ndarray) and len(value.shape) == 2:
return list(value)
return value
class KeyedVectors(utils.SaveLoad):
def __init__(self, vector_size, count=0, dtype=np.float32, mapfile_path=None):
"""Mapping between keys (such as words) and vectors for :class:`~gensim.models.Word2Vec`
and related models.
Used to perform operations on the vectors such as vector lookup, distance, similarity etc.
To support the needs of specific models and other downstream uses, you can also set
additional attributes via the :meth:`~gensim.models.keyedvectors.KeyedVectors.set_vecattr`
and :meth:`~gensim.models.keyedvectors.KeyedVectors.get_vecattr` methods.
Note that all such attributes under the same `attr` name must have compatible `numpy`
types, as the type and storage array for such attributes is established by the 1st time such
`attr` is set.
Parameters
----------
vector_size : int
Intended number of dimensions for all contained vectors.
count : int, optional
If provided, vectors will be pre-allocated for at least this many vectors. (Otherwise
they can be added later.)
dtype : type, optional
Vector dimensions will default to `np.float32` (AKA `REAL` in some Gensim code) unless
another type is provided here.
mapfile_path : string, optional
Currently unused.
"""
self.vector_size = vector_size
# pre-allocating `index_to_key` to full size helps avoid redundant re-allocations, esp for `expandos`
self.index_to_key = [None] * count # fka index2entity or index2word
self.next_index = 0 # pointer to where next new entry will land
self.key_to_index = {}
self.vectors = zeros((count, vector_size), dtype=dtype) # formerly known as syn0
self.norms = None
# "expandos" are extra attributes stored for each key: {attribute_name} => numpy array of values of
# this attribute, with one array value for each vector key.
# The same information used to be stored in a structure called Vocab in Gensim <4.0.0, but
# with different indexing: {vector key} => Vocab object containing all attributes for the given vector key.
#
# Don't modify expandos directly; call set_vecattr()/get_vecattr() instead.
self.expandos = {}
self.mapfile_path = mapfile_path
def __str__(self):
return f"{self.__class__.__name__}<vector_size={self.vector_size}, {len(self)} keys>"
def _load_specials(self, *args, **kwargs):
"""Handle special requirements of `.load()` protocol, usually up-converting older versions."""
super(KeyedVectors, self)._load_specials(*args, **kwargs)
if hasattr(self, 'doctags'):
self._upconvert_old_d2vkv()
# fixup rename/consolidation into index_to_key of older index2word, index2entity
if not hasattr(self, 'index_to_key'):
self.index_to_key = self.__dict__.pop('index2word', self.__dict__.pop('index2entity', None))
# fixup rename into vectors of older syn0
if not hasattr(self, 'vectors'):
self.vectors = self.__dict__.pop('syn0', None)
self.vector_size = self.vectors.shape[1]
# ensure at least a 'None' in 'norms' to force recalc
if not hasattr(self, 'norms'):
self.norms = None
# ensure at least an empty 'expandos'
if not hasattr(self, 'expandos'):
self.expandos = {}
# fixup rename of vocab into map
if 'key_to_index' not in self.__dict__:
self._upconvert_old_vocab()
# ensure older instances have next_index
if not hasattr(self, 'next_index'):
self.next_index = len(self)
def _upconvert_old_vocab(self):
"""Convert a loaded, pre-gensim-4.0.0 version instance that had a 'vocab' dict of data objects."""
old_vocab = self.__dict__.pop('vocab', None)
self.key_to_index = {}
for k in old_vocab.keys():
old_v = old_vocab[k]
self.key_to_index[k] = old_v.index
for attr in old_v.__dict__.keys():
self.set_vecattr(old_v.index, attr, old_v.__dict__[attr])
# special case to enforce required type on `sample_int`
if 'sample_int' in self.expandos:
self.expandos['sample_int'] = self.expandos['sample_int'].astype(np.uint32)
def allocate_vecattrs(self, attrs=None, types=None):
"""Ensure arrays for given per-vector extra-attribute names & types exist, at right size.
The length of the index_to_key list is the canonical 'intended size' of the KeyedVectors,
even if other properties (such as the vectors array) haven't yet been allocated or expanded.
So this allocation targets that size.
"""
# with no arguments, adjust lengths of existing vecattr arrays to match length of index_to_key
if attrs is None:
attrs = list(self.expandos.keys())
types = [self.expandos[attr].dtype for attr in attrs]
target_size = len(self.index_to_key)
for attr, t in zip(attrs, types):
if t is int:
t = np.int64 # ensure 'int' type 64-bit (numpy-on-Windows https://github.com/numpy/numpy/issues/9464)
if t is str:
# Avoid typing numpy arrays as strings, because numpy would use its fixed-width `dtype=np.str_`
# dtype, which uses too much memory!
t = object
if attr not in self.expandos:
self.expandos[attr] = np.zeros(target_size, dtype=t)
continue
prev_expando = self.expandos[attr]
if not np.issubdtype(t, prev_expando.dtype):
raise TypeError(
f"Can't allocate type {t} for attribute {attr}, "
f"conflicts with its existing type {prev_expando.dtype}"
)
if len(prev_expando) == target_size:
continue # no resizing necessary
prev_count = len(prev_expando)
self.expandos[attr] = np.zeros(target_size, dtype=prev_expando.dtype)
self.expandos[attr][: min(prev_count, target_size), ] = prev_expando[: min(prev_count, target_size), ]
def set_vecattr(self, key, attr, val):
"""Set attribute associated with the given key to value.
Parameters
----------
key : str
Store the attribute for this vector key.
attr : str
Name of the additional attribute to store for the given key.
val : object
Value of the additional attribute to store for the given key.
Returns
-------
None
"""
self.allocate_vecattrs(attrs=[attr], types=[type(val)])
index = self.get_index(key)
self.expandos[attr][index] = val
def get_vecattr(self, key, attr):
"""Get attribute value associated with given key.
Parameters
----------
key : str
Vector key for which to fetch the attribute value.
attr : str
Name of the additional attribute to fetch for the given key.
Returns
-------
object
Value of the additional attribute fetched for the given key.
"""
index = self.get_index(key)
return self.expandos[attr][index]
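# Sketch of the per-key attribute machinery, assuming a populated KeyedVectors instance `kv`
# that contains the key 'word' (e.g. built via add_vectors further below):
#
# >>> kv.set_vecattr('word', 'count', 120) # first use allocates an int array for 'count'
# >>> kv.get_vecattr('word', 'count')
# 120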
def resize_vectors(self, seed=0):
"""Make underlying vectors match index_to_key size; random-initialize any new rows."""
target_shape = (len(self.index_to_key), self.vector_size)
self.vectors = prep_vectors(target_shape, prior_vectors=self.vectors, seed=seed)
self.allocate_vecattrs()
self.norms = None
def __len__(self):
return len(self.index_to_key)
def __getitem__(self, key_or_keys):
"""Get vector representation of `key_or_keys`.
Parameters
----------
key_or_keys : {str, list of str, int, list of int}
Requested key or list-of-keys.
Returns
-------
numpy.ndarray
Vector representation for `key_or_keys` (1D if `key_or_keys` is single key, otherwise - 2D).
"""
if isinstance(key_or_keys, _KEY_TYPES):
return self.get_vector(key_or_keys)
return vstack([self.get_vector(key) for key in key_or_keys])
def get_index(self, key, default=None):
"""Return the integer index (slot/position) where the given key's vector is stored in the
backing vectors array.
"""
val = self.key_to_index.get(key, -1)
if val >= 0:
return val
elif isinstance(key, (int, np.integer)) and 0 <= key < len(self.index_to_key):
return key
elif default is not None:
return default
else:
raise KeyError(f"Key '{key}' not present")
def get_vector(self, key, norm=False):
"""Get the key's vector, as a 1D numpy array.
Parameters
----------
key : str
Key for vector to return.
norm : bool, optional
If True, the resulting vector will be L2-normalized (unit Euclidean length).
Returns
-------
numpy.ndarray
Vector for the specified key.
Raises
------
KeyError
If the given key doesn't exist.
"""
index = self.get_index(key)
if norm:
self.fill_norms()
result = self.vectors[index] / self.norms[index]
else:
result = self.vectors[index]
result.setflags(write=False) # disallow direct tampering that would invalidate `norms` etc
return result
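# For example, with any populated KeyedVectors instance `kv`:
#
# >>> raw = kv.get_vector('word') # stored vector (returned as a read-only view)
# >>> unit = kv.get_vector('word', norm=True) # same vector scaled to unit L2 length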
@deprecated("Use get_vector instead")
def word_vec(self, *args, **kwargs):
"""Compatibility alias for get_vector(); must exist so subclass calls reach subclass get_vector()."""
return self.get_vector(*args, **kwargs)
def get_mean_vector(self, keys, weights=None, pre_normalize=True, post_normalize=False, ignore_missing=True):
"""Get the mean vector for a given list of keys.
Parameters
----------
keys : list of (str or int or ndarray)
Keys specified by string or int ids or numpy array.
weights : list of float or numpy.ndarray, optional
1D array of same size of `keys` specifying the weight for each key.
pre_normalize : bool, optional
Flag indicating whether to L2-normalize each key's vector before taking the mean.
If False, the individual vectors are used as stored, without normalization.
post_normalize : bool, optional
Flag indicating whether to normalize the final mean vector.
If True, the returned mean vector will have unit length.
ignore_missing : bool, optional
If False, will raise error if a key doesn't exist in vocabulary.
Returns
-------
numpy.ndarray
Mean vector for the list of keys.
Raises
------
ValueError
If the size of the list of `keys` and `weights` doesn't match.
KeyError
If any of the keys doesn't exist in the vocabulary and `ignore_missing` is False.
"""
if len(keys) == 0:
raise ValueError("cannot compute mean with no input")
if isinstance(weights, list):
weights = np.array(weights)
if weights is None:
weights = np.ones(len(keys))
if len(keys) != weights.shape[0]: # weights is a 1-D numpy array
raise ValueError(
"keys and weights array must have same number of elements"
)
mean = np.zeros(self.vector_size, self.vectors.dtype)
total_weight = 0
for idx, key in enumerate(keys):
if isinstance(key, ndarray):
mean += weights[idx] * key
total_weight += abs(weights[idx])
elif self.__contains__(key):
vec = self.get_vector(key, norm=pre_normalize)
mean += weights[idx] * vec
total_weight += abs(weights[idx])
elif not ignore_missing:
raise KeyError(f"Key '{key}' not present in vocabulary")
if total_weight > 0:
mean = mean / total_weight
if post_normalize:
mean = matutils.unitvec(mean).astype(REAL)
return mean
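# Sketch: averaging a few keys ('king' and 'queen' are illustrative keys assumed to be present;
# unknown keys are skipped unless ignore_missing=False):
#
# >>> centroid = kv.get_mean_vector(['king', 'queen'], post_normalize=True)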
def add_vector(self, key, vector):
"""Add one new vector at the given key, into existing slot if available.
Warning: using this repeatedly is inefficient, requiring a full reallocation & copy,
if this instance hasn't been preallocated to be ready for such incremental additions.
Parameters
----------
key: str
Key identifier of the added vector.
vector: numpy.ndarray
1D numpy array with the vector values.
Returns
-------
int
Index of the newly added vector, so that ``self.vectors[result] == vector`` and
``self.index_to_key[result] == key``.
"""
target_index = self.next_index
if target_index >= len(self) or self.index_to_key[target_index] is not None:
# must append at end by expanding existing structures
target_index = len(self)
warnings.warn(
"Adding single vectors to a KeyedVectors which grows by one each time can be costly. "
"Consider adding in batches or preallocating to the required size.",
UserWarning)
self.add_vectors([key], [vector])
self.allocate_vecattrs() # grow any adjunct arrays
self.next_index = target_index + 1
else:
# can add to existing slot
self.index_to_key[target_index] = key
self.key_to_index[key] = target_index
self.vectors[target_index] = vector
self.next_index += 1
return target_index
def add_vectors(self, keys, weights, extras=None, replace=False):
"""Append keys and their vectors in a manual way.
If some key is already in the vocabulary, the old vector is kept unless `replace` flag is True.
Parameters
----------
keys : list of (str or int)
Keys specified by string or int ids.
weights: list of numpy.ndarray or numpy.ndarray
List of 1D np.array vectors or a 2D np.array of vectors.
replace: bool, optional
Flag indicating whether to replace vectors for keys which already exist in the map;
if True - replace vectors, otherwise - keep old vectors.
"""
if isinstance(keys, _KEY_TYPES):
keys = [keys]
weights = np.array(weights).reshape(1, -1)
elif isinstance(weights, list):
weights = np.array(weights)
if extras is None:
extras = {}
# TODO? warn if not matching extras already present?
# initially allocate extras, check type compatibility
self.allocate_vecattrs(extras.keys(), [extras[k].dtype for k in extras.keys()])
in_vocab_mask = np.zeros(len(keys), dtype=bool)
for idx, key in enumerate(keys):
if key in self.key_to_index:
in_vocab_mask[idx] = True
# add new entities to the vocab
for idx in np.nonzero(~in_vocab_mask)[0]:
key = keys[idx]
self.key_to_index[key] = len(self.index_to_key)
self.index_to_key.append(key)
# add vectors, extras for new entities
self.vectors = vstack((self.vectors, weights[~in_vocab_mask].astype(self.vectors.dtype)))
for attr, extra in extras.items(): # iterate over (attribute name, values array) pairs
self.expandos[attr] = np.vstack((self.expandos[attr], extra[~in_vocab_mask]))
# change vectors, extras for in_vocab entities if `replace` flag is specified
if replace:
in_vocab_idxs = [self.get_index(keys[idx]) for idx in np.nonzero(in_vocab_mask)[0]]
self.vectors[in_vocab_idxs] = weights[in_vocab_mask]
for attr, extra in extras.items():
self.expandos[attr][in_vocab_idxs] = extra[in_vocab_mask]
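# Sketch of building a small KeyedVectors from scratch with batch addition
# (the keys and vectors below are made up for illustration):
#
# >>> import numpy as np
# >>> from gensim.models import KeyedVectors
# >>> kv = KeyedVectors(vector_size=2)
# >>> kv.add_vectors(['hot', 'cold'], np.array([[1.0, 0.0], [0.0, 1.0]], dtype=np.float32))
# >>> kv['hot']
# array([1., 0.], dtype=float32)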
def __setitem__(self, keys, weights):
"""Add keys and theirs vectors in a manual way.
If some key is already in the vocabulary, old vector is replaced with the new one.
This method is an alias for :meth:`~gensim.models.keyedvectors.KeyedVectors.add_vectors`
with `replace=True`.
Parameters
----------
keys : {str, int, list of (str or int)}
keys specified by their string or int ids.
weights: list of numpy.ndarray or numpy.ndarray
List of 1D np.array vectors or 2D np.array of vectors.
"""
if not isinstance(keys, list):
keys = [keys]
weights = weights.reshape(1, -1)
self.add_vectors(keys, weights, replace=True)
def has_index_for(self, key):
"""Can this model return a single index for this key?
Subclasses that synthesize vectors for out-of-vocabulary words (like
:class:`~gensim.models.fasttext.FastText`) may respond True for a
simple `word in wv` (`__contains__()`) check but False for this
more-specific check.
"""
return self.get_index(key, -1) >= 0
def __contains__(self, key):
return self.has_index_for(key)
def most_similar_to_given(self, key1, keys_list):
"""Get the `key` from `keys_list` most similar to `key1`."""
return keys_list[argmax([self.similarity(key1, key) for key in keys_list])]
def closer_than(self, key1, key2):
"""Get all keys that are closer to `key1` than `key2` is to `key1`."""
all_distances = self.distances(key1)
e1_index = self.get_index(key1)
e2_index = self.get_index(key2)
closer_node_indices = np.where(all_distances < all_distances[e2_index])[0]
return [self.index_to_key[index] for index in closer_node_indices if index != e1_index]
@deprecated("Use closer_than instead")
def words_closer_than(self, word1, word2):
return self.closer_than(word1, word2)
def rank(self, key1, key2):
"""Rank of the distance of `key2` from `key1`, in relation to distances of all keys from `key1`."""
return len(self.closer_than(key1, key2)) + 1
@property
def vectors_norm(self):
raise AttributeError(
"The `.vectors_norm` attribute is computed dynamically since Gensim 4.0.0. "
"Use `.get_normed_vectors()` instead.\n"
"See https://github.com/RaRe-Technologies/gensim/wiki/Migrating-from-Gensim-3.x-to-4"
)
@vectors_norm.setter
def vectors_norm(self, _):
pass # ignored but must remain for backward serialization compatibility
def get_normed_vectors(self):
"""Get all embedding vectors normalized to unit L2 length (euclidean), as a 2D numpy array.
To see which key corresponds to which vector = which array row, refer
to the :attr:`~gensim.models.keyedvectors.KeyedVectors.index_to_key` attribute.
Returns
-------
numpy.ndarray:
2D numpy array of shape ``(number_of_keys, embedding dimensionality)``, L2-normalized
along the rows (key vectors).
"""
self.fill_norms()
return self.vectors / self.norms[..., np.newaxis]
def fill_norms(self, force=False):
"""
Ensure per-vector norms are available.
Any code which modifies vectors should ensure the accompanying norms are
either recalculated or 'None', to trigger a full recalculation later on-request.
"""
if self.norms is None or force:
self.norms = np.linalg.norm(self.vectors, axis=1)
@property
def index2entity(self):
raise AttributeError(
"The index2entity attribute has been replaced by index_to_key since Gensim 4.0.0.\n"
"See https://github.com/RaRe-Technologies/gensim/wiki/Migrating-from-Gensim-3.x-to-4"
)
@index2entity.setter
def index2entity(self, value):
self.index_to_key = value # must remain for backward serialization compatibility
@property
def index2word(self):
raise AttributeError(
"The index2word attribute has been replaced by index_to_key since Gensim 4.0.0.\n"
"See https://github.com/RaRe-Technologies/gensim/wiki/Migrating-from-Gensim-3.x-to-4"
)
@index2word.setter
def index2word(self, value):
self.index_to_key = value # must remain for backward serialization compatibility
@property
def vocab(self):
raise AttributeError(
"The vocab attribute was removed from KeyedVector in Gensim 4.0.0.\n"
"Use KeyedVector's .key_to_index dict, .index_to_key list, and methods "
".get_vecattr(key, attr) and .set_vecattr(key, attr, new_val) instead.\n"
"See https://github.com/RaRe-Technologies/gensim/wiki/Migrating-from-Gensim-3.x-to-4"
)
@vocab.setter
def vocab(self, value):
self.vocab() # trigger the AttributeError raised by the property above
def sort_by_descending_frequency(self):
"""Sort the vocabulary so the most frequent words have the lowest indexes."""
if not len(self):
return # noop if empty
count_sorted_indexes = np.argsort(self.expandos['count'])[::-1]
self.index_to_key = [self.index_to_key[idx] for idx in count_sorted_indexes]
self.allocate_vecattrs()
for k in self.expandos:
# Use numpy's "fancy indexing" to permutate the entire array in one step.
self.expandos[k] = self.expandos[k][count_sorted_indexes]
if len(self.vectors):
logger.warning("sorting after vectors have been allocated is expensive & error-prone")
self.vectors = self.vectors[count_sorted_indexes]
self.key_to_index = {word: i for i, word in enumerate(self.index_to_key)}
def save(self, *args, **kwargs):
"""Save KeyedVectors to a file.
Parameters
----------
fname_or_handle : str
Path to the output file.
See Also
--------
:meth:`~gensim.models.keyedvectors.KeyedVectors.load`
Load a previously saved model.
"""
super(KeyedVectors, self).save(*args, **kwargs)
def most_similar(
self, positive=None, negative=None, topn=10, clip_start=0, clip_end=None,
restrict_vocab=None, indexer=None,
):
"""Find the top-N most similar keys.
Positive keys contribute positively towards the similarity, negative keys negatively.
This method computes cosine similarity between a simple mean of the projection
weight vectors of the given keys and the vectors for each key in the model.
The method corresponds to the `word-analogy` and `distance` scripts in the original
word2vec implementation.
Parameters
----------
positive : list of (str or int or ndarray) or list of ((str,float) or (int,float) or (ndarray,float)), optional
List of keys that contribute positively. If tuple, second element specifies the weight (default `1.0`)
negative : list of (str or int or ndarray) or list of ((str,float) or (int,float) or (ndarray,float)), optional
List of keys that contribute negatively. If tuple, second element specifies the weight (default `-1.0`)
topn : int or None, optional
Number of top-N similar keys to return, when `topn` is int. When `topn` is None,
then similarities for all keys are returned.
clip_start : int
Start clipping index.
clip_end : int
End clipping index.
restrict_vocab : int, optional
Optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 key vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.) If
specified, overrides any values of ``clip_start`` or ``clip_end``.
Returns
-------
list of (str, float) or numpy.array
When `topn` is int, a sequence of (key, similarity) is returned.
When `topn` is None, then similarities for all keys are returned as a
one-dimensional numpy array with the size of the vocabulary.
"""
if isinstance(topn, Integral) and topn < 1:
return []
# allow passing a single string-key or vector for the positive/negative arguments
positive = _ensure_list(positive)
negative = _ensure_list(negative)
self.fill_norms()
clip_end = clip_end or len(self.vectors)
if restrict_vocab:
clip_start = 0
clip_end = restrict_vocab
# add weights for each key, if not already present; default to 1.0 for positive and -1.0 for negative keys
keys = []
weight = np.concatenate((np.ones(len(positive)), -1.0 * np.ones(len(negative))))
for idx, item in enumerate(positive + negative):
if isinstance(item, _EXTENDED_KEY_TYPES):
keys.append(item)
else:
keys.append(item[0])
weight[idx] = item[1]
# compute the weighted average of all keys
mean = self.get_mean_vector(keys, weight, pre_normalize=True, post_normalize=True, ignore_missing=False)
all_keys = [
self.get_index(key) for key in keys if isinstance(key, _KEY_TYPES) and self.has_index_for(key)
]
if indexer is not None and isinstance(topn, int):
return indexer.most_similar(mean, topn)
dists = dot(self.vectors[clip_start:clip_end], mean) / self.norms[clip_start:clip_end]
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_keys), reverse=True)
# ignore (don't return) keys from the input
result = [
(self.index_to_key[sim + clip_start], float(dists[sim]))
for sim in best if (sim + clip_start) not in all_keys
]
return result[:topn]
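# A couple of call shapes for most_similar ('king', 'woman', 'man' are illustrative keys assumed
# to be in the vocabulary; the module docstring above shows full examples):
#
# >>> kv.most_similar('king', topn=5) # single key shorthand
# >>> kv.most_similar(positive=[('king', 1.0), ('woman', 0.8)], negative=['man']) # weighted keys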
def similar_by_word(self, word, topn=10, restrict_vocab=None):
"""Compatibility alias for similar_by_key()."""
return self.similar_by_key(word, topn, restrict_vocab)
def similar_by_key(self, key, topn=10, restrict_vocab=None):
"""Find the top-N most similar keys.
Parameters
----------
key : str
Key
topn : int or None, optional
Number of top-N similar keys to return. If topn is None, similar_by_key returns
the vector of similarity scores.
restrict_vocab : int, optional
Optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 key vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Returns
-------
list of (str, float) or numpy.array
When `topn` is int, a sequence of (key, similarity) is returned.
When `topn` is None, then similarities for all keys are returned as a
one-dimensional numpy array with the size of the vocabulary.
"""
return self.most_similar(positive=[key], topn=topn, restrict_vocab=restrict_vocab)
def similar_by_vector(self, vector, topn=10, restrict_vocab=None):
"""Find the top-N most similar keys by vector.
Parameters
----------
vector : numpy.array
Vector from which similarities are to be computed.
topn : int or None, optional
Number of top-N similar keys to return, when `topn` is int. When `topn` is None,
then similarities for all keys are returned.
restrict_vocab : int, optional
Optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 key vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Returns
-------
list of (str, float) or numpy.array
When `topn` is int, a sequence of (key, similarity) is returned.
When `topn` is None, then similarities for all keys are returned as a
one-dimensional numpy array with the size of the vocabulary.
"""
return self.most_similar(positive=[vector], topn=topn, restrict_vocab=restrict_vocab)
def wmdistance(self, document1, document2, norm=True):
"""Compute the Word Mover's Distance between two documents.
When using this code, please consider citing the following papers:
* `Rémi Flamary et al. "POT: Python Optimal Transport"
<https://jmlr.org/papers/v22/20-451.html>`_
* `Matt Kusner et al. "From Word Embeddings To Document Distances"
<http://proceedings.mlr.press/v37/kusnerb15.pdf>`_.
Parameters
----------
document1 : list of str
Input document.
document2 : list of str
Input document.
norm : boolean
Normalize all word vectors to unit length before computing the distance?
Defaults to True.
Returns
-------
float
Word Mover's distance between `document1` and `document2`.
Warnings
--------
This method only works if `POT <https://pypi.org/project/POT/>`_ is installed.
If one of the documents has no words that exist in the vocabulary, `float('inf')` (i.e. infinity)
will be returned.
Raises
------
ImportError
If `POT <https://pypi.org/project/POT/>`_ isn't installed.
"""
# If POT isn't installed, the import below raises ImportError.
from ot import emd2
# Remove out-of-vocabulary words.
len_pre_oov1 = len(document1)
len_pre_oov2 = len(document2)
document1 = [token for token in document1 if token in self]
document2 = [token for token in document2 if token in self]
diff1 = len_pre_oov1 - len(document1)
diff2 = len_pre_oov2 - len(document2)
if diff1 > 0 or diff2 > 0:
logger.info('Removed %d and %d OOV words from document 1 and 2 (respectively).', diff1, diff2)
if not document1 or not document2:
logger.warning("At least one of the documents had no words that were in the vocabulary.")
return float('inf')
dictionary = Dictionary(documents=[document1, document2])
vocab_len = len(dictionary)
if vocab_len == 1:
# Both documents are composed of a single unique token => zero distance.
return 0.0
doclist1 = list(set(document1))
doclist2 = list(set(document2))
v1 = np.array([self.get_vector(token, norm=norm) for token in doclist1])
v2 = np.array([self.get_vector(token, norm=norm) for token in doclist2])
doc1_indices = dictionary.doc2idx(doclist1)
doc2_indices = dictionary.doc2idx(doclist2)
# Compute distance matrix.
distance_matrix = zeros((vocab_len, vocab_len), dtype=double)
distance_matrix[np.ix_(doc1_indices, doc2_indices)] = cdist(v1, v2)
if abs(np_sum(distance_matrix)) < 1e-8:
# `emd` gets stuck if the distance matrix contains only zeros.
logger.info('The distance matrix is all zeros. Aborting (returning inf).')
return float('inf')
def nbow(document):
d = zeros(vocab_len, dtype=double)
nbow = dictionary.doc2bow(document) # Word frequencies.
doc_len = len(document)
for idx, freq in nbow:
d[idx] = freq / float(doc_len) # Normalized word frequencies.
return d
# Compute nBOW representation of documents. This is what POT expects on input.
d1 = nbow(document1)
d2 = nbow(document2)
# Compute WMD.
return emd2(d1, d2, distance_matrix)
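# Sketch (requires the optional POT package; tokens are assumed to be in the vocabulary):
#
# >>> d = kv.wmdistance('obama speaks in illinois'.split(), 'the president greets the press'.split())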
def most_similar_cosmul(
self, positive=None, negative=None, topn=10, restrict_vocab=None
):
"""Find the top-N most similar words, using the multiplicative combination objective,
proposed by `Omer Levy and Yoav Goldberg "Linguistic Regularities in Sparse and Explicit Word Representations"
<http://www.aclweb.org/anthology/W14-1618>`_. Positive words still contribute positively towards the similarity,
negative words negatively, but with less susceptibility to one large distance dominating the calculation.
In the common analogy-solving case, of two positive and one negative examples,
this method is equivalent to the "3CosMul" objective (equation (4)) of Levy and Goldberg.
Additional positive or negative examples contribute to the numerator or denominator,
respectively - a potentially sensible but untested extension of the method.
With a single positive example, rankings will be the same as in the default
:meth:`~gensim.models.keyedvectors.KeyedVectors.most_similar`.
Allows calls like most_similar_cosmul('dog', 'cat'), as a shorthand for
most_similar_cosmul(['dog'], ['cat']) where 'dog' is positive and 'cat' negative
Parameters
----------
positive : list of str, optional
List of words that contribute positively.
negative : list of str, optional
List of words that contribute negatively.
topn : int or None, optional
Number of top-N similar words to return, when `topn` is int. When `topn` is None,
then similarities for all words are returned.
restrict_vocab : int or None, optional
Optional integer which limits the range of vectors which are searched for most-similar values.
For example, restrict_vocab=10000 would only check the first 10000 word vectors in the vocabulary order.
This may be meaningful if vocabulary is sorted by descending frequency.
Returns
-------
list of (str, float) or numpy.array
When `topn` is int, a sequence of (word, similarity) is returned.
When `topn` is None, then similarities for all words are returned as a
one-dimensional numpy array with the size of the vocabulary.
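Examples
--------
A minimal sketch using a toy model trained on `common_texts`; neighbours returned by such a
tiny model carry no real meaning:
>>> from gensim.test.utils import common_texts
>>> from gensim.models import Word2Vec
>>> wv = Word2Vec(common_texts, vector_size=20, min_count=1).wv
>>> result = wv.most_similar_cosmul(positive=['human', 'interface'], negative=['trees'], topn=3)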
"""
# TODO: Update to better match & share code with most_similar()
if isinstance(topn, Integral) and topn < 1:
return []
# allow passing a single string-key or vector for the positive/negative arguments
positive = _ensure_list(positive)
negative = _ensure_list(negative)
self.init_sims()
if isinstance(positive, str):
# allow calls like most_similar_cosmul('dog'), as a shorthand for most_similar_cosmul(['dog'])
positive = [positive]
if isinstance(negative, str):
negative = [negative]
all_words = {
self.get_index(word) for word in positive + negative
if not isinstance(word, ndarray) and word in self.key_to_index
}
positive = [
self.get_vector(word, norm=True) if isinstance(word, str) else word
for word in positive
]
negative = [
self.get_vector(word, norm=True) if isinstance(word, str) else word
for word in negative
]
if not positive:
raise ValueError("cannot compute similarity with no input")
# equation (4) of Levy & Goldberg "Linguistic Regularities...",
# with distances shifted to [0,1] per footnote (7)
pos_dists = [((1 + dot(self.vectors, term) / self.norms) / 2) for term in positive]
neg_dists = [((1 + dot(self.vectors, term) / self.norms) / 2) for term in negative]
dists = prod(pos_dists, axis=0) / (prod(neg_dists, axis=0) + 0.000001)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
# ignore (don't return) words from the input
result = [(self.index_to_key[sim], float(dists[sim])) for sim in best if sim not in all_words]
return result[:topn]
def rank_by_centrality(self, words, use_norm=True):
"""Rank the given words by similarity to the centroid of all the words.
Parameters
----------
words : list of str
List of keys.
use_norm : bool, optional
Whether to calculate centroid using unit-normed vectors; default True.
Returns
-------
list of (float, str)
Ranked list of (similarity, key), most-similar to the centroid first.
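Examples
--------
A minimal sketch using a toy model trained on `common_texts`:
>>> from gensim.test.utils import common_texts
>>> from gensim.models import Word2Vec
>>> wv = Word2Vec(common_texts, vector_size=20, min_count=1).wv
>>> ranked = wv.rank_by_centrality(['human', 'user', 'interface', 'system'])  # [(similarity, key), ...]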
"""
self.fill_norms()
used_words = [word for word in words if word in self]
if len(used_words) != len(words):
ignored_words = set(words) - set(used_words)
logger.warning("vectors for words %s are not present in the model, ignoring these words", ignored_words)
if not used_words:
raise ValueError("cannot select a word from an empty list")
vectors = vstack([self.get_vector(word, norm=use_norm) for word in used_words]).astype(REAL)
mean = self.get_mean_vector(vectors, post_normalize=True)
dists = dot(vectors, mean)
return sorted(zip(dists, used_words), reverse=True)
def doesnt_match(self, words):
"""Which key from the given list doesn't go with the others?
Parameters
----------
words : list of str
List of keys.
Returns
-------
str
The key furthest away from the mean of all keys.
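Examples
--------
A minimal sketch using a toy model trained on `common_texts` (too small to give a reliable answer):
>>> from gensim.test.utils import common_texts
>>> from gensim.models import Word2Vec
>>> wv = Word2Vec(common_texts, vector_size=20, min_count=1).wv
>>> odd_one_out = wv.doesnt_match(['human', 'user', 'interface', 'trees'])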
"""
return self.rank_by_centrality(words)[-1][1]
@staticmethod
def cosine_similarities(vector_1, vectors_all):
"""Compute cosine similarities between one vector and a set of other vectors.
Parameters
----------
vector_1 : numpy.ndarray
Vector from which similarities are to be computed, expected shape (dim,).
vectors_all : numpy.ndarray
For each row in vectors_all, the similarity to vector_1 is computed, expected shape (num_vectors, dim).
Returns
-------
numpy.ndarray
Contains the cosine similarity between `vector_1` and each row in `vectors_all`, shape (num_vectors,).
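Examples
--------
A minimal sketch with hand-made vectors; since this is a static method, no trained model is needed:
>>> import numpy as np
>>> from gensim.models import KeyedVectors
>>> v = np.array([1.0, 0.0])
>>> others = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0]])
>>> sims = KeyedVectors.cosine_similarities(v, others)  # approximately [1.0, 0.0, -1.0]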
"""
norm = np.linalg.norm(vector_1)
all_norms = np.linalg.norm(vectors_all, axis=1)
dot_products = dot(vectors_all, vector_1)
similarities = dot_products / (norm * all_norms)
return similarities
def distances(self, word_or_vector, other_words=()):
"""Compute cosine distances from given word or vector to all words in `other_words`.
If `other_words` is empty, return distance between `word_or_vector` and all words in vocab.
Parameters
----------
word_or_vector : {str, numpy.ndarray}
Word or vector from which distances are to be computed.
other_words : iterable of str
For each word in `other_words` distance from `word_or_vector` is computed.
If None or empty, distance of `word_or_vector` from all words in vocab is computed (including itself).
Returns
-------
numpy.array
Array containing distances to all words in `other_words` from input `word_or_vector`.
Raises
------
KeyError
If either `word_or_vector` or any word in `other_words` is absent from vocab.
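Examples
--------
A minimal sketch using a toy model trained on `common_texts`:
>>> from gensim.test.utils import common_texts
>>> from gensim.models import Word2Vec
>>> wv = Word2Vec(common_texts, vector_size=20, min_count=1).wv
>>> dists = wv.distances('human', ['interface', 'computer', 'user'])  # array of 3 cosine distances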
"""
if isinstance(word_or_vector, _KEY_TYPES):
input_vector = self.get_vector(word_or_vector)
else:
input_vector = word_or_vector
if not other_words:
other_vectors = self.vectors
else:
other_indices = [self.get_index(word) for word in other_words]
other_vectors = self.vectors[other_indices]
return 1 - self.cosine_similarities(input_vector, other_vectors)
def distance(self, w1, w2):
"""Compute cosine distance between two keys.
Calculate 1 - :meth:`~gensim.models.keyedvectors.KeyedVectors.similarity`.
Parameters
----------
w1 : str
Input key.
w2 : str
Input key.
Returns
-------
float
Distance between `w1` and `w2`.
"""
return 1 - self.similarity(w1, w2)
def similarity(self, w1, w2):
"""Compute cosine similarity between two keys.
Parameters
----------
w1 : str
Input key.
w2 : str
Input key.
Returns
-------
float
Cosine similarity between `w1` and `w2`.
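Examples
--------
A minimal sketch using a toy model trained on `common_texts`; the exact value depends on the
(random) initialization of such a tiny model:
>>> from gensim.test.utils import common_texts
>>> from gensim.models import Word2Vec
>>> wv = Word2Vec(common_texts, vector_size=20, min_count=1).wv
>>> sim = wv.similarity('computer', 'human')  # a plain float in [-1.0, 1.0]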
"""
return dot(matutils.unitvec(self[w1]), matutils.unitvec(self[w2]))
def n_similarity(self, ws1, ws2):
"""Compute cosine similarity between two sets of keys.
Parameters
----------
ws1 : list of str
Sequence of keys.
ws2 : list of str
Sequence of keys.
Returns
-------
float
Similarity between `ws1` and `ws2`, computed as the cosine between their mean vectors.
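Examples
--------
A minimal sketch using a toy model trained on `common_texts`:
>>> from gensim.test.utils import common_texts
>>> from gensim.models import Word2Vec
>>> wv = Word2Vec(common_texts, vector_size=20, min_count=1).wv
>>> sim = wv.n_similarity(['human', 'interface'], ['user', 'system'])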
"""
if not (len(ws1) and len(ws2)):
raise ZeroDivisionError('At least one of the passed lists is empty.')
mean1 = self.get_mean_vector(ws1, pre_normalize=False)
mean2 = self.get_mean_vector(ws2, pre_normalize=False)
return dot(matutils.unitvec(mean1), matutils.unitvec(mean2))
@staticmethod
def _log_evaluate_word_analogies(section):
"""Calculate score by section, helper for
:meth:`~gensim.models.keyedvectors.KeyedVectors.evaluate_word_analogies`.
Parameters
----------
section : dict of (str, (str, str, str, str))
Section given from evaluation.
Returns
-------
float
Accuracy score if at least one prediction was made (correct or incorrect),
or 0.0 if there were no predictions at all in this section.
"""
correct, incorrect = len(section['correct']), len(section['incorrect'])
if correct + incorrect == 0:
return 0.0
score = correct / (correct + incorrect)
logger.info("%s: %.1f%% (%i/%i)", section['section'], 100.0 * score, correct, correct + incorrect)
return score
def evaluate_word_analogies(
self, analogies, restrict_vocab=300000, case_insensitive=True,
dummy4unknown=False, similarity_function='most_similar'):
"""Compute performance of the model on an analogy test set.
The accuracy is reported (printed to log and returned as a score) for each section separately,
plus there's one aggregate summary at the end.
This method corresponds to the `compute-accuracy` script of the original C word2vec.
See also `Analogy (State of the art) <https://aclweb.org/aclwiki/Analogy_(State_of_the_art)>`_.
Parameters
----------
analogies : str
Path to file, where lines are 4-tuples of words, split into sections by ": SECTION NAME" lines.
See `gensim/test/test_data/questions-words.txt` as example.
restrict_vocab : int, optional
Ignore all 4-tuples containing a word not in the first `restrict_vocab` words.
This may be meaningful if you've sorted the model vocabulary by descending frequency (which is standard
in modern word embedding models).
case_insensitive : bool, optional
If True - convert all words to their uppercase form before evaluating the performance.
Useful to handle case-mismatch between training tokens and words in the test set.
In case of multiple case variants of a single word, the vector for the first occurrence
(also the most frequent if vocabulary is sorted) is taken.
dummy4unknown : bool, optional
If True - produce zero accuracies for 4-tuples with out-of-vocabulary words.
Otherwise, these tuples are skipped entirely and not used in the evaluation.
similarity_function : str, optional
Function name used for similarity calculation.
Returns
-------
score : float
The overall evaluation score on the entire evaluation set
sections : list of dict of {str : str or list of tuple of (str, str, str, str)}
Results broken down by each section of the evaluation set. Each dict contains the name of the section
under the key 'section', and lists of correctly and incorrectly predicted 4-tuples of words under the
keys 'correct' and 'incorrect'.
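Examples
--------
A sketch of the call only; the toy model below shares no vocabulary with the bundled test set,
so every quadruplet is skipped and the score is 0.0:
>>> from gensim.test.utils import common_texts, datapath
>>> from gensim.models import Word2Vec
>>> wv = Word2Vec(common_texts, vector_size=20, min_count=1).wv
>>> score, sections = wv.evaluate_word_analogies(datapath('questions-words.txt'))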
"""
ok_keys = self.index_to_key[:restrict_vocab]
if case_insensitive:
ok_vocab = {k.upper(): self.get_index(k) for k in reversed(ok_keys)}
else:
ok_vocab = {k: self.get_index(k) for k in reversed(ok_keys)}
oov = 0
logger.info("Evaluating word analogies for top %i words in the model on %s", restrict_vocab, analogies)
sections, section = [], None
quadruplets_no = 0
with utils.open(analogies, 'rb') as fin:
for line_no, line in enumerate(fin):
line = utils.to_unicode(line)
if line.startswith(': '):
# a new section starts => store the old section
if section:
sections.append(section)
self._log_evaluate_word_analogies(section)
section = {'section': line.lstrip(': ').strip(), 'correct': [], 'incorrect': []}
else:
if not section:
raise ValueError("Missing section header before line #%i in %s" % (line_no, analogies))
try:
if case_insensitive:
a, b, c, expected = [word.upper() for word in line.split()]
else:
a, b, c, expected = [word for word in line.split()]
except ValueError:
logger.info("Skipping invalid line #%i in %s", line_no, analogies)
continue
quadruplets_no += 1
if a not in ok_vocab or b not in ok_vocab or c not in ok_vocab or expected not in ok_vocab:
oov += 1
if dummy4unknown:
logger.debug('Zero accuracy for line #%d with OOV words: %s', line_no, line.strip())
section['incorrect'].append((a, b, c, expected))
else:
logger.debug("Skipping line #%i with OOV words: %s", line_no, line.strip())
continue
original_key_to_index = self.key_to_index
self.key_to_index = ok_vocab
ignore = {a, b, c} # input words to be ignored
predicted = None
# find the most likely prediction using 3CosAdd (vector offset) method
# TODO: implement 3CosMul and set-based methods for solving analogies
sims = self.most_similar(positive=[b, c], negative=[a], topn=5, restrict_vocab=restrict_vocab)
self.key_to_index = original_key_to_index
for element in sims:
predicted = element[0].upper() if case_insensitive else element[0]
if predicted in ok_vocab and predicted not in ignore:
if predicted != expected:
logger.debug("%s: expected %s, predicted %s", line.strip(), expected, predicted)
break
if predicted == expected:
section['correct'].append((a, b, c, expected))
else:
section['incorrect'].append((a, b, c, expected))
if section:
# store the last section, too
sections.append(section)
self._log_evaluate_word_analogies(section)
total = {
'section': 'Total accuracy',
'correct': list(itertools.chain.from_iterable(s['correct'] for s in sections)),
'incorrect': list(itertools.chain.from_iterable(s['incorrect'] for s in sections)),
}
oov_ratio = float(oov) / quadruplets_no * 100
logger.info('Quadruplets with out-of-vocabulary words: %.1f%%', oov_ratio)
if not dummy4unknown:
logger.info(
'NB: analogies containing OOV words were skipped from evaluation! '
'To change this behavior, use "dummy4unknown=True"'
)
analogies_score = self._log_evaluate_word_analogies(total)
sections.append(total)
# Return the overall score and the full lists of correct and incorrect analogies
return analogies_score, sections
@staticmethod
def log_accuracy(section):
correct, incorrect = len(section['correct']), len(section['incorrect'])
if correct + incorrect > 0:
logger.info(
"%s: %.1f%% (%i/%i)",
section['section'], 100.0 * correct / (correct + incorrect), correct, correct + incorrect,
)
@staticmethod
def log_evaluate_word_pairs(pearson, spearman, oov, pairs):
logger.info('Pearson correlation coefficient against %s: %.4f', pairs, pearson[0])
logger.info('Spearman rank-order correlation coefficient against %s: %.4f', pairs, spearman[0])
logger.info('Pairs with unknown words ratio: %.1f%%', oov)
def evaluate_word_pairs(
self, pairs, delimiter='\t', encoding='utf8',
restrict_vocab=300000, case_insensitive=True, dummy4unknown=False,
):
"""Compute correlation of the model with human similarity judgments.
Notes
-----
More datasets can be found at
* http://technion.ac.il/~ira.leviant/MultilingualVSMdata.html
* https://www.cl.cam.ac.uk/~fh295/simlex.html.
Parameters
----------
pairs : str
Path to file, where lines are 3-tuples, each consisting of a word pair and a similarity value.
See `test/test_data/wordsim353.tsv` as example.
delimiter : str, optional
Separator in the `pairs` file.
encoding : str, optional
Encoding of the `pairs` file.
restrict_vocab : int, optional
Ignore all word pairs containing a word not in the first `restrict_vocab` words.
This may be meaningful if you've sorted the model vocabulary by descending frequency (which is standard
in modern word embedding models).
case_insensitive : bool, optional
If True - convert all words to their uppercase form before evaluating the performance.
Useful to handle case-mismatch between training tokens and words in the test set.
In case of multiple case variants of a single word, the vector for the first occurrence
(also the most frequent if vocabulary is sorted) is taken.
dummy4unknown : bool, optional
If True - produce zero similarities for word pairs with out-of-vocabulary words.
Otherwise, these pairs are skipped entirely and not used in the evaluation.
Returns
-------
pearson : tuple of (float, float)
Pearson correlation coefficient with 2-tailed p-value.
spearman : tuple of (float, float)
Spearman rank-order correlation coefficient between the similarities from the dataset and the
similarities produced by the model itself, with 2-tailed p-value.
oov_ratio : float
The ratio of pairs with unknown words.
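Examples
--------
A sketch of the call only; a realistically trained model is needed, because a toy corpus would
leave all pairs out-of-vocabulary and raise ValueError. The pre-trained model name below assumes
the `gensim.downloader` data repository is reachable:
>>> import gensim.downloader as api
>>> from gensim.test.utils import datapath
>>> wv = api.load('glove-wiki-gigaword-50')  # downloads the vectors on first use
>>> pearson, spearman, oov_ratio = wv.evaluate_word_pairs(datapath('wordsim353.tsv'))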
"""
ok_keys = self.index_to_key[:restrict_vocab]
if case_insensitive:
ok_vocab = {k.upper(): self.get_index(k) for k in reversed(ok_keys)}
else:
ok_vocab = {k: self.get_index(k) for k in reversed(ok_keys)}
similarity_gold = []
similarity_model = []
oov = 0
original_key_to_index, self.key_to_index = self.key_to_index, ok_vocab
try:
with utils.open(pairs, encoding=encoding) as fin:
for line_no, line in enumerate(fin):
if not line or line.startswith('#'): # Ignore lines with comments.
continue
try:
if case_insensitive:
a, b, sim = [word.upper() for word in line.split(delimiter)]
else:
a, b, sim = [word for word in line.split(delimiter)]
sim = float(sim)
except (ValueError, TypeError):
logger.info('Skipping invalid line #%d in %s', line_no, pairs)
continue
if a not in ok_vocab or b not in ok_vocab:
oov += 1
if dummy4unknown:
logger.debug('Zero similarity for line #%d with OOV words: %s', line_no, line.strip())
similarity_model.append(0.0)
similarity_gold.append(sim)
else:
logger.info('Skipping line #%d with OOV words: %s', line_no, line.strip())
continue
similarity_gold.append(sim) # Similarity from the dataset
similarity_model.append(self.similarity(a, b)) # Similarity from the model
finally:
self.key_to_index = original_key_to_index
assert len(similarity_gold) == len(similarity_model)
if not similarity_gold:
raise ValueError(
f"No valid similarity judgements found in {pairs}: either invalid format or "
f"all are out-of-vocabulary in {self}"
)
spearman = stats.spearmanr(similarity_gold, similarity_model)
pearson = stats.pearsonr(similarity_gold, similarity_model)
if dummy4unknown:
oov_ratio = float(oov) / len(similarity_gold) * 100
else:
oov_ratio = float(oov) / (len(similarity_gold) + oov) * 100
logger.debug('Pearson correlation coefficient against %s: %f with p-value %f', pairs, pearson[0], pearson[1])
logger.debug(
'Spearman rank-order correlation coefficient against %s: %f with p-value %f',
pairs, spearman[0], spearman[1]
)
logger.debug('Pairs with unknown words: %d', oov)
self.log_evaluate_word_pairs(pearson, spearman, oov_ratio, pairs)
return pearson, spearman, oov_ratio
@deprecated(
"Use fill_norms() instead. "
"See https://github.com/RaRe-Technologies/gensim/wiki/Migrating-from-Gensim-3.x-to-4"
)
def init_sims(self, replace=False):
"""Precompute data helpful for bulk similarity calculations.
:meth:`~gensim.models.keyedvectors.KeyedVectors.fill_norms` now preferred for this purpose.
Parameters
----------
replace : bool, optional
If True - forget the original vectors and only keep the normalized ones.
Warnings
--------
You **cannot sensibly continue training** after doing a replace on a model's
internal KeyedVectors, and a replace is no longer necessary to save RAM. Do not use this method.
"""
self.fill_norms()
if replace:
logger.warning("destructive init_sims(replace=True) deprecated & no longer required for space-efficiency")
self.unit_normalize_all()
def unit_normalize_all(self):
"""Destructively scale all vectors to unit-length.
You cannot sensibly continue training after such a step.
"""
self.fill_norms()
self.vectors /= self.norms[..., np.newaxis]
self.norms = np.ones((len(self.vectors),))
def relative_cosine_similarity(self, wa, wb, topn=10):
"""Compute the relative cosine similarity between two words given top-n similar words,
by `Artuur Leeuwenberg, Mihaela Vela, Jon Dehdari, Josef van Genabith "A Minimally Supervised Approach
for Synonym Extraction with Word Embeddings" <https://ufal.mff.cuni.cz/pbml/105/art-leeuwenberg-et-al.pdf>`_.
To calculate relative cosine similarity between two words, equation (1) of the paper is used.
For WordNet synonyms, if rcs(topn=10) is greater than 0.10 then wa and wb are more similar than
any arbitrary word pairs.
Parameters
----------
wa: str
Word for which the top-n most similar words are looked up.
wb: str
Word for which the relative cosine similarity with `wa` is evaluated.
topn: int, optional
Number of most similar words to consider with respect to `wa`.
Returns
-------
numpy.float64
Relative cosine similarity between wa and wb.
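Examples
--------
A minimal sketch using a toy model trained on `common_texts`:
>>> from gensim.test.utils import common_texts
>>> from gensim.models import Word2Vec
>>> wv = Word2Vec(common_texts, vector_size=20, min_count=1).wv
>>> rcs = wv.relative_cosine_similarity('human', 'interface', topn=5)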
"""
sims = self.similar_by_word(wa, topn)
if not sims:
raise ValueError("Cannot calculate relative cosine similarity without any similar words.")
rcs = float(self.similarity(wa, wb)) / (sum(sim for _, sim in sims))
return rcs
def save_word2vec_format(
self, fname, fvocab=None, binary=False, total_vec=None, write_header=True,
prefix='', append=False, sort_attr='count',
):
"""Store the input-hidden weight matrix in the same format used by the original
C word2vec-tool, for compatibility.
Parameters
----------
fname : str
File path to save the vectors to.
fvocab : str, optional
File path to save additional vocabulary information to. `None` to not store the vocabulary.
binary : bool, optional
If True, the data will be saved in binary word2vec format, else it will be saved in plain text.
total_vec : int, optional
Explicitly specify total number of vectors
(in case word vectors are appended with document vectors afterwards).
write_header : bool, optional
If False, don't write the 1st line declaring the count of vectors and dimensions.
This is the format used by e.g. GloVe vectors.
prefix : str, optional
String to prepend in front of each stored word. Default = no prefix.
append : bool, optional
If set, open `fname` in `ab` mode instead of the default `wb` mode.
sort_attr : str, optional
Sort the output vectors in descending order of this attribute. Default: most frequent keys first.
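Examples
--------
A round-trip sketch using a toy model and a temporary file (the file name is arbitrary):
>>> from gensim.test.utils import common_texts, get_tmpfile
>>> from gensim.models import Word2Vec, KeyedVectors
>>> wv = Word2Vec(common_texts, vector_size=20, min_count=1).wv
>>> path = get_tmpfile('toy_w2v_format.txt')
>>> wv.save_word2vec_format(path, binary=False)
>>> reloaded = KeyedVectors.load_word2vec_format(path, binary=False)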
"""
if total_vec is None:
total_vec = len(self.index_to_key)
mode = 'wb' if not append else 'ab'
if sort_attr in self.expandos:
store_order_vocab_keys = sorted(self.key_to_index.keys(), key=lambda k: -self.get_vecattr(k, sort_attr))
else:
# This can happen even for the default `count`: the "native C word2vec" format does not store counts,
# so models loaded via load_word2vec_format() do not have the "count" attribute set. They have
# no attributes at all, and fall under this code path.
if fvocab is not None:
raise ValueError(f"Cannot store vocabulary with '{sort_attr}' because that attribute does not exist")
logger.warning(
"attribute %s not present in %s; will store in internal index_to_key order",
sort_attr, self,
)
store_order_vocab_keys = self.index_to_key
if fvocab is not None:
logger.info("storing vocabulary in %s", fvocab)
with utils.open(fvocab, mode) as vout:
for word in store_order_vocab_keys:
vout.write(f"{prefix}{word} {self.get_vecattr(word, sort_attr)}\n".encode('utf8'))
logger.info("storing %sx%s projection weights into %s", total_vec, self.vector_size, fname)
assert (len(self.index_to_key), self.vector_size) == self.vectors.shape
# After (possibly-empty) initial range of int-only keys in Doc2Vec,
# store in sorted order: most frequent keys at the top.
# XXX: get rid of this: not used much, too complex and brittle.
# See https://github.com/RaRe-Technologies/gensim/pull/2981#discussion_r512969788
index_id_count = 0
for i, val in enumerate(self.index_to_key):
if i != val:
break
index_id_count += 1
keys_to_write = itertools.chain(range(0, index_id_count), store_order_vocab_keys)
# Store the actual vectors to the output file, in the order defined by sort_attr.
with utils.open(fname, mode) as fout:
if write_header:
fout.write(f"{total_vec} {self.vector_size}\n".encode('utf8'))
for key in keys_to_write:
key_vector = self[key]
if binary:
fout.write(f"{prefix}{key} ".encode('utf8') + key_vector.astype(REAL).tobytes())
else:
fout.write(f"{prefix}{key} {' '.join(repr(val) for val in key_vector)}\n".encode('utf8'))
@classmethod
def load_word2vec_format(
cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
limit=None, datatype=REAL, no_header=False,
):
"""Load KeyedVectors from a file produced by the original C word2vec-tool format.
Warnings
--------
The information stored in the file is incomplete (the binary tree is missing),
so while you can query for word similarity etc., you cannot continue training
with a model loaded this way.
Parameters
----------
fname : str
The file path to the saved word2vec-format file.
fvocab : str, optional
File path to the vocabulary. Word counts are read from `fvocab`, if set
(this is the file generated by the `-save-vocab` flag of the original C tool).
binary : bool, optional
If True, indicates whether the data is in binary word2vec format.
encoding : str, optional
If you trained the C model using non-utf8 encoding for words, specify that encoding in `encoding`.
unicode_errors : str, optional
default 'strict'; the error handling scheme passed as the `errors` argument to `bytes.decode()`.
If your source file may include word tokens truncated in the middle of a multibyte unicode character
(as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.
limit : int, optional
Sets a maximum number of word-vectors to read from the file. The default,
None, means read all.
datatype : type, optional
(Experimental) Can coerce dimensions to a non-default float type (such as `np.float16`) to save memory.
Such types may result in much slower bulk operations or incompatibility with optimized routines.
no_header : bool, optional
Default False means a usual word2vec-format file, with a 1st line declaring the count of
following vectors & number of dimensions. If True, the file is assumed to lack a declaratory
(vocab_size, vector_size) header and instead start with the 1st vector, and an extra
reading-pass will be used to discover the number of vectors. Works only with `binary=False`.
Returns
-------
:class:`~gensim.models.keyedvectors.KeyedVectors`
Loaded model.
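Examples
--------
A sketch of the call only; 'vectors.bin' is a placeholder path for a file in the binary C word2vec format:
>>> from gensim.models import KeyedVectors
>>> wv = KeyedVectors.load_word2vec_format('vectors.bin', binary=True, limit=500000)  # hypothetical path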
"""
return _load_word2vec_format(
cls, fname, fvocab=fvocab, binary=binary, encoding=encoding, unicode_errors=unicode_errors,
limit=limit, datatype=datatype, no_header=no_header,
)
def intersect_word2vec_format(self, fname, lockf=0.0, binary=False, encoding='utf8', unicode_errors='strict'):
"""Merge in an input-hidden weight matrix loaded from the original C word2vec-tool format,
where it intersects with the current vocabulary.
No words are added to the existing vocabulary, but intersecting words adopt the file's weights, and
non-intersecting words are left alone.
Parameters
----------
fname : str
The file path to load the vectors from.
lockf : float, optional
Lock-factor value to be set for any imported word-vectors; the
default value of 0.0 prevents further updating of the vector during subsequent
training. Use 1.0 to allow further training updates of merged vectors.
binary : bool, optional
If True, `fname` is in the binary word2vec C format.
encoding : str, optional
Encoding used when decoding words from `fname`.
unicode_errors : str, optional
Error handling behaviour when decoding words, passed as the `errors` argument to `bytes.decode()`.
"""
overlap_count = 0
logger.info("loading projection weights from %s", fname)
with utils.open(fname, 'rb') as fin:
header = utils.to_unicode(fin.readline(), encoding=encoding)
vocab_size, vector_size = (int(x) for x in header.split()) # throws for invalid file format
if vector_size != self.vector_size:
raise ValueError("incompatible vector size %d in file %s" % (vector_size, fname))
# TODO: maybe mismatched vectors still useful enough to merge (truncating/padding)?
if binary:
binary_len = dtype(REAL).itemsize * vector_size
for _ in range(vocab_size):
# mixed text and binary: read text first, then binary
word = []
while True:
ch = fin.read(1)
if ch == b' ':
break
if ch != b'\n': # ignore newlines in front of words (some binary files have)
word.append(ch)
word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)
weights = np.frombuffer(fin.read(binary_len), dtype=REAL)  # np.fromstring is deprecated for binary data
if word in self.key_to_index:
overlap_count += 1
self.vectors[self.get_index(word)] = weights
self.vectors_lockf[self.get_index(word)] = lockf # lock-factor: 0.0=no changes
else:
for line_no, line in enumerate(fin):
parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
if len(parts) != vector_size + 1:
raise ValueError("invalid vector on line %s (is this really the text format?)" % line_no)
word, weights = parts[0], [REAL(x) for x in parts[1:]]
if word in self.key_to_index:
overlap_count += 1
self.vectors[self.get_index(word)] = weights
self.vectors_lockf[self.get_index(word)] = lockf # lock-factor: 0.0=no changes
self.add_lifecycle_event(
"intersect_word2vec_format",
msg=f"merged {overlap_count} vectors into {self.vectors.shape} matrix from {fname}",
)
def vectors_for_all(self, keys: Iterable, allow_inference: bool = True,
copy_vecattrs: bool = False) -> 'KeyedVectors':
"""Produce vectors for all given keys as a new :class:`KeyedVectors` object.
Notes
-----
The keys will always be deduplicated. For optimal performance, you should not pass entire
corpora to the method. Instead, you should construct a dictionary of unique words in your
corpus:
>>> from collections import Counter
>>> import itertools
>>>
>>> from gensim.models import FastText
>>> from gensim.test.utils import datapath, common_texts
>>>
>>> model_corpus_file = datapath('lee_background.cor') # train word vectors on some corpus
>>> model = FastText(corpus_file=model_corpus_file, vector_size=20, min_count=1)
>>> corpus = common_texts # infer word vectors for words from another corpus
>>> word_counts = Counter(itertools.chain.from_iterable(corpus)) # count words in your corpus
>>> words_by_freq = (k for k, v in word_counts.most_common())
>>> word_vectors = model.wv.vectors_for_all(words_by_freq) # create word-vectors for words in your corpus
Parameters
----------
keys : iterable
The keys that will be vectorized.
allow_inference : bool, optional
In subclasses such as :class:`~gensim.models.fasttext.FastTextKeyedVectors`,
vectors for out-of-vocabulary keys (words) may be inferred. Default is True.
copy_vecattrs : bool, optional
Additional attributes set via the :meth:`KeyedVectors.set_vecattr` method
will be preserved in the produced :class:`KeyedVectors` object. Default is False.
To ensure that *all* the produced vectors will have vector attributes assigned,
you should set `allow_inference=False`.
Returns
-------
keyedvectors : :class:`~gensim.models.keyedvectors.KeyedVectors`
Vectors for all the given keys.
"""
# Pick only the keys that actually exist & deduplicate them.
# We keep the original key order, to improve cache locality, for performance.
vocab, seen = [], set()
for key in keys:
if key not in seen:
seen.add(key)
if key in (self if allow_inference else self.key_to_index):
vocab.append(key)
kv = KeyedVectors(self.vector_size, len(vocab), dtype=self.vectors.dtype)
for key in vocab: # produce and index vectors for all the given keys
weights = self[key]
_add_word_to_kv(kv, None, key, weights, len(vocab))
if copy_vecattrs:
for attr in self.expandos:
try:
kv.set_vecattr(key, attr, self.get_vecattr(key, attr))
except KeyError:
pass
return kv
def _upconvert_old_d2vkv(self):
"""Convert a deserialized older Doc2VecKeyedVectors instance to latest generic KeyedVectors"""
self.vocab = self.doctags
self._upconvert_old_vocab() # destroys 'vocab', fills 'key_to_index' & 'extras'
for k in self.key_to_index.keys():
old_offset = self.get_vecattr(k, 'offset')
true_index = old_offset + self.max_rawint + 1
self.key_to_index[k] = true_index
del self.expandos['offset'] # no longer needed
if self.max_rawint > -1:
self.index_to_key = list(range(0, self.max_rawint + 1)) + self.offset2doctag
else:
self.index_to_key = self.offset2doctag
self.vectors = self.vectors_docs
del self.doctags
del self.vectors_docs
del self.count
del self.max_rawint
del self.offset2doctag
def similarity_unseen_docs(self, *args, **kwargs):
raise NotImplementedError("Call similarity_unseen_docs on a Doc2Vec model instead.")
# to help 3.8.1 & older pickles load properly
Word2VecKeyedVectors = KeyedVectors
Doc2VecKeyedVectors = KeyedVectors
EuclideanKeyedVectors = KeyedVectors
class CompatVocab:
def __init__(self, **kwargs):
"""A single vocabulary item, used internally for collecting per-word frequency/sampling info,
and for constructing binary trees (incl. both word leaves and inner nodes).
Retained for now to ease the loading of older models.
"""
self.count = 0
self.__dict__.update(kwargs)
def __lt__(self, other): # used for sorting in a priority queue
return self.count < other.count
def __str__(self):
vals = ['%s:%r' % (key, self.__dict__[key]) for key in sorted(self.__dict__) if not key.startswith('_')]
return "%s<%s>" % (self.__class__.__name__, ', '.join(vals))
# compatibility alias, allowing older pickle-based `.save()`s to load
Vocab = CompatVocab
# Functions for internal use by _load_word2vec_format function
def _add_word_to_kv(kv, counts, word, weights, vocab_size):
if kv.has_index_for(word):
logger.warning("duplicate word '%s' in word2vec file, ignoring all but first", word)
return
word_id = kv.add_vector(word, weights)
if counts is None:
# Most common scenario: no vocab file given. Just make up some bogus counts, in descending order.
# TODO (someday): make this faking optional, include more realistic (Zipf-based) fake numbers.
word_count = vocab_size - word_id
elif word in counts:
# use count from the vocab file
word_count = counts[word]
else:
logger.warning("vocabulary file is incomplete: '%s' is missing", word)
word_count = None
kv.set_vecattr(word, 'count', word_count)
def _add_bytes_to_kv(kv, counts, chunk, vocab_size, vector_size, datatype, unicode_errors, encoding):
start = 0
processed_words = 0
bytes_per_vector = vector_size * dtype(REAL).itemsize
max_words = vocab_size - kv.next_index # don't read more than kv preallocated to hold
assert max_words > 0
for _ in range(max_words):
i_space = chunk.find(b' ', start)
i_vector = i_space + 1
if i_space == -1 or (len(chunk) - i_vector) < bytes_per_vector:
break
word = chunk[start:i_space].decode(encoding, errors=unicode_errors)
# Some binary files are reported to have an obsolete newline at the beginning of the word; remove it.
word = word.lstrip('\n')
vector = frombuffer(chunk, offset=i_vector, count=vector_size, dtype=REAL).astype(datatype)
_add_word_to_kv(kv, counts, word, vector, vocab_size)
start = i_vector + bytes_per_vector
processed_words += 1
return processed_words, chunk[start:]
def _word2vec_read_binary(
fin, kv, counts, vocab_size, vector_size, datatype, unicode_errors, binary_chunk_size,
encoding="utf-8",
):
chunk = b''
tot_processed_words = 0
while tot_processed_words < vocab_size:
new_chunk = fin.read(binary_chunk_size)
chunk += new_chunk
processed_words, chunk = _add_bytes_to_kv(
kv, counts, chunk, vocab_size, vector_size, datatype, unicode_errors, encoding)
tot_processed_words += processed_words
if len(new_chunk) < binary_chunk_size:
break
if tot_processed_words != vocab_size:
raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
def _word2vec_read_text(fin, kv, counts, vocab_size, vector_size, datatype, unicode_errors, encoding):
for line_no in range(vocab_size):
line = fin.readline()
if line == b'':
raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
word, weights = _word2vec_line_to_vector(line, datatype, unicode_errors, encoding)
_add_word_to_kv(kv, counts, word, weights, vocab_size)
def _word2vec_line_to_vector(line, datatype, unicode_errors, encoding):
parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
word, weights = parts[0], [datatype(x) for x in parts[1:]]
return word, weights
def _word2vec_detect_sizes_text(fin, limit, datatype, unicode_errors, encoding):
vector_size = None
for vocab_size in itertools.count():
line = fin.readline()
if line == b'' or vocab_size == limit: # EOF/max: return what we've got
break
if vector_size:
continue # don't bother parsing lines past the 1st
word, weights = _word2vec_line_to_vector(line, datatype, unicode_errors, encoding)
vector_size = len(weights)
return vocab_size, vector_size
def _load_word2vec_format(
cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
limit=sys.maxsize, datatype=REAL, no_header=False, binary_chunk_size=100 * 1024,
):
"""Load the input-hidden weight matrix from the original C word2vec-tool format.
Note that the information stored in the file is incomplete (the binary tree is missing),
so while you can query for word similarity etc., you cannot continue training
with a model loaded this way.
Parameters
----------
fname : str
The file path to the saved word2vec-format file.
fvocab : str, optional
File path to the vocabulary. Word counts are read from `fvocab` filename, if set
(this is the file generated by `-save-vocab` flag of the original C tool).
binary : bool, optional
If True, indicates whether the data is in binary word2vec format.
encoding : str, optional
If you trained the C model using non-utf8 encoding for words, specify that encoding in `encoding`.
unicode_errors : str, optional
default 'strict'; the error handling scheme passed as the `errors` argument to `bytes.decode()`.
If your source file may include word tokens truncated in the middle of a multibyte unicode character
(as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.
limit : int, optional
Sets a maximum number of word-vectors to read from the file. The default,
None, means read all.
datatype : type, optional
(Experimental) Can coerce dimensions to a non-default float type (such as `np.float16`) to save memory.
Such types may result in much slower bulk operations or incompatibility with optimized routines.
binary_chunk_size : int, optional
Read input file in chunks of this many bytes for performance reasons.
Returns
-------
object
Returns the loaded model as an instance of :class:`cls`.
"""
counts = None
if fvocab is not None:
logger.info("loading word counts from %s", fvocab)
counts = {}
with utils.open(fvocab, 'rb') as fin:
for line in fin:
word, count = utils.to_unicode(line, errors=unicode_errors).strip().split()
counts[word] = int(count)
logger.info("loading projection weights from %s", fname)
with utils.open(fname, 'rb') as fin:
if no_header:
# deduce both vocab_size & vector_size from 1st pass over file
if binary:
raise NotImplementedError("no_header only available for text-format files")
else: # text
vocab_size, vector_size = _word2vec_detect_sizes_text(fin, limit, datatype, unicode_errors, encoding)
fin.close()
fin = utils.open(fname, 'rb')
else:
header = utils.to_unicode(fin.readline(), encoding=encoding)
vocab_size, vector_size = [int(x) for x in header.split()] # throws for invalid file format
if limit:
vocab_size = min(vocab_size, limit)
kv = cls(vector_size, vocab_size, dtype=datatype)
if binary:
_word2vec_read_binary(
fin, kv, counts, vocab_size, vector_size, datatype, unicode_errors, binary_chunk_size, encoding
)
else:
_word2vec_read_text(fin, kv, counts, vocab_size, vector_size, datatype, unicode_errors, encoding)
if kv.vectors.shape[0] != len(kv):
logger.info(
"duplicate words detected, shrinking matrix size from %i to %i",
kv.vectors.shape[0], len(kv),
)
kv.vectors = ascontiguousarray(kv.vectors[: len(kv)])
assert (len(kv), vector_size) == kv.vectors.shape
kv.add_lifecycle_event(
"load_word2vec_format",
msg=f"loaded {kv.vectors.shape} matrix of type {kv.vectors.dtype} from {fname}",
binary=binary, encoding=encoding,
)
return kv
def load_word2vec_format(*args, **kwargs):
"""Alias for :meth:`~gensim.models.keyedvectors.KeyedVectors.load_word2vec_format`."""
return KeyedVectors.load_word2vec_format(*args, **kwargs)
def pseudorandom_weak_vector(size, seed_string=None, hashfxn=hash):
"""Get a random vector, derived deterministically from `seed_string` if supplied.
Useful for initializing KeyedVectors that will be the starting projection/input layers of _2Vec models.
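For example (illustrative only; the same seed string yields the same vector within one process,
subject to the supplied `hashfxn`):
>>> vec = pseudorandom_weak_vector(100, seed_string='apple')  # shape (100,), values in [-0.005, 0.005)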
"""
if seed_string:
once = np.random.Generator(np.random.SFC64(hashfxn(seed_string) & 0xffffffff))
else:
once = utils.default_prng
return (once.random(size).astype(REAL) - 0.5) / size
def prep_vectors(target_shape, prior_vectors=None, seed=0, dtype=REAL):
"""Return a numpy array of the given shape. Reuse prior_vectors object or values
to extent possible. Initialize new values randomly if requested.
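For example (a sketch; the range in the comment follows from the scaling above):
>>> import numpy as np
>>> old = np.ones((2, 4), dtype=np.float32)
>>> new = prep_vectors((5, 4), prior_vectors=old)  # first 2 rows copied from `old`, rest random in [-0.25, 0.25)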
"""
if prior_vectors is None:
prior_vectors = np.zeros((0, 0))
if prior_vectors.shape == target_shape:
return prior_vectors
target_count, vector_size = target_shape
rng = np.random.default_rng(seed=seed) # use new instance of numpy's recommended generator/algorithm
new_vectors = rng.random(target_shape, dtype=dtype) # [0.0, 1.0)
new_vectors *= 2.0 # [0.0, 2.0)
new_vectors -= 1.0 # [-1.0, 1.0)
new_vectors /= vector_size
new_vectors[0:prior_vectors.shape[0], 0:prior_vectors.shape[1]] = prior_vectors
return new_vectors
| 92,030 | Python | .py | 1,769 | 41.787451 | 120 | 0.611656 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,127 | _fasttext_bin.py | piskvorky_gensim/gensim/models/_fasttext_bin.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Authors: Michael Penkov <m@penkov.dev>
# Copyright (C) 2019 RaRe Technologies s.r.o.
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Load models from the native binary format released by Facebook.
The main entry point is the :func:`~gensim.models._fasttext_bin.load` function.
It returns a :class:`~gensim.models._fasttext_bin.Model` namedtuple containing everything loaded from the binary.
Examples
--------
Load a model from a binary file:
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> from gensim.models._fasttext_bin import load
>>> with open(datapath('crime-and-punishment.bin'), 'rb') as fin:
... model = load(fin)
>>> model.nwords
291
>>> model.vectors_ngrams.shape
(391, 5)
>>> sorted(model.raw_vocab, key=lambda w: len(w), reverse=True)[:5]
['останавливаться', 'изворачиваться,', 'раздражительном', 'exceptionally', 'проскользнуть']
See Also
--------
`FB Implementation <https://github.com/facebookresearch/fastText/blob/master/src/matrix.cc>`_.
"""
import collections
import gzip
import io
import logging
import struct
import numpy as np
_END_OF_WORD_MARKER = b'\x00'
# FastText dictionary data structure holds elements of type `entry` which can have `entry_type`
# either `word` (0 :: int8) or `label` (1 :: int8). Here we deal with unsupervised case only
# so we want `word` type.
# See https://github.com/facebookresearch/fastText/blob/master/src/dictionary.h
_DICT_WORD_ENTRY_TYPE_MARKER = b'\x00'
logger = logging.getLogger(__name__)
# Constants for FastText version and FastText file format magic (both int32)
# https://github.com/facebookresearch/fastText/blob/master/src/fasttext.cc#L25
_FASTTEXT_VERSION = np.int32(12)
_FASTTEXT_FILEFORMAT_MAGIC = np.int32(793712314)
# _NEW_HEADER_FORMAT is constructed on the basis of args::save method, see
# https://github.com/facebookresearch/fastText/blob/master/src/args.cc
_NEW_HEADER_FORMAT = [
('dim', 'i'),
('ws', 'i'),
('epoch', 'i'),
('min_count', 'i'),
('neg', 'i'),
('word_ngrams', 'i'), # Unused in loading
('loss', 'i'),
('model', 'i'),
('bucket', 'i'),
('minn', 'i'),
('maxn', 'i'),
('lr_update_rate', 'i'), # Unused in loading
('t', 'd'),
]
_OLD_HEADER_FORMAT = [
('epoch', 'i'),
('min_count', 'i'),
('neg', 'i'),
('word_ngrams', 'i'), # Unused in loading
('loss', 'i'),
('model', 'i'),
('bucket', 'i'),
('minn', 'i'),
('maxn', 'i'),
('lr_update_rate', 'i'), # Unused in loading
('t', 'd'),
]
_FLOAT_SIZE = struct.calcsize('@f')
if _FLOAT_SIZE == 4:
_FLOAT_DTYPE = np.dtype(np.float32)
elif _FLOAT_SIZE == 8:
_FLOAT_DTYPE = np.dtype(np.float64)
else:
_FLOAT_DTYPE = None
def _yield_field_names():
for name, _ in _OLD_HEADER_FORMAT + _NEW_HEADER_FORMAT:
if not name.startswith('_'):
yield name
yield 'raw_vocab'
yield 'vocab_size'
yield 'nwords'
yield 'vectors_ngrams'
yield 'hidden_output'
yield 'ntokens'
_FIELD_NAMES = sorted(set(_yield_field_names()))
Model = collections.namedtuple('Model', _FIELD_NAMES)
"""Holds data loaded from the Facebook binary.
Parameters
----------
dim : int
The dimensionality of the vectors.
ws : int
The window size.
epoch : int
The number of training epochs.
neg : int
If non-zero, indicates that the model uses negative sampling.
loss : int
If equal to 1, indicates that the model uses hierarchical softmax.
model : int
If equal to 2, indicates that the model uses skip-grams.
bucket : int
The number of buckets.
min_count : int
The threshold below which the model ignores terms.
t : float
The sample threshold.
minn : int
The minimum ngram length.
maxn : int
The maximum ngram length.
raw_vocab : collections.OrderedDict
A map from words (str) to their frequency (int). The order in the dict
corresponds to the order of the words in the Facebook binary.
nwords : int
The number of words.
vocab_size : int
The size of the vocabulary.
vectors_ngrams : numpy.array
This is a matrix that contains vectors learned by the model.
Each row corresponds to a vector.
The number of vectors is equal to the number of words plus the number of buckets.
The number of columns is equal to the vector dimensionality.
hidden_output : numpy.array
This is a matrix that contains the shallow neural network output.
This array has the same dimensions as vectors_ngrams.
May be None - in that case, it is impossible to continue training the model.
"""
def _struct_unpack(fin, fmt):
num_bytes = struct.calcsize(fmt)
return struct.unpack(fmt, fin.read(num_bytes))
def _load_vocab(fin, new_format, encoding='utf-8'):
"""Load a vocabulary from a FB binary.
Before the vocab is ready for use, call the prepare_vocab function and pass
in the relevant parameters from the model.
Parameters
----------
fin : file
An open file pointer to the binary.
new_format: boolean
True if the binary is of the newer format.
encoding : str
The encoding to use when decoding binary data into words.
Returns
-------
tuple
The loaded vocabulary. Keys are words, values are counts.
The vocabulary size.
The number of words.
The number of tokens.
"""
vocab_size, nwords, nlabels = _struct_unpack(fin, '@3i')
# Vocab stored by [Dictionary::save](https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc)
if nlabels > 0:
raise NotImplementedError("Supervised fastText models are not supported")
logger.info("loading %s words for fastText model from %s", vocab_size, fin.name)
ntokens = _struct_unpack(fin, '@q')[0] # number of tokens
if new_format:
pruneidx_size, = _struct_unpack(fin, '@q')
raw_vocab = collections.OrderedDict()
for i in range(vocab_size):
word_bytes = io.BytesIO()
char_byte = fin.read(1)
while char_byte != _END_OF_WORD_MARKER:
word_bytes.write(char_byte)
char_byte = fin.read(1)
word_bytes = word_bytes.getvalue()
try:
word = word_bytes.decode(encoding)
except UnicodeDecodeError:
word = word_bytes.decode(encoding, errors='backslashreplace')
logger.error(
'failed to decode invalid unicode bytes %r; replacing invalid characters, using %r',
word_bytes, word
)
count, _ = _struct_unpack(fin, '@qb')
raw_vocab[word] = count
if new_format:
for j in range(pruneidx_size):
_struct_unpack(fin, '@2i')
return raw_vocab, vocab_size, nwords, ntokens
def _load_matrix(fin, new_format=True):
"""Load a matrix from fastText native format.
Interprets the matrix dimensions and type from the file stream.
Parameters
----------
fin : file
A file handle opened for reading.
new_format : bool, optional
True if the quant_input variable precedes
the matrix declaration. Should be True for newer versions of fastText.
Returns
-------
:class:`numpy.array`
The vectors as an array.
Each vector will be a row in the array.
The number of columns of the array will correspond to the vector size.
"""
if _FLOAT_DTYPE is None:
raise ValueError('bad _FLOAT_SIZE: %r' % _FLOAT_SIZE)
if new_format:
_struct_unpack(fin, '@?') # bool quant_input in fasttext.cc
num_vectors, dim = _struct_unpack(fin, '@2q')
count = num_vectors * dim
#
# numpy.fromfile doesn't play well with gzip.GzipFile as input:
#
# - https://github.com/RaRe-Technologies/gensim/pull/2476
# - https://github.com/numpy/numpy/issues/13470
#
# Until they fix it, we have to apply a workaround. We only apply the
# workaround when it's necessary, because np.fromfile is heavily optimized
# and very efficient (when it works).
#
if isinstance(fin, gzip.GzipFile):
logger.warning(
'Loading model from a compressed .gz file. This can be slow. '
'This is a work-around for a bug in NumPy: https://github.com/numpy/numpy/issues/13470. '
'Consider decompressing your model file for a faster load. '
)
matrix = _fromfile(fin, _FLOAT_DTYPE, count)
else:
matrix = np.fromfile(fin, _FLOAT_DTYPE, count)
assert matrix.shape == (count,), 'expected (%r,), got %r' % (count, matrix.shape)
matrix = matrix.reshape((num_vectors, dim))
return matrix
def _batched_generator(fin, count, batch_size=1e6):
"""Read `count` floats from `fin`.
Batches up read calls to avoid I/O overhead. Keeps no more than batch_size
floats in memory at once.
Yields floats.
"""
while count > batch_size:
batch = _struct_unpack(fin, '@%df' % batch_size)
for f in batch:
yield f
count -= batch_size
batch = _struct_unpack(fin, '@%df' % count)
for f in batch:
yield f
def _fromfile(fin, dtype, count):
"""Reimplementation of numpy.fromfile."""
return np.fromiter(_batched_generator(fin, count), dtype=dtype)
def load(fin, encoding='utf-8', full_model=True):
"""Load a model from a binary stream.
Parameters
----------
fin : file
The readable binary stream.
encoding : str, optional
The encoding to use for decoding text
full_model : boolean, optional
If False, skips loading the hidden output matrix. This saves a fair bit
of CPU time and RAM, but prevents training continuation.
Returns
-------
:class:`~gensim.models._fasttext_bin.Model`
The loaded model.
"""
if isinstance(fin, str):
fin = open(fin, 'rb')
magic, version = _struct_unpack(fin, '@2i')
new_format = magic == _FASTTEXT_FILEFORMAT_MAGIC
header_spec = _NEW_HEADER_FORMAT if new_format else _OLD_HEADER_FORMAT
model = {name: _struct_unpack(fin, fmt)[0] for (name, fmt) in header_spec}
if not new_format:
model.update(dim=magic, ws=version)
raw_vocab, vocab_size, nwords, ntokens = _load_vocab(fin, new_format, encoding=encoding)
model.update(raw_vocab=raw_vocab, vocab_size=vocab_size, nwords=nwords, ntokens=ntokens)
vectors_ngrams = _load_matrix(fin, new_format=new_format)
if not full_model:
hidden_output = None
else:
hidden_output = _load_matrix(fin, new_format=new_format)
assert fin.read() == b'', 'expected to reach EOF'
model.update(vectors_ngrams=vectors_ngrams, hidden_output=hidden_output)
model = {k: v for k, v in model.items() if k in _FIELD_NAMES}
return Model(**model)
def _backslashreplace_backport(ex):
"""Replace byte sequences that failed to decode with character escapes.
Does the same thing as errors="backslashreplace" from Python 3. Python 2
lacks this functionality out of the box, so we need to backport it.
Parameters
----------
ex: UnicodeDecodeError
contains arguments of the string and start/end indexes of the bad portion.
Returns
-------
text: unicode
The Unicode string corresponding to the decoding of the bad section.
end: int
The index from which to continue decoding.
Note
----
Works on Py2 only. Py3 already has backslashreplace built-in.
"""
#
# Based on:
# https://stackoverflow.com/questions/42860186/exact-equivalent-of-b-decodeutf-8-backslashreplace-in-python-2
#
bstr, start, end = ex.object, ex.start, ex.end
text = u''.join('\\x{:02x}'.format(ord(c)) for c in bstr[start:end])
return text, end
def _sign_model(fout):
"""
Write signature of the file in Facebook's native fastText `.bin` format
to the binary output stream `fout`. Signature includes magic bytes and version.
Name mimics original C++ implementation, see
[FastText::signModel](https://github.com/facebookresearch/fastText/blob/master/src/fasttext.cc)
Parameters
----------
fout: writeable binary stream
"""
fout.write(_FASTTEXT_FILEFORMAT_MAGIC.tobytes())
fout.write(_FASTTEXT_VERSION.tobytes())
def _conv_field_to_bytes(field_value, field_type):
"""
Auxiliary function that converts `field_value` to bytes based on request `field_type`,
for saving to the binary file.
Parameters
----------
field_value: numerical
contains arguments of the string and start/end indexes of the bad portion.
field_type: str
currently supported `field_types` are `i` for 32-bit integer and `d` for 64-bit float
"""
if field_type == 'i':
return (np.int32(field_value).tobytes())
elif field_type == 'd':
return (np.float64(field_value).tobytes())
else:
raise NotImplementedError('Currently conversion to "%s" type is not implemented.' % field_type)
def _get_field_from_model(model, field):
"""
Extract `field` from `model`.
Parameters
----------
model: gensim.models.fasttext.FastText
model from which `field` is extracted
field: str
requested field name, fields are listed in the `_NEW_HEADER_FORMAT` list
"""
if field == 'bucket':
return model.wv.bucket
elif field == 'dim':
return model.vector_size
elif field == 'epoch':
return model.epochs
elif field == 'loss':
# `loss` => hs: 1, ns: 2, softmax: 3, one-vs-all: 4
# ns = negative sampling loss (default)
# hs = hierarchical softmax loss
# softmax = softmax loss
# one-vs-all = one vs all loss (supervised)
if model.hs == 1:
return 1
elif model.hs == 0:
return 2
elif model.hs == 0 and model.negative == 0:
# NOTE: unreachable as written; the `model.hs == 0` branch above already matches this case
return 1
elif field == 'maxn':
return model.wv.max_n
elif field == 'minn':
return model.wv.min_n
elif field == 'min_count':
return model.min_count
elif field == 'model':
# `model` => cbow:1, sg:2, sup:3
# cbow = continuous bag of words (default)
# sg = skip-gram
# sup = supervised
return 2 if model.sg == 1 else 1
elif field == 'neg':
return model.negative
elif field == 't':
return model.sample
elif field == 'word_ngrams':
# This is skipped in gensim loading setting, using the default from FB C++ code
return 1
elif field == 'ws':
return model.window
elif field == 'lr_update_rate':
# This is skipped in gensim loading setting, using the default from FB C++ code
return 100
else:
msg = 'Extraction of header field "' + field + '" from Gensim FastText object not implemented.'
raise NotImplementedError(msg)
def _args_save(fout, model, fb_fasttext_parameters):
"""
Saves header with `model` parameters to the binary stream `fout` containing a model in the Facebook's
native fastText `.bin` format.
Name mimics original C++ implementation, see
[Args::save](https://github.com/facebookresearch/fastText/blob/master/src/args.cc)
Parameters
----------
fout: writeable binary stream
stream to which model is saved
model: gensim.models.fasttext.FastText
saved model
fb_fasttext_parameters: dictionary
dictionary with the `lr_update_rate` and `word_ngrams` parameters, which are unused by the
gensim implementation and therefore have to be provided externally
"""
for field, field_type in _NEW_HEADER_FORMAT:
if field in fb_fasttext_parameters:
field_value = fb_fasttext_parameters[field]
else:
field_value = _get_field_from_model(model, field)
fout.write(_conv_field_to_bytes(field_value, field_type))
def _dict_save(fout, model, encoding):
"""
Saves the dictionary from `model` to the binary stream `fout` containing a model in Facebook's
native fastText `.bin` format.
Name mimics the original C++ implementation
[Dictionary::save](https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc)
Parameters
----------
fout: writeable binary stream
stream to which the dictionary from the model is saved
model: gensim.models.fasttext.FastText
the model that contains the dictionary to save
encoding: str
string encoding used in the output
"""
# In the FB format the dictionary can contain two types of entries, i.e.
# words and labels. The first two fields of the dictionary contain
# the dictionary size (size_) and the number of words (nwords_).
# In the unsupervised case we have only words (no labels). Hence both fields
# are equal.
fout.write(np.int32(len(model.wv)).tobytes())
fout.write(np.int32(len(model.wv)).tobytes())
# nlabels=0 <- no labels we are in unsupervised mode
fout.write(np.int32(0).tobytes())
fout.write(np.int64(model.corpus_total_words).tobytes())
# prunedidx_size_=-1, -1 denotes no pruning index (pruning is only supported in supervised mode)
fout.write(np.int64(-1).tobytes())
for word in model.wv.index_to_key:
word_count = model.wv.get_vecattr(word, 'count')
fout.write(word.encode(encoding))
fout.write(_END_OF_WORD_MARKER)
fout.write(np.int64(word_count).tobytes())
fout.write(_DICT_WORD_ENTRY_TYPE_MARKER)
# We are in unsupervised case, therefore pruned_idx is empty, so we do not need to write anything else
def _input_save(fout, model):
"""
Saves word and ngram vectors from `model` to the binary stream `fout` containing a model in
the Facebook's native fastText `.bin` format.
Corresponding C++ fastText code:
[DenseMatrix::save](https://github.com/facebookresearch/fastText/blob/master/src/densematrix.cc)
Parameters
----------
fout: writeable binary stream
stream to which the vectors are saved
model: gensim.models.fasttext.FastText
the model that contains the vectors to save
"""
vocab_n, vocab_dim = model.wv.vectors_vocab.shape
ngrams_n, ngrams_dim = model.wv.vectors_ngrams.shape
assert vocab_dim == ngrams_dim
assert vocab_n == len(model.wv)
assert ngrams_n == model.wv.bucket
fout.write(struct.pack('@2q', vocab_n + ngrams_n, vocab_dim))
fout.write(model.wv.vectors_vocab.tobytes())
fout.write(model.wv.vectors_ngrams.tobytes())
def _output_save(fout, model):
"""
Saves output layer of `model` to the binary stream `fout` containing a model in
the Facebook's native fastText `.bin` format.
Corresponding C++ fastText code:
[DenseMatrix::save](https://github.com/facebookresearch/fastText/blob/master/src/densematrix.cc)
Parameters
----------
fout: writeable binary stream
the model that contains the output layer to save
model: gensim.models.fasttext.FastText
saved model
"""
if model.hs:
hidden_output = model.syn1
if model.negative:
hidden_output = model.syn1neg
hidden_n, hidden_dim = hidden_output.shape
fout.write(struct.pack('@2q', hidden_n, hidden_dim))
fout.write(hidden_output.tobytes())
def _save_to_stream(model, fout, fb_fasttext_parameters, encoding):
"""
Saves word embeddings to binary stream `fout` using the Facebook's native fasttext `.bin` format.
Parameters
----------
fout: file name or writeable binary stream
stream to which the word embeddings are saved
model: gensim.models.fasttext.FastText
the model that contains the word embeddings to save
fb_fasttext_parameters: dictionary
dictionary with the `lr_update_rate` and `word_ngrams` parameters, which are unused by the
gensim implementation and therefore have to be provided externally
encoding: str
encoding used in the output file
"""
_sign_model(fout)
_args_save(fout, model, fb_fasttext_parameters)
_dict_save(fout, model, encoding)
fout.write(struct.pack('@?', False)) # Save 'quant_', which is False for unsupervised models
# Save words and ngrams vectors
_input_save(fout, model)
fout.write(struct.pack('@?', False))  # Save 'qout_', which is False for unsupervised models
# Save output layers of the model
_output_save(fout, model)
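# Illustrative usage sketch, not part of the original module. In recent gensim
# versions the public wrappers around this code are save_facebook_model() and
# load_facebook_model() in gensim.models.fasttext; treat the exact names and
# default parameters as an assumption and check your installed version.
def _example_roundtrip_facebook_format(model, path):
    from gensim.models.fasttext import save_facebook_model, load_facebook_model
    save_facebook_model(model, path)   # writes the .bin file via the code above
    return load_facebook_model(path)   # reload to sanity-check the written file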
def save(model, fout, fb_fasttext_parameters, encoding):
"""
Saves word embeddings to the Facebook's native fasttext `.bin` format.
Parameters
----------
fout: file name or writeable binary stream
stream to which model is saved
model: gensim.models.fasttext.FastText
the model to save
fb_fasttext_parameters: dictionary
dictionary containing the `lr_update_rate` and `word_ngrams` parameters;
these are unused by the gensim implementation, so they have to be provided externally
encoding: str
encoding used in the output file
Notes
-----
Unfortunately, there is no documentation of Facebook's native fastText `.bin` format.
This is just a reimplementation of
[FastText::saveModel](https://github.com/facebookresearch/fastText/blob/master/src/fasttext.cc),
based on v0.9.1, more precisely commit da2745fcccb848c7a225a7d558218ee4c64d5333.
The code follows the original C++ naming.
"""
if isinstance(fout, str):
with open(fout, "wb") as fout_stream:
_save_to_stream(model, fout_stream, fb_fasttext_parameters, encoding)
else:
_save_to_stream(model, fout, fb_fasttext_parameters, encoding)
| 21,830 | Python | .py | 542 | 34.374539 | 116 | 0.670032 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,128 | lsi_worker.py | piskvorky_gensim/gensim/models/lsi_worker.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Worker ("slave") process used in computing distributed Latent Semantic Indexing (LSI,
:class:`~gensim.models.lsimodel.LsiModel`) models.
Run this script on every node in your cluster. If you wish, you may even run it multiple times on a single machine,
to make better use of multiple cores (just beware that memory footprint increases linearly).
How to use distributed LSI
--------------------------
#. Install needed dependencies (Pyro4) ::
pip install gensim[distributed]
#. Setup serialization (on each machine) ::
export PYRO_SERIALIZERS_ACCEPTED=pickle
export PYRO_SERIALIZER=pickle
#. Run nameserver ::
python -m Pyro4.naming -n 0.0.0.0 &
#. Run workers (on each machine) ::
python -m gensim.models.lsi_worker &
#. Run dispatcher ::
python -m gensim.models.lsi_dispatcher &
#. Run :class:`~gensim.models.lsimodel.LsiModel` in distributed mode:
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.models import LsiModel
>>>
>>> model = LsiModel(common_corpus, id2word=common_dictionary, distributed=True)
Command line arguments
----------------------
.. program-output:: python -m gensim.models.lsi_worker --help
:ellipsis: 0, -3
"""
import os
import sys
import logging
import argparse
import threading
import tempfile
import queue as Queue
import Pyro4
from gensim.models import lsimodel
from gensim import utils
logger = logging.getLogger(__name__)
SAVE_DEBUG = 0 # save intermediate models after every SAVE_DEBUG updates (0 for never)
class Worker:
def __init__(self):
"""Partly initialize the model.
A full initialization requires a call to :meth:`~gensim.models.lsi_worker.Worker.initialize`.
"""
self.model = None
@Pyro4.expose
def initialize(self, myid, dispatcher, **model_params):
"""Fully initialize the worker.
Parameters
----------
myid : int
An ID number used to identify this worker in the dispatcher object.
dispatcher : :class:`~gensim.models.lsi_dispatcher.Dispatcher`
The dispatcher responsible for scheduling this worker.
**model_params
Keyword parameters to initialize the inner LSI model, see :class:`~gensim.models.lsimodel.LsiModel`.
"""
self.lock_update = threading.Lock()
self.jobsdone = 0 # how many jobs has this worker completed?
# id of this worker in the dispatcher; just a convenience var for easy access/logging TODO remove?
self.myid = myid
self.dispatcher = dispatcher
self.finished = False
logger.info("initializing worker #%s", myid)
self.model = lsimodel.LsiModel(**model_params)
@Pyro4.expose
@Pyro4.oneway
def requestjob(self):
"""Request jobs from the dispatcher, in a perpetual loop until :meth:`~gensim.models.lsi_worker.Worker.getstate`
is called.
Raises
------
RuntimeError
If `self.model` is None (i.e. worker not initialized).
"""
if self.model is None:
raise RuntimeError("worker must be initialized before receiving jobs")
job = None
while job is None and not self.finished:
try:
job = self.dispatcher.getjob(self.myid)
except Queue.Empty:
# no new job: try again, unless we're finished with all work
continue
if job is not None:
logger.info("worker #%s received job #%i", self.myid, self.jobsdone)
self.processjob(job)
self.dispatcher.jobdone(self.myid)
else:
logger.info("worker #%i stopping asking for jobs", self.myid)
@utils.synchronous('lock_update')
def processjob(self, job):
"""Incrementally process the job and potentially logs progress.
Parameters
----------
job : iterable of list of (int, float)
Corpus in BoW format.
"""
self.model.add_documents(job)
self.jobsdone += 1
if SAVE_DEBUG and self.jobsdone % SAVE_DEBUG == 0:
fname = os.path.join(tempfile.gettempdir(), 'lsi_worker.pkl')
self.model.save(fname)
@Pyro4.expose
@utils.synchronous('lock_update')
def getstate(self):
"""Log and get the LSI model's current projection.
Returns
-------
:class:`~gensim.models.lsimodel.Projection`
The current projection.
"""
logger.info("worker #%i returning its state after %s jobs", self.myid, self.jobsdone)
assert isinstance(self.model.projection, lsimodel.Projection)
self.finished = True
return self.model.projection
@Pyro4.expose
@utils.synchronous('lock_update')
def reset(self):
"""Reset the worker by deleting its current projection."""
logger.info("resetting worker #%i", self.myid)
self.model.projection = self.model.projection.empty_like()
self.finished = False
@Pyro4.oneway
def exit(self):
"""Terminate the worker."""
logger.info("terminating worker #%i", self.myid)
os._exit(0)
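# Illustrative sketch, not part of the original module: how a client typically drives
# a Worker over Pyro4. The worker registers itself as 'gensim.lsi_worker' plus a
# random suffix (see the pyro_daemon() call in __main__), so the URI passed in here
# is an assumption that must come from your Pyro4 name server.
def _example_drive_worker(worker_uri, dispatcher, model_params):
    worker = Pyro4.Proxy(worker_uri)                   # proxy to a running Worker
    worker.initialize(0, dispatcher, **model_params)   # same params as LsiModel
    worker.requestjob()                                # oneway call: worker starts pulling jobs
    return worker                                      # later, the dispatcher collects worker.getstate()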
if __name__ == '__main__':
"""The main script. """
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
parser = argparse.ArgumentParser(description=__doc__[:-135], formatter_class=argparse.RawTextHelpFormatter)
_ = parser.parse_args()
logger.info("running %s", " ".join(sys.argv))
utils.pyro_daemon('gensim.lsi_worker', Worker(), random_suffix=True)
logger.info("finished running %s", parser.prog)
| 5,915 | Python | .py | 142 | 34.556338 | 120 | 0.655016 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,129 | logentropy_model.py | piskvorky_gensim/gensim/models/logentropy_model.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""This module allows simple Bag of Words (BoW) represented corpus to be transformed into log entropy space.
It implements the Log Entropy Model, which produces an entropy-weighted logarithmic term frequency representation.
An empirical study by Lee et al. 2005 [1]_ suggests that the log entropy-weighted model yields better results
than other forms of representation.
References
----------
.. [1] Lee et al. 2005. An Empirical Evaluation of Models of Text Document Similarity.
https://escholarship.org/uc/item/48g155nq
"""
import logging
import math
from gensim import interfaces, matutils, utils
logger = logging.getLogger(__name__)
class LogEntropyModel(interfaces.TransformationABC):
r"""Objects of this class realize the transformation between word-document co-occurrence matrix (int)
into a locally/globally weighted matrix (positive floats).
This is done by a log entropy normalization, optionally normalizing the resulting documents to unit length.
The following formulas explain how o compute the log entropy weight for term :math:`i` in document :math:`j`:
.. math::
local\_weight_{i,j} = log(frequency_{i,j} + 1)
P_{i,j} = \frac{frequency_{i,j}}{\sum_j frequency_{i,j}}
global\_weight_i = 1 + \frac{\sum_j P_{i,j} * log(P_{i,j})}{log(number\_of\_documents + 1)}
final\_weight_{i,j} = local\_weight_{i,j} * global\_weight_i
Examples
--------
.. sourcecode:: pycon
>>> from gensim.models import LogEntropyModel
>>> from gensim.test.utils import common_texts
>>> from gensim.corpora import Dictionary
>>>
>>> dct = Dictionary(common_texts) # fit dictionary
>>> corpus = [dct.doc2bow(row) for row in common_texts] # convert to BoW format
>>> model = LogEntropyModel(corpus) # fit model
>>> vector = model[corpus[1]] # apply model to document
"""
def __init__(self, corpus, normalize=True):
"""
Parameters
----------
corpus : iterable of iterable of (int, int)
Input corpus in BoW format.
normalize : bool, optional
If True, the resulting log entropy-weighted vector will be normalized to unit length;
if False, do nothing.
"""
self.normalize = normalize
self.n_docs = 0
self.n_words = 0
self.entr = {}
if corpus is not None:
self.initialize(corpus)
def __str__(self):
return "%s<n_docs=%s, n_words=%s>" % (self.__class__.__name__, self.n_docs, self.n_words)
def initialize(self, corpus):
"""Calculates the global weighting for all terms in a given corpus and transforms the simple
count representation into the log entropy normalized space.
Parameters
----------
corpus : iterable of iterable of (int, int)
Corpus in BoW format.
"""
logger.info("calculating counts")
glob_freq = {}
glob_num_words, doc_no = 0, -1
for doc_no, bow in enumerate(corpus):
if doc_no % 10000 == 0:
logger.info("PROGRESS: processing document #%i", doc_no)
glob_num_words += len(bow)
for term_id, term_count in bow:
glob_freq[term_id] = glob_freq.get(term_id, 0) + term_count
# keep some stats about the training corpus
self.n_docs = doc_no + 1
self.n_words = glob_num_words
# and finally compute the global weights
logger.info(
"calculating global log entropy weights for %i documents and %i features (%i matrix non-zeros)",
self.n_docs, len(glob_freq), self.n_words
)
logger.debug('iterating over corpus')
# initialize doc_no2 index in case corpus is empty
doc_no2 = 0
for doc_no2, bow in enumerate(corpus):
for key, freq in bow:
p = (float(freq) / glob_freq[key]) * math.log(float(freq) / glob_freq[key])
self.entr[key] = self.entr.get(key, 0.0) + p
if doc_no2 != doc_no:
raise ValueError("LogEntropyModel doesn't support generators as training data")
logger.debug('iterating over keys')
for key in self.entr:
self.entr[key] = 1 + self.entr[key] / math.log(self.n_docs + 1)
def __getitem__(self, bow):
"""Get log entropy representation of the input vector and/or corpus.
Parameters
----------
bow : list of (int, int)
Document in BoW format.
Returns
-------
list of (int, float)
Log-entropy vector for passed `bow`.
"""
# if the input vector is in fact a corpus, return a transformed corpus
is_corpus, bow = utils.is_corpus(bow)
if is_corpus:
return self._apply(bow)
# unknown (new) terms will be given zero weight (NOT infinity/huge)
vector = [
(term_id, math.log(tf + 1) * self.entr.get(term_id))
for term_id, tf in bow
if term_id in self.entr
]
if self.normalize:
vector = matutils.unitvec(vector)
return vector
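# Illustrative sketch, not part of the original module: the same log-entropy weighting
# computed by hand for a toy BoW corpus, following the formulas in the class docstring.
# Useful as a cross-check against LogEntropyModel (without the optional normalization).
def _example_manual_log_entropy(corpus):
    """`corpus` is a list of BoW documents, e.g. [[(0, 2), (1, 1)], [(0, 1)]]."""
    glob_freq = {}
    for bow in corpus:
        for term_id, freq in bow:
            glob_freq[term_id] = glob_freq.get(term_id, 0) + freq
    entropy = {}
    for bow in corpus:
        for term_id, freq in bow:
            p = freq / glob_freq[term_id]
            entropy[term_id] = entropy.get(term_id, 0.0) + p * math.log(p)
    global_weight = {t: 1 + e / math.log(len(corpus) + 1) for t, e in entropy.items()}
    # final weight of term `t` with in-document frequency `tf`: log(tf + 1) * global_weight[t]
    return lambda t, tf: math.log(tf + 1) * global_weight[t]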
| 5,329 | Python | .py | 118 | 36.525424 | 118 | 0.616187 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,130 | doc2vec.py | piskvorky_gensim/gensim/models/doc2vec.py |
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Gensim Contributors
# Copyright (C) 2018 RaRe Technologies s.r.o.
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""
Introduction
============
Learn paragraph and document embeddings via the distributed memory and distributed bag of words models from
`Quoc Le and Tomas Mikolov: "Distributed Representations of Sentences and Documents"
<http://arxiv.org/pdf/1405.4053v2.pdf>`_.
The algorithms use either hierarchical softmax or negative sampling; see
`Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean: "Efficient Estimation of Word Representations in
Vector Space, in Proceedings of Workshop at ICLR, 2013" <https://arxiv.org/pdf/1301.3781.pdf>`_ and
`Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean: "Distributed Representations of Words
and Phrases and their Compositionality. In Proceedings of NIPS, 2013"
<https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf>`_.
For a usage example, see the `Doc2vec tutorial
<https://radimrehurek.com/gensim/auto_examples/tutorials/run_doc2vec_lee.html#sphx-glr-auto-examples-tutorials-run-doc2vec-lee-py>`_.
**Make sure you have a C compiler before installing Gensim, to use the optimized doc2vec routines** (70x speedup
compared to plain NumPy implementation, https://rare-technologies.com/parallelizing-word2vec-in-python/).
Usage examples
==============
Initialize & train a model:
.. sourcecode:: pycon
>>> from gensim.test.utils import common_texts
>>> from gensim.models.doc2vec import Doc2Vec, TaggedDocument
>>>
>>> documents = [TaggedDocument(doc, [i]) for i, doc in enumerate(common_texts)]
>>> model = Doc2Vec(documents, vector_size=5, window=2, min_count=1, workers=4)
Persist a model to disk:
.. sourcecode:: pycon
>>> from gensim.test.utils import get_tmpfile
>>>
>>> fname = get_tmpfile("my_doc2vec_model")
>>>
>>> model.save(fname)
>>> model = Doc2Vec.load(fname) # you can continue training with the loaded model!
Infer vector for a new document:
.. sourcecode:: pycon
>>> vector = model.infer_vector(["system", "response"])
"""
import logging
import os
from collections import namedtuple, defaultdict
from collections.abc import Iterable
from timeit import default_timer
from dataclasses import dataclass
from numpy import zeros, float32 as REAL, vstack, integer, dtype
import numpy as np
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from gensim.utils import deprecated
from gensim.models import Word2Vec, FAST_VERSION # noqa: F401
from gensim.models.keyedvectors import KeyedVectors, pseudorandom_weak_vector
logger = logging.getLogger(__name__)
try:
from gensim.models.doc2vec_inner import train_document_dbow, train_document_dm, train_document_dm_concat
except ImportError:
raise utils.NO_CYTHON
try:
from gensim.models.doc2vec_corpusfile import (
d2v_train_epoch_dbow,
d2v_train_epoch_dm_concat,
d2v_train_epoch_dm,
CORPUSFILE_VERSION
)
except ImportError:
# corpusfile doc2vec is not supported
CORPUSFILE_VERSION = -1
def d2v_train_epoch_dbow(model, corpus_file, offset, start_doctag, _cython_vocab, _cur_epoch, _expected_examples,
_expected_words, work, _neu1, docvecs_count, word_vectors=None, word_locks=None,
train_words=False, learn_doctags=True, learn_words=True, learn_hidden=True,
doctag_vectors=None, doctag_locks=None):
raise NotImplementedError("Training with corpus_file argument is not supported.")
def d2v_train_epoch_dm_concat(model, corpus_file, offset, start_doctag, _cython_vocab, _cur_epoch,
_expected_examples, _expected_words, work, _neu1, docvecs_count, word_vectors=None,
word_locks=None, learn_doctags=True, learn_words=True, learn_hidden=True,
doctag_vectors=None, doctag_locks=None):
raise NotImplementedError("Training with corpus_file argument is not supported.")
def d2v_train_epoch_dm(model, corpus_file, offset, start_doctag, _cython_vocab, _cur_epoch, _expected_examples,
_expected_words, work, _neu1, docvecs_count, word_vectors=None, word_locks=None,
learn_doctags=True, learn_words=True, learn_hidden=True, doctag_vectors=None,
doctag_locks=None):
raise NotImplementedError("Training with corpus_file argument is not supported.")
class TaggedDocument(namedtuple('TaggedDocument', 'words tags')):
"""Represents a document along with a tag, input document format for :class:`~gensim.models.doc2vec.Doc2Vec`.
A single document, made up of `words` (a list of unicode string tokens) and `tags` (a list of tokens).
Tags may be one or more unicode string tokens, but typical practice (which will also be the most memory-efficient)
is for the tags list to include a unique integer id as the only tag.
Replaces "sentence as a list of words" from :class:`gensim.models.word2vec.Word2Vec`.
"""
def __str__(self):
"""Human readable representation of the object's state, used for debugging.
Returns
-------
str
Human readable representation of the object's state (words and tags).
"""
return '%s<%s, %s>' % (self.__class__.__name__, self.words, self.tags)
@dataclass
class Doctag:
"""A dataclass shape-compatible with keyedvectors.SimpleVocab, extended to record
details of string document tags discovered during the initial vocabulary scan.
Will not be used if all presented document tags are ints. No longer used in a
completed model: just used during initial scan, and for backward compatibility.
"""
__slots__ = ('doc_count', 'index', 'word_count')
doc_count: int # number of docs where tag appeared
index: int # position in underlying array
word_count: int # number of words in associated docs
@property
def count(self):
return self.doc_count
@count.setter
def count(self, new_val):
self.doc_count = new_val
class Doc2Vec(Word2Vec):
def __init__(
self, documents=None, corpus_file=None, vector_size=100, dm_mean=None, dm=1, dbow_words=0, dm_concat=0,
dm_tag_count=1, dv=None, dv_mapfile=None, comment=None, trim_rule=None, callbacks=(),
window=5, epochs=10, shrink_windows=True, **kwargs,
):
"""Class for training, using and evaluating neural networks described in
`Distributed Representations of Sentences and Documents <http://arxiv.org/abs/1405.4053v2>`_.
Parameters
----------
documents : iterable of list of :class:`~gensim.models.doc2vec.TaggedDocument`, optional
Input corpus, can be simply a list of elements, but for larger corpora, consider an iterable that streams
the documents directly from disk/network. If you don't supply `documents` (or `corpus_file`), the model is
left uninitialized -- use if you plan to initialize it in some other way.
corpus_file : str, optional
Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
You may use this argument instead of `documents` to get performance boost. Only one of `documents` or
`corpus_file` arguments need to be passed (or none of them, in that case, the model is left uninitialized).
Documents' tags are assigned automatically and are equal to line number, as in
:class:`~gensim.models.doc2vec.TaggedLineDocument`.
dm : {1,0}, optional
Defines the training algorithm. If `dm=1`, 'distributed memory' (PV-DM) is used.
Otherwise, `distributed bag of words` (PV-DBOW) is employed.
vector_size : int, optional
Dimensionality of the feature vectors.
window : int, optional
The maximum distance between the current and predicted word within a sentence.
alpha : float, optional
The initial learning rate.
min_alpha : float, optional
Learning rate will linearly drop to `min_alpha` as training progresses.
seed : int, optional
Seed for the random number generator. Initial vectors for each word are seeded with a hash of
the concatenation of word + `str(seed)`. Note that for a fully deterministically-reproducible run,
you must also limit the model to a single worker thread (`workers=1`), to eliminate ordering jitter
from OS thread scheduling.
In Python 3, reproducibility between interpreter launches also requires use of the `PYTHONHASHSEED`
environment variable to control hash randomization.
min_count : int, optional
Ignores all words with total frequency lower than this.
max_vocab_size : int, optional
Limits the RAM during vocabulary building; if there are more unique
words than this, then prune the infrequent ones. Every 10 million word types need about 1GB of RAM.
Set to `None` for no limit.
sample : float, optional
The threshold for configuring which higher-frequency words are randomly downsampled,
useful range is (0, 1e-5).
workers : int, optional
Use these many worker threads to train the model (=faster training with multicore machines).
epochs : int, optional
Number of iterations (epochs) over the corpus. Defaults to 10 for Doc2Vec.
hs : {1,0}, optional
If 1, hierarchical softmax will be used for model training.
If set to 0, and `negative` is non-zero, negative sampling will be used.
negative : int, optional
If > 0, negative sampling will be used, the int for negative specifies how many "noise words"
should be drawn (usually between 5-20).
If set to 0, no negative sampling is used.
ns_exponent : float, optional
The exponent used to shape the negative sampling distribution. A value of 1.0 samples exactly in proportion
to the frequencies, 0.0 samples all words equally, while a negative value samples low-frequency words more
than high-frequency words. The popular default value of 0.75 was chosen by the original Word2Vec paper.
More recently, in https://arxiv.org/abs/1804.04212, Caselles-Dupré, Lesaint, & Royo-Letelier suggest that
other values may perform better for recommendation applications.
dm_mean : {1,0}, optional
If 0, use the sum of the context word vectors. If 1, use the mean.
Only applies when `dm` is used in non-concatenative mode.
dm_concat : {1,0}, optional
If 1, use concatenation of context vectors rather than sum/average;
Note concatenation results in a much-larger model, as the input
is no longer the size of one (sampled or arithmetically combined) word vector, but the
size of the tag(s) and all words in the context strung together.
dm_tag_count : int, optional
Expected constant number of document tags per document, when using
dm_concat mode.
dbow_words : {1,0}, optional
If set to 1 trains word-vectors (in skip-gram fashion) simultaneous with DBOW
doc-vector training; If 0, only trains doc-vectors (faster).
trim_rule : function, optional
Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
or a callable that accepts parameters (word, count, min_count) and returns either
:attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
The rule, if given, is only used to prune vocabulary during current method call and is not stored as part
of the model.
The input parameters are of the following types:
* `word` (str) - the word we are examining
* `count` (int) - the word's frequency count in the corpus
* `min_count` (int) - the minimum count threshold.
callbacks : :obj: `list` of :obj: `~gensim.models.callbacks.CallbackAny2Vec`, optional
List of callbacks that need to be executed/run at specific stages during training.
shrink_windows : bool, optional
New in 4.1. Experimental.
If True, the effective window size is uniformly sampled from [1, `window`]
for each target word during training, to match the original word2vec algorithm's
approximate weighting of context words by distance. Otherwise, the effective
window size is always fixed to `window` words to either side.
Some important internal attributes are the following:
Attributes
----------
wv : :class:`~gensim.models.keyedvectors.KeyedVectors`
This object essentially contains the mapping between words and embeddings. After training, it can be used
directly to query those embeddings in various ways. See the module level docstring for examples.
dv : :class:`~gensim.models.keyedvectors.KeyedVectors`
This object contains the paragraph vectors learned from the training data. There will be one such vector
for each unique document tag supplied during training. They may be individually accessed using the tag
as an indexed-access key. For example, if one of the training documents used a tag of 'doc003':
.. sourcecode:: pycon
>>> model.dv['doc003']
"""
corpus_iterable = documents
if dm_mean is not None:
self.cbow_mean = dm_mean
self.dbow_words = int(dbow_words)
self.dm_concat = int(dm_concat)
self.dm_tag_count = int(dm_tag_count)
if dm and dm_concat:
self.layer1_size = (dm_tag_count + (2 * window)) * vector_size
logger.info("using concatenative %d-dimensional layer1", self.layer1_size)
self.vector_size = vector_size
self.dv = dv or KeyedVectors(self.vector_size, mapfile_path=dv_mapfile)
# EXPERIMENTAL lockf feature; create minimal no-op lockf arrays (1 element of 1.0)
# advanced users should directly resize/adjust as desired after any vocab growth
self.dv.vectors_lockf = np.ones(1, dtype=REAL) # 0.0 values suppress word-backprop-updates; 1.0 allows
super(Doc2Vec, self).__init__(
sentences=corpus_iterable,
corpus_file=corpus_file,
vector_size=self.vector_size,
sg=(1 + dm) % 2,
null_word=self.dm_concat,
callbacks=callbacks,
window=window,
epochs=epochs,
shrink_windows=shrink_windows,
**kwargs,
)
@property
def dm(self):
"""Indicates whether 'distributed memory' (PV-DM) will be used, else 'distributed bag of words'
(PV-DBOW) is used.
"""
return not self.sg # opposite of SG
@property
def dbow(self):
"""Indicates whether 'distributed bag of words' (PV-DBOW) will be used, else 'distributed memory'
(PV-DM) is used.
"""
return self.sg # same as SG
@property
@deprecated("The `docvecs` property has been renamed `dv`.")
def docvecs(self):
return self.dv
@docvecs.setter
@deprecated("The `docvecs` property has been renamed `dv`.")
def docvecs(self, value):
self.dv = value
def _clear_post_train(self):
"""Resets the current word vectors. """
self.wv.norms = None
self.dv.norms = None
def init_weights(self):
super(Doc2Vec, self).init_weights()
# to not use an identical rnd stream as words, deterministically change seed (w/ 1000th prime)
self.dv.resize_vectors(seed=self.seed + 7919)
def reset_from(self, other_model):
"""Copy shareable data structures from another (possibly pre-trained) model.
This specifically causes some structures to be shared, so is limited to
structures (like those related to the known word/tag vocabularies) that
won't change during training or thereafter. Beware vocabulary edits/updates
to either model afterwards: the partial sharing and out-of-band modification
may leave the other model in a broken state.
Parameters
----------
other_model : :class:`~gensim.models.doc2vec.Doc2Vec`
Other model whose internal data structures will be copied over to the current object.
"""
self.wv.key_to_index = other_model.wv.key_to_index
self.wv.index_to_key = other_model.wv.index_to_key
self.wv.expandos = other_model.wv.expandos
self.cum_table = other_model.cum_table
self.corpus_count = other_model.corpus_count
self.dv.key_to_index = other_model.dv.key_to_index
self.dv.index_to_key = other_model.dv.index_to_key
self.dv.expandos = other_model.dv.expandos
self.init_weights()
def _do_train_epoch(
self, corpus_file, thread_id, offset, cython_vocab, thread_private_mem, cur_epoch,
total_examples=None, total_words=None, offsets=None, start_doctags=None, **kwargs
):
work, neu1 = thread_private_mem
doctag_vectors = self.dv.vectors
doctags_lockf = self.dv.vectors_lockf
offset = offsets[thread_id]
start_doctag = start_doctags[thread_id]
if self.sg:
examples, tally, raw_tally = d2v_train_epoch_dbow(
self, corpus_file, offset, start_doctag, cython_vocab, cur_epoch,
total_examples, total_words, work, neu1, len(self.dv),
doctag_vectors=doctag_vectors, doctags_lockf=doctags_lockf, train_words=self.dbow_words)
elif self.dm_concat:
examples, tally, raw_tally = d2v_train_epoch_dm_concat(
self, corpus_file, offset, start_doctag, cython_vocab, cur_epoch,
total_examples, total_words, work, neu1, len(self.dv),
doctag_vectors=doctag_vectors, doctags_lockf=doctags_lockf)
else:
examples, tally, raw_tally = d2v_train_epoch_dm(
self, corpus_file, offset, start_doctag, cython_vocab, cur_epoch,
total_examples, total_words, work, neu1, len(self.dv),
doctag_vectors=doctag_vectors, doctags_lockf=doctags_lockf)
return examples, tally, raw_tally
def _do_train_job(self, job, alpha, inits):
"""Train model using `job` data.
Parameters
----------
job : iterable of list of :class:`~gensim.models.doc2vec.TaggedDocument`
The corpus chunk to be used for training this batch.
alpha : float
Learning rate to be used for training this batch.
inits : (np.ndarray, np.ndarray)
Each worker threads private work memory.
Returns
-------
(int, int)
2-tuple (effective word count after ignoring unknown words and sentence length trimming, total word count).
"""
work, neu1 = inits
tally = 0
for doc in job:
doctag_indexes = [self.dv.get_index(tag) for tag in doc.tags if tag in self.dv]
doctag_vectors = self.dv.vectors
doctags_lockf = self.dv.vectors_lockf
if self.sg:
tally += train_document_dbow(
self, doc.words, doctag_indexes, alpha, work, train_words=self.dbow_words,
doctag_vectors=doctag_vectors, doctags_lockf=doctags_lockf
)
elif self.dm_concat:
tally += train_document_dm_concat(
self, doc.words, doctag_indexes, alpha, work, neu1,
doctag_vectors=doctag_vectors, doctags_lockf=doctags_lockf
)
else:
tally += train_document_dm(
self, doc.words, doctag_indexes, alpha, work, neu1,
doctag_vectors=doctag_vectors, doctags_lockf=doctags_lockf
)
return tally, self._raw_word_count(job)
def train(
self, corpus_iterable=None, corpus_file=None, total_examples=None, total_words=None,
epochs=None, start_alpha=None, end_alpha=None,
word_count=0, queue_factor=2, report_delay=1.0, callbacks=(),
**kwargs,
):
"""Update the model's neural weights.
To support linear learning-rate decay from (initial) `alpha` to `min_alpha`, and accurate
progress-percentage logging, either `total_examples` (count of documents) or `total_words` (count of
raw words in documents) **MUST** be provided. If `documents` is the same corpus
that was provided to :meth:`~gensim.models.word2vec.Word2Vec.build_vocab` earlier,
you can simply use `total_examples=self.corpus_count`.
To avoid common mistakes around the model's ability to do multiple training passes itself, an
explicit `epochs` argument **MUST** be provided. In the common and recommended case
where :meth:`~gensim.models.word2vec.Word2Vec.train` is only called once,
you can set `epochs=self.epochs`.
Parameters
----------
corpus_iterable : iterable of list of :class:`~gensim.models.doc2vec.TaggedDocument`, optional
Can be simply a list of elements, but for larger corpora, consider an iterable that streams
the documents directly from disk/network. If you don't supply `documents` (or `corpus_file`), the model is
left uninitialized -- use if you plan to initialize it in some other way.
corpus_file : str, optional
Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
You may use this argument instead of `documents` to get performance boost. Only one of `documents` or
`corpus_file` arguments need to be passed (not both of them). Documents' tags are assigned automatically
and are equal to line number, as in :class:`~gensim.models.doc2vec.TaggedLineDocument`.
total_examples : int, optional
Count of documents.
total_words : int, optional
Count of raw words in documents.
epochs : int, optional
Number of iterations (epochs) over the corpus.
start_alpha : float, optional
Initial learning rate. If supplied, replaces the starting `alpha` from the constructor,
for this one call to `train`.
Use only if making multiple calls to `train`, when you want to manage the alpha learning-rate yourself
(not recommended).
end_alpha : float, optional
Final learning rate. Drops linearly from `start_alpha`.
If supplied, this replaces the final `min_alpha` from the constructor, for this one call to
:meth:`~gensim.models.doc2vec.Doc2Vec.train`.
Use only if making multiple calls to :meth:`~gensim.models.doc2vec.Doc2Vec.train`, when you want to manage
the alpha learning-rate yourself (not recommended).
word_count : int, optional
Count of words already trained. Set this to 0 for the usual
case of training on all words in documents.
queue_factor : int, optional
Multiplier for size of queue (number of workers * queue_factor).
report_delay : float, optional
Seconds to wait before reporting progress.
callbacks : :obj: `list` of :obj: `~gensim.models.callbacks.CallbackAny2Vec`, optional
List of callbacks that need to be executed/run at specific stages during training.
"""
if corpus_file is None and corpus_iterable is None:
raise TypeError("Either one of corpus_file or corpus_iterable value must be provided")
if corpus_file is not None and corpus_iterable is not None:
raise TypeError("Both corpus_file and corpus_iterable must not be provided at the same time")
if corpus_iterable is None and not os.path.isfile(corpus_file):
raise TypeError("Parameter corpus_file must be a valid path to a file, got %r instead" % corpus_file)
if corpus_iterable is not None and not isinstance(corpus_iterable, Iterable):
raise TypeError("corpus_iterable must be an iterable of TaggedDocument, got %r instead" % corpus_iterable)
if corpus_file is not None:
# Calculate offsets for each worker along with initial doctags (doctag ~ document/line number in a file)
offsets, start_doctags = self._get_offsets_and_start_doctags_for_corpusfile(corpus_file, self.workers)
kwargs['offsets'] = offsets
kwargs['start_doctags'] = start_doctags
super(Doc2Vec, self).train(
corpus_iterable=corpus_iterable, corpus_file=corpus_file,
total_examples=total_examples, total_words=total_words,
epochs=epochs, start_alpha=start_alpha, end_alpha=end_alpha, word_count=word_count,
queue_factor=queue_factor, report_delay=report_delay, callbacks=callbacks, **kwargs)
@classmethod
def _get_offsets_and_start_doctags_for_corpusfile(cls, corpus_file, workers):
"""Get offset and initial document tag in a corpus_file for each worker.
Firstly, approximate offsets are calculated based on number of workers and corpus_file size.
Secondly, for each approximate offset we find the largest offset that points to the beginning of a line and
is smaller than the approximate offset.
Parameters
----------
corpus_file : str
Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
workers : int
Number of workers.
Returns
-------
list of int, list of int
Lists with offsets and document tags with length = number of workers.
"""
corpus_file_size = os.path.getsize(corpus_file)
approx_offsets = [int(corpus_file_size // workers * i) for i in range(workers)]
offsets = []
start_doctags = []
with utils.open(corpus_file, mode='rb') as fin:
curr_offset_idx = 0
prev_filepos = 0
for line_no, line in enumerate(fin):
if curr_offset_idx == len(approx_offsets):
break
curr_filepos = prev_filepos + len(line)
while curr_offset_idx != len(approx_offsets) and approx_offsets[curr_offset_idx] < curr_filepos:
offsets.append(prev_filepos)
start_doctags.append(line_no)
curr_offset_idx += 1
prev_filepos = curr_filepos
return offsets, start_doctags
def _raw_word_count(self, job):
"""Get the number of words in a given job.
Parameters
----------
job : iterable of list of :class:`~gensim.models.doc2vec.TaggedDocument`
Corpus chunk.
Returns
-------
int
Number of raw words in the corpus chunk.
"""
return sum(len(sentence.words) for sentence in job)
def estimated_lookup_memory(self):
"""Get estimated memory for tag lookup, 0 if using pure int tags.
Returns
-------
int
The estimated RAM required to look up a tag in bytes.
"""
return 60 * len(self.dv) + 140 * len(self.dv)
def infer_vector(self, doc_words, alpha=None, min_alpha=None, epochs=None):
"""Infer a vector for given post-bulk training document.
Notes
-----
Subsequent calls to this function may infer different representations for the same document.
For a more stable representation, increase the number of epochs to assert a stricter convergence.
Parameters
----------
doc_words : list of str
A document for which the vector representation will be inferred.
alpha : float, optional
The initial learning rate. If unspecified, value from model initialization will be reused.
min_alpha : float, optional
Learning rate will linearly drop to `min_alpha` over all inference epochs. If unspecified,
value from model initialization will be reused.
epochs : int, optional
Number of times to train the new document. Larger values take more time, but may improve
quality and run-to-run stability of inferred vectors. If unspecified, the `epochs` value
from model initialization will be reused.
Returns
-------
np.ndarray
The inferred paragraph vector for the new document.
"""
if isinstance(doc_words, str): # a common mistake; fail with a nicer error
raise TypeError("Parameter doc_words of infer_vector() must be a list of strings (not a single string).")
alpha = alpha or self.alpha
min_alpha = min_alpha or self.min_alpha
epochs = epochs or self.epochs
doctag_vectors = pseudorandom_weak_vector(self.dv.vector_size, seed_string=' '.join(doc_words))
doctag_vectors = doctag_vectors.reshape(1, self.dv.vector_size)
doctags_lockf = np.ones(1, dtype=REAL)
doctag_indexes = [0]
work = zeros(self.layer1_size, dtype=REAL)
if not self.sg:
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
alpha_delta = (alpha - min_alpha) / max(epochs - 1, 1)
for i in range(epochs):
if self.sg:
train_document_dbow(
self, doc_words, doctag_indexes, alpha, work,
learn_words=False, learn_hidden=False, doctag_vectors=doctag_vectors, doctags_lockf=doctags_lockf
)
elif self.dm_concat:
train_document_dm_concat(
self, doc_words, doctag_indexes, alpha, work, neu1,
learn_words=False, learn_hidden=False, doctag_vectors=doctag_vectors, doctags_lockf=doctags_lockf
)
else:
train_document_dm(
self, doc_words, doctag_indexes, alpha, work, neu1,
learn_words=False, learn_hidden=False, doctag_vectors=doctag_vectors, doctags_lockf=doctags_lockf
)
alpha -= alpha_delta
return doctag_vectors[0]
def __getitem__(self, tag):
"""Get the vector representation of (possibly multi-term) tag.
Parameters
----------
tag : {str, int, list of str, list of int}
The tag (or tags) to be looked up in the model.
Returns
-------
np.ndarray
The vector representations of each tag as a matrix (will be 1D if `tag` was a single tag)
"""
if isinstance(tag, (str, int, integer,)):
if tag not in self.wv:
return self.dv[tag]
return self.wv[tag]
return vstack([self[i] for i in tag])
def __str__(self):
"""Abbreviated name reflecting major configuration parameters.
Returns
-------
str
Human readable representation of the models internal state.
"""
segments = []
if self.comment:
segments.append('"%s"' % self.comment)
if self.sg:
if self.dbow_words:
segments.append('dbow+w') # also training words
else:
segments.append('dbow') # PV-DBOW (skip-gram-style)
else: # PV-DM...
if self.dm_concat:
segments.append('dm/c') # ...with concatenative context layer
else:
if self.cbow_mean:
segments.append('dm/m')
else:
segments.append('dm/s')
segments.append('d%d' % self.dv.vector_size) # dimensions
if self.negative:
segments.append('n%d' % self.negative) # negative samples
if self.hs:
segments.append('hs')
if not self.sg or (self.sg and self.dbow_words):
segments.append('w%d' % self.window) # window size, when relevant
if self.min_count > 1:
segments.append('mc%d' % self.min_count)
if self.sample > 0:
segments.append('s%g' % self.sample)
if self.workers > 1:
segments.append('t%d' % self.workers)
return '%s<%s>' % (self.__class__.__name__, ','.join(segments))
def save_word2vec_format(self, fname, doctag_vec=False, word_vec=True, prefix='*dt_', fvocab=None, binary=False):
"""Store the input-hidden weight matrix in the same format used by the original C word2vec-tool.
Parameters
----------
fname : str
The file path used to save the vectors in.
doctag_vec : bool, optional
Indicates whether to store document vectors.
word_vec : bool, optional
Indicates whether to store word vectors.
prefix : str, optional
Uniquely identifies doctags from word vocab, and avoids collision in case of repeated string in doctag
and word vocab.
fvocab : str, optional
Optional file path used to save the vocabulary.
binary : bool, optional
If True, the data will be saved in binary word2vec format, otherwise - will be saved in plain text.
"""
total_vec = None
# save word vectors
if word_vec:
if doctag_vec:
total_vec = len(self.wv) + len(self.dv)
self.wv.save_word2vec_format(fname, fvocab, binary, total_vec)
# save document vectors
if doctag_vec:
write_header = True
append = False
if word_vec:
# simply appending to existing file
write_header = False
append = True
self.dv.save_word2vec_format(
fname, prefix=prefix, fvocab=fvocab, binary=binary,
write_header=write_header, append=append,
sort_attr='doc_count')
@deprecated(
"Gensim 4.0.0 implemented internal optimizations that make calls to init_sims() unnecessary. "
"init_sims() is now obsoleted and will be completely removed in future versions. "
"See https://github.com/RaRe-Technologies/gensim/wiki/Migrating-from-Gensim-3.x-to-4"
)
def init_sims(self, replace=False):
"""
Precompute L2-normalized vectors. Obsoleted.
If you need a single unit-normalized vector for some key, call
:meth:`~gensim.models.keyedvectors.KeyedVectors.get_vector` instead:
``doc2vec_model.dv.get_vector(key, norm=True)``.
To refresh norms after you performed some atypical out-of-band vector tampering,
call :meth:`~gensim.models.keyedvectors.KeyedVectors.fill_norms` instead.
Parameters
----------
replace : bool
If True, forget the original trained vectors and only keep the normalized ones.
You lose information if you do this.
"""
self.dv.init_sims(replace=replace)
@classmethod
def load(cls, *args, **kwargs):
"""Load a previously saved :class:`~gensim.models.doc2vec.Doc2Vec` model.
Parameters
----------
fname : str
Path to the saved file.
*args : object
Additional arguments, see `~gensim.models.word2vec.Word2Vec.load`.
**kwargs : object
Additional arguments, see `~gensim.models.word2vec.Word2Vec.load`.
See Also
--------
:meth:`~gensim.models.doc2vec.Doc2Vec.save`
Save :class:`~gensim.models.doc2vec.Doc2Vec` model.
Returns
-------
:class:`~gensim.models.doc2vec.Doc2Vec`
Loaded model.
"""
try:
return super(Doc2Vec, cls).load(*args, rethrow=True, **kwargs)
except AttributeError as ae:
logger.error(
"Model load error. Was model saved using code from an older Gensim version? "
"Try loading older model using gensim-3.8.3, then re-saving, to restore "
"compatibility with current code.")
raise ae
def estimate_memory(self, vocab_size=None, report=None):
"""Estimate required memory for a model using current settings.
Parameters
----------
vocab_size : int, optional
Number of raw words in the vocabulary.
report : dict of (str, int), optional
A dictionary from string representations of the **specific** model's memory consuming members
to their size in bytes.
Returns
-------
dict of (str, int), optional
A dictionary from string representations of the model's memory consuming members to their size in bytes.
Includes members from the base classes as well as weights and tag lookup memory estimation specific to the
class.
"""
report = report or {}
report['doctag_lookup'] = self.estimated_lookup_memory()
report['doctag_syn0'] = len(self.dv) * self.vector_size * dtype(REAL).itemsize
return super(Doc2Vec, self).estimate_memory(vocab_size, report=report)
def build_vocab(
self, corpus_iterable=None, corpus_file=None, update=False, progress_per=10000,
keep_raw_vocab=False, trim_rule=None, **kwargs,
):
"""Build vocabulary from a sequence of documents (can be a once-only generator stream).
Parameters
----------
documents : iterable of list of :class:`~gensim.models.doc2vec.TaggedDocument`, optional
Can be simply a list of :class:`~gensim.models.doc2vec.TaggedDocument` elements, but for larger corpora,
consider an iterable that streams the documents directly from disk/network.
See :class:`~gensim.models.doc2vec.TaggedBrownCorpus` or :class:`~gensim.models.doc2vec.TaggedLineDocument`
corpus_file : str, optional
Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
You may use this argument instead of `documents` to get performance boost. Only one of `documents` or
`corpus_file` arguments need to be passed (not both of them). Documents' tags are assigned automatically
and are equal to a line number, as in :class:`~gensim.models.doc2vec.TaggedLineDocument`.
update : bool
If true, the new words in `documents` will be added to model's vocab.
progress_per : int
Indicates how many words to process before showing/updating the progress.
keep_raw_vocab : bool
If not true, delete the raw vocabulary after the scaling is done and free up RAM.
trim_rule : function, optional
Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
or a callable that accepts parameters (word, count, min_count) and returns either
:attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
The rule, if given, is only used to prune vocabulary during current method call and is not stored as part
of the model.
The input parameters are of the following types:
* `word` (str) - the word we are examining
* `count` (int) - the word's frequency count in the corpus
* `min_count` (int) - the minimum count threshold.
**kwargs
Additional key word arguments passed to the internal vocabulary construction.
"""
total_words, corpus_count = self.scan_vocab(
corpus_iterable=corpus_iterable, corpus_file=corpus_file,
progress_per=progress_per, trim_rule=trim_rule,
)
self.corpus_count = corpus_count
self.corpus_total_words = total_words
report_values = self.prepare_vocab(update=update, keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule, **kwargs)
report_values['memory'] = self.estimate_memory(vocab_size=report_values['num_retained_words'])
self.prepare_weights(update=update)
def build_vocab_from_freq(self, word_freq, keep_raw_vocab=False, corpus_count=None, trim_rule=None, update=False):
"""Build vocabulary from a dictionary of word frequencies.
Build model vocabulary from a passed dictionary that contains a (word -> word count) mapping.
Words must be of type unicode strings.
Parameters
----------
word_freq : dict of (str, int)
Word <-> count mapping.
keep_raw_vocab : bool, optional
If not true, delete the raw vocabulary after the scaling is done and free up RAM.
corpus_count : int, optional
Even if no corpus is provided, this argument can set corpus_count explicitly.
trim_rule : function, optional
Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
or a callable that accepts parameters (word, count, min_count) and returns either
:attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
The rule, if given, is only used to prune vocabulary during
:meth:`~gensim.models.doc2vec.Doc2Vec.build_vocab` and is not stored as part of the model.
The input parameters are of the following types:
* `word` (str) - the word we are examining
* `count` (int) - the word's frequency count in the corpus
* `min_count` (int) - the minimum count threshold.
update : bool, optional
If true, the new provided words in `word_freq` dict will be added to model's vocab.
"""
logger.info("processing provided word frequencies")
# Instead of scanning text, this will assign provided word frequencies dictionary(word_freq)
# to be directly the raw vocab.
raw_vocab = word_freq
logger.info(
"collected %i different raw words, with total frequency of %i",
len(raw_vocab), sum(raw_vocab.values()),
)
# Since no documents are provided, this is to control the corpus_count
self.corpus_count = corpus_count or 0
self.raw_vocab = raw_vocab
# trim by min_count & precalculate downsampling
report_values = self.prepare_vocab(keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule, update=update)
report_values['memory'] = self.estimate_memory(vocab_size=report_values['num_retained_words'])
self.prepare_weights(update=update)
def _scan_vocab(self, corpus_iterable, progress_per, trim_rule):
document_no = -1
total_words = 0
min_reduce = 1
interval_start = default_timer() - 0.00001 # guard against next sample being identical
interval_count = 0
checked_string_types = 0
vocab = defaultdict(int)
max_rawint = -1 # highest raw int tag seen (-1 for none)
doctags_lookup = {}
doctags_list = []
for document_no, document in enumerate(corpus_iterable):
if not checked_string_types:
if isinstance(document.words, str):
logger.warning(
"Each 'words' should be a list of words (usually unicode strings). "
"First 'words' here is instead plain %s.",
type(document.words),
)
checked_string_types += 1
if document_no % progress_per == 0:
interval_rate = (total_words - interval_count) / (default_timer() - interval_start)
logger.info(
"PROGRESS: at example #%i, processed %i words (%i words/s), %i word types, %i tags",
document_no, total_words, interval_rate, len(vocab), len(doctags_list)
)
interval_start = default_timer()
interval_count = total_words
document_length = len(document.words)
for tag in document.tags:
# Note a document tag during initial corpus scan, for structure sizing.
if isinstance(tag, (int, integer,)):
max_rawint = max(max_rawint, tag)
else:
if tag in doctags_lookup:
doctags_lookup[tag].doc_count += 1
doctags_lookup[tag].word_count += document_length
else:
doctags_lookup[tag] = Doctag(index=len(doctags_list), word_count=document_length, doc_count=1)
doctags_list.append(tag)
for word in document.words:
vocab[word] += 1
total_words += len(document.words)
if self.max_vocab_size and len(vocab) > self.max_vocab_size:
utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)
min_reduce += 1
corpus_count = document_no + 1
if len(doctags_list) > corpus_count:
logger.warning("More unique tags (%i) than documents (%i).", len(doctags_list), corpus_count)
if max_rawint > corpus_count:
logger.warning(
"Highest int doctag (%i) larger than count of documents (%i). This means "
"at least %i excess, unused slots (%i bytes) will be allocated for vectors.",
max_rawint, corpus_count, max_rawint - corpus_count,
(max_rawint - corpus_count) * self.vector_size * dtype(REAL).itemsize,
)
if max_rawint > -1:
# adjust indexes/list to account for range of pure-int keyed doctags
for key in doctags_list:
doctags_lookup[key].index = doctags_lookup[key].index + max_rawint + 1
doctags_list = list(range(0, max_rawint + 1)) + doctags_list
self.dv.index_to_key = doctags_list
for t, dt in doctags_lookup.items():
self.dv.key_to_index[t] = dt.index
self.dv.set_vecattr(t, 'word_count', dt.word_count)
self.dv.set_vecattr(t, 'doc_count', dt.doc_count)
self.raw_vocab = vocab
return total_words, corpus_count
def scan_vocab(self, corpus_iterable=None, corpus_file=None, progress_per=100000, trim_rule=None):
"""Create the model's vocabulary: a mapping from unique words in the corpus to their frequency count.
Parameters
----------
documents : iterable of :class:`~gensim.models.doc2vec.TaggedDocument`, optional
The tagged documents used to create the vocabulary. Their tags can be either str tokens or ints (faster).
corpus_file : str, optional
Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
You may use this argument instead of `documents` to get performance boost. Only one of `documents` or
`corpus_file` arguments need to be passed (not both of them).
progress_per : int
Progress will be logged every `progress_per` documents.
trim_rule : function, optional
Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
or a callable that accepts parameters (word, count, min_count) and returns either
:attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
The rule, if given, is only used to prune vocabulary during
:meth:`~gensim.models.doc2vec.Doc2Vec.build_vocab` and is not stored as part of the model.
The input parameters are of the following types:
* `word` (str) - the word we are examining
* `count` (int) - the word's frequency count in the corpus
* `min_count` (int) - the minimum count threshold.
Returns
-------
(int, int)
Tuple of `(total words in the corpus, number of documents)`.
"""
logger.info("collecting all words and their counts")
if corpus_file is not None:
corpus_iterable = TaggedLineDocument(corpus_file)
total_words, corpus_count = self._scan_vocab(corpus_iterable, progress_per, trim_rule)
logger.info(
"collected %i word types and %i unique tags from a corpus of %i examples and %i words",
len(self.raw_vocab), len(self.dv), corpus_count, total_words,
)
return total_words, corpus_count
def similarity_unseen_docs(self, doc_words1, doc_words2, alpha=None, min_alpha=None, epochs=None):
"""Compute cosine similarity between two post-bulk out of training documents.
Parameters
----------
model : :class:`~gensim.models.doc2vec.Doc2Vec`
An instance of a trained `Doc2Vec` model.
doc_words1 : list of str
Input document.
doc_words2 : list of str
Input document.
alpha : float, optional
The initial learning rate.
min_alpha : float, optional
Learning rate will linearly drop to `min_alpha` as training progresses.
epochs : int, optional
Number of epochs to train the new document.
Returns
-------
float
The cosine similarity between `doc_words1` and `doc_words2`.
"""
d1 = self.infer_vector(doc_words=doc_words1, alpha=alpha, min_alpha=min_alpha, epochs=epochs)
d2 = self.infer_vector(doc_words=doc_words2, alpha=alpha, min_alpha=min_alpha, epochs=epochs)
return np.dot(matutils.unitvec(d1), matutils.unitvec(d2))
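# Illustrative usage sketch, not part of the original module: infer_vector() is
# stochastic, so repeated inferences of the same document differ slightly; comparing
# two runs is a cheap stability check. All names used here come from this module.
def _example_inference_stability(model, doc_words, epochs=50):
    v1 = model.infer_vector(doc_words, epochs=epochs)
    v2 = model.infer_vector(doc_words, epochs=epochs)
    # a cosine similarity close to 1.0 suggests `epochs` is large enough for stable vectors
    return np.dot(matutils.unitvec(v1), matutils.unitvec(v2))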
class Doc2VecVocab(utils.SaveLoad):
"""Obsolete class retained for now as load-compatibility state capture"""
class Doc2VecTrainables(utils.SaveLoad):
"""Obsolete class retained for now as load-compatibility state capture"""
class TaggedBrownCorpus:
def __init__(self, dirname):
"""Reader for the `Brown corpus (part of NLTK data) <http://www.nltk.org/book/ch02.html#tab-brown-sources>`_.
Parameters
----------
dirname : str
Path to folder with Brown corpus.
"""
self.dirname = dirname
def __iter__(self):
"""Iterate through the corpus.
Yields
------
:class:`~gensim.models.doc2vec.TaggedDocument`
Document from `source`.
"""
for fname in os.listdir(self.dirname):
fname = os.path.join(self.dirname, fname)
if not os.path.isfile(fname):
continue
with utils.open(fname, 'rb') as fin:
for item_no, line in enumerate(fin):
line = utils.to_unicode(line)
# each file line is a single document in the Brown corpus
# each token is WORD/POS_TAG
token_tags = [t.split('/') for t in line.split() if len(t.split('/')) == 2]
# ignore words with non-alphabetic tags like ",", "!" etc (punctuation, weird stuff)
words = ["%s/%s" % (token.lower(), tag[:2]) for token, tag in token_tags if tag[:2].isalpha()]
if not words: # don't bother sending out empty documents
continue
yield TaggedDocument(words, ['%s_SENT_%s' % (fname, item_no)])
class TaggedLineDocument:
def __init__(self, source):
"""Iterate over a file that contains documents:
one line = :class:`~gensim.models.doc2vec.TaggedDocument` object.
Words are expected to be already preprocessed and separated by whitespace. Document tags are constructed
automatically from the document line number (each document gets a unique integer tag).
Parameters
----------
source : string or a file-like object
Path to the file on disk, or an already-open file object (must support `seek(0)`).
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> from gensim.models.doc2vec import TaggedLineDocument
>>>
>>> for document in TaggedLineDocument(datapath("head500.noblanks.cor")):
... pass
"""
self.source = source
def __iter__(self):
"""Iterate through the lines in the source.
Yields
------
:class:`~gensim.models.doc2vec.TaggedDocument`
Document from `source` specified in the constructor.
"""
try:
# Assume it is a file-like object and try treating it as such
# Things that don't have seek will trigger an exception
self.source.seek(0)
for item_no, line in enumerate(self.source):
yield TaggedDocument(utils.to_unicode(line).split(), [item_no])
except AttributeError:
# If it didn't work like a file, use it as a string filename
with utils.open(self.source, 'rb') as fin:
for item_no, line in enumerate(fin):
yield TaggedDocument(utils.to_unicode(line).split(), [item_no])
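# Illustrative sketch, not part of the original module: TaggedLineDocument pairs
# naturally with the optimized `corpus_file` path, since both assign line numbers as
# integer tags. The file path and hyperparameters below are hypothetical.
def _example_train_from_line_corpus(path='corpus.txt'):
    # Option 1: stream TaggedDocument objects from the file.
    streamed = Doc2Vec(TaggedLineDocument(path), vector_size=50, epochs=10)
    # Option 2: let the corpus_file code path read the same file directly (usually faster).
    direct = Doc2Vec(corpus_file=path, vector_size=50, epochs=10)
    return streamed, direct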
| 54,757 | Python | .py | 994 | 44.140845 | 133 | 0.635681 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,131 | doc2vec_inner.pyx | piskvorky_gensim/gensim/models/doc2vec_inner.pyx |
#!/usr/bin/env cython
# cython: language_level=3
# cython: boundscheck=False
# cython: wraparound=False
# cython: cdivision=True
# cython: embedsignature=True
# coding: utf-8
#
# Copyright (C) 2013 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Optimized cython functions for training :class:`~gensim.models.doc2vec.Doc2Vec` model."""
import cython
import numpy as np
from numpy import zeros, float32 as REAL
cimport numpy as np
from libc.string cimport memset, memcpy
# scipy <= 0.15
try:
from scipy.linalg.blas import fblas
except ImportError:
# in scipy > 0.15, the fblas functions have been removed
import scipy.linalg.blas as fblas
from gensim.models.word2vec_inner cimport bisect_left, random_int32, sscal, REAL_t, EXP_TABLE, our_dot, our_saxpy
DEF MAX_DOCUMENT_LEN = 10000
cdef int ONE = 1
cdef REAL_t ONEF = <REAL_t>1.0
DEF EXP_TABLE_SIZE = 1000
DEF MAX_EXP = 6
cdef void fast_document_dbow_hs(
const np.uint32_t *word_point, const np.uint8_t *word_code, const int codelen,
REAL_t *context_vectors, REAL_t *syn1, const int size,
const np.uint32_t context_index, const REAL_t alpha, REAL_t *work, int learn_context, int learn_hidden,
REAL_t *contexts_lockf, const np.uint32_t contexts_lockf_len) nogil:
cdef long long a, b
cdef long long row1 = context_index * size, row2
cdef REAL_t f, g
memset(work, 0, size * cython.sizeof(REAL_t))
for b in range(codelen):
row2 = word_point[b] * size
f = our_dot(&size, &context_vectors[row1], &ONE, &syn1[row2], &ONE)
if f <= -MAX_EXP or f >= MAX_EXP:
continue
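# replace the raw dot product with its sigmoid via the precomputed EXP_TABLE lookup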
f = EXP_TABLE[<int>((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
g = (1 - word_code[b] - f) * alpha
our_saxpy(&size, &g, &syn1[row2], &ONE, work, &ONE)
if learn_hidden:
our_saxpy(&size, &g, &context_vectors[row1], &ONE, &syn1[row2], &ONE)
if learn_context:
our_saxpy(&size, &contexts_lockf[context_index % contexts_lockf_len],
work, &ONE, &context_vectors[row1], &ONE)
cdef unsigned long long fast_document_dbow_neg(
const int negative, np.uint32_t *cum_table, unsigned long long cum_table_len,
REAL_t *context_vectors, REAL_t *syn1neg, const int size, const np.uint32_t word_index,
const np.uint32_t context_index, const REAL_t alpha, REAL_t *work,
unsigned long long next_random, int learn_context, int learn_hidden, REAL_t *contexts_lockf,
const np.uint32_t contexts_lockf_len) nogil:
cdef long long a
cdef long long row1 = context_index * size, row2
cdef unsigned long long modulo = 281474976710655ULL
cdef REAL_t f, g, label
cdef np.uint32_t target_index
cdef int d
memset(work, 0, size * cython.sizeof(REAL_t))
for d in range(negative+1):
if d == 0:
target_index = word_index
label = ONEF
else:
target_index = bisect_left(cum_table, (next_random >> 16) % cum_table[cum_table_len-1], 0, cum_table_len)
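# advance the 48-bit linear congruential PRNG (same constants as word2vec.c / java.util.Random)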
next_random = (next_random * <unsigned long long>25214903917ULL + 11) & modulo
if target_index == word_index:
continue
label = <REAL_t>0.0
row2 = target_index * size
f = our_dot(&size, &context_vectors[row1], &ONE, &syn1neg[row2], &ONE)
if f <= -MAX_EXP or f >= MAX_EXP:
continue
f = EXP_TABLE[<int>((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
g = (label - f) * alpha
our_saxpy(&size, &g, &syn1neg[row2], &ONE, work, &ONE)
if learn_hidden:
our_saxpy(&size, &g, &context_vectors[row1], &ONE, &syn1neg[row2], &ONE)
if learn_context:
our_saxpy(&size, &contexts_lockf[context_index % contexts_lockf_len],
work, &ONE, &context_vectors[row1], &ONE)
return next_random
cdef void fast_document_dm_hs(
const np.uint32_t *word_point, const np.uint8_t *word_code, int word_code_len,
REAL_t *neu1, REAL_t *syn1, const REAL_t alpha, REAL_t *work,
const int size, int learn_hidden) nogil:
cdef long long b
cdef long long row2
cdef REAL_t f, g
# l1 already composed by caller, passed in as neu1
# work (also passed in) will accumulate l1 error
for b in range(word_code_len):
row2 = word_point[b] * size
f = our_dot(&size, neu1, &ONE, &syn1[row2], &ONE)
if f <= -MAX_EXP or f >= MAX_EXP:
continue
f = EXP_TABLE[<int>((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
g = (1 - word_code[b] - f) * alpha
our_saxpy(&size, &g, &syn1[row2], &ONE, work, &ONE)
if learn_hidden:
our_saxpy(&size, &g, neu1, &ONE, &syn1[row2], &ONE)
cdef unsigned long long fast_document_dm_neg(
const int negative, np.uint32_t *cum_table, unsigned long long cum_table_len, unsigned long long next_random,
REAL_t *neu1, REAL_t *syn1neg, const int predict_word_index, const REAL_t alpha, REAL_t *work,
const int size, int learn_hidden) nogil:
cdef long long row2
cdef unsigned long long modulo = 281474976710655ULL
cdef REAL_t f, g, label
cdef np.uint32_t target_index
cdef int d
# l1 already composed by caller, passed in as neu1
# work (also passed in) will accumulate l1 error for outside application
for d in range(negative+1):
if d == 0:
target_index = predict_word_index
label = ONEF
else:
target_index = bisect_left(cum_table, (next_random >> 16) % cum_table[cum_table_len-1], 0, cum_table_len)
next_random = (next_random * <unsigned long long>25214903917ULL + 11) & modulo
if target_index == predict_word_index:
continue
label = <REAL_t>0.0
row2 = target_index * size
f = our_dot(&size, neu1, &ONE, &syn1neg[row2], &ONE)
if f <= -MAX_EXP or f >= MAX_EXP:
continue
f = EXP_TABLE[<int>((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
g = (label - f) * alpha
our_saxpy(&size, &g, &syn1neg[row2], &ONE, work, &ONE)
if learn_hidden:
our_saxpy(&size, &g, neu1, &ONE, &syn1neg[row2], &ONE)
return next_random
cdef void fast_document_dmc_hs(
const np.uint32_t *word_point, const np.uint8_t *word_code, int word_code_len,
REAL_t *neu1, REAL_t *syn1, const REAL_t alpha, REAL_t *work,
const int layer1_size, const int vector_size, int learn_hidden) nogil:
cdef long long a, b
cdef long long row2
cdef REAL_t f, g
cdef int m
# l1 already composed by caller, passed in as neu1
# work accumulates net l1 error; eventually applied by caller
for b in range(word_code_len):
row2 = word_point[b] * layer1_size
f = our_dot(&layer1_size, neu1, &ONE, &syn1[row2], &ONE)
if f <= -MAX_EXP or f >= MAX_EXP:
continue
f = EXP_TABLE[<int>((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
g = (1 - word_code[b] - f) * alpha
our_saxpy(&layer1_size, &g, &syn1[row2], &ONE, work, &ONE)
if learn_hidden:
our_saxpy(&layer1_size, &g, neu1, &ONE, &syn1[row2], &ONE)
cdef unsigned long long fast_document_dmc_neg(
const int negative, np.uint32_t *cum_table, unsigned long long cum_table_len, unsigned long long next_random,
REAL_t *neu1, REAL_t *syn1neg, const int predict_word_index, const REAL_t alpha, REAL_t *work,
const int layer1_size, const int vector_size, int learn_hidden) nogil:
cdef long long a
cdef long long row2
cdef unsigned long long modulo = 281474976710655ULL
cdef REAL_t f, g, label
cdef np.uint32_t target_index
cdef int d, m
# l1 already composed by caller, passed in as neu1
# work accumulates net l1 error; eventually applied by caller
for d in range(negative+1):
if d == 0:
target_index = predict_word_index
label = ONEF
else:
target_index = bisect_left(cum_table, (next_random >> 16) % cum_table[cum_table_len-1], 0, cum_table_len)
next_random = (next_random * <unsigned long long>25214903917ULL + 11) & modulo
if target_index == predict_word_index:
continue
label = <REAL_t>0.0
row2 = target_index * layer1_size
f = our_dot(&layer1_size, neu1, &ONE, &syn1neg[row2], &ONE)
if f <= -MAX_EXP or f >= MAX_EXP:
continue
f = EXP_TABLE[<int>((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
g = (label - f) * alpha
our_saxpy(&layer1_size, &g, &syn1neg[row2], &ONE, work, &ONE)
if learn_hidden:
our_saxpy(&layer1_size, &g, neu1, &ONE, &syn1neg[row2], &ONE)
return next_random
cdef init_d2v_config(Doc2VecConfig *c, model, alpha, learn_doctags, learn_words, learn_hidden,
train_words=False, work=None, neu1=None, word_vectors=None, words_lockf=None,
doctag_vectors=None, doctags_lockf=None, docvecs_count=0):
c[0].hs = model.hs
c[0].negative = model.negative
c[0].sample = (model.sample != 0)
c[0].cbow_mean = model.cbow_mean
c[0].train_words = train_words
c[0].learn_doctags = learn_doctags
c[0].learn_words = learn_words
c[0].learn_hidden = learn_hidden
c[0].alpha = alpha
c[0].layer1_size = model.layer1_size
c[0].vector_size = model.dv.vector_size
c[0].workers = model.workers
c[0].docvecs_count = docvecs_count
c[0].window = model.window
c[0].expected_doctag_len = model.dm_tag_count
if '\0' in model.wv:
c[0].null_word_index = model.wv.get_index('\0')
# default vectors, locks from syn0/doctag_syn0
if word_vectors is None:
word_vectors = model.wv.vectors
c[0].word_vectors = <REAL_t *>(np.PyArray_DATA(word_vectors))
if doctag_vectors is None:
doctag_vectors = model.dv.vectors
c[0].doctag_vectors = <REAL_t *>(np.PyArray_DATA(doctag_vectors))
if words_lockf is None:
words_lockf = model.wv.vectors_lockf
c[0].words_lockf = <REAL_t *>(np.PyArray_DATA(words_lockf))
c[0].words_lockf_len = len(words_lockf)
if doctags_lockf is None:
doctags_lockf = model.dv.vectors_lockf
c[0].doctags_lockf = <REAL_t *>(np.PyArray_DATA(doctags_lockf))
c[0].doctags_lockf_len = len(doctags_lockf)
if c[0].hs:
c[0].syn1 = <REAL_t *>(np.PyArray_DATA(model.syn1))
if c[0].negative:
c[0].syn1neg = <REAL_t *>(np.PyArray_DATA(model.syn1neg))
c[0].cum_table = <np.uint32_t *>(np.PyArray_DATA(model.cum_table))
c[0].cum_table_len = len(model.cum_table)
if c[0].negative or c[0].sample:
c[0].next_random = (2**24) * model.random.randint(0, 2**24) + model.random.randint(0, 2**24)
# convert Python structures to primitive types, so we can release the GIL
if work is None:
work = zeros(model.layer1_size, dtype=REAL)
c[0].work = <REAL_t *>np.PyArray_DATA(work)
if neu1 is None:
neu1 = zeros(model.layer1_size, dtype=REAL)
c[0].neu1 = <REAL_t *>np.PyArray_DATA(neu1)
def train_document_dbow(model, doc_words, doctag_indexes, alpha, work=None,
train_words=False, learn_doctags=True, learn_words=True, learn_hidden=True,
word_vectors=None, words_lockf=None, doctag_vectors=None, doctags_lockf=None):
"""Update distributed bag of words model ("PV-DBOW") by training on a single document.
Called internally from :meth:`~gensim.models.doc2vec.Doc2Vec.train` and
:meth:`~gensim.models.doc2vec.Doc2Vec.infer_vector`.
Parameters
----------
model : :class:`~gensim.models.doc2vec.Doc2Vec`
The model to train.
doc_words : list of str
The input document as a list of words to be used for training. Each word will be looked up in
the model's vocabulary.
doctag_indexes : list of int
Indices into `doctag_vectors` used to obtain the tags of the document.
alpha : float
Learning rate.
work : list of float, optional
Updates to be performed on each neuron in the hidden layer of the underlying network.
train_words : bool, optional
Word vectors will be updated exactly as per Word2Vec skip-gram training only if **both** `learn_words`
and `train_words` are set to True.
learn_doctags : bool, optional
Whether the tag vectors should be updated.
learn_words : bool, optional
Word vectors will be updated exactly as per Word2Vec skip-gram training only if **both**
`learn_words` and `train_words` are set to True.
learn_hidden : bool, optional
Whether or not the weights of the hidden layer will be updated.
word_vectors : numpy.ndarray, optional
The vector representation for each word in the vocabulary. If None, these will be retrieved from the model.
words_lockf : numpy.ndarray, optional
EXPERIMENTAL. A learning lock factor for each word-vector; value 0.0 completely blocks updates, a value
of 1.0 allows normal updates to word-vectors.
doctag_vectors : numpy.ndarray, optional
Vector representations of the tags. If None, these will be retrieved from the model.
doctags_lockf : numpy.ndarray, optional
EXPERIMENTAL. The lock factors for each tag, same as `words_lockf`, but for document-vectors.
Returns
-------
int
Number of words in the input document that were actually used for training.
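Examples
--------
A hedged sketch of calling this function directly (it is normally invoked only from
:meth:`~gensim.models.doc2vec.Doc2Vec.train`; assumes `model` is a trained PV-DBOW model whose
vocabulary contains the words below and for which tag index 0 exists):
.. sourcecode:: pycon
>>> n_trained = train_document_dbow(model, ['human', 'interface'], doctag_indexes=[0], alpha=0.025)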
"""
cdef Doc2VecConfig c
cdef int i, j
cdef long result = 0
cdef np.uint32_t *vocab_sample_ints
init_d2v_config(&c, model, alpha, learn_doctags, learn_words, learn_hidden, train_words=train_words, work=work,
neu1=None, word_vectors=word_vectors, words_lockf=words_lockf,
doctag_vectors=doctag_vectors, doctags_lockf=doctags_lockf)
c.doctag_len = <int>min(MAX_DOCUMENT_LEN, len(doctag_indexes))
if c.sample:
vocab_sample_ints = <np.uint32_t *>np.PyArray_DATA(model.wv.expandos['sample_int'])
if c.hs:
vocab_codes = model.wv.expandos['code']
vocab_points = model.wv.expandos['point']
i = 0
for token in doc_words:
word_index = model.wv.key_to_index.get(token, None)
if word_index is None: # shrink document to leave out word
continue # leaving i unchanged
if c.sample and vocab_sample_ints[word_index] < random_int32(&c.next_random):
continue
c.indexes[i] = word_index
if c.hs:
c.codelens[i] = <int>len(vocab_codes[word_index])
c.codes[i] = <np.uint8_t *>np.PyArray_DATA(vocab_codes[word_index])
c.points[i] = <np.uint32_t *>np.PyArray_DATA(vocab_points[word_index])
result += 1
i += 1
if i == MAX_DOCUMENT_LEN:
break # TODO: log warning, tally overflow?
c.document_len = i
if c.train_words:
# single randint() call avoids a big thread-synchronization slowdown
if model.shrink_windows:
for i, item in enumerate(model.random.randint(0, c.window, c.document_len)):
c.reduced_windows[i] = item
else:
for i in range(c.document_len):
c.reduced_windows[i] = 0
for i in range(c.doctag_len):
c.doctag_indexes[i] = doctag_indexes[i]
result += 1
# release GIL & train on the document
with nogil:
for i in range(c.document_len):
if c.train_words: # simultaneous skip-gram wordvec-training
j = i - c.window + c.reduced_windows[i]
if j < 0:
j = 0
k = i + c.window + 1 - c.reduced_windows[i]
if k > c.document_len:
k = c.document_len
for j in range(j, k):
if j == i:
continue
if c.hs:
# we reuse the DBOW function, as it is equivalent to skip-gram for this purpose
fast_document_dbow_hs(c.points[i], c.codes[i], c.codelens[i], c.word_vectors, c.syn1, c.layer1_size,
c.indexes[j], c.alpha, c.work, c.learn_words, c.learn_hidden, c.words_lockf,
c.words_lockf_len)
if c.negative:
# we reuse the DBOW function, as it is equivalent to skip-gram for this purpose
c.next_random = fast_document_dbow_neg(c.negative, c.cum_table, c.cum_table_len, c.word_vectors,
c.syn1neg, c.layer1_size, c.indexes[i], c.indexes[j],
c.alpha, c.work, c.next_random, c.learn_words,
c.learn_hidden, c.words_lockf, c.words_lockf_len)
# docvec-training
for j in range(c.doctag_len):
if c.hs:
fast_document_dbow_hs(c.points[i], c.codes[i], c.codelens[i], c.doctag_vectors, c.syn1, c.layer1_size,
c.doctag_indexes[j], c.alpha, c.work, c.learn_doctags, c.learn_hidden, c.doctags_lockf,
c.doctags_lockf_len)
if c.negative:
c.next_random = fast_document_dbow_neg(c.negative, c.cum_table, c.cum_table_len, c.doctag_vectors,
c.syn1neg, c.layer1_size, c.indexes[i], c.doctag_indexes[j],
c.alpha, c.work, c.next_random, c.learn_doctags,
c.learn_hidden, c.doctags_lockf, c.doctags_lockf_len)
return result
def train_document_dm(model, doc_words, doctag_indexes, alpha, work=None, neu1=None,
learn_doctags=True, learn_words=True, learn_hidden=True,
word_vectors=None, words_lockf=None, doctag_vectors=None, doctags_lockf=None):
"""Update distributed memory model ("PV-DM") by training on a single document.
This method implements the DM model with a projection (input) layer that is either the sum or mean of the context
vectors, depending on the model's `dm_mean` configuration field.
Called internally from :meth:`~gensim.models.doc2vec.Doc2Vec.train` and
:meth:`~gensim.models.doc2vec.Doc2Vec.infer_vector`.
Parameters
----------
model : :class:`~gensim.models.doc2vec.Doc2Vec`
The model to train.
doc_words : list of str
The input document as a list of words to be used for training. Each word will be looked up in
the model's vocabulary.
doctag_indexes : list of int
Indices into `doctag_vectors` used to obtain the tags of the document.
alpha : float
Learning rate.
work : np.ndarray, optional
Private working memory for each worker.
neu1 : np.ndarray, optional
Private working memory for each worker.
learn_doctags : bool, optional
Whether the tag vectors should be updated.
learn_words : bool, optional
Word vectors will be updated exactly as per Word2Vec skip-gram training only if **both**
`learn_words` and `train_words` are set to True.
learn_hidden : bool, optional
Whether or not the weights of the hidden layer will be updated.
word_vectors : numpy.ndarray, optional
The vector representation for each word in the vocabulary. If None, these will be retrieved from the model.
words_lockf : numpy.ndarray, optional
EXPERIMENTAL. A learning lock factor for each word-vector; value 0.0 completely blocks updates, a value
of 1.0 allows normal updates to word-vectors.
doctag_vectors : numpy.ndarray, optional
Vector representations of the tags. If None, these will be retrieved from the model.
doctags_lockf : numpy.ndarray, optional
EXPERIMENTAL. The lock factors for each tag, same as `words_lockf`, but for document-vectors.
Returns
-------
int
Number of words in the input document that were actually used for training.
"""
cdef Doc2VecConfig c
cdef REAL_t count, inv_count = 1.0
cdef int i, j, k, m
cdef long result = 0
cdef np.uint32_t *vocab_sample_ints
init_d2v_config(&c, model, alpha, learn_doctags, learn_words, learn_hidden, train_words=False,
work=work, neu1=neu1, word_vectors=word_vectors, words_lockf=words_lockf,
doctag_vectors=doctag_vectors, doctags_lockf=doctags_lockf)
c.doctag_len = <int>min(MAX_DOCUMENT_LEN, len(doctag_indexes))
if c.sample:
vocab_sample_ints = <np.uint32_t *>np.PyArray_DATA(model.wv.expandos['sample_int'])
# vocab_sample_ints = model.wv.expandos['sample_int'] # this variant noticeably slower
if c.hs:
vocab_codes = model.wv.expandos['code']
vocab_points = model.wv.expandos['point']
i = 0
for token in doc_words:
word_index = model.wv.key_to_index.get(token, None)
if word_index is None: # shrink document to leave out word
continue # leaving i unchanged
if c.sample and vocab_sample_ints[word_index] < random_int32(&c.next_random):
continue
c.indexes[i] = word_index
if c.hs:
c.codelens[i] = <int>len(vocab_codes[word_index])
c.codes[i] = <np.uint8_t *>np.PyArray_DATA(vocab_codes[word_index])
c.points[i] = <np.uint32_t *>np.PyArray_DATA(vocab_points[word_index])
result += 1
i += 1
if i == MAX_DOCUMENT_LEN:
break # TODO: log warning, tally overflow?
c.document_len = i
# single randint() call avoids a big thread-sync slowdown
if model.shrink_windows:
for i, item in enumerate(model.random.randint(0, c.window, c.document_len)):
c.reduced_windows[i] = item
else:
for i in range(c.document_len):
c.reduced_windows[i] = 0
for i in range(c.doctag_len):
c.doctag_indexes[i] = doctag_indexes[i]
result += 1
# release GIL & train on the document
with nogil:
for i in range(c.document_len):
j = i - c.window + c.reduced_windows[i]
if j < 0:
j = 0
k = i + c.window + 1 - c.reduced_windows[i]
if k > c.document_len:
k = c.document_len
# compose l1 (in _neu1) & clear _work
memset(c.neu1, 0, c.layer1_size * cython.sizeof(REAL_t))
count = <REAL_t>0.0
for m in range(j, k):
if m == i:
continue
else:
count += ONEF
our_saxpy(&c.layer1_size, &ONEF, &c.word_vectors[c.indexes[m] * c.layer1_size], &ONE, c.neu1, &ONE)
for m in range(c.doctag_len):
count += ONEF
our_saxpy(&c.layer1_size, &ONEF, &c.doctag_vectors[c.doctag_indexes[m] * c.layer1_size], &ONE, c.neu1, &ONE)
if count > (<REAL_t>0.5):
inv_count = ONEF/count
if c.cbow_mean:
sscal(&c.layer1_size, &inv_count, c.neu1, &ONE) # (does this need BLAS-variants like saxpy?)
memset(c.work, 0, c.layer1_size * cython.sizeof(REAL_t)) # work to accumulate l1 error
if c.hs:
fast_document_dm_hs(c.points[i], c.codes[i], c.codelens[i], c.neu1, c.syn1, c.alpha, c.work,
c.layer1_size, c.learn_hidden)
if c.negative:
c.next_random = fast_document_dm_neg(c.negative, c.cum_table, c.cum_table_len, c.next_random,
c.neu1, c.syn1neg, c.indexes[i], c.alpha, c.work, c.layer1_size,
c.learn_hidden)
if not c.cbow_mean:
sscal(&c.layer1_size, &inv_count, c.work, &ONE) # (does this need BLAS-variants like saxpy?)
# apply accumulated error in work
if c.learn_doctags:
for m in range(c.doctag_len):
our_saxpy(&c.layer1_size, &c.doctags_lockf[c.doctag_indexes[m] % c.doctags_lockf_len], c.work,
&ONE, &c.doctag_vectors[c.doctag_indexes[m] * c.layer1_size], &ONE)
if c.learn_words:
for m in range(j, k):
if m == i:
continue
else:
our_saxpy(&c.layer1_size, &c.words_lockf[c.indexes[m] % c.words_lockf_len], c.work, &ONE,
&c.word_vectors[c.indexes[m] * c.layer1_size], &ONE)
return result
def train_document_dm_concat(model, doc_words, doctag_indexes, alpha, work=None, neu1=None,
learn_doctags=True, learn_words=True, learn_hidden=True,
word_vectors=None, words_lockf=None, doctag_vectors=None, doctags_lockf=None):
"""Update distributed memory model ("PV-DM") by training on a single document, using a concatenation of the
context window word vectors (rather than a sum or average).
This will be slower since the input at each batch will be significantly larger.
Called internally from :meth:`~gensim.models.doc2vec.Doc2Vec.train` and
:meth:`~gensim.models.doc2vec.Doc2Vec.infer_vector`.
Parameters
----------
model : :class:`~gensim.models.doc2vec.Doc2Vec`
The model to train.
doc_words : list of str
The input document as a list of words to be used for training. Each word will be looked up in
the model's vocabulary.
doctag_indexes : list of int
Indices into `doctag_vectors` used to obtain the tags of the document.
alpha : float
Learning rate.
work : np.ndarray, optional
Private working memory for each worker.
neu1 : np.ndarray, optional
Private working memory for each worker.
learn_doctags : bool, optional
Whether the tag vectors should be updated.
learn_words : bool, optional
Word vectors will be updated exactly as per Word2Vec skip-gram training only if **both**
`learn_words` and `train_words` are set to True.
learn_hidden : bool, optional
Whether or not the weights of the hidden layer will be updated.
word_vectors : numpy.ndarray, optional
The vector representation for each word in the vocabulary. If None, these will be retrieved from the model.
words_lockf : numpy.ndarray, optional
EXPERIMENTAL. A learning lock factor for each word-vector, value 0.0 completely blocks updates, a value
of 1.0 allows normal updates to word-vectors.
doctag_vectors : numpy.ndarray, optional
Vector representations of the tags. If None, these will be retrieved from the model.
doctags_lockf : numpy.ndarray, optional
EXPERIMENTAL. The lock factors for each tag, same as `words_lockf`, but for document-vectors.
Returns
-------
int
Number of words in the input document that were actually used for training.
"""
cdef Doc2VecConfig c
cdef int i, j, k, m, n
cdef long result = 0
cdef np.uint32_t *vocab_sample_ints
init_d2v_config(&c, model, alpha, learn_doctags, learn_words, learn_hidden, train_words=False, work=work, neu1=neu1,
word_vectors=word_vectors, words_lockf=words_lockf, doctag_vectors=doctag_vectors, doctags_lockf=doctags_lockf)
c.doctag_len = <int>min(MAX_DOCUMENT_LEN, len(doctag_indexes))
if c.sample:
vocab_sample_ints = <np.uint32_t *>np.PyArray_DATA(model.wv.expandos['sample_int'])
if c.hs:
vocab_codes = model.wv.expandos['code']
vocab_points = model.wv.expandos['point']
if c.doctag_len != c.expected_doctag_len:
return 0 # skip doc without expected number of tags
i = 0
for token in doc_words:
word_index = model.wv.key_to_index.get(token, None)
if word_index is None: # shrink document to leave out word
continue # leaving i unchanged
if c.sample and vocab_sample_ints[word_index] < random_int32(&c.next_random):
continue
c.indexes[i] = word_index
if c.hs:
c.codelens[i] = <int>len(vocab_codes[word_index])
c.codes[i] = <np.uint8_t *>np.PyArray_DATA(vocab_codes[word_index])
c.points[i] = <np.uint32_t *>np.PyArray_DATA(vocab_points[word_index])
result += 1
i += 1
if i == MAX_DOCUMENT_LEN:
break # TODO: log warning, tally overflow?
c.document_len = i
for i in range(c.doctag_len):
c.doctag_indexes[i] = doctag_indexes[i]
result += 1
# release GIL & train on the document
with nogil:
for i in range(c.document_len):
j = i - c.window # negative OK: will pad with null word
k = i + c.window + 1 # past document end OK: will pad with null word
# compose l1 & clear work
for m in range(c.doctag_len):
# doc vector(s)
memcpy(&c.neu1[m * c.vector_size], &c.doctag_vectors[c.doctag_indexes[m] * c.vector_size],
c.vector_size * cython.sizeof(REAL_t))
n = 0
for m in range(j, k):
# word vectors in window
if m == i:
continue
if m < 0 or m >= c.document_len:
c.window_indexes[n] = c.null_word_index
else:
c.window_indexes[n] = c.indexes[m]
n += 1
for m in range(2 * c.window):
memcpy(&c.neu1[(c.doctag_len + m) * c.vector_size], &c.word_vectors[c.window_indexes[m] * c.vector_size],
c.vector_size * cython.sizeof(REAL_t))
memset(c.work, 0, c.layer1_size * cython.sizeof(REAL_t)) # work to accumulate l1 error
if c.hs:
fast_document_dmc_hs(c.points[i], c.codes[i], c.codelens[i],
c.neu1, c.syn1, c.alpha, c.work,
c.layer1_size, c.vector_size, c.learn_hidden)
if c.negative:
c.next_random = fast_document_dmc_neg(c.negative, c.cum_table, c.cum_table_len, c.next_random,
c.neu1, c.syn1neg, c.indexes[i], c.alpha, c.work,
c.layer1_size, c.vector_size, c.learn_hidden)
if c.learn_doctags:
for m in range(c.doctag_len):
our_saxpy(&c.vector_size, &c.doctags_lockf[c.doctag_indexes[m] % c.doctags_lockf_len], &c.work[m * c.vector_size],
&ONE, &c.doctag_vectors[c.doctag_indexes[m] * c.vector_size], &ONE)
if c.learn_words:
for m in range(2 * c.window):
our_saxpy(&c.vector_size, &c.words_lockf[c.window_indexes[m] % c.words_lockf_len], &c.work[(c.doctag_len + m) * c.vector_size],
&ONE, &c.word_vectors[c.window_indexes[m] * c.vector_size], &ONE)
return result
| 31,445 | Python | .py | 611 | 40.972177 | 147 | 0.605419 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,132 | ldamodel.py | piskvorky_gensim/gensim/models/ldamodel.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Optimized `Latent Dirichlet Allocation (LDA) <https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation>`_ in Python.
For a faster implementation of LDA (parallelized for multicore machines), see also :mod:`gensim.models.ldamulticore`.
This module allows both LDA model estimation from a training corpus and inference of topic
distribution on new, unseen documents. The model can also be updated with new documents
for online training.
The core estimation code is based on the `onlineldavb.py script
<https://github.com/blei-lab/onlineldavb/blob/master/onlineldavb.py>`_, by
Matthew D. Hoffman, David M. Blei, Francis Bach:
`'Online Learning for Latent Dirichlet Allocation', NIPS 2010`_.
.. _'Online Learning for Latent Dirichlet Allocation', NIPS 2010: online-lda_
.. _'Online Learning for LDA' by Hoffman et al.: online-lda_
.. _online-lda: https://papers.neurips.cc/paper/2010/file/71f6278d140af599e06ad9bf1ba03cb0-Paper.pdf
The algorithm:
#. Is **streamed**: training documents may come in sequentially, no random access required.
#. Runs in **constant memory** w.r.t. the number of documents: size of the training corpus does not affect memory
footprint, can process corpora larger than RAM.
#. Is **distributed**: makes use of a cluster of machines, if available, to speed up model estimation.
Usage examples
--------------
Train an LDA model using a Gensim corpus
.. sourcecode:: pycon
>>> from gensim.test.utils import common_texts
>>> from gensim.corpora.dictionary import Dictionary
>>>
>>> # Create a corpus from a list of texts
>>> common_dictionary = Dictionary(common_texts)
>>> common_corpus = [common_dictionary.doc2bow(text) for text in common_texts]
>>>
>>> # Train the model on the corpus.
>>> lda = LdaModel(common_corpus, num_topics=10)
Save a model to disk, or reload a pre-trained model
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> # Save model to disk.
>>> temp_file = datapath("model")
>>> lda.save(temp_file)
>>>
>>> # Load a potentially pretrained model from disk.
>>> lda = LdaModel.load(temp_file)
Query the model using new, unseen documents
.. sourcecode:: pycon
>>> # Create a new corpus, made of previously unseen documents.
>>> other_texts = [
... ['computer', 'time', 'graph'],
... ['survey', 'response', 'eps'],
... ['human', 'system', 'computer']
... ]
>>> other_corpus = [common_dictionary.doc2bow(text) for text in other_texts]
>>>
>>> unseen_doc = other_corpus[0]
>>> vector = lda[unseen_doc] # get topic probability distribution for a document
Update the model by incrementally training on the new corpus
.. sourcecode:: pycon
>>> lda.update(other_corpus)
>>> vector = lda[unseen_doc]
A lot of parameters can be tuned to optimize training for your specific case
.. sourcecode:: pycon
>>> lda = LdaModel(common_corpus, num_topics=50, alpha='auto', eval_every=5) # learn asymmetric alpha from data
"""
import logging
import numbers
import os
import time
from collections import defaultdict
import numpy as np
from scipy.special import gammaln, psi # gamma function utils
from scipy.special import polygamma
from gensim import interfaces, utils, matutils
from gensim.matutils import (
kullback_leibler, hellinger, jaccard_distance, jensen_shannon,
dirichlet_expectation, logsumexp, mean_absolute_difference,
)
from gensim.models import basemodel, CoherenceModel
from gensim.models.callbacks import Callback
logger = logging.getLogger(__name__)
def update_dir_prior(prior, N, logphat, rho):
"""Update a given prior using Newton's method, described in
`J. Huang: "Maximum Likelihood Estimation of Dirichlet Distribution Parameters"
<http://jonathan-huang.org/research/dirichlet/dirichlet.pdf>`_.
Parameters
----------
prior : list of float
The prior for each possible outcome at the previous iteration (to be updated).
N : int
Number of observations.
logphat : list of float
Log probabilities for the current estimation, also called "observed sufficient statistics".
rho : float
Learning rate.
Returns
-------
list of float
The updated prior.
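Examples
--------
A toy invocation with made-up numbers, purely to illustrate the call (if the Newton step would push
any component non-positive, the original prior is returned unchanged):
.. sourcecode:: pycon
>>> import numpy as np
>>> prior = np.array([0.1, 0.2, 0.3])
>>> logphat = np.log([0.2, 0.3, 0.5])
>>> new_prior = update_dir_prior(prior, N=100, logphat=logphat, rho=0.05)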
"""
gradf = N * (psi(np.sum(prior)) - psi(prior) + logphat)
c = N * polygamma(1, np.sum(prior))
q = -N * polygamma(1, prior)
b = np.sum(gradf / q) / (1 / c + np.sum(1 / q))
dprior = -(gradf - b) / q
updated_prior = rho * dprior + prior
if all(updated_prior > 0):
prior = updated_prior
else:
logger.warning("updated prior is not positive")
return prior
class LdaState(utils.SaveLoad):
"""Encapsulate information for distributed computation of :class:`~gensim.models.ldamodel.LdaModel` objects.
Objects of this class are sent over the network, so try to keep them lean to
reduce traffic.
"""
def __init__(self, eta, shape, dtype=np.float32):
"""
Parameters
----------
eta : numpy.ndarray
The prior probabilities assigned to each term.
shape : tuple of (int, int)
Shape of the sufficient statistics: (number of topics to be found, number of terms in the vocabulary).
dtype : type
Overrides the numpy array default types.
"""
self.eta = eta.astype(dtype, copy=False)
self.sstats = np.zeros(shape, dtype=dtype)
self.numdocs = 0
self.dtype = dtype
def reset(self):
"""Prepare the state for a new EM iteration (reset sufficient stats)."""
self.sstats[:] = 0.0
self.numdocs = 0
def merge(self, other):
"""Merge the result of an E step from one node with that of another node (summing up sufficient statistics).
The merging is trivial and after merging all cluster nodes, we have the
exact same result as if the computation was run on a single node (no
approximation).
Parameters
----------
other : :class:`~gensim.models.ldamodel.LdaState`
The state object with which the current one will be merged.
"""
assert other is not None
self.sstats += other.sstats
self.numdocs += other.numdocs
def blend(self, rhot, other, targetsize=None):
"""Merge the current state with another one using a weighted average for the sufficient statistics.
The number of documents is stretched in both state objects, so that they are of comparable magnitude.
This procedure corresponds to the stochastic gradient update from
`'Online Learning for LDA' by Hoffman et al.`_, see equations (5) and (9).
Parameters
----------
rhot : float
Weight of the `other` state in the computed average. A value of 0.0 means that `other`
is completely ignored. A value of 1.0 means `self` is completely ignored.
other : :class:`~gensim.models.ldamodel.LdaState`
The state object with which the current one will be merged.
targetsize : int, optional
The number of documents to stretch both states to.
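Notes
-----
After both states are stretched to `targetsize` documents, the update is effectively
`self.sstats = (1 - rhot) * scale_self * self.sstats + rhot * scale_other * other.sstats`,
where each `scale_*` factor is `targetsize / numdocs` of the corresponding state (1.0 if that
state is empty or already at `targetsize`).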
"""
assert other is not None
if targetsize is None:
targetsize = self.numdocs
# stretch the current model's expected n*phi counts to target size
if self.numdocs == 0 or targetsize == self.numdocs:
scale = 1.0
else:
scale = 1.0 * targetsize / self.numdocs
self.sstats *= (1.0 - rhot) * scale
# stretch the incoming n*phi counts to target size
if other.numdocs == 0 or targetsize == other.numdocs:
scale = 1.0
else:
logger.info("merging changes from %i documents into a model of %i documents", other.numdocs, targetsize)
scale = 1.0 * targetsize / other.numdocs
self.sstats += rhot * scale * other.sstats
self.numdocs = targetsize
def blend2(self, rhot, other, targetsize=None):
"""Merge the current state with another one using a weighted sum for the sufficient statistics.
In contrast to :meth:`~gensim.models.ldamodel.LdaState.blend`, the sufficient statistics are not scaled
prior to aggregation.
Parameters
----------
rhot : float
Unused.
other : :class:`~gensim.models.ldamodel.LdaState`
The state object with which the current one will be merged.
targetsize : int, optional
The number of documents to stretch both states to.
"""
assert other is not None
if targetsize is None:
targetsize = self.numdocs
# merge the two matrices by summing
self.sstats += other.sstats
self.numdocs = targetsize
def get_lambda(self):
"""Get the parameters of the posterior over the topics, also referred to as "the topics".
Returns
-------
numpy.ndarray
Parameters of the posterior probability over topics.
"""
return self.eta + self.sstats
def get_Elogbeta(self):
"""Get the log (posterior) probabilities for each topic.
Returns
-------
numpy.ndarray
Posterior probabilities for each topic.
"""
return dirichlet_expectation(self.get_lambda())
@classmethod
def load(cls, fname, *args, **kwargs):
"""Load a previously stored state from disk.
Overrides :class:`~gensim.utils.SaveLoad.load` by enforcing the `dtype` parameter
to ensure backwards compatibility.
Parameters
----------
fname : str
Path to file that contains the needed object.
args : object
Positional parameters to be propagated to :class:`~gensim.utils.SaveLoad.load`
kwargs : object
Key-word parameters to be propagated to :class:`~gensim.utils.SaveLoad.load`
Returns
-------
:class:`~gensim.models.ldamodel.LdaState`
The state loaded from the given file.
"""
result = super(LdaState, cls).load(fname, *args, **kwargs)
# dtype could be absent in old models
if not hasattr(result, 'dtype'):
result.dtype = np.float64 # float64 was implicitly used before (because it's the default in numpy)
logging.info("dtype was not set in saved %s file %s, assuming np.float64", result.__class__.__name__, fname)
return result
class LdaModel(interfaces.TransformationABC, basemodel.BaseTopicModel):
"""Train and use Online Latent Dirichlet Allocation model as presented in
`'Online Learning for LDA' by Hoffman et al.`_
Examples
--------
Initialize a model using a Gensim corpus
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus
>>>
>>> lda = LdaModel(common_corpus, num_topics=10)
You can then infer topic distributions on new, unseen documents.
.. sourcecode:: pycon
>>> doc_bow = [(1, 0.3), (2, 0.1), (0, 0.09)]
>>> doc_lda = lda[doc_bow]
The model can be updated (trained) with new documents.
.. sourcecode:: pycon
>>> # In practice (corpus =/= initial training corpus), but we use the same here for simplicity.
>>> other_corpus = common_corpus
>>>
>>> lda.update(other_corpus)
Model persistency is achieved through :meth:`~gensim.models.ldamodel.LdaModel.load` and
:meth:`~gensim.models.ldamodel.LdaModel.save` methods.
"""
def __init__(self, corpus=None, num_topics=100, id2word=None,
distributed=False, chunksize=2000, passes=1, update_every=1,
alpha='symmetric', eta=None, decay=0.5, offset=1.0, eval_every=10,
iterations=50, gamma_threshold=0.001, minimum_probability=0.01,
random_state=None, ns_conf=None, minimum_phi_value=0.01,
per_word_topics=False, callbacks=None, dtype=np.float32):
"""
Parameters
----------
corpus : iterable of list of (int, float), optional
Stream of document vectors or sparse matrix of shape (`num_documents`, `num_terms`).
If you have a CSC in-memory matrix, you can convert it to a
streamed corpus with the help of gensim.matutils.Sparse2Corpus.
If not given, the model is left untrained (presumably because you want to call
:meth:`~gensim.models.ldamodel.LdaModel.update` manually).
num_topics : int, optional
The number of requested latent topics to be extracted from the training corpus.
id2word : {dict of (int, str), :class:`gensim.corpora.dictionary.Dictionary`}
Mapping from word IDs to words. It is used to determine the vocabulary size, as well as for
debugging and topic printing.
distributed : bool, optional
Whether distributed computing should be used to accelerate training.
chunksize : int, optional
Number of documents to be used in each training chunk.
passes : int, optional
Number of passes through the corpus during training.
update_every : int, optional
Number of documents to be iterated through for each update.
Set to 0 for batch learning, > 1 for online iterative learning.
alpha : {float, numpy.ndarray of float, list of float, str}, optional
A-priori belief on document-topic distribution, this can be:
* scalar for a symmetric prior over document-topic distribution,
* 1D array of length equal to num_topics to denote an asymmetric user defined prior for each topic.
Alternatively default prior selecting strategies can be employed by supplying a string:
* 'symmetric': (default) Uses a fixed symmetric prior of `1.0 / num_topics`,
* 'asymmetric': Uses a fixed normalized asymmetric prior of `1.0 / (topic_index + sqrt(num_topics))`,
* 'auto': Learns an asymmetric prior from the corpus (not available if `distributed==True`).
eta : {float, numpy.ndarray of float, list of float, str}, optional
A-priori belief on topic-word distribution, this can be:
* scalar for a symmetric prior over topic-word distribution,
* 1D array of length equal to num_words to denote an asymmetric user defined prior for each word,
* matrix of shape (num_topics, num_words) to assign a probability for each word-topic combination.
Alternatively default prior selecting strategies can be employed by supplying a string:
* 'symmetric': (default) Uses a fixed symmetric prior of `1.0 / num_topics`,
* 'auto': Learns an asymmetric prior from the corpus.
decay : float, optional
A number between (0.5, 1] to weight what percentage of the previous lambda value is forgotten
when each new document is examined.
Corresponds to :math:`\\kappa` from `'Online Learning for LDA' by Hoffman et al.`_
offset : float, optional
Hyper-parameter that controls how much we will slow down the first steps of the first few iterations.
Corresponds to :math:`\\tau_0` from `'Online Learning for LDA' by Hoffman et al.`_
eval_every : int, optional
Log perplexity is estimated once every `eval_every` model updates. Setting this to one slows down training by ~2x.
iterations : int, optional
Maximum number of iterations through the corpus when inferring the topic distribution of a corpus.
gamma_threshold : float, optional
Minimum change in the value of the gamma parameters to continue iterating.
minimum_probability : float, optional
Topics with a probability lower than this threshold will be filtered out.
random_state : {np.random.RandomState, int}, optional
Either a randomState object or a seed to generate one. Useful for reproducibility.
ns_conf : dict of (str, object), optional
Key word parameters propagated to :func:`gensim.utils.getNS` to get a Pyro4 nameserver.
Only used if `distributed` is set to True.
minimum_phi_value : float, optional
if `per_word_topics` is True, this represents a lower bound on the term probabilities.
per_word_topics : bool
If True, the model also computes a list of topics, sorted in descending order of most likely topics for
each word, along with their phi values multiplied by the feature length (i.e. word count).
callbacks : list of :class:`~gensim.models.callbacks.Callback`
Metric callbacks to log and visualize evaluation metrics of the model during training.
dtype : {numpy.float16, numpy.float32, numpy.float64}, optional
Data-type to use during calculations inside model. All inputs are also converted.
"""
self.dtype = np.finfo(dtype).dtype
# store user-supplied parameters
self.id2word = id2word
if corpus is None and self.id2word is None:
raise ValueError(
'at least one of corpus/id2word must be specified, to establish input space dimensionality'
)
if self.id2word is None:
logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
elif len(self.id2word) > 0:
self.num_terms = 1 + max(self.id2word.keys())
else:
self.num_terms = 0
if self.num_terms == 0:
raise ValueError("cannot compute LDA over an empty collection (no terms)")
self.distributed = bool(distributed)
self.num_topics = int(num_topics)
self.chunksize = chunksize
self.decay = decay
self.offset = offset
self.minimum_probability = minimum_probability
self.num_updates = 0
self.passes = passes
self.update_every = update_every
self.eval_every = eval_every
self.minimum_phi_value = minimum_phi_value
self.per_word_topics = per_word_topics
self.callbacks = callbacks
self.alpha, self.optimize_alpha = self.init_dir_prior(alpha, 'alpha')
assert self.alpha.shape == (self.num_topics,), \
"Invalid alpha shape. Got shape %s, but expected (%d, )" % (str(self.alpha.shape), self.num_topics)
self.eta, self.optimize_eta = self.init_dir_prior(eta, 'eta')
assert self.eta.shape == (self.num_terms,) or self.eta.shape == (self.num_topics, self.num_terms), (
"Invalid eta shape. Got shape %s, but expected (%d, 1) or (%d, %d)" %
(str(self.eta.shape), self.num_terms, self.num_topics, self.num_terms))
self.random_state = utils.get_random_state(random_state)
# VB constants
self.iterations = iterations
self.gamma_threshold = gamma_threshold
# set up distributed environment if necessary
if not distributed:
logger.info("using serial LDA version on this node")
self.dispatcher = None
self.numworkers = 1
else:
if self.optimize_alpha:
raise NotImplementedError("auto-optimizing alpha not implemented in distributed LDA")
# set up distributed version
try:
import Pyro4
if ns_conf is None:
ns_conf = {}
with utils.getNS(**ns_conf) as ns:
from gensim.models.lda_dispatcher import LDA_DISPATCHER_PREFIX
self.dispatcher = Pyro4.Proxy(ns.list(prefix=LDA_DISPATCHER_PREFIX)[LDA_DISPATCHER_PREFIX])
logger.debug("looking for dispatcher at %s" % str(self.dispatcher._pyroUri))
self.dispatcher.initialize(
id2word=self.id2word, num_topics=self.num_topics, chunksize=chunksize,
alpha=alpha, eta=eta, distributed=False
)
self.numworkers = len(self.dispatcher.getworkers())
logger.info("using distributed version with %i workers", self.numworkers)
except Exception as err:
logger.error("failed to initialize distributed LDA (%s)", err)
raise RuntimeError("failed to initialize distributed LDA (%s)" % err)
# Initialize the variational distribution q(beta|lambda)
self.state = LdaState(self.eta, (self.num_topics, self.num_terms), dtype=self.dtype)
self.state.sstats[...] = self.random_state.gamma(100., 1. / 100., (self.num_topics, self.num_terms))
self.expElogbeta = np.exp(dirichlet_expectation(self.state.sstats))
# Check that we haven't accidentally fallen back to np.float64
assert self.eta.dtype == self.dtype
assert self.expElogbeta.dtype == self.dtype
# if a training corpus was provided, start estimating the model right away
if corpus is not None:
use_numpy = self.dispatcher is not None
start = time.time()
self.update(corpus, chunks_as_numpy=use_numpy)
self.add_lifecycle_event(
"created",
msg=f"trained {self} in {time.time() - start:.2f}s",
)
def init_dir_prior(self, prior, name):
"""Initialize priors for the Dirichlet distribution.
Parameters
----------
prior : {float, numpy.ndarray of float, list of float, str}
A-priori belief on document-topic distribution. If `name` == 'alpha', then the prior can be:
* scalar for a symmetric prior over document-topic distribution,
* 1D array of length equal to num_topics to denote an asymmetric user defined prior for each topic.
Alternatively default prior selecting strategies can be employed by supplying a string:
* 'symmetric': (default) Uses a fixed symmetric prior of `1.0 / num_topics`,
* 'asymmetric': Uses a fixed normalized asymmetric prior of `1.0 / (topic_index + sqrt(num_topics))`,
* 'auto': Learns an asymmetric prior from the corpus (not available if `distributed==True`).
A-priori belief on topic-word distribution. If `name` == 'eta' then the prior can be:
* scalar for a symmetric prior over topic-word distribution,
* 1D array of length equal to num_words to denote an asymmetric user defined prior for each word,
* matrix of shape (num_topics, num_words) to assign a probability for each word-topic combination.
Alternatively default prior selecting strategies can be employed by supplying a string:
* 'symmetric': (default) Uses a fixed symmetric prior of `1.0 / num_topics`,
* 'auto': Learns an asymmetric prior from the corpus.
name : {'alpha', 'eta'}
Whether the `prior` is parameterized by the alpha vector (1 parameter per topic)
or by the eta (1 parameter per unique term in the vocabulary).
Returns
-------
init_prior: numpy.ndarray
Initialized Dirichlet prior:
If 'alpha' was provided as `name` the shape is (self.num_topics, ).
If 'eta' was provided as `name` the shape is (len(self.id2word), ).
is_auto: bool
Flag that shows if hyperparameter optimization should be used or not.
"""
if prior is None:
prior = 'symmetric'
if name == 'alpha':
prior_shape = self.num_topics
elif name == 'eta':
prior_shape = self.num_terms
else:
raise ValueError("'name' must be 'alpha' or 'eta'")
is_auto = False
if isinstance(prior, str):
if prior == 'symmetric':
logger.info("using symmetric %s at %s", name, 1.0 / self.num_topics)
init_prior = np.fromiter(
(1.0 / self.num_topics for i in range(prior_shape)),
dtype=self.dtype, count=prior_shape,
)
elif prior == 'asymmetric':
if name == 'eta':
raise ValueError("The 'asymmetric' option cannot be used for eta")
init_prior = np.fromiter(
(1.0 / (i + np.sqrt(prior_shape)) for i in range(prior_shape)),
dtype=self.dtype, count=prior_shape,
)
init_prior /= init_prior.sum()
logger.info("using asymmetric %s %s", name, list(init_prior))
elif prior == 'auto':
is_auto = True
init_prior = np.fromiter((1.0 / self.num_topics for i in range(prior_shape)),
dtype=self.dtype, count=prior_shape)
if name == 'alpha':
logger.info("using autotuned %s, starting with %s", name, list(init_prior))
else:
raise ValueError("Unable to determine proper %s value given '%s'" % (name, prior))
elif isinstance(prior, list):
init_prior = np.asarray(prior, dtype=self.dtype)
elif isinstance(prior, np.ndarray):
init_prior = prior.astype(self.dtype, copy=False)
elif isinstance(prior, (np.number, numbers.Real)):
init_prior = np.fromiter((prior for i in range(prior_shape)), dtype=self.dtype)
else:
raise ValueError("%s must be either a np array of scalars, list of scalars, or scalar" % name)
return init_prior, is_auto
def __str__(self):
"""Get a string representation of the current object.
Returns
-------
str
Human readable representation of the most important model parameters.
"""
return "%s<num_terms=%s, num_topics=%s, decay=%s, chunksize=%s>" % (
self.__class__.__name__, self.num_terms, self.num_topics, self.decay, self.chunksize
)
def sync_state(self, current_Elogbeta=None):
"""Propagate the states topic probabilities to the inner object's attribute.
Parameters
----------
current_Elogbeta : numpy.ndarray, optional
Posterior probabilities for each topic.
If omitted, it will be fetched from the model's state.
"""
if current_Elogbeta is None:
current_Elogbeta = self.state.get_Elogbeta()
self.expElogbeta = np.exp(current_Elogbeta)
assert self.expElogbeta.dtype == self.dtype
def clear(self):
"""Clear the model's state to free some memory. Used in the distributed implementation."""
self.state = None
self.Elogbeta = None
def inference(self, chunk, collect_sstats=False):
"""Given a chunk of sparse document vectors, estimate gamma (parameters controlling the topic weights)
for each document in the chunk.
This function does not modify the model. The whole input chunk of documents is assumed to fit in RAM;
chunking of a large corpus must be done earlier in the pipeline. Avoids computing the `phi` variational
parameter directly using the optimization presented in
`Lee, Seung: "Algorithms for non-negative matrix factorization"
<https://papers.nips.cc/paper/1861-algorithms-for-non-negative-matrix-factorization.pdf>`_.
Parameters
----------
chunk : list of list of (int, float)
The corpus chunk on which the inference step will be performed.
collect_sstats : bool, optional
If set to True, also collect (and return) sufficient statistics needed to update the model's topic-word
distributions.
Returns
-------
(numpy.ndarray, {numpy.ndarray, None})
The first element is always returned and it corresponds to the states gamma matrix. The second element is
only returned if `collect_sstats` == True and corresponds to the sufficient statistics for the M step.
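Examples
--------
A small sketch (assumes a model trained on the tiny corpus from the class docstring examples):
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus
>>>
>>> lda = LdaModel(common_corpus, num_topics=2)
>>> gamma, _ = lda.inference(common_corpus[:2])
>>> gamma.shape
(2, 2)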
"""
try:
len(chunk)
except TypeError:
# convert iterators/generators to plain list, so we have len() etc.
chunk = list(chunk)
if len(chunk) > 1:
logger.debug("performing inference on a chunk of %i documents", len(chunk))
# Initialize the variational distribution q(theta|gamma) for the chunk
gamma = self.random_state.gamma(100., 1. / 100., (len(chunk), self.num_topics)).astype(self.dtype, copy=False)
Elogtheta = dirichlet_expectation(gamma)
expElogtheta = np.exp(Elogtheta)
assert Elogtheta.dtype == self.dtype
assert expElogtheta.dtype == self.dtype
if collect_sstats:
sstats = np.zeros_like(self.expElogbeta, dtype=self.dtype)
else:
sstats = None
converged = 0
# Now, for each document d update that document's gamma and phi
# Inference code copied from Hoffman's `onlineldavb.py` (esp. the
# Lee&Seung trick which speeds things up by an order of magnitude, compared
# to Blei's original LDA-C code, cool!).
integer_types = (int, np.integer,)
epsilon = np.finfo(self.dtype).eps
for d, doc in enumerate(chunk):
if len(doc) > 0 and not isinstance(doc[0][0], integer_types):
# make sure the term IDs are ints, otherwise np will get upset
ids = [int(idx) for idx, _ in doc]
else:
ids = [idx for idx, _ in doc]
cts = np.fromiter((cnt for _, cnt in doc), dtype=self.dtype, count=len(doc))
gammad = gamma[d, :]
Elogthetad = Elogtheta[d, :]
expElogthetad = expElogtheta[d, :]
expElogbetad = self.expElogbeta[:, ids]
# The optimal phi_{dwk} is proportional to expElogthetad_k * expElogbetad_kw.
# phinorm is the normalizer.
# TODO treat zeros explicitly, instead of adding epsilon?
phinorm = np.dot(expElogthetad, expElogbetad) + epsilon
# Iterate between gamma and phi until convergence
for _ in range(self.iterations):
lastgamma = gammad
# We represent phi implicitly to save memory and time.
# Substituting the value of the optimal phi back into
# the update for gamma gives this update. Cf. Lee&Seung 2001.
gammad = self.alpha + expElogthetad * np.dot(cts / phinorm, expElogbetad.T)
Elogthetad = dirichlet_expectation(gammad)
expElogthetad = np.exp(Elogthetad)
phinorm = np.dot(expElogthetad, expElogbetad) + epsilon
# If gamma hasn't changed much, we're done.
meanchange = mean_absolute_difference(gammad, lastgamma)
if meanchange < self.gamma_threshold:
converged += 1
break
gamma[d, :] = gammad
assert gammad.dtype == self.dtype
if collect_sstats:
# Contribution of document d to the expected sufficient
# statistics for the M step.
sstats[:, ids] += np.outer(expElogthetad.T, cts / phinorm)
if len(chunk) > 1:
logger.debug("%i/%i documents converged within %i iterations", converged, len(chunk), self.iterations)
if collect_sstats:
# This step finishes computing the sufficient statistics for the
# M step, so that
# sstats[k, w] = \sum_d n_{dw} * phi_{dwk}
# = \sum_d n_{dw} * exp{Elogtheta_{dk} + Elogbeta_{kw}} / phinorm_{dw}.
sstats *= self.expElogbeta
assert sstats.dtype == self.dtype
assert gamma.dtype == self.dtype
return gamma, sstats
def do_estep(self, chunk, state=None):
"""Perform inference on a chunk of documents, and accumulate the collected sufficient statistics.
Parameters
----------
chunk : list of list of (int, float)
The corpus chunk on which the inference step will be performed.
state : :class:`~gensim.models.ldamodel.LdaState`, optional
The state to be updated with the newly accumulated sufficient statistics. If None, the model's
`self.state` is updated.
Returns
-------
numpy.ndarray
Gamma parameters controlling the topic weights, shape (`len(chunk)`, `self.num_topics`).
"""
if state is None:
state = self.state
gamma, sstats = self.inference(chunk, collect_sstats=True)
state.sstats += sstats
state.numdocs += gamma.shape[0] # avoids calling len(chunk) on a generator
assert gamma.dtype == self.dtype
return gamma
def update_alpha(self, gammat, rho):
"""Update parameters for the Dirichlet prior on the per-document topic weights.
Parameters
----------
gammat : numpy.ndarray
Previous topic weight parameters.
rho : float
Learning rate.
Returns
-------
numpy.ndarray
Sequence of alpha parameters.
"""
N = float(len(gammat))
logphat = sum(dirichlet_expectation(gamma) for gamma in gammat) / N
assert logphat.dtype == self.dtype
self.alpha = update_dir_prior(self.alpha, N, logphat, rho)
logger.info("optimized alpha %s", list(self.alpha))
assert self.alpha.dtype == self.dtype
return self.alpha
def update_eta(self, lambdat, rho):
"""Update parameters for the Dirichlet prior on the per-topic word weights.
Parameters
----------
lambdat : numpy.ndarray
Previous lambda parameters.
rho : float
Learning rate.
Returns
-------
numpy.ndarray
The updated eta parameters.
"""
N = float(lambdat.shape[0])
logphat = (sum(dirichlet_expectation(lambda_) for lambda_ in lambdat) / N).reshape((self.num_terms,))
assert logphat.dtype == self.dtype
self.eta = update_dir_prior(self.eta, N, logphat, rho)
assert self.eta.dtype == self.dtype
return self.eta
def log_perplexity(self, chunk, total_docs=None):
"""Calculate and return per-word likelihood bound, using a chunk of documents as evaluation corpus.
Also output the calculated statistics, including the perplexity=2^(-bound), to log at INFO level.
Parameters
----------
chunk : list of list of (int, float)
The corpus chunk on which the inference step will be performed.
total_docs : int, optional
Number of docs used for evaluation of the perplexity.
Returns
-------
numpy.ndarray
The variational bound score calculated for each word.
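Examples
--------
A minimal sketch, reusing the tiny training corpus as the held-out chunk purely for illustration
(in practice you would pass genuinely held-out documents):
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus
>>>
>>> lda = LdaModel(common_corpus, num_topics=2)
>>> per_word_bound = lda.log_perplexity(common_corpus)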
"""
if total_docs is None:
total_docs = len(chunk)
corpus_words = sum(cnt for document in chunk for _, cnt in document)
subsample_ratio = 1.0 * total_docs / len(chunk)
perwordbound = self.bound(chunk, subsample_ratio=subsample_ratio) / (subsample_ratio * corpus_words)
logger.info(
"%.3f per-word bound, %.1f perplexity estimate based on a held-out corpus of %i documents with %i words",
perwordbound, np.exp2(-perwordbound), len(chunk), corpus_words
)
return perwordbound
def update(self, corpus, chunksize=None, decay=None, offset=None,
passes=None, update_every=None, eval_every=None, iterations=None,
gamma_threshold=None, chunks_as_numpy=False):
"""Train the model with new documents, by EM-iterating over the corpus until the topics converge, or until
the maximum number of allowed iterations is reached. `corpus` must be an iterable.
In distributed mode, the E step is distributed over a cluster of machines.
Notes
-----
This update also supports updating an already trained model (`self`) with new documents from `corpus`;
the two models are then merged in proportion to the number of old vs. new documents.
This feature is still experimental for non-stationary input streams.
For stationary input (no topic drift in new documents), on the other hand,
this equals the online update of `'Online Learning for LDA' by Hoffman et al.`_
and is guaranteed to converge for any `decay` in (0.5, 1].
Additionally, for smaller corpus sizes,
an increasing `offset` may be beneficial (see Table 1 in the same paper).
Parameters
----------
corpus : iterable of list of (int, float), optional
Stream of document vectors or sparse matrix of shape (`num_documents`, `num_terms`) used to update the
model.
chunksize : int, optional
Number of documents to be used in each training chunk.
decay : float, optional
A number between (0.5, 1] to weight what percentage of the previous lambda value is forgotten
when each new document is examined. Corresponds to :math:`\\kappa` from
`'Online Learning for LDA' by Hoffman et al.`_
offset : float, optional
Hyper-parameter that controls how much we slow down the first few iterations of training.
Corresponds to :math:`\\tau_0` from `'Online Learning for LDA' by Hoffman et al.`_
passes : int, optional
Number of passes through the corpus during training.
update_every : int, optional
Number of documents to be iterated through for each update.
Set to 0 for batch learning, > 1 for online iterative learning.
eval_every : int, optional
Log perplexity is estimated every that many updates. Setting this to one slows down training by ~2x.
iterations : int, optional
Maximum number of iterations through the corpus when inferring the topic distribution of a corpus.
gamma_threshold : float, optional
Minimum change in the value of the gamma parameters to continue iterating.
chunks_as_numpy : bool, optional
Whether each chunk passed to the inference step should be a numpy.ndarray or not. Numpy can in some settings
turn the term IDs into floats; these will be converted back into integers in inference, which incurs a
performance hit. For distributed computing it may be desirable to keep the chunks as `numpy.ndarray`.
"""
# use parameters given in constructor, unless user explicitly overrode them
if decay is None:
decay = self.decay
if offset is None:
offset = self.offset
if passes is None:
passes = self.passes
if update_every is None:
update_every = self.update_every
if eval_every is None:
eval_every = self.eval_every
if iterations is None:
iterations = self.iterations
if gamma_threshold is None:
gamma_threshold = self.gamma_threshold
try:
lencorpus = len(corpus)
except Exception:
logger.warning("input corpus stream has no len(); counting documents")
lencorpus = sum(1 for _ in corpus)
if lencorpus == 0:
logger.warning("LdaModel.update() called with an empty corpus")
return
if chunksize is None:
chunksize = min(lencorpus, self.chunksize)
self.state.numdocs += lencorpus
if update_every:
updatetype = "online"
if passes == 1:
updatetype += " (single-pass)"
else:
updatetype += " (multi-pass)"
updateafter = min(lencorpus, update_every * self.numworkers * chunksize)
else:
updatetype = "batch"
updateafter = lencorpus
evalafter = min(lencorpus, (eval_every or 0) * self.numworkers * chunksize)
updates_per_pass = max(1, lencorpus / updateafter)
logger.info(
"running %s LDA training, %s topics, %i passes over "
"the supplied corpus of %i documents, updating model once "
"every %i documents, evaluating perplexity every %i documents, "
"iterating %ix with a convergence threshold of %f",
updatetype, self.num_topics, passes, lencorpus,
updateafter, evalafter, iterations,
gamma_threshold
)
if updates_per_pass * passes < 10:
logger.warning(
"too few updates, training might not converge; "
"consider increasing the number of passes or iterations to improve accuracy"
)
# rho is the "speed" of updating; TODO try other fncs
# pass_ + num_updates handles increasing the starting t for each pass,
# while allowing it to "reset" on the first pass of each update
def rho():
return pow(offset + pass_ + (self.num_updates / chunksize), -decay)
if self.callbacks:
# pass the list of input callbacks to Callback class
callback = Callback(self.callbacks)
callback.set_model(self)
# initialize metrics list to store metric values after every epoch
self.metrics = defaultdict(list)
for pass_ in range(passes):
if self.dispatcher:
logger.info('initializing %s workers', self.numworkers)
self.dispatcher.reset(self.state)
else:
other = LdaState(self.eta, self.state.sstats.shape, self.dtype)
dirty = False
reallen = 0
chunks = utils.grouper(corpus, chunksize, as_numpy=chunks_as_numpy, dtype=self.dtype)
for chunk_no, chunk in enumerate(chunks):
reallen += len(chunk) # keep track of how many documents we've processed so far
if eval_every and ((reallen == lencorpus) or ((chunk_no + 1) % (eval_every * self.numworkers) == 0)):
self.log_perplexity(chunk, total_docs=lencorpus)
if self.dispatcher:
# add the chunk to dispatcher's job queue, so workers can munch on it
logger.info(
"PROGRESS: pass %i, dispatching documents up to #%i/%i",
pass_, chunk_no * chunksize + len(chunk), lencorpus
)
# this will eventually block until some jobs finish, because the queue has a small finite length
self.dispatcher.putjob(chunk)
else:
logger.info(
"PROGRESS: pass %i, at document #%i/%i",
pass_, chunk_no * chunksize + len(chunk), lencorpus
)
gammat = self.do_estep(chunk, other)
if self.optimize_alpha:
self.update_alpha(gammat, rho())
dirty = True
del chunk
# perform an M step. determine when based on update_every, don't do this after every chunk
if update_every and (chunk_no + 1) % (update_every * self.numworkers) == 0:
if self.dispatcher:
# distributed mode: wait for all workers to finish
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
other = self.dispatcher.getstate()
self.do_mstep(rho(), other, pass_ > 0)
del other # frees up memory
if self.dispatcher:
logger.info('initializing workers')
self.dispatcher.reset(self.state)
else:
other = LdaState(self.eta, self.state.sstats.shape, self.dtype)
dirty = False
if reallen != lencorpus:
raise RuntimeError("input corpus size changed during training (don't use generators as input)")
# append current epoch's metric values
if self.callbacks:
current_metrics = callback.on_epoch_end(pass_)
for metric, value in current_metrics.items():
self.metrics[metric].append(value)
if dirty:
# finish any remaining updates
if self.dispatcher:
# distributed mode: wait for all workers to finish
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
other = self.dispatcher.getstate()
self.do_mstep(rho(), other, pass_ > 0)
del other
dirty = False
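# Example (added; hedged sketch): online/incremental training; assumes `lda` is an
# already trained LdaModel and `new_corpus` is an iterable of BoW documents.
#
#   >>> lda.update(new_corpus)                        # fold new documents into the existing topics
#   >>> lda.update(new_corpus, passes=2, decay=0.7)   # or override the constructor settings per call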
def do_mstep(self, rho, other, extra_pass=False):
"""Maximization step: use linear interpolation between the existing topics and
collected sufficient statistics in `other` to update the topics.
Parameters
----------
rho : float
Learning rate.
other : :class:`~gensim.models.ldamodel.LdaModel`
The model whose sufficient statistics will be used to update the topics.
extra_pass : bool, optional
Whether this step required an additional pass over the corpus.
"""
logger.debug("updating topics")
# update self with the new blend; also keep track of how much did
# the topics change through this update, to assess convergence
previous_Elogbeta = self.state.get_Elogbeta()
self.state.blend(rho, other)
current_Elogbeta = self.state.get_Elogbeta()
self.sync_state(current_Elogbeta)
# print out some debug info at the end of each EM iteration
self.print_topics(5)
diff = mean_absolute_difference(previous_Elogbeta.ravel(), current_Elogbeta.ravel())
logger.info("topic diff=%f, rho=%f", diff, rho)
if self.optimize_eta:
self.update_eta(self.state.get_lambda(), rho)
if not extra_pass:
# only update if this isn't an additional pass
self.num_updates += other.numdocs
def bound(self, corpus, gamma=None, subsample_ratio=1.0):
"""Estimate the variational bound of documents from the corpus as E_q[log p(corpus)] - E_q[log q(corpus)].
Parameters
----------
corpus : iterable of list of (int, float), optional
Stream of document vectors or sparse matrix of shape (`num_documents`, `num_terms`) used to estimate the
variational bounds.
gamma : numpy.ndarray, optional
Topic weight variational parameters for each document. If not supplied, it will be inferred from the model.
subsample_ratio : float, optional
Percentage of the whole corpus represented by the passed `corpus` argument (in case this was a sample).
Set to 1.0 if the whole corpus was passed. This is used as a multiplicative factor to scale the likelihood
appropriately.
Returns
-------
float
The variational bound score accumulated over the whole `corpus` (a single scalar value).
"""
score = 0.0
_lambda = self.state.get_lambda()
Elogbeta = dirichlet_expectation(_lambda)
for d, doc in enumerate(corpus): # stream the input doc-by-doc, in case it's too large to fit in RAM
if d % self.chunksize == 0:
logger.debug("bound: at document #%i", d)
if gamma is None:
gammad, _ = self.inference([doc])
else:
gammad = gamma[d]
Elogthetad = dirichlet_expectation(gammad)
assert gammad.dtype == self.dtype
assert Elogthetad.dtype == self.dtype
# E[log p(doc | theta, beta)]
score += sum(cnt * logsumexp(Elogthetad + Elogbeta[:, int(id)]) for id, cnt in doc)
# E[log p(theta | alpha) - log q(theta | gamma)]; assumes alpha is a vector
score += np.sum((self.alpha - gammad) * Elogthetad)
score += np.sum(gammaln(gammad) - gammaln(self.alpha))
score += gammaln(np.sum(self.alpha)) - gammaln(np.sum(gammad))
# Compensate likelihood for when `corpus` above is only a sample of the whole corpus. This ensures
# that the likelihood is always roughly on the same scale.
score *= subsample_ratio
# E[log p(beta | eta) - log q (beta | lambda)]; assumes eta is a scalar
score += np.sum((self.eta - _lambda) * Elogbeta)
score += np.sum(gammaln(_lambda) - gammaln(self.eta))
if np.ndim(self.eta) == 0:
sum_eta = self.eta * self.num_terms
else:
sum_eta = np.sum(self.eta)
score += np.sum(gammaln(sum_eta) - gammaln(np.sum(_lambda, 1)))
return score
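# Example (added; hedged sketch): the per-word bound reported by `log_perplexity()` is this
# corpus-level bound divided by the number of words, assuming the chunk is the full evaluation corpus.
#
#   >>> n_words = sum(cnt for doc in held_out for _, cnt in doc)
#   >>> per_word_bound = lda.bound(held_out) / n_words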
def show_topics(self, num_topics=10, num_words=10, log=False, formatted=True):
"""Get a representation for selected topics.
Parameters
----------
num_topics : int, optional
Number of topics to be returned. Unlike LSA, there is no natural ordering between the topics in LDA.
The returned subset of all topics is therefore arbitrary and may change between two LDA
training runs.
num_words : int, optional
Number of words to be presented for each topic. These will be the most relevant words (assigned the highest
probability for each topic).
log : bool, optional
Whether the output is also logged, besides being returned.
formatted : bool, optional
Whether the topic representations should be formatted as strings. If False, they are returned as
2-tuples of (word, probability).
Returns
-------
list of {str, tuple of (str, float)}
a list of topics, each represented either as a string (when `formatted` == True) or word-probability
pairs.
"""
if num_topics < 0 or num_topics >= self.num_topics:
num_topics = self.num_topics
chosen_topics = range(num_topics)
else:
num_topics = min(num_topics, self.num_topics)
# add a little random jitter, to randomize results around the same alpha
sort_alpha = self.alpha + 0.0001 * self.random_state.rand(len(self.alpha))
# random_state.rand returns float64, but converting back to dtype won't speed up anything
sorted_topics = list(matutils.argsort(sort_alpha))
chosen_topics = sorted_topics[:num_topics // 2] + sorted_topics[-num_topics // 2:]
shown = []
topic = self.state.get_lambda()
for i in chosen_topics:
topic_ = topic[i]
topic_ = topic_ / topic_.sum() # normalize to probability distribution
bestn = matutils.argsort(topic_, num_words, reverse=True)
topic_ = [(self.id2word[id], topic_[id]) for id in bestn]
if formatted:
topic_ = ' + '.join('%.3f*"%s"' % (v, k) for k, v in topic_)
shown.append((i, topic_))
if log:
logger.info("topic #%i (%.3f): %s", i, self.alpha[i], topic_)
return shown
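# Example (added; hedged sketch): inspect a few topics of a trained model `lda`.
#
#   >>> lda.show_topics(num_topics=3, num_words=5)                    # formatted strings
#   >>> lda.show_topics(num_topics=3, num_words=5, formatted=False)   # (word, probability) pairs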
def show_topic(self, topicid, topn=10):
"""Get the representation for a single topic. Words here are the actual strings, in constrast to
:meth:`~gensim.models.ldamodel.LdaModel.get_topic_terms` that represents words by their vocabulary ID.
Parameters
----------
topicid : int
The ID of the topic to be returned
topn : int, optional
Number of the most significant words that are associated with the topic.
Returns
-------
list of (str, float)
Word - probability pairs for the most relevant words generated by the topic.
"""
return [(self.id2word[id], value) for id, value in self.get_topic_terms(topicid, topn)]
def get_topics(self):
"""Get the term-topic matrix learned during inference.
Returns
-------
numpy.ndarray
The probability for each word in each topic, shape (`num_topics`, `vocabulary_size`).
"""
topics = self.state.get_lambda()
return topics / topics.sum(axis=1)[:, None]
def get_topic_terms(self, topicid, topn=10):
"""Get the representation for a single topic. Words the integer IDs, in constrast to
:meth:`~gensim.models.ldamodel.LdaModel.show_topic` that represents words by the actual strings.
Parameters
----------
topicid : int
The ID of the topic to be returned
topn : int, optional
Number of the most significant words that are associated with the topic.
Returns
-------
list of (int, float)
Word ID - probability pairs for the most relevant words generated by the topic.
"""
topic = self.get_topics()[topicid]
topic = topic / topic.sum() # normalize to probability distribution
bestn = matutils.argsort(topic, topn, reverse=True)
return [(idx, topic[idx]) for idx in bestn]
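# Example (added; hedged sketch): the same topic viewed by word IDs vs. word strings.
#
#   >>> lda.get_topic_terms(topicid=0, topn=5)   # [(word_id, prob), ...]
#   >>> lda.show_topic(topicid=0, topn=5)        # [(word_str, prob), ...]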
def top_topics(self, corpus=None, texts=None, dictionary=None, window_size=None,
coherence='u_mass', topn=20, processes=-1):
"""Get the topics with the highest coherence score the coherence for each topic.
Parameters
----------
corpus : iterable of list of (int, float), optional
Corpus in BoW format.
texts : list of list of str, optional
Tokenized texts, needed for coherence models that use a sliding window based probability estimator
(i.e. coherence=`c_something`).
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`, optional
Gensim dictionary mapping of word IDs to words, used to create the corpus.
If `model.id2word` is present, this is not needed. If both are provided, passed `dictionary` will be used.
window_size : int, optional
Is the size of the window to be used for coherence measures using boolean sliding window as their
probability estimator. For 'u_mass' this doesn't matter.
If None - the default window sizes are used which are: 'c_v' - 110, 'c_uci' - 10, 'c_npmi' - 10.
coherence : {'u_mass', 'c_v', 'c_uci', 'c_npmi'}, optional
Coherence measure to be used.
The fastest method is 'u_mass'; 'c_uci' is also known as `c_pmi`.
For 'u_mass' a corpus should be provided; if texts is provided, it will be converted to a corpus
using the dictionary. For 'c_v', 'c_uci' and 'c_npmi', `texts` should be provided (`corpus` isn't needed).
topn : int, optional
Integer corresponding to the number of top words to be extracted from each topic.
processes : int, optional
Number of processes to use for probability estimation phase, any value less than 1 will be interpreted as
num_cpus - 1.
Returns
-------
list of (list of (int, str), float)
Each element in the list is a pair of a topic representation and its coherence score. Topic representations
are distributions of words, represented as a list of pairs of word IDs and their probabilities.
"""
cm = CoherenceModel(
model=self, corpus=corpus, texts=texts, dictionary=dictionary,
window_size=window_size, coherence=coherence, topn=topn,
processes=processes
)
coherence_scores = cm.get_coherence_per_topic()
str_topics = []
for topic in self.get_topics(): # topic = array of vocab_size floats, one per term
bestn = matutils.argsort(topic, topn=topn, reverse=True) # top terms for topic
beststr = [(topic[_id], self.id2word[_id]) for _id in bestn] # membership, token
str_topics.append(beststr) # list of topn (float membership, token) tuples
scored_topics = zip(str_topics, coherence_scores)
return sorted(scored_topics, key=lambda tup: tup[1], reverse=True)
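# Example (added; hedged sketch): rank topics by intrinsic 'u_mass' coherence,
# assuming `corpus` is the BoW corpus the model was trained on.
#
#   >>> ranked = lda.top_topics(corpus=corpus, coherence='u_mass', topn=10)
#   >>> best_topic, best_score = ranked[0]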
def get_document_topics(self, bow, minimum_probability=None, minimum_phi_value=None,
per_word_topics=False):
"""Get the topic distribution for the given document.
Parameters
----------
bow : list of (int, float)
The document in BOW format.
minimum_probability : float
Topics with an assigned probability lower than this threshold will be discarded.
minimum_phi_value : float
If `per_word_topics` is True, this represents a lower bound on the term probabilities that are included.
If set to None, a value of 1e-8 is used to prevent 0s.
per_word_topics : bool
If True, this function will also return two extra lists as explained in the "Returns" section.
Returns
-------
list of (int, float)
Topic distribution for the whole document. Each element in the list is a pair of a topic's id, and
the probability that was assigned to it.
list of (int, list of (int, float)), optional
Most probable topics per word. Each element in the list is a pair of a word's id, and a list of
topics sorted by their relevance to this word. Only returned if `per_word_topics` was set to True.
list of (int, list of float), optional
Phi relevance values, multiplied by the feature length, for each word-topic combination.
Each element in the list is a pair of a word's id and a list of the phi values between this word and
each topic. Only returned if `per_word_topics` was set to True.
"""
if minimum_probability is None:
minimum_probability = self.minimum_probability
minimum_probability = max(minimum_probability, 1e-8) # never allow zero values in sparse output
if minimum_phi_value is None:
minimum_phi_value = self.minimum_probability
minimum_phi_value = max(minimum_phi_value, 1e-8) # never allow zero values in sparse output
# if the input vector is a corpus, return a transformed corpus
is_corpus, corpus = utils.is_corpus(bow)
if is_corpus:
kwargs = dict(
per_word_topics=per_word_topics,
minimum_probability=minimum_probability,
minimum_phi_value=minimum_phi_value
)
return self._apply(corpus, **kwargs)
gamma, phis = self.inference([bow], collect_sstats=per_word_topics)
topic_dist = gamma[0] / sum(gamma[0]) # normalize distribution
document_topics = [
(topicid, topicvalue) for topicid, topicvalue in enumerate(topic_dist)
if topicvalue >= minimum_probability
]
if not per_word_topics:
return document_topics
word_topic = [] # contains word and corresponding topic
word_phi = [] # contains word and phi values
for word_type, weight in bow:
phi_values = [] # contains (phi_value, topic) pairing to later be sorted
phi_topic = [] # contains topic and corresponding phi value to be returned 'raw' to user
for topic_id in range(0, self.num_topics):
if phis[topic_id][word_type] >= minimum_phi_value:
# appends phi values for each topic for that word
# these phi values are scaled by feature length
phi_values.append((phis[topic_id][word_type], topic_id))
phi_topic.append((topic_id, phis[topic_id][word_type]))
# word_phi entry: (word_id, [(topic_0, phi_value), (topic_1, phi_value), ...])
word_phi.append((word_type, phi_topic))
# sorts the topics based on most likely topic
# producing (word_id, [topic_id_most_probable, topic_id_second_most_probable, ...]) entries in `word_topic`
sorted_phi_values = sorted(phi_values, reverse=True)
topics_sorted = [x[1] for x in sorted_phi_values]
word_topic.append((word_type, topics_sorted))
return document_topics, word_topic, word_phi # returns a 3-tuple when per_word_topics is True
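# Example (added; hedged sketch): per-document and per-word topic assignments for a
# single BoW document `doc`.
#
#   >>> lda.get_document_topics(doc)                              # [(topic_id, prob), ...]
#   >>> doc_topics, word_topics, word_phis = lda.get_document_topics(doc, per_word_topics=True)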
def get_term_topics(self, word_id, minimum_probability=None):
"""Get the most relevant topics to the given word.
Parameters
----------
word_id : int
The word for which the topic distribution will be computed.
minimum_probability : float, optional
Topics with an assigned probability below this threshold will be discarded.
Returns
-------
list of (int, float)
The relevant topics represented as pairs of their ID and their assigned probability, sorted
by relevance to the given word.
"""
if minimum_probability is None:
minimum_probability = self.minimum_probability
minimum_probability = max(minimum_probability, 1e-8) # never allow zero values in sparse output
# if user enters word instead of id in vocab, change to get id
if isinstance(word_id, str):
word_id = self.id2word.doc2bow([word_id])[0][0]
values = []
for topic_id in range(0, self.num_topics):
if self.expElogbeta[topic_id][word_id] >= minimum_probability:
values.append((topic_id, self.expElogbeta[topic_id][word_id]))
return values
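# Example (added; hedged sketch): topics most associated with a single term; either a
# vocabulary ID or the raw string is accepted.
#
#   >>> lda.get_term_topics(word_id=42)
#   >>> lda.get_term_topics('economy')   # hypothetical token, resolved through id2word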
def diff(self, other, distance="kullback_leibler", num_words=100,
n_ann_terms=10, diagonal=False, annotation=True, normed=True):
"""Calculate the difference in topic distributions between two models: `self` and `other`.
Parameters
----------
other : :class:`~gensim.models.ldamodel.LdaModel`
The model which will be compared against the current object.
distance : {'kullback_leibler', 'hellinger', 'jaccard', 'jensen_shannon'}
The distance metric to calculate the difference with.
num_words : int, optional
The number of most relevant words used if `distance == 'jaccard'`. Also used for annotating topics.
n_ann_terms : int, optional
Max number of words in intersection/symmetric difference between topics. Used for annotation.
diagonal : bool, optional
Whether we need the difference between identical topics (the diagonal of the difference matrix).
annotation : bool, optional
Whether the intersection or difference of words between two topics should be returned.
normed : bool, optional
Whether the matrix should be normalized or not.
Returns
-------
numpy.ndarray
A difference matrix. Each element corresponds to the difference between the two topics,
shape (`self.num_topics`, `other.num_topics`)
numpy.ndarray, optional
Annotation matrix where for each pair we include the word from the intersection of the two topics,
and the word from the symmetric difference of the two topics. Only included if `annotation == True`.
Shape (`self.num_topics`, `other_model.num_topics`, 2).
Examples
--------
Get the differences between each pair of topics inferred by two models
.. sourcecode:: pycon
>>> from gensim.models.ldamulticore import LdaMulticore
>>> from gensim.test.utils import datapath
>>>
>>> m1 = LdaMulticore.load(datapath("lda_3_0_1_model"))
>>> m2 = LdaMulticore.load(datapath("ldamodel_python_3_5"))
>>> mdiff, annotation = m1.diff(m2)
>>> topic_diff = mdiff # get matrix with difference for each topic pair from `m1` and `m2`
"""
distances = {
"kullback_leibler": kullback_leibler,
"hellinger": hellinger,
"jaccard": jaccard_distance,
"jensen_shannon": jensen_shannon
}
if distance not in distances:
valid_keys = ", ".join("`{}`".format(x) for x in distances.keys())
raise ValueError("Incorrect distance, valid only {}".format(valid_keys))
if not isinstance(other, self.__class__):
raise ValueError("The parameter `other` must be of type `{}`".format(self.__name__))
distance_func = distances[distance]
d1, d2 = self.get_topics(), other.get_topics()
t1_size, t2_size = d1.shape[0], d2.shape[0]
annotation_terms = None
fst_topics = [{w for (w, _) in self.show_topic(topic, topn=num_words)} for topic in range(t1_size)]
snd_topics = [{w for (w, _) in other.show_topic(topic, topn=num_words)} for topic in range(t2_size)]
if distance == "jaccard":
d1, d2 = fst_topics, snd_topics
if diagonal:
assert t1_size == t2_size, \
"Both input models should have same no. of topics, " \
"as the diagonal will only be valid in a square matrix"
# initialize z and annotation array
z = np.zeros(t1_size)
if annotation:
annotation_terms = np.zeros(t1_size, dtype=list)
else:
# initialize z and annotation matrix
z = np.zeros((t1_size, t2_size))
if annotation:
annotation_terms = np.zeros((t1_size, t2_size), dtype=list)
# iterate over each cell in the initialized z and annotation
for topic in np.ndindex(z.shape):
topic1 = topic[0]
if diagonal:
topic2 = topic1
else:
topic2 = topic[1]
z[topic] = distance_func(d1[topic1], d2[topic2])
if annotation:
pos_tokens = fst_topics[topic1] & snd_topics[topic2]
neg_tokens = fst_topics[topic1].symmetric_difference(snd_topics[topic2])
pos_tokens = list(pos_tokens)[:min(len(pos_tokens), n_ann_terms)]
neg_tokens = list(neg_tokens)[:min(len(neg_tokens), n_ann_terms)]
annotation_terms[topic] = [pos_tokens, neg_tokens]
if normed:
if np.abs(np.max(z)) > 1e-8:
z /= np.max(z)
return z, annotation_terms
def __getitem__(self, bow, eps=None):
"""Get the topic distribution for the given document.
Wraps :meth:`~gensim.models.ldamodel.LdaModel.get_document_topics` to support an operator style call.
Uses the model's current state (set using constructor arguments) to fill in the additional arguments of the
wrapper method.
Parameters
----------
bow : list of (int, float)
The document in BOW format.
eps : float, optional
Topics with an assigned probability lower than this threshold will be discarded.
Returns
-------
list of (int, float)
Topic distribution for the given document. Each topic is represented as a pair of its ID and the probability
assigned to it.
"""
return self.get_document_topics(bow, eps, self.minimum_phi_value, self.per_word_topics)
def save(self, fname, ignore=('state', 'dispatcher'), separately=None, *args, **kwargs):
"""Save the model to a file.
Large internal arrays may be stored into separate files, with `fname` as prefix.
Notes
-----
If you intend to use models across Python 2/3 versions there are a few things to
keep in mind:
1. The pickled Python dictionaries will not work across Python versions
2. The `save` method does not automatically save all numpy arrays separately, only
those ones that exceed `sep_limit` set in :meth:`~gensim.utils.SaveLoad.save`. The main
concern here is the `alpha` array if for instance using `alpha='auto'`.
Please refer to the `wiki recipes section
<https://github.com/RaRe-Technologies/gensim/wiki/
Recipes-&-FAQ#q9-how-do-i-load-a-model-in-python-3-that-was-trained-and-saved-using-python-2>`_
for an example on how to work around these issues.
See Also
--------
:meth:`~gensim.models.ldamodel.LdaModel.load`
Load model.
Parameters
----------
fname : str
Path to the system file where the model will be persisted.
ignore : tuple of str, optional
The named attributes in the tuple will be left out of the pickled model. The reason why
the internal `state` is ignored by default is that it uses its own serialisation rather than the one
provided by this method.
separately : {list of str, None}, optional
If None - automatically detect large numpy/scipy.sparse arrays in the object being stored, and store
them into separate files. This avoids pickle memory errors and allows `mmap`'ing large arrays
back on load efficiently. If list of str - these attributes will be stored in separate files,
the automatic check is not performed in this case.
*args
Positional arguments propagated to :meth:`~gensim.utils.SaveLoad.save`.
**kwargs
Key word arguments propagated to :meth:`~gensim.utils.SaveLoad.save`.
"""
if self.state is not None:
self.state.save(utils.smart_extension(fname, '.state'), *args, **kwargs)
# Save the dictionary separately if not in 'ignore'.
if 'id2word' not in ignore:
utils.pickle(self.id2word, utils.smart_extension(fname, '.id2word'))
# make sure 'state', 'id2word' and 'dispatcher' are ignored from the pickled object, even if
# someone sets the ignore list themselves
if ignore is not None and ignore:
if isinstance(ignore, str):
ignore = [ignore]
ignore = [e for e in ignore if e] # make sure None and '' are not in the list
ignore = list({'state', 'dispatcher', 'id2word'} | set(ignore))
else:
ignore = ['state', 'dispatcher', 'id2word']
# make sure 'expElogbeta' and 'sstats' are ignored from the pickled object, even if
# someone sets the separately list themselves.
separately_explicit = ['expElogbeta', 'sstats']
# Also add 'alpha' and 'eta' to separately list if they are set 'auto' or some
# array manually.
if (isinstance(self.alpha, str) and self.alpha == 'auto') or \
(isinstance(self.alpha, np.ndarray) and len(self.alpha.shape) != 1):
separately_explicit.append('alpha')
if (isinstance(self.eta, str) and self.eta == 'auto') or \
(isinstance(self.eta, np.ndarray) and len(self.eta.shape) != 1):
separately_explicit.append('eta')
# Merge separately_explicit with separately.
if separately:
if isinstance(separately, str):
separately = [separately]
separately = [e for e in separately if e] # make sure None and '' are not in the list
separately = list(set(separately_explicit) | set(separately))
else:
separately = separately_explicit
super(LdaModel, self).save(fname, ignore=ignore, separately=separately, *args, **kwargs)
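# Example (added; hedged sketch): persisting and restoring a model; large arrays and the
# internal state go to sibling files derived from `fname` (e.g. '.state', '.id2word').
#
#   >>> lda.save('/tmp/lda.model')            # hypothetical path
#   >>> lda2 = LdaModel.load('/tmp/lda.model', mmap='r')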
@classmethod
def load(cls, fname, *args, **kwargs):
"""Load a previously saved :class:`gensim.models.ldamodel.LdaModel` from file.
See Also
--------
:meth:`~gensim.models.ldamodel.LdaModel.save`
Save model.
Parameters
----------
fname : str
Path to the file where the model is stored.
*args
Positional arguments propagated to :meth:`~gensim.utils.SaveLoad.load`.
**kwargs
Key word arguments propagated to :meth:`~gensim.utils.SaveLoad.load`.
Examples
--------
Large arrays can be memmap'ed back as read-only (shared memory) by setting `mmap='r'`:
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> from gensim.models.ldamodel import LdaModel
>>>
>>> fname = datapath("lda_3_0_1_model")
>>> lda = LdaModel.load(fname, mmap='r')
"""
kwargs['mmap'] = kwargs.get('mmap', None)
result = super(LdaModel, cls).load(fname, *args, **kwargs)
# check if `random_state` attribute has been set after main pickle load
# if set -> the model to be loaded was saved using a >= 0.13.2 version of Gensim
# if not set -> the model to be loaded was saved using a < 0.13.2 version of Gensim,
# so set `random_state` as the default value
if not hasattr(result, 'random_state'):
result.random_state = utils.get_random_state(None) # using default value `get_random_state(None)`
logging.warning("random_state not set so using default value")
# dtype could be absent in old models
if not hasattr(result, 'dtype'):
result.dtype = np.float64 # float64 was implicitly used before (cause it's default in numpy)
logging.info("dtype was not set in saved %s file %s, assuming np.float64", result.__class__.__name__, fname)
state_fname = utils.smart_extension(fname, '.state')
try:
result.state = LdaState.load(state_fname, *args, **kwargs)
except Exception as e:
logging.warning("failed to load state from %s: %s", state_fname, e)
id2word_fname = utils.smart_extension(fname, '.id2word')
# check if `id2word_fname` file is present on disk
# if present -> the model to be loaded was saved using a >= 0.13.2 version of Gensim,
# so set `result.id2word` using the `id2word_fname` file
# if not present -> the model to be loaded was saved using a < 0.13.2 version of Gensim,
# so `result.id2word` already set after the main pickle load
if os.path.isfile(id2word_fname):
try:
result.id2word = utils.unpickle(id2word_fname)
except Exception as e:
logging.warning("failed to load id2word dictionary from %s: %s", id2word_fname, e)
return result
# ==== piskvorky_gensim/gensim/models/callbacks.py (piskvorky/gensim, LGPL-2.1) ====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 RARE Technologies <info@rare-technologies.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Callbacks can be used to observe the training process.
Since training in huge corpora can be time consuming, we want to offer the users some insight
into the process, in real time. In this way, convergence issues
or other potential problems can be identified early in the process,
saving precious time and resources.
The metrics exposed through this module can be used to construct Callbacks, which will be called
at specific points in the training process, such as "epoch starts" or "epoch finished".
These metrics can be used to assess the model's convergence or correctness, for example
to save the model, visualize intermediate results, or anything else.
Usage examples
--------------
To implement a Callback, inherit from this base class and override one or more of its methods.
Create a callback to save the training model after each epoch
.. sourcecode:: pycon
>>> from gensim.test.utils import get_tmpfile
>>> from gensim.models.callbacks import CallbackAny2Vec
>>>
>>>
>>> class EpochSaver(CallbackAny2Vec):
... '''Callback to save model after each epoch.'''
...
... def __init__(self, path_prefix):
... self.path_prefix = path_prefix
... self.epoch = 0
...
... def on_epoch_end(self, model):
... output_path = get_tmpfile('{}_epoch{}.model'.format(self.path_prefix, self.epoch))
... model.save(output_path)
... self.epoch += 1
...
Create a callback to print progress information to the console:
.. sourcecode:: pycon
>>> class EpochLogger(CallbackAny2Vec):
... '''Callback to log information about training'''
...
... def __init__(self):
... self.epoch = 0
...
... def on_epoch_begin(self, model):
... print("Epoch #{} start".format(self.epoch))
...
... def on_epoch_end(self, model):
... print("Epoch #{} end".format(self.epoch))
... self.epoch += 1
...
>>> from gensim.models import Word2Vec
>>> from gensim.test.utils import common_texts
>>>
>>> epoch_logger = EpochLogger()
>>> w2v_model = Word2Vec(common_texts, epochs=5, vector_size=10, min_count=0, seed=42, callbacks=[epoch_logger])
Epoch #0 start
Epoch #0 end
Epoch #1 start
Epoch #1 end
Epoch #2 start
Epoch #2 end
Epoch #3 start
Epoch #3 end
Epoch #4 start
Epoch #4 end
Create and bind a callback to a topic model. This callback will log the perplexity metric in real time:
.. sourcecode:: pycon
>>> from gensim.models.callbacks import PerplexityMetric
>>> from gensim.models.ldamodel import LdaModel
>>> from gensim.test.utils import common_corpus, common_dictionary
>>>
>>> # Log the perplexity score at the end of each epoch.
>>> perplexity_logger = PerplexityMetric(corpus=common_corpus, logger='shell')
>>> lda = LdaModel(common_corpus, id2word=common_dictionary, num_topics=5, callbacks=[perplexity_logger])
"""
import gensim
import logging
import copy
import sys
import numpy as np
if sys.version_info[0] >= 3:
from queue import Queue
else:
from Queue import Queue
# Visdom is used for training stats visualization
try:
from visdom import Visdom
VISDOM_INSTALLED = True
except ImportError:
VISDOM_INSTALLED = False
class Metric:
"""Base Metric class for topic model evaluation metrics.
Concrete implementations include:
* :class:`~gensim.models.callbacks.CoherenceMetric`
* :class:`~gensim.models.callbacks.PerplexityMetric`
* :class:`~gensim.models.callbacks.DiffMetric`
* :class:`~gensim.models.callbacks.ConvergenceMetric`
"""
def __str__(self):
"""Get a string representation of Metric class.
Returns
-------
str
Human readable representation of the metric.
"""
if self.title is not None:
return self.title
else:
return type(self).__name__[:-6]
def set_parameters(self, **parameters):
"""Set the metric parameters.
Parameters
----------
**parameters
Keyword arguments to override the object's internal attributes.
"""
for parameter, value in parameters.items():
setattr(self, parameter, value)
def get_value(self):
"""Get the metric's value at this point in time.
Warnings
--------
The user **must** provide a concrete implementation for this method for every subclass of
this class.
See Also
--------
:meth:`gensim.models.callbacks.CoherenceMetric.get_value`
:meth:`gensim.models.callbacks.PerplexityMetric.get_value`
:meth:`gensim.models.callbacks.DiffMetric.get_value`
:meth:`gensim.models.callbacks.ConvergenceMetric.get_value`
Returns
-------
object
The metric's type depends on what exactly it measures. In the simplest case it might
be a real number corresponding to an error estimate. It could however be anything else
that is useful to report or visualize.
"""
raise NotImplementedError("Please provide an implementation for `get_value` in your subclass.")
class CoherenceMetric(Metric):
"""Metric class for coherence evaluation.
See Also
--------
:class:`~gensim.models.coherencemodel.CoherenceModel`
"""
def __init__(self, corpus=None, texts=None, dictionary=None, coherence=None,
window_size=None, topn=10, logger=None, viz_env=None, title=None):
"""
Parameters
----------
corpus : {iterable of list of (int, float), scipy.sparse.csc}, optional
Stream of document vectors or sparse matrix of shape (`num_documents`, `num_terms`).
texts : list of list of str, optional
Tokenized texts needed for coherence models that use sliding window based probability estimator.
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`, optional
Gensim dictionary mapping from integer IDs to words, needed to create corpus. If `model.id2word` is present,
this is not needed. If both are provided, `dictionary` will be used.
coherence : {'u_mass', 'c_v', 'c_uci', 'c_npmi'}, optional
Coherence measure to be used. 'c_uci' is also known as 'c_pmi' in the literature.
For 'u_mass', the corpus **MUST** be provided. If `texts` is provided, it will be converted
to corpus using the dictionary. For 'c_v', 'c_uci' and 'c_npmi', `texts` **MUST** be provided.
Corpus is not needed.
window_size : int, optional
Size of the window to be used for coherence measures using boolean
sliding window as their probability estimator. For 'u_mass' this doesn't matter.
If 'None', the default window sizes are used which are:
* `c_v` - 110
* `c_uci` - 10
* `c_npmi` - 10
topn : int, optional
Number of top words to be extracted from each topic.
logger : {'shell', 'visdom'}, optional
Monitor training process using one of the available methods. 'shell' will print the coherence value in
the active shell, while 'visdom' will visualize the coherence value with increasing epochs using the Visdom
visualization framework.
viz_env : object, optional
Visdom environment to use for plotting the graph. Unused.
title : str, optional
Title of the graph plot in case `logger == 'visdom'`. Unused.
"""
self.corpus = corpus
self.dictionary = dictionary
self.coherence = coherence
self.texts = texts
self.window_size = window_size
self.topn = topn
self.logger = logger
self.viz_env = viz_env
self.title = title
def get_value(self, **kwargs):
"""Get the coherence score.
Parameters
----------
**kwargs
Key word arguments to override the object's internal attributes.
One of the following parameters are expected:
* `model` - pre-trained topic model of type :class:`~gensim.models.ldamodel.LdaModel`.
* `topics` - list of tokenized topics.
Returns
-------
float
The coherence score.
"""
# only one of the model or topic would be defined
self.model = None
self.topics = None
super(CoherenceMetric, self).set_parameters(**kwargs)
cm = gensim.models.CoherenceModel(
model=self.model, topics=self.topics, texts=self.texts, corpus=self.corpus,
dictionary=self.dictionary, window_size=self.window_size,
coherence=self.coherence, topn=self.topn
)
return cm.get_coherence()
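# Example (added; hedged sketch): log 'u_mass' coherence after every pass while training,
# assuming `common_corpus` / `common_dictionary` from gensim.test.utils.
#
#   >>> coherence_logger = CoherenceMetric(corpus=common_corpus, coherence='u_mass', logger='shell')
#   >>> lda = LdaModel(common_corpus, id2word=common_dictionary, num_topics=5, callbacks=[coherence_logger])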
class PerplexityMetric(Metric):
"""Metric class for perplexity evaluation."""
def __init__(self, corpus=None, logger=None, viz_env=None, title=None):
"""
Parameters
----------
corpus : {iterable of list of (int, float), scipy.sparse.csc}, optional
Stream of document vectors or sparse matrix of shape (`num_documents`, `num_terms`).
logger : {'shell', 'visdom'}, optional
Monitor training process using one of the available methods. 'shell' will print the perplexity value in
the active shell, while 'visdom' will visualize the perplexity value with increasing epochs using the Visdom
visualization framework.
viz_env : object, optional
Visdom environment to use for plotting the graph. Unused.
title : str, optional
Title of the graph plot in case `logger == 'visdom'`. Unused.
"""
self.corpus = corpus
self.logger = logger
self.viz_env = viz_env
self.title = title
def get_value(self, **kwargs):
"""Get the coherence score.
Parameters
----------
**kwargs
Key word arguments to override the object's internal attributes.
A trained topic model is expected using the 'model' key.
This must be of type :class:`~gensim.models.ldamodel.LdaModel`.
Returns
-------
float
The perplexity score.
"""
super(PerplexityMetric, self).set_parameters(**kwargs)
corpus_words = sum(cnt for document in self.corpus for _, cnt in document)
perwordbound = self.model.bound(self.corpus) / corpus_words
return np.exp2(-perwordbound)
class DiffMetric(Metric):
"""Metric class for topic difference evaluation."""
def __init__(self, distance="jaccard", num_words=100, n_ann_terms=10, diagonal=True,
annotation=False, normed=True, logger=None, viz_env=None, title=None):
"""
Parameters
----------
distance : {'kullback_leibler', 'hellinger', 'jaccard'}, optional
Measure used to calculate difference between any topic pair.
num_words : int, optional
The number of most relevant words used if `distance == 'jaccard'`. Also used for annotating topics.
n_ann_terms : int, optional
Max number of words in intersection/symmetric difference between topics. Used for annotation.
diagonal : bool, optional
Whether we need the difference between identical topics (the diagonal of the difference matrix).
annotation : bool, optional
Whether the intersection or difference of words between two topics should be returned.
normed : bool, optional
Whether the matrix should be normalized or not.
logger : {'shell', 'visdom'}, optional
Monitor training process using one of the available methods. 'shell' will print the metric value in
the active shell, while 'visdom' will visualize the metric value with increasing epochs using the Visdom
visualization framework.
viz_env : object, optional
Visdom environment to use for plotting the graph. Unused.
title : str, optional
Title of the graph plot in case `logger == 'visdom'`. Unused.
"""
self.distance = distance
self.num_words = num_words
self.n_ann_terms = n_ann_terms
self.diagonal = diagonal
self.annotation = annotation
self.normed = normed
self.logger = logger
self.viz_env = viz_env
self.title = title
def get_value(self, **kwargs):
"""Get the difference between each pair of topics in two topic models.
Parameters
----------
**kwargs
Key word arguments to override the object's internal attributes.
Two models of type :class:`~gensim.models.ldamodel.LdaModel`
are expected using the keys `model` and `other_model`.
Returns
-------
np.ndarray of shape (`model.num_topics`, `other_model.num_topics`)
Matrix of differences between each pair of topics.
np.ndarray of shape (`model.num_topics`, `other_model.num_topics`, 2), optional
Annotation matrix where for each pair we include the word from the intersection of the two topics,
and the word from the symmetric difference of the two topics. Only included if `annotation == True`.
"""
super(DiffMetric, self).set_parameters(**kwargs)
diff_diagonal, _ = self.model.diff(
self.other_model, self.distance, self.num_words, self.n_ann_terms,
self.diagonal, self.annotation, self.normed
)
return diff_diagonal
class ConvergenceMetric(Metric):
"""Metric class for convergence evaluation. """
def __init__(self, distance="jaccard", num_words=100, n_ann_terms=10, diagonal=True,
annotation=False, normed=True, logger=None, viz_env=None, title=None):
"""
Parameters
----------
distance : {'kullback_leibler', 'hellinger', 'jaccard'}, optional
Measure used to calculate difference between any topic pair.
num_words : int, optional
The number of most relevant words used if `distance == 'jaccard'`. Also used for annotating topics.
n_ann_terms : int, optional
Max number of words in intersection/symmetric difference between topics. Used for annotation.
diagonal : bool, optional
Whether we need the difference between identical topics (the diagonal of the difference matrix).
annotation : bool, optional
Whether the intersection or difference of words between two topics should be returned.
normed : bool, optional
Whether the matrix should be normalized or not.
logger : {'shell', 'visdom'}, optional
Monitor training process using one of the available methods. 'shell' will print the metric value in
the active shell, while 'visdom' will visualize the metric value with increasing epochs using the Visdom
visualization framework.
viz_env : object, optional
Visdom environment to use for plotting the graph. Unused.
title : str, optional
Title of the graph plot in case `logger == 'visdom'`. Unused.
"""
self.distance = distance
self.num_words = num_words
self.n_ann_terms = n_ann_terms
self.diagonal = diagonal
self.annotation = annotation
self.normed = normed
self.logger = logger
self.viz_env = viz_env
self.title = title
def get_value(self, **kwargs):
"""Get the sum of each element in the difference matrix between each pair of topics in two topic models.
A small difference between the partially trained models produced by subsequent training iterations can indicate
that the model has stopped significantly improving and has therefore converged to a local or global optimum.
Parameters
----------
**kwargs
Key word arguments to override the object's internal attributes.
Two models of type :class:`~gensim.models.ldamodel.LdaModel`
are expected using the keys `model` and `other_model`.
Returns
-------
float
The sum of the difference matrix between two trained topic models (usually the same model after two
subsequent training iterations).
"""
super(ConvergenceMetric, self).set_parameters(**kwargs)
diff_diagonal, _ = self.model.diff(
self.other_model, self.distance, self.num_words, self.n_ann_terms,
self.diagonal, self.annotation, self.normed
)
return np.sum(diff_diagonal)
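# Example (added; hedged sketch): track convergence between consecutive epochs of LDA
# training; a value approaching 0 suggests the topics have stopped changing.
#
#   >>> convergence_logger = ConvergenceMetric(distance='jaccard', logger='shell')
#   >>> lda = LdaModel(common_corpus, id2word=common_dictionary, num_topics=5, passes=10,
#   ...                callbacks=[convergence_logger])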
class Callback:
"""A class representing routines called reactively at specific phases during trained.
These can be used to log or visualize the training progress using any of the metric scores developed before.
The values are stored at the end of each training epoch. The following metric scores are currently available:
* :class:`~gensim.models.callbacks.CoherenceMetric`
* :class:`~gensim.models.callbacks.PerplexityMetric`
* :class:`~gensim.models.callbacks.DiffMetric`
* :class:`~gensim.models.callbacks.ConvergenceMetric`
"""
def __init__(self, metrics):
"""
Parameters
----------
metrics : list of :class:`~gensim.models.callbacks.Metric`
The list of metrics to be reported by the callback.
"""
self.metrics = metrics
def set_model(self, model):
"""Save the model instance and initialize any required variables which would be updated throughout training.
Parameters
----------
model : :class:`~gensim.models.basemodel.BaseTopicModel`
The model for which the training will be reported (logged or visualized) by the callback.
"""
self.model = model
self.previous = None
# check for any metric which need model state from previous epoch
if any(isinstance(metric, (DiffMetric, ConvergenceMetric)) for metric in self.metrics):
self.previous = copy.deepcopy(model)
# store diff diagonals of previous epochs
self.diff_mat = Queue()
if any(metric.logger == "visdom" for metric in self.metrics):
if not VISDOM_INSTALLED:
raise ImportError("Please install Visdom for visualization")
self.viz = Visdom()
# store initial plot windows of every metric (same window will be updated with increasing epochs)
self.windows = []
if any(metric.logger == "shell" for metric in self.metrics):
# set logger for current topic model
self.log_type = logging.getLogger('gensim.models.ldamodel')
def on_epoch_end(self, epoch, topics=None):
"""Report the current epoch's metric value.
Called at the end of each training iteration.
Parameters
----------
epoch : int
The epoch that just ended.
topics : list of list of str, optional
List of tokenized topics. This is required for the coherence metric.
Returns
-------
dict of (str, object)
Mapping from metric names to their values. The type of each value depends on the metric type,
for example :class:`~gensim.models.callbacks.DiffMetric` computes a matrix while
:class:`~gensim.models.callbacks.ConvergenceMetric` computes a float.
"""
# stores current epoch's metric values
current_metrics = {}
# plot all metrics in current epoch
for i, metric in enumerate(self.metrics):
label = str(metric)
value = metric.get_value(topics=topics, model=self.model, other_model=self.previous)
current_metrics[label] = value
if metric.logger == "visdom":
if epoch == 0:
if value.ndim > 0:
diff_mat = np.array([value])
viz_metric = self.viz.heatmap(
X=diff_mat.T, env=metric.viz_env, opts=dict(xlabel='Epochs', ylabel=label, title=label)
)
# store current epoch's diff diagonal
self.diff_mat.put(diff_mat)
# saving initial plot window
self.windows.append(copy.deepcopy(viz_metric))
else:
viz_metric = self.viz.line(
Y=np.array([value]), X=np.array([epoch]), env=metric.viz_env,
opts=dict(xlabel='Epochs', ylabel=label, title=label)
)
# saving initial plot window
self.windows.append(copy.deepcopy(viz_metric))
else:
if value.ndim > 0:
# concatenate with previous epoch's diff diagonals
diff_mat = np.concatenate((self.diff_mat.get(), np.array([value])))
self.viz.heatmap(
X=diff_mat.T, env=metric.viz_env, win=self.windows[i],
opts=dict(xlabel='Epochs', ylabel=label, title=label)
)
self.diff_mat.put(diff_mat)
else:
self.viz.line(
Y=np.array([value]),
X=np.array([epoch]),
env=metric.viz_env,
win=self.windows[i],
update='append'
)
if metric.logger == "shell":
statement = "".join(("Epoch ", str(epoch), ": ", label, " estimate: ", str(value)))
self.log_type.info(statement)
# check for any metric which need model state from previous epoch
if any(isinstance(metric, (DiffMetric, ConvergenceMetric)) for metric in self.metrics):
self.previous = copy.deepcopy(self.model)
return current_metrics
class CallbackAny2Vec:
"""Base class to build callbacks for :class:`~gensim.models.word2vec.Word2Vec` & subclasses.
Callbacks are used to apply custom functions over the model at specific points
during training (epoch start, batch end etc.). This is a base class and its purpose is to be inherited by
custom Callbacks that implement one or more of its methods (depending on the point during training where they
want some action to be taken).
See examples at the module level docstring for how to define your own callbacks by inheriting from this class.
As of gensim 4.0.0, the following callbacks are no longer supported, and overriding them will have no effect:
- on_batch_begin
- on_batch_end
"""
def on_epoch_begin(self, model):
"""Method called at the start of each epoch.
Parameters
----------
model : :class:`~gensim.models.word2vec.Word2Vec` or subclass
Current model.
"""
pass
def on_epoch_end(self, model):
"""Method called at the end of each epoch.
Parameters
----------
model : :class:`~gensim.models.word2vec.Word2Vec` or subclass
Current model.
"""
pass
def on_train_begin(self, model):
"""Method called at the start of the training process.
Parameters
----------
model : :class:`~gensim.models.word2vec.Word2Vec` or subclass
Current model.
"""
pass
def on_train_end(self, model):
"""Method called at the end of the training process.
Parameters
----------
model : :class:`~gensim.models.word2vec.Word2Vec` or subclass
Current model.
"""
pass
# ==== piskvorky_gensim/gensim/models/fasttext_inner.pyx (piskvorky/gensim, LGPL-2.1) ====
#!/usr/bin/env cython
# cython: language_level=3
# cython: boundscheck=False
# cython: wraparound=False
# cython: cdivision=True
# cython: embedsignature=True
# coding: utf-8
"""Optimized Cython functions for training a :class:`~gensim.models.fasttext.FastText` model.
The main entry point is :func:`~gensim.models.fasttext_inner.train_batch_any`
which may be called directly from Python code.
Notes
-----
The implementation of the above functions heavily depends on the
FastTextConfig struct defined in :file:`gensim/models/fasttext_inner.pxd`.
The gensim.models.word2vec.FAST_VERSION value reports what flavor of BLAS
we're currently using:
0: double
1: float
2: no BLAS, use Cython loops instead
See Also
--------
`Basic Linear Algebra Subprograms <http://www.netlib.org/blas/>`_
"""
import cython
import numpy as np
cimport numpy as np
from libc.math cimport exp
from libc.math cimport log
from libc.string cimport memset
#
# We make use of the following BLAS functions (or their analogs, if BLAS is
# unavailable):
#
# scopy(dimensionality, x, inc_x, y, inc_y):
# Performs y = x
#
# sscal: y *= alpha
#
# saxpy(dimensionality, alpha, x, inc_x, y, inc_y):
# Calculates y = y + alpha * x (Single precision A*X Plus Y).
#
# sdot: dot product
#
# The increments (inc_x and inc_y) are usually 1 in our case.
#
# The versions are as chosen in word2vec_inner.pyx, and aliased to `our_` functions
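#
# Tiny worked example (added; not in the original file), with dimensionality=2,
# alpha=2.0, x=[1.0, 3.0], y=[10.0, 20.0] and unit increments:
#
#   saxpy -> y becomes [10 + 2*1, 20 + 2*3] = [12.0, 26.0]
#   sdot  -> 1*10 + 3*20 = 70.0
#   sscal with alpha=0.5 on y=[10.0, 20.0] -> [5.0, 10.0]
#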
from gensim.models.word2vec_inner cimport bisect_left, random_int32, scopy, sscal, \
REAL_t, our_dot, our_saxpy
DEF MAX_SENTENCE_LEN = 10000
DEF MAX_SUBWORDS = 1000
DEF EXP_TABLE_SIZE = 512
DEF MAX_EXP = 8
cdef REAL_t[EXP_TABLE_SIZE] EXP_TABLE
cdef REAL_t[EXP_TABLE_SIZE] LOG_TABLE
cdef int ONE = 1
cdef REAL_t ONEF = <REAL_t>1.0
cdef void fasttext_fast_sentence_sg_neg(FastTextConfig *c, int i, int j) nogil:
"""Perform skipgram training with negative sampling.
Parameters
----------
c : FastTextConfig *
A pointer to a fully initialized and populated struct.
i : int
The index of the word at the center of the current window. This is
referred to as word2 in some parts of the implementation.
j : int
The index of another word inside the window. This is referred to as
word in some parts of the implementation.
Notes
-----
Modifies c.next_random as a side-effect.
"""
cdef:
np.uint32_t word_index = c.indexes[j]
np.uint32_t word2_index = c.indexes[i]
np.uint32_t *subwords_index = c.subwords_idx[i]
np.uint32_t subwords_len = c.subwords_idx_len[i]
cdef long long row1 = word2_index * c.size, row2
cdef unsigned long long modulo = 281474976710655ULL
cdef REAL_t f, g, label, f_dot
cdef np.uint32_t target_index
cdef int d
memset(c.work, 0, c.size * cython.sizeof(REAL_t))
memset(c.neu1, 0, c.size * cython.sizeof(REAL_t))
scopy(&c.size, &c.syn0_vocab[row1], &ONE, c.neu1, &ONE)
#
# Avoid division by zero.
#
cdef REAL_t norm_factor
if subwords_len:
for d in range(subwords_len):
our_saxpy(&c.size, &ONEF, &c.syn0_ngrams[subwords_index[d] * c.size], &ONE, c.neu1, &ONE)
norm_factor = ONEF / subwords_len
sscal(&c.size, &norm_factor, c.neu1, &ONE)
for d in range(c.negative+1):
if d == 0:
target_index = word_index
label = ONEF
else:
target_index = bisect_left(
c.cum_table, (c.next_random >> 16) % c.cum_table[c.cum_table_len-1], 0, c.cum_table_len)
c.next_random = (c.next_random * <unsigned long long>25214903917ULL + 11) & modulo
if target_index == word_index:
continue
label = <REAL_t>0.0
row2 = target_index * c.size
f_dot = our_dot(&c.size, c.neu1, &ONE, &c.syn1neg[row2], &ONE)
if f_dot <= -MAX_EXP or f_dot >= MAX_EXP:
continue
f = EXP_TABLE[<int>((f_dot + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
g = (label - f) * c.alpha
our_saxpy(&c.size, &g, &c.syn1neg[row2], &ONE, c.work, &ONE)
our_saxpy(&c.size, &g, c.neu1, &ONE, &c.syn1neg[row2], &ONE)
our_saxpy(&c.size, &c.vocab_lockf[word2_index % c.vocab_lockf_len], c.work, &ONE, &c.syn0_vocab[row1], &ONE)
for d in range(subwords_len):
our_saxpy(&c.size, &c.ngrams_lockf[subwords_index[d] % c.ngrams_lockf_len],
c.work, &ONE, &c.syn0_ngrams[subwords_index[d]*c.size], &ONE)
cdef void fasttext_fast_sentence_sg_hs(FastTextConfig *c, int i, int j) nogil:
"""Perform skipgram training with hierarchical sampling.
Parameters
----------
c : FastTextConfig *
A pointer to a fully initialized and populated struct.
i : int
The index of the word at the center of the current window. This is
referred to as word2 in some parts of the implementation.
j : int
The index of another word inside the window. This is referred to as
word in some parts of the implementation.
"""
cdef:
np.uint32_t *word_point = c.points[j]
np.uint8_t *word_code = c.codes[j]
int codelen = c.codelens[j]
np.uint32_t word2_index = c.indexes[i]
np.uint32_t *subwords_index = c.subwords_idx[i]
np.uint32_t subwords_len = c.subwords_idx_len[i]
#
# b : long long
# iteration variable
# row1 : long long
# Offset for word2 (!!) into the syn0_vocab array
# row2 : long long
# Another offset into the syn0_vocab array
# f : REAL_t
# Sigmoid of the dot product, looked up in EXP_TABLE
# f_dot : REAL_t
# Dot product result
# g : REAL_t
# Gradient of the objective w.r.t. the output score, scaled by the learning rate
#
cdef long long b
cdef long long row1 = word2_index * c.size, row2
cdef REAL_t f, g, f_dot
memset(c.work, 0, c.size * cython.sizeof(REAL_t))
memset(c.neu1, 0, c.size * cython.sizeof(REAL_t))
scopy(&c.size, &c.syn0_vocab[row1], &ONE, c.neu1, &ONE)
#
# Avoid division by zero.
#
cdef REAL_t norm_factor
if subwords_len:
for d in range(subwords_len):
row2 = subwords_index[d] * c.size
our_saxpy(&c.size, &ONEF, &c.syn0_ngrams[row2], &ONE, c.neu1, &ONE)
norm_factor = ONEF / subwords_len
sscal(&c.size, &norm_factor, c.neu1, &ONE)
for b in range(codelen):
row2 = word_point[b] * c.size
f_dot = our_dot(&c.size, c.neu1, &ONE, &c.syn1[row2], &ONE)
if f_dot <= -MAX_EXP or f_dot >= MAX_EXP:
continue
f = EXP_TABLE[<int>((f_dot + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
g = (1 - word_code[b] - f) * c.alpha
our_saxpy(&c.size, &g, &c.syn1[row2], &ONE, c.work, &ONE)
our_saxpy(&c.size, &g, c.neu1, &ONE, &c.syn1[row2], &ONE)
our_saxpy(&c.size, &c.vocab_lockf[word2_index % c.vocab_lockf_len], c.work, &ONE, &c.syn0_vocab[row1], &ONE)
for d in range(subwords_len):
row2 = subwords_index[d] * c.size
our_saxpy(
&c.size, &c.ngrams_lockf[subwords_index[d] % c.ngrams_lockf_len], c.work, &ONE,
&c.syn0_ngrams[row2], &ONE)
cdef void fasttext_fast_sentence_cbow_neg(FastTextConfig *c, int i, int j, int k) nogil:
"""Perform CBOW training with negative sampling.
Parameters
----------
c : FastTextConfig *
A pointer to a fully initialized and populated struct.
i : int
The index of a word inside the current window.
j : int
The start of the current window.
k : int
The end of the current window. Essentially, j <= i < k.
Notes
-----
Modifies c.next_random as a side-effect.
"""
cdef long long row2
cdef unsigned long long modulo = 281474976710655ULL
cdef REAL_t f, g, count, inv_count = 1.0, label, f_dot
cdef np.uint32_t target_index, word_index
cdef int d, m
word_index = c.indexes[i]
memset(c.neu1, 0, c.size * cython.sizeof(REAL_t))
count = <REAL_t>0.0
for m in range(j, k):
if m == i:
continue
count += ONEF
our_saxpy(&c.size, &ONEF, &c.syn0_vocab[c.indexes[m] * c.size], &ONE, c.neu1, &ONE)
for d in range(c.subwords_idx_len[m]):
count += ONEF
our_saxpy(&c.size, &ONEF, &c.syn0_ngrams[c.subwords_idx[m][d] * c.size], &ONE, c.neu1, &ONE)
if count > (<REAL_t>0.5):
inv_count = ONEF / count
if c.cbow_mean:
sscal(&c.size, &inv_count, c.neu1, &ONE)
memset(c.work, 0, c.size * cython.sizeof(REAL_t))
for d in range(c.negative+1):
if d == 0:
target_index = word_index
label = ONEF
else:
target_index = bisect_left(c.cum_table, (c.next_random >> 16) % c.cum_table[c.cum_table_len-1], 0, c.cum_table_len)
c.next_random = (c.next_random * <unsigned long long>25214903917ULL + 11) & modulo
if target_index == word_index:
continue
label = <REAL_t>0.0
row2 = target_index * c.size
f_dot = our_dot(&c.size, c.neu1, &ONE, &c.syn1neg[row2], &ONE)
if f_dot <= -MAX_EXP:
f = 0.0
elif f_dot >= MAX_EXP:
f = 1.0
else:
f = EXP_TABLE[<int>((f_dot + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
g = (label - f) * c.alpha
our_saxpy(&c.size, &g, &c.syn1neg[row2], &ONE, c.work, &ONE)
our_saxpy(&c.size, &g, c.neu1, &ONE, &c.syn1neg[row2], &ONE)
if not c.cbow_mean: # divide error over summed window vectors
sscal(&c.size, &inv_count, c.work, &ONE)
for m in range(j,k):
if m == i:
continue
our_saxpy(
&c.size, &c.vocab_lockf[c.indexes[m] % c.vocab_lockf_len], c.work, &ONE,
&c.syn0_vocab[c.indexes[m]*c.size], &ONE)
for d in range(c.subwords_idx_len[m]):
our_saxpy(
&c.size, &c.ngrams_lockf[c.subwords_idx[m][d] % c.ngrams_lockf_len], c.work, &ONE,
&c.syn0_ngrams[c.subwords_idx[m][d]*c.size], &ONE)
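# A note on the negative-sampling draw used in the two *_neg kernels above: the target index is found
# by binary-searching the cumulative-frequency table with bits from the same 48-bit linear
# congruential generator that word2vec.c uses. A pure-Python sketch of one such draw (illustrative
# only, not part of gensim's API; the constants mirror the Cython code):
def _draw_negative_sample_sketch(cum_table, next_random):
    """Return (target_index, new_random) for one negative-sampling draw (illustrative sketch)."""
    from bisect import bisect_left
    # pick a slot with probability proportional to its span in the cumulative table
    target_index = bisect_left(cum_table, (next_random >> 16) % cum_table[-1])
    # advance the 48-bit LCG exactly as the nogil code does
    new_random = (next_random * 25214903917 + 11) & 281474976710655
    return target_index, new_random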
cdef void fasttext_fast_sentence_cbow_hs(FastTextConfig *c, int i, int j, int k) nogil:
"""Perform CBOW training with hierarchical sampling.
Parameters
----------
c : FastTextConfig *
A pointer to a fully initialized and populated struct.
i : int
The index of a word inside the current window.
j : int
The start of the current window.
k : int
The end of the current window. Essentially, j <= i < k.
"""
cdef:
np.uint32_t *word_point = c.points[i]
np.uint8_t *word_code = c.codes[i]
cdef long long b
cdef long long row2
cdef REAL_t f, g, count, inv_count = 1.0, f_dot
cdef int m
memset(c.neu1, 0, c.size * cython.sizeof(REAL_t))
count = <REAL_t>0.0
for m in range(j, k):
if m == i:
continue
count += ONEF
our_saxpy(&c.size, &ONEF, &c.syn0_vocab[c.indexes[m] * c.size], &ONE, c.neu1, &ONE)
for d in range(c.subwords_idx_len[m]):
count += ONEF
our_saxpy(&c.size, &ONEF, &c.syn0_ngrams[c.subwords_idx[m][d] * c.size], &ONE, c.neu1, &ONE)
if count > (<REAL_t>0.5):
inv_count = ONEF / count
if c.cbow_mean:
sscal(&c.size, &inv_count, c.neu1, &ONE)
memset(c.work, 0, c.size * cython.sizeof(REAL_t))
for b in range(c.codelens[i]):
row2 = word_point[b] * c.size
f_dot = our_dot(&c.size, c.neu1, &ONE, &c.syn1[row2], &ONE)
if f_dot <= -MAX_EXP or f_dot >= MAX_EXP:
continue
f = EXP_TABLE[<int>((f_dot + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
g = (1 - word_code[b] - f) * c.alpha
our_saxpy(&c.size, &g, &c.syn1[row2], &ONE, c.work, &ONE)
our_saxpy(&c.size, &g, c.neu1, &ONE, &c.syn1[row2], &ONE)
if not c.cbow_mean: # divide error over summed window vectors
sscal(&c.size, &inv_count, c.work, &ONE)
for m in range(j,k):
if m == i:
continue
our_saxpy(
&c.size, &c.vocab_lockf[c.indexes[m] % c.vocab_lockf_len], c.work, &ONE,
&c.syn0_vocab[c.indexes[m]*c.size], &ONE)
for d in range(c.subwords_idx_len[m]):
our_saxpy(
&c.size, &c.ngrams_lockf[c.subwords_idx[m][d] % c.ngrams_lockf_len], c.work, &ONE,
&c.syn0_ngrams[c.subwords_idx[m][d]*c.size], &ONE)
cdef void init_ft_config(FastTextConfig *c, model, alpha, _work, _neu1):
"""Load model parameters into a FastTextConfig struct.
The struct itself is defined and documented in fasttext_inner.pxd.
Parameters
----------
c : FastTextConfig *
A pointer to the struct to initialize.
model : gensim.models.fasttext.FastText
The model to load.
alpha : float
The initial learning rate.
_work : np.ndarray
Private working memory for each worker.
_neu1 : np.ndarray
Private working memory for each worker.
"""
c.sg = model.sg
c.hs = model.hs
c.negative = model.negative
c.sample = (model.sample != 0)
c.cbow_mean = model.cbow_mean
c.window = model.window
c.workers = model.workers
c.syn0_vocab = <REAL_t *>(np.PyArray_DATA(model.wv.vectors_vocab))
c.syn0_ngrams = <REAL_t *>(np.PyArray_DATA(model.wv.vectors_ngrams))
# EXPERIMENTAL lockf scaled suppression/enablement of training
c.vocab_lockf = <REAL_t *>(np.PyArray_DATA(model.wv.vectors_vocab_lockf))
c.vocab_lockf_len = len(model.wv.vectors_vocab_lockf)
c.ngrams_lockf = <REAL_t *>(np.PyArray_DATA(model.wv.vectors_ngrams_lockf))
c.ngrams_lockf_len = len(model.wv.vectors_ngrams_lockf)
c.alpha = alpha
c.size = model.wv.vector_size
if c.hs:
c.syn1 = <REAL_t *>(np.PyArray_DATA(model.syn1))
if c.negative:
c.syn1neg = <REAL_t *>(np.PyArray_DATA(model.syn1neg))
c.cum_table = <np.uint32_t *>(np.PyArray_DATA(model.cum_table))
c.cum_table_len = len(model.cum_table)
if c.negative or c.sample:
c.next_random = (2**24) * model.random.randint(0, 2**24) + model.random.randint(0, 2**24)
# convert Python structures to primitive types, so we can release the GIL
c.work = <REAL_t *>np.PyArray_DATA(_work)
c.neu1 = <REAL_t *>np.PyArray_DATA(_neu1)
cdef object populate_ft_config(FastTextConfig *c, wv, buckets_word, sentences):
"""Prepare C structures so we can go "full C" and release the Python GIL.
We create indices over the sentences. We also perform some calculations for
each token and store the result up front to save time: we'll be seeing each
token multiple times because of windowing, so better to do the work once
here.
Parameters
----------
c : FastTextConfig*
A pointer to the struct that will contain the populated indices.
wv : FastTextKeyedVectors
The vocabulary
buckets_word : list
A list containing the buckets each word appears in
sentences : iterable
The sentences to read
Returns
-------
effective_words : int
The number of in-vocabulary tokens.
effective_sentences : int
The number of non-empty sentences.
Notes
-----
If sampling is used, each vocab term must have the .sample_int attribute
initialized.
See Also
--------
:meth:`gensim.models.word2vec.Word2VecVocab.create_binary_tree`
"""
cdef int effective_words = 0
cdef int effective_sentences = 0
cdef np.uint32_t *vocab_sample_ints
c.sentence_idx[0] = 0 # indices of the first sentence always start at 0
if c.sample:
vocab_sample_ints = <np.uint32_t *>np.PyArray_DATA(wv.expandos['sample_int'])
if c.hs:
vocab_codes = wv.expandos['code']
vocab_points = wv.expandos['point']
for sent in sentences:
if not sent:
continue # ignore empty sentences; leave effective_sentences unchanged
for token in sent:
word_index = wv.key_to_index.get(token, None)
if word_index is None:
continue # leaving `effective_words` unchanged = shortening the sentence = expanding the window
if c.sample and vocab_sample_ints[word_index] < random_int32(&c.next_random):
continue
c.indexes[effective_words] = word_index
if wv.bucket:
c.subwords_idx_len[effective_words] = <int>(len(buckets_word[word_index]))
c.subwords_idx[effective_words] = <np.uint32_t *>np.PyArray_DATA(buckets_word[word_index])
else:
c.subwords_idx_len[effective_words] = 0
if c.hs:
c.codelens[effective_words] = <int>len(vocab_codes[word_index])
c.codes[effective_words] = <np.uint8_t *>np.PyArray_DATA(vocab_codes[word_index])
c.points[effective_words] = <np.uint32_t *>np.PyArray_DATA(vocab_points[word_index])
effective_words += 1
if effective_words == MAX_SENTENCE_LEN:
break
# keep track of which words go into which sentence, so we don't train
# across sentence boundaries.
effective_sentences += 1
c.sentence_idx[effective_sentences] = effective_words
if effective_words == MAX_SENTENCE_LEN:
break
return effective_words, effective_sentences
cdef void fasttext_train_any(FastTextConfig *c, int num_sentences) nogil:
"""Performs training on a fully initialized and populated configuration.
Parameters
----------
c : FastTextConfig *
A pointer to the configuration struct.
num_sentences : int
The number of sentences to train.
"""
cdef:
int sent_idx
int sentence_start
int sentence_end
int i
int window_start
int window_end
int j
for sent_idx in range(num_sentences):
sentence_start = c.sentence_idx[sent_idx]
sentence_end = c.sentence_idx[sent_idx + 1]
for i in range(sentence_start, sentence_end):
#
# Determine window boundaries, making sure we don't leak into
# adjacent sentences.
#
window_start = i - c.window + c.reduced_windows[i]
if window_start < sentence_start:
window_start = sentence_start
window_end = i + c.window + 1 - c.reduced_windows[i]
if window_end > sentence_end:
window_end = sentence_end
#
# TODO: why can't I use min/max here? I get a segfault.
#
# window_start = max(sentence_start, i - c.window + c.reduced_windows[i])
# window_end = min(sentence_end, i + c.window + 1 - c.reduced_windows[i])
#
if c.sg == 0:
if c.hs:
fasttext_fast_sentence_cbow_hs(c, i, window_start, window_end)
if c.negative:
fasttext_fast_sentence_cbow_neg(c, i, window_start, window_end)
else:
for j in range(window_start, window_end):
if j == i:
# no reason to train a center word as predicting itself
continue
if c.hs:
fasttext_fast_sentence_sg_hs(c, i, j)
if c.negative:
fasttext_fast_sentence_sg_neg(c, i, j)
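# The window arithmetic above clamps [i - window + reduced, i + window + 1 - reduced) to the current
# sentence, so training never crosses sentence boundaries. The same clamping in plain Python, where
# min/max are unproblematic (illustrative sketch, not part of gensim's API):
def _clamp_window_sketch(i, window, reduced, sentence_start, sentence_end):
    """Return the [start, end) token range trained around center position `i` (illustrative sketch)."""
    window_start = max(sentence_start, i - window + reduced)
    window_end = min(sentence_end, i + window + 1 - reduced)
    return window_start, window_end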
def train_batch_any(model, sentences, alpha, _work, _neu1):
"""Update the model by training on a sequence of sentences.
Each sentence is a list of string tokens, which are looked up in the model's
vocab dictionary. Called internally from :meth:`~gensim.models.fasttext.FastText.train`.
Parameters
----------
model : :class:`~gensim.models.fasttext.FastText`
Model to be trained.
sentences : iterable of list of str
A single batch: part of the corpus streamed directly from disk/network.
alpha : float
Learning rate.
_work : np.ndarray
Private working memory for each worker.
_neu1 : np.ndarray
Private working memory for each worker.
Returns
-------
int
Effective number of words trained.
"""
cdef:
FastTextConfig c
int num_words = 0
int num_sentences = 0
init_ft_config(&c, model, alpha, _work, _neu1)
num_words, num_sentences = populate_ft_config(&c, model.wv, model.wv.buckets_word, sentences)
# precompute "reduced window" offsets in a single randint() call
if model.shrink_windows:
for i, randint in enumerate(model.random.randint(0, c.window, num_words)):
c.reduced_windows[i] = randint
else:
for i in range(num_words):
c.reduced_windows[i] = 0
# release GIL & train on all sentences in the batch
with nogil:
fasttext_train_any(&c, num_sentences)
return num_words
cpdef ft_hash_bytes(bytes bytez):
"""Calculate hash based on `bytez`.
Reproduce `hash method from Facebook fastText implementation
<https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc>`_.
Parameters
----------
bytez : bytes
The string whose hash needs to be calculated, encoded as UTF-8.
Returns
-------
unsigned int
The hash of the string.
"""
cdef np.uint32_t h = 2166136261
cdef char b
for b in bytez:
h = h ^ <np.uint32_t>(<np.int8_t>b)
h = h * 16777619
return h
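# `ft_hash_bytes` is the 32-bit FNV-1a hash, with the twist that each byte is treated as *signed*
# (matching fastText's C++ code, which hashes `char`). A pure-Python equivalent can be handy for
# checking bucket assignments outside Cython (illustrative sketch, not gensim's public API):
def _ft_hash_bytes_sketch(bytez):
    """Return the same 32-bit hash as ft_hash_bytes, in plain Python (illustrative sketch)."""
    h = 2166136261
    for b in bytez:
        b_signed = b - 256 if b > 127 else b       # reinterpret the byte as int8
        h ^= b_signed & 0xFFFFFFFF                 # XOR with the sign-extended byte (as uint32)
        h = (h * 16777619) & 0xFFFFFFFF            # multiply by the FNV prime, wrap to 32 bits
    return h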
cpdef compute_ngrams(word, unsigned int min_n, unsigned int max_n):
"""Get the list of all possible ngrams for a given word.
Parameters
----------
word : str
The word whose ngrams need to be computed.
min_n : unsigned int
Minimum character length of the ngrams.
max_n : unsigned int
Maximum character length of the ngrams.
Returns
-------
list of str
Sequence of character ngrams.
"""
cdef unicode extended_word = f'<{word}>'
ngrams = []
for ngram_length in range(min_n, min(len(extended_word), max_n) + 1):
for i in range(0, len(extended_word) - ngram_length + 1):
ngrams.append(extended_word[i:i + ngram_length])
return ngrams
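# Example of the expansion above: the word is wrapped in angle brackets before slicing, so a call
# such as compute_ngrams('cat', 3, 5) yields
#     ['<ca', 'cat', 'at>', '<cat', 'cat>', '<cat>']
# i.e. every character ngram of '<cat>' whose length lies in [min_n, max_n].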
#
# UTF-8 bytes that begin with 10 are subsequent bytes of a multi-byte sequence,
# as opposed to a new character.
#
cdef unsigned char _MB_MASK = 0xC0
cdef unsigned char _MB_START = 0x80
cpdef compute_ngrams_bytes(word, unsigned int min_n, unsigned int max_n):
"""Computes ngrams for a word.
Ported from the original FB implementation.
Parameters
----------
word : str
A unicode string.
min_n : unsigned int
The minimum ngram length.
max_n : unsigned int
The maximum ngram length.
    Returns
    -------
    list of bytes
        A list of ngrams, where each ngram is a sequence of **bytes**.
See Also
--------
`Original implementation <https://github.com/facebookresearch/fastText/blob/7842495a4d64c7a3bb4339d45d6e64321d002ed8/src/dictionary.cc#L172>`__
"""
cdef bytes utf8_word = ('<%s>' % word).encode("utf-8")
cdef const unsigned char *bytez = utf8_word
cdef size_t num_bytes = len(utf8_word)
cdef size_t j, i, n
ngrams = []
for i in range(num_bytes):
if bytez[i] & _MB_MASK == _MB_START:
continue
j, n = i, 1
while j < num_bytes and n <= max_n:
j += 1
while j < num_bytes and (bytez[j] & _MB_MASK) == _MB_START:
j += 1
if n >= min_n and not (n == 1 and (i == 0 or j == num_bytes)):
ngram = bytes(bytez[i:j])
ngrams.append(ngram)
n += 1
return ngrams
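# Example of the byte-level extraction above: multi-byte UTF-8 characters are never split, because
# continuation bytes (10xxxxxx) are skipped when advancing `j`. With min_n == max_n == 3,
# compute_ngrams_bytes('año', 3, 3) returns
#     [b'<a\xc3\xb1', b'a\xc3\xb1o', b'\xc3\xb1o>']
# i.e. the UTF-8 encodings of the character trigrams '<añ', 'año' and 'ño>'.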
def init():
"""Precompute function `sigmoid(x) = 1 / (1 + exp(-x))`, for x values discretized into table EXP_TABLE.
Also calculate log(sigmoid(x)) into LOG_TABLE.
    We recalculate, rather than re-using the table from word2vec_inner, because Facebook's fastText
    code uses a 512-slot table rather than the 1000-slot table of word2vec.c.
"""
cdef int i
# build the sigmoid table
for i in range(EXP_TABLE_SIZE):
EXP_TABLE[i] = <REAL_t>exp((i / <REAL_t>EXP_TABLE_SIZE * 2 - 1) * MAX_EXP)
EXP_TABLE[i] = <REAL_t>(EXP_TABLE[i] / (EXP_TABLE[i] + 1))
LOG_TABLE[i] = <REAL_t>log( EXP_TABLE[i] )
init() # initialize the module
MAX_WORDS_IN_BATCH = MAX_SENTENCE_LEN
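# The sigmoid lookup used throughout the kernels above can be reproduced in a few lines of plain
# Python/NumPy. This is an illustrative sketch (it assumes MAX_EXP == 6, alongside the 512-slot
# EXP_TABLE mentioned in init()), not part of gensim's API:
def _sigmoid_table_sketch(f_dot, max_exp=6.0, table_size=512):
    """Approximate sigmoid(f_dot) via table lookup, as the kernels do; requires |f_dot| < max_exp."""
    import numpy as np
    x = (np.arange(table_size) / table_size * 2 - 1) * max_exp   # grid over (-max_exp, max_exp)
    table = 1.0 / (1.0 + np.exp(-x))                             # precomputed sigmoid values
    idx = int((f_dot + max_exp) * (table_size / max_exp / 2))    # same index arithmetic as the kernels
    return table[idx]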
| 24,433 | Python | .py | 605 | 32.856198 | 147 | 0.611193 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,135 | word2vec_corpusfile.pyx | piskvorky_gensim/gensim/models/word2vec_corpusfile.pyx |
#!/usr/bin/env cython
# distutils: language = c++
# cython: language_level=3
# cython: boundscheck=False
# cython: wraparound=False
# cython: cdivision=True
# cython: embedsignature=True
# coding: utf-8
#
# Copyright (C) 2018 Dmitry Persiyanov <dmitry.persiyanov@gmail.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Optimized cython functions for file-based training :class:`~gensim.models.word2vec.Word2Vec` model."""
import cython
import numpy as np
from gensim.utils import any2utf8
cimport numpy as np
from libcpp.string cimport string
from libcpp.vector cimport vector
from libcpp cimport bool as bool_t
from gensim.models.word2vec_inner cimport (
w2v_fast_sentence_sg_hs,
w2v_fast_sentence_sg_neg,
w2v_fast_sentence_cbow_hs,
w2v_fast_sentence_cbow_neg,
random_int32,
init_w2v_config,
Word2VecConfig
)
DEF MAX_SENTENCE_LEN = 10000
@cython.final
cdef class CythonVocab:
def __init__(self, wv, hs=0, fasttext=0):
cdef VocabItem word
vocab_sample_ints = wv.expandos['sample_int']
if hs:
vocab_codes = wv.expandos['code']
vocab_points = wv.expandos['point']
for py_token in wv.key_to_index.keys():
token = any2utf8(py_token)
word.index = wv.get_index(py_token)
word.sample_int = vocab_sample_ints[word.index]
if hs:
word.code = <np.uint8_t *>np.PyArray_DATA(vocab_codes[word.index])
word.code_len = <int>len(vocab_codes[word.index])
word.point = <np.uint32_t *>np.PyArray_DATA(vocab_points[word.index])
# subwords information, used only in FastText model
if fasttext:
word.subword_idx_len = <int>(len(wv.buckets_word[word.index]))
word.subword_idx = <np.uint32_t *>np.PyArray_DATA(wv.buckets_word[word.index])
self.vocab[token] = word
cdef cvocab_t* get_vocab_ptr(self) nogil except *:
return &self.vocab
def rebuild_cython_line_sentence(source, max_sentence_length):
return CythonLineSentence(source, max_sentence_length=max_sentence_length)
cdef bytes to_bytes(key):
if isinstance(key, bytes):
return <bytes>key
else:
return key.encode('utf8')
@cython.final
cdef class CythonLineSentence:
def __cinit__(self, source, offset=0, max_sentence_length=MAX_SENTENCE_LEN):
self._thisptr = new FastLineSentence(to_bytes(source), offset)
def __init__(self, source, offset=0, max_sentence_length=MAX_SENTENCE_LEN):
self.source = to_bytes(source)
self.offset = offset
self.max_sentence_length = max_sentence_length
self.max_words_in_batch = max_sentence_length
def __dealloc__(self):
if self._thisptr != NULL:
del self._thisptr
cpdef bool_t is_eof(self) nogil:
return self._thisptr.IsEof()
cpdef vector[string] read_sentence(self) nogil except *:
return self._thisptr.ReadSentence()
cpdef vector[vector[string]] _read_chunked_sentence(self) nogil except *:
cdef vector[string] sent = self.read_sentence()
return self._chunk_sentence(sent)
cpdef vector[vector[string]] _chunk_sentence(self, vector[string] sent) nogil:
cdef vector[vector[string]] res
cdef vector[string] chunk
cdef size_t cur_idx = 0
if sent.size() > self.max_sentence_length:
while cur_idx < sent.size():
chunk.clear()
for i in range(cur_idx, min(cur_idx + self.max_sentence_length, sent.size())):
chunk.push_back(sent[i])
res.push_back(chunk)
cur_idx += chunk.size()
else:
res.push_back(sent)
return res
cpdef void reset(self) nogil:
self._thisptr.Reset()
def __iter__(self):
self.reset()
while not self.is_eof():
chunked_sentence = self._read_chunked_sentence()
for chunk in chunked_sentence:
if not chunk.empty():
yield chunk
def __reduce__(self):
# This function helps pickle to correctly serialize objects of this class.
return rebuild_cython_line_sentence, (self.source, self.max_sentence_length)
cpdef vector[vector[string]] next_batch(self) nogil except *:
cdef:
vector[vector[string]] job_batch
vector[vector[string]] chunked_sentence
vector[string] data
size_t batch_size = 0
size_t last_idx = 0
size_t tmp = 0
int idx
# Try to read data from previous calls which was not returned
if not self.buf_data.empty():
job_batch = self.buf_data
self.buf_data.clear()
for sent in job_batch:
batch_size += sent.size()
while not self.is_eof() and batch_size <= self.max_words_in_batch:
data = self.read_sentence()
chunked_sentence = self._chunk_sentence(data)
for chunk in chunked_sentence:
job_batch.push_back(chunk)
batch_size += chunk.size()
if batch_size > self.max_words_in_batch:
# Save data which doesn't fit in batch in order to return it later.
self.buf_data.clear()
tmp = batch_size
idx = job_batch.size() - 1
while idx >= 0:
if tmp - job_batch[idx].size() <= self.max_words_in_batch:
last_idx = idx + 1
break
else:
tmp -= job_batch[idx].size()
idx -= 1
for i in range(last_idx, job_batch.size()):
self.buf_data.push_back(job_batch[i])
job_batch.resize(last_idx)
return job_batch
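# In plain terms, the C++-backed reader above does two jobs: sentences longer than
# `max_sentence_length` are split into chunks, and chunks are then greedily packed into a batch of
# roughly `max_words_in_batch` words, with any overflow buffered for the next call. A pure-Python
# sketch of the chunking step (illustrative only, not gensim's API):
def _chunk_sentence_sketch(sent, max_sentence_length=10000):  # 10000 == MAX_SENTENCE_LEN above
    """Split `sent` (a list of tokens) into pieces of at most `max_sentence_length` tokens (sketch)."""
    if len(sent) <= max_sentence_length:
        return [sent]
    return [sent[i:i + max_sentence_length] for i in range(0, len(sent), max_sentence_length)]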
cdef void prepare_c_structures_for_batch(
vector[vector[string]] &sentences, int sample, int hs, int window, long long *total_words,
int *effective_words, int *effective_sentences, unsigned long long *next_random,
cvocab_t *vocab, int *sentence_idx, np.uint32_t *indexes, int *codelens,
np.uint8_t **codes, np.uint32_t **points, np.uint32_t *reduced_windows,
int shrink_windows,
) nogil:
cdef VocabItem word
cdef string token
cdef vector[string] sent
sentence_idx[0] = 0 # indices of the first sentence always start at 0
for sent in sentences:
if sent.empty():
continue # ignore empty sentences; leave effective_sentences unchanged
total_words[0] += sent.size()
for token in sent:
# leaving `effective_words` unchanged = shortening the sentence = expanding the window
if vocab[0].find(token) == vocab[0].end():
continue
word = vocab[0][token]
if sample and word.sample_int < random_int32(next_random):
continue
indexes[effective_words[0]] = word.index
if hs:
codelens[effective_words[0]] = word.code_len
codes[effective_words[0]] = word.code
points[effective_words[0]] = word.point
effective_words[0] += 1
if effective_words[0] == MAX_SENTENCE_LEN:
break # TODO: log warning, tally overflow?
# keep track of which words go into which sentence, so we don't train
# across sentence boundaries.
        # indices of sentence number X are between <sentence_idx[X], sentence_idx[X + 1])
effective_sentences[0] += 1
sentence_idx[effective_sentences[0]] = effective_words[0]
if effective_words[0] == MAX_SENTENCE_LEN:
break # TODO: log warning, tally overflow?
# precompute "reduced window" offsets in a single randint() call
if shrink_windows:
for i in range(effective_words[0]):
reduced_windows[i] = random_int32(next_random) % window
else:
for i in range(effective_words[0]):
reduced_windows[i] = 0
cdef REAL_t get_alpha(REAL_t alpha, REAL_t end_alpha, int cur_epoch, int num_epochs) nogil:
return alpha - ((alpha - end_alpha) * (<REAL_t> cur_epoch) / num_epochs)
cdef REAL_t get_next_alpha(
REAL_t start_alpha, REAL_t end_alpha, long long total_examples, long long total_words,
long long expected_examples, long long expected_words, int cur_epoch, int num_epochs) nogil:
cdef REAL_t epoch_progress
if expected_examples != -1:
# examples-based decay
epoch_progress = (<REAL_t> total_examples) / expected_examples
else:
# word-based decay
epoch_progress = (<REAL_t> total_words) / expected_words
cdef REAL_t progress = (cur_epoch + epoch_progress) / num_epochs
cdef REAL_t next_alpha = start_alpha - (start_alpha - end_alpha) * progress
return max(end_alpha, next_alpha)
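# Both helpers above implement the same linear decay of the learning rate: get_alpha() gives the value
# at an epoch boundary, while get_next_alpha() interpolates inside an epoch using the fraction of
# examples (or words) processed so far. Equivalent plain Python (illustrative sketch):
def _next_alpha_sketch(start_alpha, end_alpha, progress_in_epoch, cur_epoch, num_epochs):
    """Linearly decay alpha from start_alpha towards end_alpha over all epochs (illustrative sketch)."""
    progress = (cur_epoch + progress_in_epoch) / num_epochs      # overall training progress in [0, 1]
    next_alpha = start_alpha - (start_alpha - end_alpha) * progress
    return max(end_alpha, next_alpha)                            # never decay below end_alpha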
def train_epoch_sg(model, corpus_file, offset, _cython_vocab, _cur_epoch, _expected_examples, _expected_words, _work,
_neu1, compute_loss,):
"""Train Skipgram model for one epoch by training on an input stream. This function is used only in multistream mode.
Called internally from :meth:`~gensim.models.word2vec.Word2Vec.train`.
Parameters
----------
model : :class:`~gensim.models.word2vec.Word2Vec`
The Word2Vec model instance to train.
corpus_file : str
Path to corpus file.
_cur_epoch : int
Current epoch number. Used for calculating and decaying learning rate.
_work : np.ndarray
Private working memory for each worker.
_neu1 : np.ndarray
Private working memory for each worker.
compute_loss : bool
Whether or not the training loss should be computed in this batch.
Returns
-------
    tuple of (int, int, int)
        The number of sentences processed, the number of effective words (words that exist in the
        vocabulary and were not discarded by downsampling), and the total number of raw words seen.
"""
cdef Word2VecConfig c
# For learning rate updates
cdef int cur_epoch = _cur_epoch
cdef int num_epochs = model.epochs
cdef long long expected_examples = (-1 if _expected_examples is None else _expected_examples)
cdef long long expected_words = (-1 if _expected_words is None else _expected_words)
cdef REAL_t start_alpha = model.alpha
cdef REAL_t end_alpha = model.min_alpha
cdef REAL_t _alpha = get_alpha(model.alpha, end_alpha, cur_epoch, num_epochs)
cdef CythonLineSentence input_stream = CythonLineSentence(corpus_file, offset)
cdef CythonVocab vocab = _cython_vocab
cdef int i, j, k
cdef int effective_words = 0, effective_sentences = 0
cdef long long total_sentences = 0
cdef long long total_effective_words = 0, total_words = 0
cdef int sent_idx, idx_start, idx_end
cdef int shrink_windows = int(model.shrink_windows)
init_w2v_config(&c, model, _alpha, compute_loss, _work)
cdef vector[vector[string]] sentences
with nogil:
input_stream.reset()
while not (input_stream.is_eof() or total_words > expected_words / c.workers):
effective_sentences = 0
effective_words = 0
sentences = input_stream.next_batch()
prepare_c_structures_for_batch(
sentences, c.sample, c.hs, c.window, &total_words, &effective_words, &effective_sentences,
&c.next_random, vocab.get_vocab_ptr(), c.sentence_idx, c.indexes,
c.codelens, c.codes, c.points, c.reduced_windows, shrink_windows)
for sent_idx in range(effective_sentences):
idx_start = c.sentence_idx[sent_idx]
idx_end = c.sentence_idx[sent_idx + 1]
for i in range(idx_start, idx_end):
j = i - c.window + c.reduced_windows[i]
if j < idx_start:
j = idx_start
k = i + c.window + 1 - c.reduced_windows[i]
if k > idx_end:
k = idx_end
for j in range(j, k):
if j == i:
continue
if c.hs:
w2v_fast_sentence_sg_hs(
c.points[i], c.codes[i], c.codelens[i], c.syn0, c.syn1, c.size, c.indexes[j],
c.alpha, c.work, c.words_lockf, c.words_lockf_len, c.compute_loss,
&c.running_training_loss)
if c.negative:
c.next_random = w2v_fast_sentence_sg_neg(
c.negative, c.cum_table, c.cum_table_len, c.syn0, c.syn1neg, c.size,
c.indexes[i], c.indexes[j], c.alpha, c.work, c.next_random,
c.words_lockf, c.words_lockf_len,
c.compute_loss, &c.running_training_loss)
total_sentences += sentences.size()
total_effective_words += effective_words
c.alpha = get_next_alpha(
start_alpha, end_alpha, total_sentences, total_words,
expected_examples, expected_words, cur_epoch, num_epochs)
model.running_training_loss = c.running_training_loss
return total_sentences, total_effective_words, total_words
def train_epoch_cbow(model, corpus_file, offset, _cython_vocab, _cur_epoch, _expected_examples, _expected_words, _work,
_neu1, compute_loss,):
"""Train CBOW model for one epoch by training on an input stream. This function is used only in multistream mode.
Called internally from :meth:`~gensim.models.word2vec.Word2Vec.train`.
Parameters
----------
model : :class:`~gensim.models.word2vec.Word2Vec`
The Word2Vec model instance to train.
corpus_file : str
Path to corpus file.
_cur_epoch : int
Current epoch number. Used for calculating and decaying learning rate.
_work : np.ndarray
Private working memory for each worker.
_neu1 : np.ndarray
Private working memory for each worker.
compute_loss : bool
Whether or not the training loss should be computed in this batch.
Returns
-------
    tuple of (int, int, int)
        The number of sentences processed, the number of effective words (words that exist in the
        vocabulary and were not discarded by downsampling), and the total number of raw words seen.
"""
cdef Word2VecConfig c
# For learning rate updates
cdef int cur_epoch = _cur_epoch
cdef int num_epochs = model.epochs
cdef long long expected_examples = (-1 if _expected_examples is None else _expected_examples)
cdef long long expected_words = (-1 if _expected_words is None else _expected_words)
cdef REAL_t start_alpha = model.alpha
cdef REAL_t end_alpha = model.min_alpha
cdef REAL_t _alpha = get_alpha(model.alpha, end_alpha, cur_epoch, num_epochs)
cdef CythonLineSentence input_stream = CythonLineSentence(corpus_file, offset)
cdef CythonVocab vocab = _cython_vocab
cdef int i, j, k
cdef int effective_words = 0, effective_sentences = 0
cdef long long total_sentences = 0
cdef long long total_effective_words = 0, total_words = 0
cdef int sent_idx, idx_start, idx_end
cdef int shrink_windows = int(model.shrink_windows)
init_w2v_config(&c, model, _alpha, compute_loss, _work, _neu1)
cdef vector[vector[string]] sentences
with nogil:
input_stream.reset()
while not (input_stream.is_eof() or total_words > expected_words / c.workers):
effective_sentences = 0
effective_words = 0
sentences = input_stream.next_batch()
prepare_c_structures_for_batch(
sentences, c.sample, c.hs, c.window, &total_words, &effective_words,
&effective_sentences, &c.next_random, vocab.get_vocab_ptr(), c.sentence_idx,
c.indexes, c.codelens, c.codes, c.points, c.reduced_windows, shrink_windows)
for sent_idx in range(effective_sentences):
idx_start = c.sentence_idx[sent_idx]
idx_end = c.sentence_idx[sent_idx + 1]
for i in range(idx_start, idx_end):
j = i - c.window + c.reduced_windows[i]
if j < idx_start:
j = idx_start
k = i + c.window + 1 - c.reduced_windows[i]
if k > idx_end:
k = idx_end
if c.hs:
w2v_fast_sentence_cbow_hs(
c.points[i], c.codes[i], c.codelens, c.neu1, c.syn0, c.syn1, c.size, c.indexes, c.alpha,
c.work, i, j, k, c.cbow_mean, c.words_lockf, c.words_lockf_len, c.compute_loss,
&c.running_training_loss)
if c.negative:
c.next_random = w2v_fast_sentence_cbow_neg(
c.negative, c.cum_table, c.cum_table_len, c.codelens, c.neu1, c.syn0,
c.syn1neg, c.size, c.indexes, c.alpha, c.work, i, j, k, c.cbow_mean,
c.next_random, c.words_lockf, c.words_lockf_len, c.compute_loss,
&c.running_training_loss)
total_sentences += sentences.size()
total_effective_words += effective_words
c.alpha = get_next_alpha(
start_alpha, end_alpha, total_sentences, total_words,
expected_examples, expected_words, cur_epoch, num_epochs)
model.running_training_loss = c.running_training_loss
return total_sentences, total_effective_words, total_words
CORPUSFILE_VERSION = 1
| 17,788 | Python | .py | 366 | 37.784153 | 121 | 0.61123 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,136 | gensim_genmodel.py | piskvorky_gensim/gensim/examples/dmlcz/gensim_genmodel.py |
#!/usr/bin/env python
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""
USAGE: %(program)s LANGUAGE METHOD
Generate topic models for the specified subcorpus. METHOD is currently one \
of 'tfidf', 'lsi', 'lda', 'rp'.
Example: ./gensim_genmodel.py any lsi
"""
import logging
import sys
import os.path
from gensim.corpora import dmlcorpus, MmCorpus
from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel
import gensim_build
# internal method parameters
DIM_RP = 300 # dimensionality for random projections
DIM_LSI = 200 # for latent semantic indexing
DIM_LDA = 100 # for latent dirichlet allocation
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
logging.info("running %s", ' '.join(sys.argv))
program = os.path.basename(sys.argv[0])
# check and process input arguments
if len(sys.argv) < 3:
print(globals()['__doc__'] % locals())
sys.exit(1)
language = sys.argv[1]
method = sys.argv[2].strip().lower()
logging.info("loading corpus mappings")
config = dmlcorpus.DmlConfig('%s_%s' % (gensim_build.PREFIX, language),
resultDir=gensim_build.RESULT_DIR, acceptLangs=[language])
logging.info("loading word id mapping from %s", config.resultFile('wordids.txt'))
id2word = dmlcorpus.DmlCorpus.loadDictionary(config.resultFile('wordids.txt'))
logging.info("loaded %i word ids", len(id2word))
corpus = MmCorpus(config.resultFile('bow.mm'))
if method == 'tfidf':
model = tfidfmodel.TfidfModel(corpus, id2word=id2word, normalize=True)
model.save(config.resultFile('model_tfidf.pkl'))
elif method == 'lda':
model = ldamodel.LdaModel(corpus, id2word=id2word, num_topics=DIM_LDA)
model.save(config.resultFile('model_lda.pkl'))
elif method == 'lsi':
# first, transform word counts to tf-idf weights
tfidf = tfidfmodel.TfidfModel(corpus, id2word=id2word, normalize=True)
# then find the transformation from tf-idf to latent space
model = lsimodel.LsiModel(tfidf[corpus], id2word=id2word, num_topics=DIM_LSI)
model.save(config.resultFile('model_lsi.pkl'))
elif method == 'rp':
# first, transform word counts to tf-idf weights
tfidf = tfidfmodel.TfidfModel(corpus, id2word=id2word, normalize=True)
# then find the transformation from tf-idf to latent space
model = rpmodel.RpModel(tfidf[corpus], id2word=id2word, num_topics=DIM_RP)
model.save(config.resultFile('model_rp.pkl'))
else:
raise ValueError('unknown topic extraction method: %s' % repr(method))
MmCorpus.saveCorpus(config.resultFile('%s.mm' % method), model[corpus])
logging.info("finished running %s", program)
| 2,946 | Python | .py | 60 | 43.516667 | 95 | 0.698745 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,137 | sources.py | piskvorky_gensim/gensim/examples/dmlcz/sources.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""
This module contains implementations (= different classes) which encapsulate the
idea of a Digital Library document source.
A document source is basically a collection of articles sharing the same format,
same location (type of access), same way of parsing them etc.
Different sources can be aggregated into a single corpus, which is what the
`DmlCorpus` class does (see the `dmlcorpus` module).
"""
import logging
import os
import os.path
import re
import xml.sax # for parsing arxmliv articles
from gensim import utils
import sys
if sys.version_info[0] >= 3:
unicode = str
PAT_TAG = re.compile(r'<(.*?)>(.*)</.*?>')
logger = logging.getLogger('gensim.corpora.sources')
class ArticleSource:
"""
Objects of this class describe a single source of articles.
A source is an abstraction over where the documents reside (the findArticles()
method), how to retrieve their fulltexts, their metadata, how to tokenize the
articles and how to normalize the tokens.
What is NOT abstracted away (ie. must hold for all sources) is the idea of
article identifiers (URIs), which uniquely identify each article within
one source.
This class is just an ABC interface; see eg. DmlSource or ArxmlivSource classes
for concrete instances.
"""
def __init__(self, sourceId):
self.sourceId = sourceId
def __str__(self):
return self.sourceId
def findArticles(self):
raise NotImplementedError('Abstract Base Class')
def getContent(self, uri):
raise NotImplementedError('Abstract Base Class')
def getMeta(self, uri):
raise NotImplementedError('Abstract Base Class')
def tokenize(self, content):
raise NotImplementedError('Abstract Base Class')
def normalizeWord(self, word):
raise NotImplementedError('Abstract Base Class')
# endclass ArticleSource
class DmlSource(ArticleSource):
"""
Article source for articles in DML format (DML-CZ, Numdam):
1) articles = directories starting with '#'
2) content is stored in fulltext.txt
3) metadata are stored in meta.xml
Article URI is currently (a part of) the article's path on filesystem.
See the ArticleSource class for general info on sources.
"""
def __init__(self, sourceId, baseDir):
self.sourceId = sourceId
self.baseDir = os.path.normpath(baseDir)
def __str__(self):
return self.sourceId
@classmethod
def parseDmlMeta(cls, xmlfile):
"""
Parse out all fields from meta.xml, return them as a dictionary.
"""
result = {}
xml = open(xmlfile)
for line in xml:
if line.find('<article>') >= 0: # skip until the beginning of <article> tag
break
for line in xml:
if line.find('</article>') >= 0: # end of <article>, we're done
break
p = re.search(PAT_TAG, line)
if p:
name, cont = p.groups()
name = name.split()[0]
name, cont = name.strip(), cont.strip()
if name == 'msc':
if len(cont) != 5:
logger.warning('invalid MSC=%s in %s', cont, xmlfile)
result.setdefault('msc', []).append(cont)
continue
if name == 'idMR':
cont = cont[2:] # omit MR from MR123456
if name and cont:
result[name] = cont
xml.close()
return result
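    # For illustration, a (hypothetical) meta.xml fragment such as
    #     <article>
    #       <title>On Something</title>
    #       <msc>03E50</msc>
    #       <idMR>MR123456</idMR>
    #     </article>
    # would parse to {'title': 'On Something', 'msc': ['03E50'], 'idMR': '123456'}:
    # MSC codes are accumulated into a list, and the 'MR' prefix is stripped from idMR.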
def idFromDir(self, path):
assert len(path) > len(self.baseDir)
intId = path[1 + path.rfind('#'):]
pathId = path[1 + len(self.baseDir):]
return (intId, pathId)
def isArticle(self, path):
# in order to be valid, the article directory must start with '#'
if not os.path.basename(path).startswith('#'):
return False
# and contain the fulltext.txt file
if not os.path.exists(os.path.join(path, 'fulltext.txt')):
logger.info('missing fulltext in %s', path)
return False
# and also the meta.xml file
if not os.path.exists(os.path.join(path, 'meta.xml')):
logger.info('missing meta.xml in %s', path)
return False
return True
def findArticles(self):
dirTotal = artAccepted = 0
logger.info("looking for '%s' articles inside %s", self.sourceId, self.baseDir)
for root, dirs, files in os.walk(self.baseDir):
dirTotal += 1
root = os.path.normpath(root)
if self.isArticle(root):
artAccepted += 1
yield self.idFromDir(root)
logger.info('%i directories processed, found %i articles', dirTotal, artAccepted)
def getContent(self, uri):
"""
Return article content as a single large string.
"""
intId, pathId = uri
filename = os.path.join(self.baseDir, pathId, 'fulltext.txt')
return open(filename).read()
def getMeta(self, uri):
"""
        Return article metadata as an attribute->value dictionary.
"""
intId, pathId = uri
filename = os.path.join(self.baseDir, pathId, 'meta.xml')
return DmlSource.parseDmlMeta(filename)
def tokenize(self, content):
return [token.encode('utf8') for token in utils.tokenize(content, errors='ignore') if not token.isdigit()]
def normalizeWord(self, word):
wordU = unicode(word, 'utf8')
return wordU.lower().encode('utf8') # lowercase and then convert back to bytestring
# endclass DmlSource
class DmlCzSource(DmlSource):
"""
Article source for articles in DML-CZ format:
1) articles = directories starting with '#'
    2) content is stored in fulltext.txt or fulltext-dspace.txt
    3) there exists a dspace_id file, containing the internal dmlcz id
    4) metadata are stored in meta.xml
See the ArticleSource class for general info on sources.
"""
def idFromDir(self, path):
assert len(path) > len(self.baseDir)
dmlczId = open(os.path.join(path, 'dspace_id')).read().strip()
pathId = path[1 + len(self.baseDir):]
return (dmlczId, pathId)
def isArticle(self, path):
# in order to be valid, the article directory must start with '#'
if not os.path.basename(path).startswith('#'):
return False
# and contain a dspace_id file
if not (os.path.exists(os.path.join(path, 'dspace_id'))):
logger.info('missing dspace_id in %s', path)
return False
# and contain either fulltext.txt or fulltext_dspace.txt file
if not (os.path.exists(os.path.join(path, 'fulltext.txt'))
or os.path.exists(os.path.join(path, 'fulltext-dspace.txt'))):
logger.info('missing fulltext in %s', path)
return False
# and contain the meta.xml file
if not os.path.exists(os.path.join(path, 'meta.xml')):
logger.info('missing meta.xml in %s', path)
return False
return True
def getContent(self, uri):
"""
Return article content as a single large string.
"""
intId, pathId = uri
filename1 = os.path.join(self.baseDir, pathId, 'fulltext.txt')
filename2 = os.path.join(self.baseDir, pathId, 'fulltext-dspace.txt')
if os.path.exists(filename1) and os.path.exists(filename2):
# if both fulltext and dspace files exist, pick the larger one
if os.path.getsize(filename1) < os.path.getsize(filename2):
filename = filename2
else:
filename = filename1
elif os.path.exists(filename1):
filename = filename1
else:
assert os.path.exists(filename2)
filename = filename2
return open(filename).read()
# endclass DmlCzSource
class ArxmlivSource(ArticleSource):
"""
Article source for articles in arxmliv format:
1) articles = directories starting with '#'
2) content is stored in tex.xml
3) metadata in special tags within tex.xml
Article URI is currently (a part of) the article's path on filesystem.
See the ArticleSource class for general info on sources.
"""
class ArxmlivContentHandler(xml.sax.handler.ContentHandler):
def __init__(self):
self.path = [''] # help structure for sax event parsing
self.tokens = [] # will contain tokens once parsing is finished
def startElement(self, name, attr):
# for math tokens, we only care about Math elements directly below <p>
if name == 'Math' and self.path[-1] == 'p' and attr.get('mode', '') == 'inline':
tex = attr.get('tex', '')
if tex and not tex.isdigit():
self.tokens.append('$%s$' % tex.encode('utf8'))
self.path.append(name)
def endElement(self, name):
self.path.pop()
def characters(self, text):
# for text, we only care about tokens directly within the <p> tag
if self.path[-1] == 'p':
tokens = [
token.encode('utf8') for token in utils.tokenize(text, errors='ignore') if not token.isdigit()
]
self.tokens.extend(tokens)
# endclass ArxmlivHandler
class ArxmlivErrorHandler(xml.sax.handler.ErrorHandler):
# Python2.5 implementation of xml.sax is broken -- character streams and
# byte encodings of InputSource are ignored, bad things sometimes happen
# in buffering of multi-byte files (such as utf8), characters get cut in
# the middle, resulting in invalid tokens...
# This is not really a problem with arxmliv xml files themselves, so ignore
# these errors silently.
def error(self, exception):
pass
warning = fatalError = error
# endclass ArxmlivErrorHandler
def __init__(self, sourceId, baseDir):
self.sourceId = sourceId
self.baseDir = os.path.normpath(baseDir)
def __str__(self):
return self.sourceId
def idFromDir(self, path):
assert len(path) > len(self.baseDir)
intId = path[1 + path.rfind('#'):]
pathId = path[1 + len(self.baseDir):]
return (intId, pathId)
def isArticle(self, path):
# in order to be valid, the article directory must start with '#'
if not os.path.basename(path).startswith('#'):
return False
# and contain the tex.xml file
if not os.path.exists(os.path.join(path, 'tex.xml')):
logger.warning('missing tex.xml in %s', path)
return False
return True
def findArticles(self):
dirTotal = artAccepted = 0
logger.info("looking for '%s' articles inside %s", self.sourceId, self.baseDir)
for root, dirs, files in os.walk(self.baseDir):
dirTotal += 1
root = os.path.normpath(root)
if self.isArticle(root):
artAccepted += 1
yield self.idFromDir(root)
logger.info('%i directories processed, found %i articles', dirTotal, artAccepted)
def getContent(self, uri):
"""
Return article content as a single large string.
"""
intId, pathId = uri
filename = os.path.join(self.baseDir, pathId, 'tex.xml')
return open(filename).read()
def getMeta(self, uri):
"""
Return article metadata as an attribute->value dictionary.
"""
# intId, pathId = uri
# filename = os.path.join(self.baseDir, pathId, 'tex.xml')
return {'language': 'eng'} # TODO maybe parse out some meta; but currently not needed for anything...
def tokenize(self, content):
"""
Parse tokens out of xml. There are two types of token: normal text and
mathematics. Both are returned interspersed in a single list, in the same
order as they appeared in the content.
The math tokens will be returned in the form $tex_expression$, ie. with
a dollar sign prefix and suffix.
"""
handler = ArxmlivSource.ArxmlivContentHandler()
xml.sax.parseString(content, handler, ArxmlivSource.ArxmlivErrorHandler())
return handler.tokens
def normalizeWord(self, word):
if word[0] == '$': # ignore math tokens
return word
wordU = unicode(word, 'utf8')
return wordU.lower().encode('utf8') # lowercase and then convert back to bytestring
# endclass ArxmlivSource
| 12,864 | Python | .py | 295 | 34.810169 | 114 | 0.626429 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,138 | gensim_xml.py | piskvorky_gensim/gensim/examples/dmlcz/gensim_xml.py |
#!/usr/bin/env python
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""
USAGE: %(program)s LANGUAGE METHOD
Generate similar.xml files, using a previously built model for METHOD.
Example: ./gensim_xml.py eng lsi
"""
import logging
import sys
import os.path
from gensim.corpora import dmlcorpus, MmCorpus
from gensim.similarities import MatrixSimilarity, SparseMatrixSimilarity
import gensim_build
# set to True to do everything EXCEPT actually writing out similar.xml files to disk.
# similar.xml files are NOT written if DRY_RUN is true.
DRY_RUN = False
# how many 'most similar' documents to store in each similar.xml?
MIN_SCORE = 0.0 # prune based on similarity score (all below MIN_SCORE are ignored)
MAX_SIMILAR = 10 # prune based on rank (at most MAX_SIMILAR are stored). set to 0 to store all of them (no limit).
# if there are no similar articles (after the pruning), do we still want to generate similar.xml?
SAVE_EMPTY = True
# xml template for similar articles
ARTICLE = """
<article weight="%(score)f">
<authors>
<author>%(author)s</author>
</authors>
<title>%(title)s</title>
<suffix>%(suffix)s</suffix>
<links>
<link source="%(source)s" id="%(intId)s" path="%(pathId)s"/>
</links>
</article>"""
# template for the whole similar.xml file (will be filled with multiple ARTICLE instances)
SIMILAR = """\
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<related>%s
</related>
"""
def generateSimilar(corpus, index, method):
for docNo, topSims in enumerate(index): # for each document
# store similarities to the following file
outfile = os.path.join(corpus.articleDir(docNo), 'similar_%s.xml' % method)
articles = [] # collect similars in this list
for docNo2, score in topSims: # for each most similar article
if score > MIN_SCORE and docNo != docNo2:
source, (intId, pathId) = corpus.documents[docNo2]
meta = corpus.getMeta(docNo2)
suffix, author, title = '', meta.get('author', ''), meta.get('title', '')
articles.append(ARTICLE % locals()) # add the similar article to output
                if MAX_SIMILAR and len(articles) >= MAX_SIMILAR:  # MAX_SIMILAR == 0 means "no limit"
                    break
# now `articles` holds multiple strings in similar_*.xml format
if SAVE_EMPTY or articles:
output = ''.join(articles) # concat all similars to one string
if not DRY_RUN: # only open output files for writing if DRY_RUN is false
logging.info("generating %s (%i similars)", outfile, len(articles))
outfile = open(outfile, 'w')
outfile.write(SIMILAR % output) # add xml headers and print to file
outfile.close()
else:
logging.info("would be generating %s (%i similars):%s\n", outfile, len(articles), output)
else:
logging.debug("skipping %s (no similar found)", outfile)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
logging.info("running %s", ' '.join(sys.argv))
program = os.path.basename(sys.argv[0])
# check and process input arguments
if len(sys.argv) < 3:
print(globals()['__doc__'] % locals())
sys.exit(1)
language = sys.argv[1]
method = sys.argv[2].strip().lower()
logging.info("loading corpus mappings")
config = dmlcorpus.DmlConfig('%s_%s' % (gensim_build.PREFIX, language),
resultDir=gensim_build.RESULT_DIR, acceptLangs=[language])
logging.info("loading word id mapping from %s", config.resultFile('wordids.txt'))
id2word = dmlcorpus.DmlCorpus.loadDictionary(config.resultFile('wordids.txt'))
logging.info("loaded %i word ids", len(id2word))
corpus = dmlcorpus.DmlCorpus.load(config.resultFile('.pkl'))
input = MmCorpus(config.resultFile('_%s.mm' % method))
assert len(input) == len(corpus), \
"corpus size mismatch (%i vs %i): run ./gensim_genmodel.py again" % (len(input), len(corpus))
# initialize structure for similarity queries
if method == 'lsi' or method == 'rp': # for these methods, use dense vectors
index = MatrixSimilarity(input, num_best=MAX_SIMILAR + 1, num_features=input.numTerms)
else:
index = SparseMatrixSimilarity(input, num_best=MAX_SIMILAR + 1)
index.normalize = False
generateSimilar(corpus, index, method)
logging.info("finished running %s", program)
| 4,709 | Python | .py | 95 | 42.557895 | 115 | 0.658239 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,139 | dmlcorpus.py | piskvorky_gensim/gensim/examples/dmlcz/dmlcorpus.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""
Corpus for the DML-CZ project.
"""
import logging
import os.path
from gensim import interfaces, matutils
import dictionary # for constructing word->id mappings
logger = logging.getLogger('gensim.corpora.dmlcorpus')
class DmlConfig:
"""
DmlConfig contains parameters necessary for the abstraction of a 'corpus of
articles' (see the `DmlCorpus` class).
Articles may come from different sources (=different locations on disk/network,
different file formats etc.), so the main purpose of DmlConfig is to keep all
sources in one place.
    Apart from gluing sources together, DmlConfig also decides where to store
output files and which articles to accept for the corpus (= an additional filter
over the sources).
"""
def __init__(self, configId, resultDir, acceptLangs=None):
self.resultDir = resultDir # output files will be stored in this directory
self.configId = configId
self.sources = {} # all article sources; see sources.DmlSource class for an example of source
if acceptLangs is None: # which languages to accept
acceptLangs = {'any'} # if not specified, accept all languages (including unknown/unspecified)
self.acceptLangs = set(acceptLangs)
logger.info('initialized %s', self)
def resultFile(self, fname):
return os.path.join(self.resultDir, self.configId + '_' + fname)
def acceptArticle(self, metadata):
lang = metadata.get('language', 'unk')
if 'any' not in self.acceptLangs and lang not in self.acceptLangs:
return False
return True
def addSource(self, source):
sourceId = str(source)
assert sourceId not in self.sources, "source %s already present in the config!" % sourceId
self.sources[sourceId] = source
def __str__(self):
return "%s<id=%s, sources=[%s], acceptLangs=[%s]>" % (
            self.__class__.__name__, self.configId, ', '.join(self.sources.keys()), ', '.join(self.acceptLangs)
)
# endclass DmlConfig
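# Typical usage, mirroring gensim_build.py elsewhere in this directory (the paths are placeholders):
#
#     config = DmlConfig('dmlcz_eng', resultDir='/tmp/results', acceptLangs=['eng'])
#     config.addSource(sources.DmlCzSource('dmlcz', '/data/dmlcz/data/share'))
#     config.resultFile('wordids.txt')  # -> '/tmp/results/dmlcz_eng_wordids.txt'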
class DmlCorpus(interfaces.CorpusABC):
"""
DmlCorpus implements a collection of articles. It is initialized via a DmlConfig
object, which holds information about where to look for the articles and how
to process them.
Apart from being a regular corpus (bag-of-words iterable with a `len()` method),
DmlCorpus has methods for building a dictionary (mapping between words and
their ids).
"""
def __init__(self):
self.documents = []
self.config = None
self.dictionary = dictionary.Dictionary()
def __len__(self):
return len(self.documents)
def __iter__(self):
"""
The function that defines a corpus -- iterating over the corpus yields
bag-of-words vectors, one for each document.
A bag-of-words vector is simply a list of ``(tokenId, tokenCount)`` 2-tuples.
"""
for docNo, (sourceId, docUri) in enumerate(self.documents):
source = self.config.sources[sourceId]
contents = source.getContent(docUri)
words = [source.normalizeWord(word) for word in source.tokenize(contents)]
yield self.dictionary.doc2bow(words, allowUpdate=False)
def buildDictionary(self):
"""
Populate dictionary mapping and statistics.
This is done by sequentially retrieving the article fulltexts, splitting
them into tokens and converting tokens to their ids (creating new ids as
necessary).
"""
logger.info("creating dictionary from %i articles", len(self.documents))
self.dictionary = dictionary.Dictionary()
numPositions = 0
for docNo, (sourceId, docUri) in enumerate(self.documents):
if docNo % 1000 == 0:
logger.info("PROGRESS: at document #%i/%i (%s, %s)", docNo, len(self.documents), sourceId, docUri)
source = self.config.sources[sourceId]
contents = source.getContent(docUri)
words = [source.normalizeWord(word) for word in source.tokenize(contents)]
numPositions += len(words)
# convert to bag-of-words, but ignore the result -- here we only care about updating token ids
_ = self.dictionary.doc2bow(words, allowUpdate=True) # noqa:F841
logger.info(
"built %s from %i documents (total %i corpus positions)",
self.dictionary, len(self.documents), numPositions
)
def processConfig(self, config, shuffle=False):
"""
Parse the directories specified in the config, looking for suitable articles.
This updates the self.documents var, which keeps a list of (source id,
article uri) 2-tuples. Each tuple is a unique identifier of one article.
Note that some articles are ignored based on config settings (for example
if the article's language doesn't match any language specified in the
config etc.).
"""
self.config = config
self.documents = []
logger.info("processing config %s", config)
        for sourceId, source in config.sources.items():
logger.info("processing source '%s'", sourceId)
accepted = []
for articleUri in source.findArticles():
meta = source.getMeta(articleUri) # retrieve metadata (= dictionary of key->value)
if config.acceptArticle(meta): # do additional filtering on articles, based on the article's metadata
accepted.append((sourceId, articleUri))
logger.info("accepted %i articles for source '%s'", len(accepted), sourceId)
self.documents.extend(accepted)
if not self.documents:
logger.warning('no articles at all found from the config; something went wrong!')
if shuffle:
logger.info("shuffling %i documents for random order", len(self.documents))
import random
random.shuffle(self.documents)
logger.info("accepted total of %i articles for %s", len(self.documents), str(config))
def saveDictionary(self, fname):
logger.info("saving dictionary mapping to %s", fname)
fout = open(fname, 'w')
        for tokenId, token in self.dictionary.id2token.items():
fout.write("%i\t%s\n" % (tokenId, token))
fout.close()
@staticmethod
def loadDictionary(fname):
result = {}
for lineNo, line in enumerate(open(fname)):
pair = line[:-1].split('\t')
if len(pair) != 2:
continue
wordId, word = pair
result[int(wordId)] = word
return result
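    # The expected file format is the one written by saveDictionary above: one tab-separated
    # "<wordId>\t<word>" pair per line. For example, a (hypothetical) file containing the lines
    # "0\tfunctional" and "1\tanalysis" loads as {0: 'functional', 1: 'analysis'}.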
def saveDocuments(self, fname):
logger.info("saving documents mapping to %s", fname)
fout = open(fname, 'w')
for docNo, docId in enumerate(self.documents):
sourceId, docUri = docId
intId, pathId = docUri
fout.write("%i\t%s\n" % (docNo, repr(docId)))
fout.close()
def saveAsText(self):
"""
Store the corpus to disk, in a human-readable text format.
This actually saves multiple files:
        1. Pure document-term co-occurrence frequency counts, as a Matrix Market file.
2. Token to integer mapping, as a text file.
3. Document to document URI mapping, as a text file.
The exact filesystem paths and filenames are determined from the config.
"""
self.saveDictionary(self.config.resultFile('wordids.txt'))
self.saveDocuments(self.config.resultFile('docids.txt'))
matutils.MmWriter.writeCorpus(self.config.resultFile('bow.mm'), self)
def articleDir(self, docNo):
"""
Return absolute normalized path on filesystem to article no. `docNo`.
"""
sourceId, (_, outPath) = self.documents[docNo]
source = self.config.sources[sourceId]
return os.path.join(source.baseDir, outPath)
def getMeta(self, docNo):
"""
Return metadata for article no. `docNo`.
"""
sourceId, uri = self.documents[docNo]
source = self.config.sources[sourceId]
return source.getMeta(uri)
# endclass DmlCorpus
| 8,459 | Python | .py | 176 | 39.386364 | 118 | 0.652913 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,140 | gensim_build.py | piskvorky_gensim/gensim/examples/dmlcz/gensim_build.py |
#!/usr/bin/env python
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""
USAGE: %(program)s LANGUAGE
Process the repository, accepting articles in LANGUAGE (or 'any').
Store the word co-occurrence matrix and id mappings, which are needed for subsequent processing.
Example: ./gensim_build.py eng
"""
import logging
import sys
import os.path
from gensim.corpora import sources, dmlcorpus
PREFIX = 'dmlcz'
AT_HOME = False
if AT_HOME:
SOURCE_LIST = [
sources.DmlCzSource('dmlcz', '/Users/kofola/workspace/dml/data/dmlcz/'),
sources.DmlSource('numdam', '/Users/kofola/workspace/dml/data/numdam/'),
sources.ArxmlivSource('arxmliv', '/Users/kofola/workspace/dml/data/arxmliv/'),
]
RESULT_DIR = '/Users/kofola/workspace/dml/data/results'
else:
SOURCE_LIST = [
sources.DmlCzSource('dmlcz', '/data/dmlcz/data/share'),
sources.DmlSource('numdam', '/data/dmlcz/data/numdam'),
sources.ArxmlivSource('arxmliv', '/data/dmlcz/data/arxmliv'),
]
RESULT_DIR = '/data/dmlcz/xrehurek/results'
def buildDmlCorpus(config):
dml = dmlcorpus.DmlCorpus()
dml.processConfig(config, shuffle=True)
dml.buildDictionary()
dml.dictionary.filterExtremes(noBelow=5, noAbove=0.3) # ignore too (in)frequent words
dml.save(config.resultFile('.pkl'))
dml.saveAsText() # save id mappings and documents as text data (matrix market format)
return dml
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
logging.info("running %s", ' '.join(sys.argv))
program = os.path.basename(sys.argv[0])
# check and process input arguments
if len(sys.argv) < 2:
print(globals()['__doc__'] % locals())
sys.exit(1)
language = sys.argv[1]
# construct the config, which holds information about sources, data file filenames etc.
config = dmlcorpus.DmlConfig('%s_%s' % (PREFIX, language), resultDir=RESULT_DIR, acceptLangs=[language])
for source in SOURCE_LIST:
config.addSource(source)
buildDmlCorpus(config)
logging.info("finished running %s", program)
| 2,297 | Python | .py | 54 | 37.796296 | 108 | 0.70207 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,141 | aggregation.py | piskvorky_gensim/gensim/topic_coherence/aggregation.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""This module contains functions to perform aggregation on a list of values obtained from the confirmation measure."""
import logging
import numpy as np
logger = logging.getLogger(__name__)
def arithmetic_mean(confirmed_measures):
"""
Perform the arithmetic mean aggregation on the output obtained from
the confirmation measure module.
Parameters
----------
confirmed_measures : list of float
List of calculated confirmation measure on each set in the segmented topics.
Returns
-------
`numpy.float`
Arithmetic mean of all the values contained in confirmation measures.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.topic_coherence.aggregation import arithmetic_mean
>>> arithmetic_mean([1.1, 2.2, 3.3, 4.4])
2.75
"""
return np.mean(confirmed_measures)
| 1,074
|
Python
|
.py
| 29
| 32.37931
| 119
| 0.697585
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,142
|
indirect_confirmation_measure.py
|
piskvorky_gensim/gensim/topic_coherence/indirect_confirmation_measure.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
r"""This module contains functions to compute confirmation on a pair of words or word subsets.
Notes
-----
The advantage of indirect confirmation measure is that it computes similarity of words in :math:`W'` and
:math:`W^{*}` with respect to direct confirmations to all words. E.g. suppose `x` and `z` are both competing
brands of cars, which semantically support each other. However, both brands are seldom mentioned
together in documents in the reference corpus, yet their confirmations to other words like “road”
or “speed” correlate strongly. This would be reflected by an indirect confirmation measure.
Thus, indirect confirmation measures may capture semantic support that direct measures would miss.
The formula used to compute indirect confirmation measure is
.. math::
\widetilde{m}_{sim(m, \gamma)}(W', W^{*}) = s_{sim}(\vec{v}^{\,}_{m,\gamma}(W'), \vec{v}^{\,}_{m,\gamma}(W^{*}))
where :math:`s_{sim}` can be cosine, dice or jaccard similarity and
.. math::
\vec{v}^{\,}_{m,\gamma}(W') = \Bigg \{{\sum_{w_{i} \in W'}^{ } m(w_{i}, w_{j})^{\gamma}}\Bigg \}_{j = 1,...,|W|}
"""
import itertools
import logging
import numpy as np
import scipy.sparse as sps
from gensim.topic_coherence.direct_confirmation_measure import aggregate_segment_sims, log_ratio_measure
logger = logging.getLogger(__name__)
def word2vec_similarity(segmented_topics, accumulator, with_std=False, with_support=False):
"""For each topic segmentation, compute average cosine similarity using a
:class:`~gensim.topic_coherence.text_analysis.WordVectorsAccumulator`.
Parameters
----------
segmented_topics : list of lists of (int, `numpy.ndarray`)
Output from the :func:`~gensim.topic_coherence.segmentation.s_one_set`.
accumulator : :class:`~gensim.topic_coherence.text_analysis.WordVectorsAccumulator` or
:class:`~gensim.topic_coherence.text_analysis.InvertedIndexAccumulator`
Word occurrence accumulator.
with_std : bool, optional
True to also include standard deviation across topic segment sets
in addition to the mean coherence for each topic.
with_support : bool, optional
True to also include support across topic segments. The support is defined as
        the number of pairwise similarity comparisons that were used to compute the overall topic coherence.
Returns
-------
    list of (float[, float[, int]])
        Cosine word2vec similarities per topic (with std/support if `with_std`, `with_support`).
Examples
--------
.. sourcecode:: pycon
>>> import numpy as np
>>> from gensim.corpora.dictionary import Dictionary
>>> from gensim.topic_coherence import indirect_confirmation_measure
>>> from gensim.topic_coherence import text_analysis
>>>
>>> # create segmentation
>>> segmentation = [[(1, np.array([1, 2])), (2, np.array([1, 2]))]]
>>>
>>> # create accumulator
>>> dictionary = Dictionary()
>>> dictionary.id2token = {1: 'fake', 2: 'tokens'}
>>> accumulator = text_analysis.WordVectorsAccumulator({1, 2}, dictionary)
>>> _ = accumulator.accumulate([['fake', 'tokens'], ['tokens', 'fake']], 5)
>>>
>>> # should be (0.726752426218 0.00695475919227)
>>> mean, std = indirect_confirmation_measure.word2vec_similarity(segmentation, accumulator, with_std=True)[0]
"""
topic_coherences = []
total_oov = 0
for topic_index, topic_segments in enumerate(segmented_topics):
segment_sims = []
num_oov = 0
for w_prime, w_star in topic_segments:
if not hasattr(w_prime, '__iter__'):
w_prime = [w_prime]
if not hasattr(w_star, '__iter__'):
w_star = [w_star]
try:
segment_sims.append(accumulator.ids_similarity(w_prime, w_star))
except ZeroDivisionError:
num_oov += 1
if num_oov > 0:
total_oov += 1
logger.warning(
"%d terms for topic %d are not in word2vec model vocabulary",
num_oov, topic_index)
topic_coherences.append(aggregate_segment_sims(segment_sims, with_std, with_support))
if total_oov > 0:
logger.warning("%d terms for are not in word2vec model vocabulary", total_oov)
return topic_coherences
def cosine_similarity(segmented_topics, accumulator, topics, measure='nlr',
gamma=1, with_std=False, with_support=False):
"""Calculate the indirect cosine measure.
Parameters
----------
segmented_topics: list of lists of (int, `numpy.ndarray`)
Output from the segmentation module of the segmented topics.
accumulator: :class:`~gensim.topic_coherence.text_analysis.InvertedIndexAccumulator`
        Output from the probability_estimation module.
    topics: list of numpy.array
        Topics obtained from the trained topic model.
measure : str, optional
Direct confirmation measure to be used. Supported values are "nlr" (normalized log ratio).
gamma: float, optional
Gamma value for computing :math:`W'` and :math:`W^{*}` vectors.
with_std : bool
True to also include standard deviation across topic segment sets in addition to the mean coherence
for each topic; default is False.
with_support : bool
True to also include support across topic segments. The support is defined as the number of pairwise similarity
        comparisons that were used to compute the overall topic coherence.
Returns
-------
list
List of indirect cosine similarity measure for each topic.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora.dictionary import Dictionary
>>> from gensim.topic_coherence import indirect_confirmation_measure, text_analysis
>>> import numpy as np
>>>
>>> # create accumulator
>>> dictionary = Dictionary()
>>> dictionary.id2token = {1: 'fake', 2: 'tokens'}
>>> accumulator = text_analysis.InvertedIndexAccumulator({1, 2}, dictionary)
>>> accumulator._inverted_index = {0: {2, 3, 4}, 1: {3, 5}}
>>> accumulator._num_docs = 5
>>>
>>> # create topics
>>> topics = [np.array([1, 2])]
>>>
>>> # create segmentation
>>> segmentation = [[(1, np.array([1, 2])), (2, np.array([1, 2]))]]
>>> obtained = indirect_confirmation_measure.cosine_similarity(segmentation, accumulator, topics, 'nlr', 1)
>>> print(obtained[0])
0.623018926945
"""
context_vectors = ContextVectorComputer(measure, topics, accumulator, gamma)
topic_coherences = []
for topic_words, topic_segments in zip(topics, segmented_topics):
topic_words = tuple(topic_words) # because tuples are hashable
segment_sims = np.zeros(len(topic_segments))
for i, (w_prime, w_star) in enumerate(topic_segments):
w_prime_cv = context_vectors[w_prime, topic_words]
w_star_cv = context_vectors[w_star, topic_words]
segment_sims[i] = _cossim(w_prime_cv, w_star_cv)
topic_coherences.append(aggregate_segment_sims(segment_sims, with_std, with_support))
return topic_coherences
class ContextVectorComputer:
"""Lazily compute context vectors for topic segments.
Parameters
----------
measure: str
Confirmation measure.
topics: list of numpy.array
Topics.
accumulator : :class:`~gensim.topic_coherence.text_analysis.WordVectorsAccumulator` or
:class:`~gensim.topic_coherence.text_analysis.InvertedIndexAccumulator`
Word occurrence accumulator from probability_estimation.
gamma: float
Value for computing vectors.
Attributes
----------
sim_cache: dict
Cache similarities between tokens (pairs of word ids), e.g. (1, 2).
context_vector_cache: dict
Mapping from (segment, topic_words) --> context_vector.
Example
-------
.. sourcecode:: pycon
>>> from gensim.corpora.dictionary import Dictionary
>>> from gensim.topic_coherence import indirect_confirmation_measure, text_analysis
>>> import numpy as np
>>>
>>> # create measure, topics
>>> measure = 'nlr'
>>> topics = [np.array([1, 2])]
>>>
>>> # create accumulator
>>> dictionary = Dictionary()
>>> dictionary.id2token = {1: 'fake', 2: 'tokens'}
>>> accumulator = text_analysis.WordVectorsAccumulator({1, 2}, dictionary)
>>> _ = accumulator.accumulate([['fake', 'tokens'], ['tokens', 'fake']], 5)
>>> cont_vect_comp = indirect_confirmation_measure.ContextVectorComputer(measure, topics, accumulator, 1)
>>> cont_vect_comp.mapping
{1: 0, 2: 1}
>>> cont_vect_comp.vocab_size
2
"""
def __init__(self, measure, topics, accumulator, gamma):
if measure == 'nlr':
self.similarity = _pair_npmi
else:
raise ValueError(
"The direct confirmation measure you entered is not currently supported.")
self.mapping = _map_to_contiguous(topics)
self.vocab_size = len(self.mapping)
self.accumulator = accumulator
self.gamma = gamma
self.sim_cache = {}
self.context_vector_cache = {}
def __getitem__(self, idx):
return self.compute_context_vector(*idx)
def compute_context_vector(self, segment_word_ids, topic_word_ids):
"""Check if (segment_word_ids, topic_word_ids) context vector has been cached.
Parameters
----------
segment_word_ids: list
Ids of words in segment.
topic_word_ids: list
Ids of words in topic.
Returns
-------
csr_matrix :class:`~scipy.sparse.csr`
If context vector has been cached, then return corresponding context vector,
else compute, cache, and return.
"""
key = _key_for_segment(segment_word_ids, topic_word_ids)
context_vector = self.context_vector_cache.get(key, None)
if context_vector is None:
context_vector = self._make_seg(segment_word_ids, topic_word_ids)
self.context_vector_cache[key] = context_vector
return context_vector
def _make_seg(self, segment_word_ids, topic_word_ids):
"""Return context vectors for segmentation (Internal helper function).
Parameters
----------
segment_word_ids : iterable or int
Ids of words in segment.
topic_word_ids : list
Ids of words in topic.
Returns
-------
csr_matrix :class:`~scipy.sparse.csr`
Matrix in Compressed Sparse Row format
"""
context_vector = sps.lil_matrix((self.vocab_size, 1))
if not hasattr(segment_word_ids, '__iter__'):
segment_word_ids = (segment_word_ids,)
for w_j in topic_word_ids:
idx = (self.mapping[w_j], 0)
for pair in (tuple(sorted((w_i, w_j))) for w_i in segment_word_ids):
if pair not in self.sim_cache:
self.sim_cache[pair] = self.similarity(pair, self.accumulator)
context_vector[idx] += self.sim_cache[pair] ** self.gamma
return context_vector.tocsr()
def _pair_npmi(pair, accumulator):
"""Compute normalized pairwise mutual information (**NPMI**) between a pair of words.
Parameters
----------
pair : (int, int)
The pair of words (word_id1, word_id2).
accumulator : :class:`~gensim.topic_coherence.text_analysis.InvertedIndexAccumulator`
Word occurrence accumulator from probability_estimation.
Return
------
float
NPMI between a pair of words.
"""
return log_ratio_measure([[pair]], accumulator, True)[0]
def _cossim(cv1, cv2):
return cv1.T.dot(cv2)[0, 0] / (_magnitude(cv1) * _magnitude(cv2))
def _magnitude(sparse_vec):
return np.sqrt(np.sum(sparse_vec.data ** 2))
def _map_to_contiguous(ids_iterable):
uniq_ids = {}
n = 0
for id_ in itertools.chain.from_iterable(ids_iterable):
if id_ not in uniq_ids:
uniq_ids[id_] = n
n += 1
return uniq_ids
def _key_for_segment(segment, topic_words):
"""A segment may have a single number of an iterable of them."""
segment_key = tuple(segment) if hasattr(segment, '__iter__') else segment
return segment_key, topic_words
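# A minimal worked sketch of the context-vector formula from the module docstring,
# using made-up confirmation values m(w_i, w_j) purely for illustration. Runs only
# when this file is executed directly.
if __name__ == '__main__':
    gamma = 1
    topic_words = [0, 1, 2]            # W
    w_prime, w_star = [0], [0, 1, 2]   # W' and W*, as produced by s_one_set segmentation
    toy_confirmations = {(0, 0): 1.0, (0, 1): 0.5, (0, 2): 0.2,
                         (1, 1): 1.0, (1, 2): 0.4, (2, 2): 1.0}

    def confirmation(w_i, w_j):
        return toy_confirmations[tuple(sorted((w_i, w_j)))]

    def context_vector(word_set):
        # v_j = sum over w_i in word_set of m(w_i, w_j) ** gamma, one entry per w_j in W
        return np.array([sum(confirmation(w_i, w_j) ** gamma for w_i in word_set) for w_j in topic_words])

    v1, v2 = context_vector(w_prime), context_vector(w_star)
    print(v1.dot(v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))  # indirect cosine confirmation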
| 12,786
|
Python
|
.py
| 275
| 38.545455
| 119
| 0.639778
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,143
|
direct_confirmation_measure.py
|
piskvorky_gensim/gensim/topic_coherence/direct_confirmation_measure.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""This module contains functions to compute direct confirmation on a pair of words or word subsets."""
import logging
import numpy as np
logger = logging.getLogger(__name__)
# Should be small. Value as suggested in paper http://svn.aksw.org/papers/2015/WSDM_Topic_Evaluation/public.pdf
EPSILON = 1e-12
def log_conditional_probability(segmented_topics, accumulator, with_std=False, with_support=False):
r"""Calculate the log-conditional-probability measure which is used by coherence measures such as `U_mass`.
This is defined as :math:`m_{lc}(S_i) = log \frac{P(W', W^{*}) + \epsilon}{P(W^{*})}`.
Parameters
----------
segmented_topics : list of lists of (int, int)
Output from the :func:`~gensim.topic_coherence.segmentation.s_one_pre`,
:func:`~gensim.topic_coherence.segmentation.s_one_one`.
accumulator : :class:`~gensim.topic_coherence.text_analysis.InvertedIndexAccumulator`
Word occurrence accumulator from :mod:`gensim.topic_coherence.probability_estimation`.
with_std : bool, optional
True to also include standard deviation across topic segment sets in addition to the mean coherence
for each topic.
with_support : bool, optional
True to also include support across topic segments. The support is defined as the number of pairwise
        similarity comparisons that were used to compute the overall topic coherence.
Returns
-------
list of float
Log conditional probabilities measurement for each topic.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.topic_coherence import direct_confirmation_measure, text_analysis
>>> from collections import namedtuple
>>>
>>> # Create dictionary
>>> id2token = {1: 'test', 2: 'doc'}
>>> token2id = {v: k for k, v in id2token.items()}
>>> dictionary = namedtuple('Dictionary', 'token2id, id2token')(token2id, id2token)
>>>
>>> # Initialize segmented topics and accumulator
>>> segmentation = [[(1, 2)]]
>>>
>>> accumulator = text_analysis.InvertedIndexAccumulator({1, 2}, dictionary)
>>> accumulator._inverted_index = {0: {2, 3, 4}, 1: {3, 5}}
>>> accumulator._num_docs = 5
>>>
>>> # result should be ~ ln(1 / 2) = -0.693147181
>>> result = direct_confirmation_measure.log_conditional_probability(segmentation, accumulator)[0]
"""
topic_coherences = []
num_docs = float(accumulator.num_docs)
for s_i in segmented_topics:
segment_sims = []
for w_prime, w_star in s_i:
try:
w_star_count = accumulator[w_star]
co_occur_count = accumulator[w_prime, w_star]
m_lc_i = np.log(((co_occur_count / num_docs) + EPSILON) / (w_star_count / num_docs))
except KeyError:
m_lc_i = 0.0
except ZeroDivisionError:
# if w_star_count==0, it will throw exception of divided by zero
m_lc_i = 0.0
segment_sims.append(m_lc_i)
topic_coherences.append(aggregate_segment_sims(segment_sims, with_std, with_support))
return topic_coherences
def aggregate_segment_sims(segment_sims, with_std, with_support):
"""Compute various statistics from the segment similarities generated via set pairwise comparisons
of top-N word lists for a single topic.
Parameters
----------
segment_sims : iterable of float
Similarity values to aggregate.
with_std : bool
Set to True to include standard deviation.
with_support : bool
Set to True to include number of elements in `segment_sims` as a statistic in the results returned.
Returns
-------
(float[, float[, int]])
Tuple with (mean[, std[, support]]).
Examples
---------
.. sourcecode:: pycon
>>> from gensim.topic_coherence import direct_confirmation_measure
>>>
>>> segment_sims = [0.2, 0.5, 1., 0.05]
>>> direct_confirmation_measure.aggregate_segment_sims(segment_sims, True, True)
(0.4375, 0.36293077852394939, 4)
>>> direct_confirmation_measure.aggregate_segment_sims(segment_sims, False, False)
0.4375
"""
mean = np.mean(segment_sims)
stats = [mean]
if with_std:
stats.append(np.std(segment_sims))
if with_support:
stats.append(len(segment_sims))
return stats[0] if len(stats) == 1 else tuple(stats)
def log_ratio_measure(segmented_topics, accumulator, normalize=False, with_std=False, with_support=False):
r"""Compute log ratio measure for `segment_topics`.
Parameters
----------
segmented_topics : list of lists of (int, int)
Output from the :func:`~gensim.topic_coherence.segmentation.s_one_pre`,
:func:`~gensim.topic_coherence.segmentation.s_one_one`.
accumulator : :class:`~gensim.topic_coherence.text_analysis.InvertedIndexAccumulator`
Word occurrence accumulator from :mod:`gensim.topic_coherence.probability_estimation`.
normalize : bool, optional
Details in the "Notes" section.
with_std : bool, optional
True to also include standard deviation across topic segment sets in addition to the mean coherence
for each topic.
with_support : bool, optional
True to also include support across topic segments. The support is defined as the number of pairwise
        similarity comparisons that were used to compute the overall topic coherence.
Notes
-----
If `normalize=False`:
Calculate the log-ratio-measure, popularly known as **PMI** which is used by coherence measures such as `c_v`.
This is defined as :math:`m_{lr}(S_i) = log \frac{P(W', W^{*}) + \epsilon}{P(W') * P(W^{*})}`
If `normalize=True`:
        Calculate the normalized-log-ratio-measure, popularly known as **NPMI**,
which is used by coherence measures such as `c_v`.
This is defined as :math:`m_{nlr}(S_i) = \frac{m_{lr}(S_i)}{-log(P(W', W^{*}) + \epsilon)}`
Returns
-------
list of float
Log ratio measurements for each topic.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.topic_coherence import direct_confirmation_measure, text_analysis
>>> from collections import namedtuple
>>>
>>> # Create dictionary
>>> id2token = {1: 'test', 2: 'doc'}
>>> token2id = {v: k for k, v in id2token.items()}
>>> dictionary = namedtuple('Dictionary', 'token2id, id2token')(token2id, id2token)
>>>
>>> # Initialize segmented topics and accumulator
>>> segmentation = [[(1, 2)]]
>>>
>>> accumulator = text_analysis.InvertedIndexAccumulator({1, 2}, dictionary)
>>> accumulator._inverted_index = {0: {2, 3, 4}, 1: {3, 5}}
>>> accumulator._num_docs = 5
>>>
>>> # result should be ~ ln{(1 / 5) / [(3 / 5) * (2 / 5)]} = -0.182321557
>>> result = direct_confirmation_measure.log_ratio_measure(segmentation, accumulator)[0]
"""
topic_coherences = []
num_docs = float(accumulator.num_docs)
for s_i in segmented_topics:
segment_sims = []
for w_prime, w_star in s_i:
w_prime_count = accumulator[w_prime]
w_star_count = accumulator[w_star]
co_occur_count = accumulator[w_prime, w_star]
if normalize:
# For normalized log ratio measure
numerator = log_ratio_measure([[(w_prime, w_star)]], accumulator)[0]
co_doc_prob = co_occur_count / num_docs
m_lr_i = numerator / (-np.log(co_doc_prob + EPSILON))
else:
# For log ratio measure without normalization
numerator = (co_occur_count / num_docs) + EPSILON
denominator = (w_prime_count / num_docs) * (w_star_count / num_docs)
m_lr_i = np.log(numerator / denominator)
segment_sims.append(m_lr_i)
topic_coherences.append(aggregate_segment_sims(segment_sims, with_std, with_support))
return topic_coherences
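# A small worked sketch of the two measures above, plugging in the counts from the
# docstring examples (5 documents, marginal counts 3 and 2, co-occurrence count 1)
# and reusing the module's EPSILON. Runs only when this file is executed directly.
if __name__ == '__main__':
    num_docs = 5.0
    w_prime_count, w_star_count, co_occur_count = 3, 2, 1

    m_lc = np.log((co_occur_count / num_docs + EPSILON) / (w_star_count / num_docs))
    m_lr = np.log(
        (co_occur_count / num_docs + EPSILON)
        / ((w_prime_count / num_docs) * (w_star_count / num_docs)))
    m_nlr = m_lr / -np.log(co_occur_count / num_docs + EPSILON)

    print(m_lc)   # ~ ln(1/2) = -0.693, as in log_conditional_probability
    print(m_lr)   # ~ ln(5/6) = -0.182, as in log_ratio_measure(normalize=False)
    print(m_nlr)  # NPMI variant, as in log_ratio_measure(normalize=True)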
| 8,366
|
Python
|
.py
| 174
| 40.045977
| 118
| 0.634625
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,144
|
text_analysis.py
|
piskvorky_gensim/gensim/topic_coherence/text_analysis.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""This module contains classes for analyzing the texts of a corpus to accumulate
statistical information about word occurrences."""
import itertools
import logging
import multiprocessing as mp
import sys
from collections import Counter
import numpy as np
import scipy.sparse as sps
from gensim import utils
from gensim.models.word2vec import Word2Vec
logger = logging.getLogger(__name__)
def _ids_to_words(ids, dictionary):
"""Convert an iterable of ids to their corresponding words using a dictionary.
Abstract away the differences between the HashDictionary and the standard one.
Parameters
----------
ids: dict
Dictionary of ids and their words.
dictionary: :class:`~gensim.corpora.dictionary.Dictionary`
Input gensim dictionary
Returns
-------
set
Corresponding words.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora.dictionary import Dictionary
>>> from gensim.topic_coherence import text_analysis
>>>
>>> dictionary = Dictionary()
>>> ids = {1: 'fake', 4: 'cats'}
>>> dictionary.id2token = {1: 'fake', 2: 'tokens', 3: 'rabbids', 4: 'cats'}
>>>
>>> text_analysis._ids_to_words(ids, dictionary)
set(['cats', 'fake'])
"""
if not dictionary.id2token: # may not be initialized in the standard gensim.corpora.Dictionary
setattr(dictionary, 'id2token', {v: k for k, v in dictionary.token2id.items()})
top_words = set()
for word_id in ids:
word = dictionary.id2token[word_id]
if isinstance(word, set):
top_words = top_words.union(word)
else:
top_words.add(word)
return top_words
class BaseAnalyzer:
"""Base class for corpus and text analyzers.
Attributes
----------
relevant_ids : dict
Mapping
_vocab_size : int
Size of vocabulary.
id2contiguous : dict
Mapping word_id -> number.
log_every : int
Interval for logging.
_num_docs : int
Number of documents.
"""
def __init__(self, relevant_ids):
"""
Parameters
----------
relevant_ids : dict
Mapping
Examples
--------
.. sourcecode:: pycon
>>> from gensim.topic_coherence import text_analysis
>>> ids = {1: 'fake', 4: 'cats'}
>>> base = text_analysis.BaseAnalyzer(ids)
>>> # should return {1: 'fake', 4: 'cats'} 2 {1: 0, 4: 1} 1000 0
>>> print(base.relevant_ids, base._vocab_size, base.id2contiguous, base.log_every, base._num_docs)
{1: 'fake', 4: 'cats'} 2 {1: 0, 4: 1} 1000 0
"""
self.relevant_ids = relevant_ids
self._vocab_size = len(self.relevant_ids)
self.id2contiguous = {word_id: n for n, word_id in enumerate(self.relevant_ids)}
self.log_every = 1000
self._num_docs = 0
@property
def num_docs(self):
return self._num_docs
@num_docs.setter
def num_docs(self, num):
self._num_docs = num
if self._num_docs % self.log_every == 0:
logger.info(
"%s accumulated stats from %d documents",
self.__class__.__name__, self._num_docs)
def analyze_text(self, text, doc_num=None):
raise NotImplementedError("Base classes should implement analyze_text.")
def __getitem__(self, word_or_words):
if isinstance(word_or_words, str) or not hasattr(word_or_words, '__iter__'):
return self.get_occurrences(word_or_words)
else:
return self.get_co_occurrences(*word_or_words)
def get_occurrences(self, word_id):
"""Return number of docs the word occurs in, once `accumulate` has been called."""
return self._get_occurrences(self.id2contiguous[word_id])
def _get_occurrences(self, word_id):
raise NotImplementedError("Base classes should implement occurrences")
def get_co_occurrences(self, word_id1, word_id2):
"""Return number of docs the words co-occur in, once `accumulate` has been called."""
return self._get_co_occurrences(self.id2contiguous[word_id1], self.id2contiguous[word_id2])
def _get_co_occurrences(self, word_id1, word_id2):
raise NotImplementedError("Base classes should implement co_occurrences")
class UsesDictionary(BaseAnalyzer):
"""A BaseAnalyzer that uses a Dictionary, hence can translate tokens to counts.
The standard BaseAnalyzer can only deal with token ids since it doesn't have the token2id
mapping.
Attributes
----------
relevant_words : set
Set of words that occurrences should be accumulated for.
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
Dictionary based on text
token2id : dict
Mapping from :class:`~gensim.corpora.dictionary.Dictionary`
"""
def __init__(self, relevant_ids, dictionary):
"""
Parameters
----------
relevant_ids : dict
Mapping
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
Dictionary based on text
Examples
--------
.. sourcecode:: pycon
>>> from gensim.topic_coherence import text_analysis
>>> from gensim.corpora.dictionary import Dictionary
>>>
>>> ids = {1: 'foo', 2: 'bar'}
>>> dictionary = Dictionary([['foo', 'bar', 'baz'], ['foo', 'bar', 'bar', 'baz']])
>>> udict = text_analysis.UsesDictionary(ids, dictionary)
>>>
>>> print(udict.relevant_words)
set([u'foo', u'baz'])
"""
super(UsesDictionary, self).__init__(relevant_ids)
self.relevant_words = _ids_to_words(self.relevant_ids, dictionary)
self.dictionary = dictionary
self.token2id = dictionary.token2id
def get_occurrences(self, word):
"""Return number of docs the word occurs in, once `accumulate` has been called."""
try:
word_id = self.token2id[word]
except KeyError:
word_id = word
return self._get_occurrences(self.id2contiguous[word_id])
def _word2_contiguous_id(self, word):
try:
word_id = self.token2id[word]
except KeyError:
word_id = word
return self.id2contiguous[word_id]
def get_co_occurrences(self, word1, word2):
"""Return number of docs the words co-occur in, once `accumulate` has been called."""
word_id1 = self._word2_contiguous_id(word1)
word_id2 = self._word2_contiguous_id(word2)
return self._get_co_occurrences(word_id1, word_id2)
class InvertedIndexBased(BaseAnalyzer):
"""Analyzer that builds up an inverted index to accumulate stats."""
def __init__(self, *args):
"""
Parameters
----------
args : dict
Look at :class:`~gensim.topic_coherence.text_analysis.BaseAnalyzer`
Examples
--------
.. sourcecode:: pycon
>>> from gensim.topic_coherence import text_analysis
>>>
>>> ids = {1: 'fake', 4: 'cats'}
>>> ininb = text_analysis.InvertedIndexBased(ids)
>>>
>>> print(ininb._inverted_index)
[set([]) set([])]
"""
super(InvertedIndexBased, self).__init__(*args)
self._inverted_index = np.array([set() for _ in range(self._vocab_size)])
def _get_occurrences(self, word_id):
return len(self._inverted_index[word_id])
def _get_co_occurrences(self, word_id1, word_id2):
s1 = self._inverted_index[word_id1]
s2 = self._inverted_index[word_id2]
return len(s1.intersection(s2))
def index_to_dict(self):
contiguous2id = {n: word_id for word_id, n in self.id2contiguous.items()}
return {contiguous2id[n]: doc_id_set for n, doc_id_set in enumerate(self._inverted_index)}
class CorpusAccumulator(InvertedIndexBased):
"""Gather word occurrence stats from a corpus by iterating over its BoW representation."""
def analyze_text(self, text, doc_num=None):
"""Build an inverted index from a sequence of corpus texts."""
doc_words = frozenset(x[0] for x in text)
top_ids_in_doc = self.relevant_ids.intersection(doc_words)
for word_id in top_ids_in_doc:
self._inverted_index[self.id2contiguous[word_id]].add(self._num_docs)
def accumulate(self, corpus):
for document in corpus:
self.analyze_text(document)
self.num_docs += 1
return self
class WindowedTextsAnalyzer(UsesDictionary):
"""Gather some stats about relevant terms of a corpus by iterating over windows of texts."""
def __init__(self, relevant_ids, dictionary):
"""
Parameters
----------
relevant_ids : set of int
Relevant id
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
Dictionary instance with mappings for the relevant_ids.
"""
super(WindowedTextsAnalyzer, self).__init__(relevant_ids, dictionary)
self._none_token = self._vocab_size # see _iter_texts for use of none token
def accumulate(self, texts, window_size):
relevant_texts = self._iter_texts(texts)
windows = utils.iter_windows(
relevant_texts, window_size, ignore_below_size=False, include_doc_num=True)
for doc_num, virtual_document in windows:
if len(virtual_document) > 0:
self.analyze_text(virtual_document, doc_num)
self.num_docs += 1
return self
def _iter_texts(self, texts):
dtype = np.uint16 if np.iinfo(np.uint16).max >= self._vocab_size else np.uint32
for text in texts:
ids = (
self.id2contiguous[self.token2id[w]] if w in self.relevant_words else self._none_token
for w in text
)
yield np.fromiter(ids, dtype=dtype, count=len(text))
class InvertedIndexAccumulator(WindowedTextsAnalyzer, InvertedIndexBased):
"""Build an inverted index from a sequence of corpus texts."""
def analyze_text(self, window, doc_num=None):
for word_id in window:
if word_id is not self._none_token:
self._inverted_index[word_id].add(self._num_docs)
class WordOccurrenceAccumulator(WindowedTextsAnalyzer):
"""Accumulate word occurrences and co-occurrences from a sequence of corpus texts."""
def __init__(self, *args):
super(WordOccurrenceAccumulator, self).__init__(*args)
self._occurrences = np.zeros(self._vocab_size, dtype='uint32')
self._co_occurrences = sps.lil_matrix((self._vocab_size, self._vocab_size), dtype='uint32')
self._uniq_words = np.zeros((self._vocab_size + 1,), dtype=bool) # add 1 for none token
self._counter = Counter()
def __str__(self):
return self.__class__.__name__
def accumulate(self, texts, window_size):
self._co_occurrences = self._co_occurrences.tolil()
self.partial_accumulate(texts, window_size)
self._symmetrize()
return self
def partial_accumulate(self, texts, window_size):
"""Meant to be called several times to accumulate partial results.
Notes
-----
The final accumulation should be performed with the `accumulate` method as opposed to this one.
This method does not ensure the co-occurrence matrix is in lil format and does not
symmetrize it after accumulation.
"""
self._current_doc_num = -1
self._token_at_edge = None
self._counter.clear()
super(WordOccurrenceAccumulator, self).accumulate(texts, window_size)
for combo, count in self._counter.items():
self._co_occurrences[combo] += count
return self
def analyze_text(self, window, doc_num=None):
self._slide_window(window, doc_num)
mask = self._uniq_words[:-1] # to exclude none token
if mask.any():
self._occurrences[mask] += 1
self._counter.update(itertools.combinations(np.nonzero(mask)[0], 2))
def _slide_window(self, window, doc_num):
if doc_num != self._current_doc_num:
self._uniq_words[:] = False
self._uniq_words[np.unique(window)] = True
self._current_doc_num = doc_num
else:
self._uniq_words[self._token_at_edge] = False
self._uniq_words[window[-1]] = True
self._token_at_edge = window[0]
def _symmetrize(self):
"""Word pairs may have been encountered in (i, j) and (j, i) order.
Notes
-----
Rather than enforcing a particular ordering during the update process,
we choose to symmetrize the co-occurrence matrix after accumulation has completed.
"""
co_occ = self._co_occurrences
co_occ.setdiag(self._occurrences) # diagonal should be equal to occurrence counts
self._co_occurrences = \
co_occ + co_occ.T - sps.diags(co_occ.diagonal(), offsets=0, dtype='uint32')
def _get_occurrences(self, word_id):
return self._occurrences[word_id]
def _get_co_occurrences(self, word_id1, word_id2):
return self._co_occurrences[word_id1, word_id2]
def merge(self, other):
self._occurrences += other._occurrences
self._co_occurrences += other._co_occurrences
self._num_docs += other._num_docs
class PatchedWordOccurrenceAccumulator(WordOccurrenceAccumulator):
"""Monkey patched for multiprocessing worker usage, to move some of the logic to the master process."""
def _iter_texts(self, texts):
return texts # master process will handle this
class ParallelWordOccurrenceAccumulator(WindowedTextsAnalyzer):
"""Accumulate word occurrences in parallel.
Attributes
----------
processes : int
Number of processes to use; must be at least two.
args :
Should include `relevant_ids` and `dictionary` (see :class:`~UsesDictionary.__init__`).
kwargs :
Can include `batch_size`, which is the number of docs to send to a worker at a time.
If not included, it defaults to 64.
"""
def __init__(self, processes, *args, **kwargs):
super(ParallelWordOccurrenceAccumulator, self).__init__(*args)
if processes < 2:
raise ValueError(
"Must have at least 2 processes to run in parallel; got %d" % processes)
self.processes = processes
self.batch_size = kwargs.get('batch_size', 64)
def __str__(self):
return "%s<processes=%s, batch_size=%s>" % (
self.__class__.__name__, self.processes, self.batch_size)
def accumulate(self, texts, window_size):
workers, input_q, output_q = self.start_workers(window_size)
try:
self.queue_all_texts(input_q, texts, window_size)
interrupted = False
except KeyboardInterrupt:
logger.warn("stats accumulation interrupted; <= %d documents processed", self._num_docs)
interrupted = True
accumulators = self.terminate_workers(input_q, output_q, workers, interrupted)
return self.merge_accumulators(accumulators)
def start_workers(self, window_size):
"""Set up an input and output queue and start processes for each worker.
Notes
-----
The input queue is used to transmit batches of documents to the workers.
The output queue is used by workers to transmit the WordOccurrenceAccumulator instances.
Parameters
----------
window_size : int
Returns
-------
        (list of AccumulatingWorker, multiprocessing.Queue, multiprocessing.Queue)
            Tuple of (list of workers, input queue, output queue).
"""
input_q = mp.Queue(maxsize=self.processes)
output_q = mp.Queue()
workers = []
for _ in range(self.processes):
accumulator = PatchedWordOccurrenceAccumulator(self.relevant_ids, self.dictionary)
worker = AccumulatingWorker(input_q, output_q, accumulator, window_size)
worker.start()
workers.append(worker)
return workers, input_q, output_q
def yield_batches(self, texts):
"""Return a generator over the given texts that yields batches of `batch_size` texts at a time."""
batch = []
for text in self._iter_texts(texts):
batch.append(text)
if len(batch) == self.batch_size:
yield batch
batch = []
if batch:
yield batch
def queue_all_texts(self, q, texts, window_size):
"""Sequentially place batches of texts on the given queue until `texts` is consumed.
The texts are filtered so that only those with at least one relevant token are queued.
"""
for batch_num, batch in enumerate(self.yield_batches(texts)):
q.put(batch, block=True)
before = self._num_docs / self.log_every
self._num_docs += sum(len(doc) - window_size + 1 for doc in batch)
if before < (self._num_docs / self.log_every):
logger.info(
"%d batches submitted to accumulate stats from %d documents (%d virtual)",
(batch_num + 1), (batch_num + 1) * self.batch_size, self._num_docs)
def terminate_workers(self, input_q, output_q, workers, interrupted=False):
"""Wait until all workers have transmitted their WordOccurrenceAccumulator instances, then terminate each.
Warnings
--------
We do not use join here because it has been shown to have some issues
in Python 2.7 (and even in later versions). This method also closes both the input and output queue.
If `interrupted` is False (normal execution), a None value is placed on the input queue for
each worker. The workers are looking for this sentinel value and interpret it as a signal to
terminate themselves. If `interrupted` is True, a KeyboardInterrupt occurred. The workers are
programmed to recover from this and continue on to transmit their results before terminating.
So in this instance, the sentinel values are not queued, but the rest of the execution
continues as usual.
"""
if not interrupted:
for _ in workers:
input_q.put(None, block=True)
accumulators = []
while len(accumulators) != len(workers):
accumulators.append(output_q.get())
logger.info("%d accumulators retrieved from output queue", len(accumulators))
for worker in workers:
if worker.is_alive():
worker.terminate()
input_q.close()
output_q.close()
return accumulators
def merge_accumulators(self, accumulators):
"""Merge the list of accumulators into a single `WordOccurrenceAccumulator` with all
occurrence and co-occurrence counts, and a `num_docs` that reflects the total observed
by all the individual accumulators.
"""
accumulator = WordOccurrenceAccumulator(self.relevant_ids, self.dictionary)
for other_accumulator in accumulators:
accumulator.merge(other_accumulator)
# Workers do partial accumulation, so none of the co-occurrence matrices are symmetrized.
# This is by design, to avoid unnecessary matrix additions/conversions during accumulation.
accumulator._symmetrize()
logger.info("accumulated word occurrence stats for %d virtual documents", accumulator.num_docs)
return accumulator
class AccumulatingWorker(mp.Process):
"""Accumulate stats from texts fed in from queue."""
def __init__(self, input_q, output_q, accumulator, window_size):
super(AccumulatingWorker, self).__init__()
self.input_q = input_q
self.output_q = output_q
self.accumulator = accumulator
self.accumulator.log_every = sys.maxsize # avoid logging in workers
self.window_size = window_size
def run(self):
try:
self._run()
except KeyboardInterrupt:
logger.info(
"%s interrupted after processing %d documents",
self.__class__.__name__, self.accumulator.num_docs)
except Exception:
logger.exception("worker encountered unexpected exception")
finally:
self.reply_to_master()
def _run(self):
batch_num = -1
n_docs = 0
while True:
batch_num += 1
docs = self.input_q.get(block=True)
if docs is None: # sentinel value
logger.debug("observed sentinel value; terminating")
break
self.accumulator.partial_accumulate(docs, self.window_size)
n_docs += len(docs)
logger.debug(
"completed batch %d; %d documents processed (%d virtual)",
batch_num, n_docs, self.accumulator.num_docs)
logger.debug(
"finished all batches; %d documents processed (%d virtual)",
n_docs, self.accumulator.num_docs)
def reply_to_master(self):
logger.info("serializing accumulator to return to master...")
self.output_q.put(self.accumulator, block=False)
logger.info("accumulator serialized")
class WordVectorsAccumulator(UsesDictionary):
"""Accumulate context vectors for words using word vector embeddings.
Attributes
----------
model: Word2Vec (:class:`~gensim.models.keyedvectors.KeyedVectors`)
If None, a new Word2Vec model is trained on the given text corpus. Otherwise,
        it should be pre-trained Word2Vec context vectors.
model_kwargs:
if model is None, these keyword arguments will be passed through to the Word2Vec constructor.
"""
def __init__(self, relevant_ids, dictionary, model=None, **model_kwargs):
super(WordVectorsAccumulator, self).__init__(relevant_ids, dictionary)
self.model = model
self.model_kwargs = model_kwargs
def not_in_vocab(self, words):
uniq_words = set(utils.flatten(words))
return set(word for word in uniq_words if word not in self.model)
def get_occurrences(self, word):
"""Return number of docs the word occurs in, once `accumulate` has been called."""
try:
self.token2id[word] # is this a token or an id?
except KeyError:
word = self.dictionary.id2token[word]
return self.model.get_vecattr(word, 'count')
def get_co_occurrences(self, word1, word2):
"""Return number of docs the words co-occur in, once `accumulate` has been called."""
raise NotImplementedError("Word2Vec model does not support co-occurrence counting")
def accumulate(self, texts, window_size):
if self.model is not None:
logger.debug("model is already trained; no accumulation necessary")
return self
kwargs = self.model_kwargs.copy()
if window_size is not None:
kwargs['window'] = window_size
kwargs['min_count'] = kwargs.get('min_count', 1)
kwargs['sg'] = kwargs.get('sg', 1)
        kwargs['hs'] = kwargs.get('hs', 0)  # default to negative sampling unless 'hs' is passed explicitly
self.model = Word2Vec(**kwargs)
self.model.build_vocab(texts)
self.model.train(texts, total_examples=self.model.corpus_count, epochs=self.model.epochs)
self.model = self.model.wv # retain KeyedVectors
return self
def ids_similarity(self, ids1, ids2):
words1 = self._words_with_embeddings(ids1)
words2 = self._words_with_embeddings(ids2)
return self.model.n_similarity(words1, words2)
def _words_with_embeddings(self, ids):
if not hasattr(ids, '__iter__'):
ids = [ids]
words = [self.dictionary.id2token[word_id] for word_id in ids]
return [word for word in words if word in self.model]
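# A minimal usage sketch: CorpusAccumulator over a tiny hand-written BoW corpus,
# querying occurrence and co-occurrence counts through __getitem__. Runs only when
# this file is executed directly.
if __name__ == '__main__':
    bow_corpus = [[(1, 1), (2, 1)], [(1, 2)], [(2, 1), (3, 4)]]  # three BoW documents
    acc = CorpusAccumulator({1, 2}).accumulate(bow_corpus)  # only track word ids 1 and 2
    print(acc[1])     # 2: word 1 occurs in documents 0 and 1
    print(acc[2])     # 2: word 2 occurs in documents 0 and 2
    print(acc[1, 2])  # 1: words 1 and 2 co-occur only in document 0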
| 24,317
|
Python
|
.py
| 527
| 37.119545
| 114
| 0.632851
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,145
|
__init__.py
|
piskvorky_gensim/gensim/topic_coherence/__init__.py
|
"""
This package contains implementation of the individual components of
the topic coherence pipeline.
"""
| 107
|
Python
|
.py
| 4
| 25.75
| 68
| 0.825243
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,146
|
segmentation.py
|
piskvorky_gensim/gensim/topic_coherence/segmentation.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""This module contains functions to perform segmentation on a list of topics."""
import logging
logger = logging.getLogger(__name__)
def s_one_pre(topics):
r"""Performs segmentation on a list of topics.
Notes
-----
Segmentation is defined as
:math:`s_{pre} = {(W', W^{*}) | W' = w_{i}; W^{*} = {w_j}; w_{i}, w_{j} \in W; i > j}`.
Parameters
----------
topics : list of np.array
list of topics obtained from an algorithm such as LDA.
Returns
-------
list of list of (int, int)
:math:`(W', W^{*})` for all unique topic ids.
Examples
--------
.. sourcecode:: pycon
>>> import numpy as np
>>> from gensim.topic_coherence import segmentation
>>>
>>> topics = [np.array([1, 2, 3]), np.array([4, 5, 6])]
>>> segmentation.s_one_pre(topics)
[[(2, 1), (3, 1), (3, 2)], [(5, 4), (6, 4), (6, 5)]]
"""
s_one_pre_res = []
for top_words in topics:
s_one_pre_t = []
for w_prime_index, w_prime in enumerate(top_words[1:]):
for w_star in top_words[:w_prime_index + 1]:
s_one_pre_t.append((w_prime, w_star))
s_one_pre_res.append(s_one_pre_t)
return s_one_pre_res
def s_one_one(topics):
r"""Perform segmentation on a list of topics.
Segmentation is defined as
:math:`s_{one} = {(W', W^{*}) | W' = {w_i}; W^{*} = {w_j}; w_{i}, w_{j} \in W; i \neq j}`.
Parameters
----------
topics : list of `numpy.ndarray`
List of topics obtained from an algorithm such as LDA.
Returns
-------
list of list of (int, int).
:math:`(W', W^{*})` for all unique topic ids.
Examples
-------
.. sourcecode:: pycon
>>> import numpy as np
>>> from gensim.topic_coherence import segmentation
>>>
>>> topics = [np.array([1, 2, 3]), np.array([4, 5, 6])]
>>> segmentation.s_one_one(topics)
[[(1, 2), (1, 3), (2, 1), (2, 3), (3, 1), (3, 2)], [(4, 5), (4, 6), (5, 4), (5, 6), (6, 4), (6, 5)]]
"""
s_one_one_res = []
for top_words in topics:
s_one_one_t = []
for w_prime_index, w_prime in enumerate(top_words):
for w_star_index, w_star in enumerate(top_words):
if w_prime_index == w_star_index:
continue
else:
s_one_one_t.append((w_prime, w_star))
s_one_one_res.append(s_one_one_t)
return s_one_one_res
def s_one_set(topics):
r"""Perform s_one_set segmentation on a list of topics.
Segmentation is defined as
:math:`s_{set} = {(W', W^{*}) | W' = {w_i}; w_{i} \in W; W^{*} = W}`
Parameters
----------
topics : list of `numpy.ndarray`
List of topics obtained from an algorithm such as LDA.
Returns
-------
list of list of (int, int).
:math:`(W', W^{*})` for all unique topic ids.
Examples
--------
.. sourcecode:: pycon
>>> import numpy as np
>>> from gensim.topic_coherence import segmentation
>>>
>>> topics = [np.array([9, 10, 7])]
>>> segmentation.s_one_set(topics)
[[(9, array([ 9, 10, 7])), (10, array([ 9, 10, 7])), (7, array([ 9, 10, 7]))]]
"""
s_one_set_res = []
for top_words in topics:
s_one_set_t = []
for w_prime in top_words:
s_one_set_t.append((w_prime, top_words))
s_one_set_res.append(s_one_set_t)
return s_one_set_res
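# A minimal comparison sketch: the three segmentation schemes applied to the same
# two-word topic, to make their differences concrete. Runs only when this file is
# executed directly.
if __name__ == '__main__':
    import numpy as np

    topics = [np.array([1, 2])]
    print(s_one_pre(topics))  # each word vs. the words before it: [[(2, 1)]]
    print(s_one_one(topics))  # all ordered pairs with i != j: [[(1, 2), (2, 1)]]
    print(s_one_set(topics))  # each word vs. the whole topic: [[(1, array([1, 2])), (2, array([1, 2]))]]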
| 3,708
|
Python
|
.py
| 102
| 29.284314
| 108
| 0.52979
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,147
|
probability_estimation.py
|
piskvorky_gensim/gensim/topic_coherence/probability_estimation.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""This module contains functions to perform segmentation on a list of topics."""
import itertools
import logging
from gensim.topic_coherence.text_analysis import (
CorpusAccumulator, WordOccurrenceAccumulator, ParallelWordOccurrenceAccumulator,
WordVectorsAccumulator,
)
logger = logging.getLogger(__name__)
def p_boolean_document(corpus, segmented_topics):
"""Perform the boolean document probability estimation. Boolean document estimates the probability of a single word
as the number of documents in which the word occurs divided by the total number of documents.
Parameters
----------
corpus : iterable of list of (int, int)
The corpus of documents.
segmented_topics: list of (int, int).
Each tuple (word_id_set1, word_id_set2) is either a single integer, or a `numpy.ndarray` of integers.
Returns
-------
:class:`~gensim.topic_coherence.text_analysis.CorpusAccumulator`
        Word occurrence accumulator instance that can be used to look up token frequencies and co-occurrence frequencies.
Examples
---------
.. sourcecode:: pycon
>>> from gensim.topic_coherence import probability_estimation
>>> from gensim.corpora.hashdictionary import HashDictionary
>>>
>>>
>>> texts = [
... ['human', 'interface', 'computer'],
... ['eps', 'user', 'interface', 'system'],
... ['system', 'human', 'system', 'eps'],
... ['user', 'response', 'time'],
... ['trees'],
... ['graph', 'trees']
... ]
>>> dictionary = HashDictionary(texts)
>>> w2id = dictionary.token2id
>>>
>>> # create segmented_topics
>>> segmented_topics = [
... [
... (w2id['system'], w2id['graph']),
... (w2id['computer'], w2id['graph']),
... (w2id['computer'], w2id['system'])
... ],
... [
... (w2id['computer'], w2id['graph']),
... (w2id['user'], w2id['graph']),
... (w2id['user'], w2id['computer'])]
... ]
>>> # create corpus
>>> corpus = [dictionary.doc2bow(text) for text in texts]
>>>
>>> result = probability_estimation.p_boolean_document(corpus, segmented_topics)
>>> result.index_to_dict()
{10608: set([0]), 12736: set([1, 3]), 18451: set([5]), 5798: set([1, 2])}
"""
top_ids = unique_ids_from_segments(segmented_topics)
return CorpusAccumulator(top_ids).accumulate(corpus)
def p_boolean_sliding_window(texts, segmented_topics, dictionary, window_size, processes=1):
"""Perform the boolean sliding window probability estimation.
Parameters
----------
texts : iterable of iterable of str
Input text
segmented_topics: list of (int, int)
Each tuple (word_id_set1, word_id_set2) is either a single integer, or a `numpy.ndarray` of integers.
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
Gensim dictionary mapping of the tokens and ids.
window_size : int
        Size of the sliding window; 110 was found to be a good size for large corpora.
processes : int, optional
Number of process that will be used for
:class:`~gensim.topic_coherence.text_analysis.ParallelWordOccurrenceAccumulator`
Notes
-----
Boolean sliding window determines word counts using a sliding window. The window
moves over the documents one word token per step. Each step defines a new virtual
document by copying the window content. Boolean document is applied to these virtual
documents to compute word probabilities.
Returns
-------
:class:`~gensim.topic_coherence.text_analysis.WordOccurrenceAccumulator`
if `processes` = 1 OR
:class:`~gensim.topic_coherence.text_analysis.ParallelWordOccurrenceAccumulator`
        otherwise. This is a word occurrence accumulator instance that can be used to look up
token frequencies and co-occurrence frequencies.
Examples
---------
.. sourcecode:: pycon
>>> from gensim.topic_coherence import probability_estimation
>>> from gensim.corpora.hashdictionary import HashDictionary
>>>
>>>
>>> texts = [
... ['human', 'interface', 'computer'],
... ['eps', 'user', 'interface', 'system'],
... ['system', 'human', 'system', 'eps'],
... ['user', 'response', 'time'],
... ['trees'],
... ['graph', 'trees']
... ]
>>> dictionary = HashDictionary(texts)
>>> w2id = dictionary.token2id
>>>
>>> # create segmented_topics
>>> segmented_topics = [
... [
... (w2id['system'], w2id['graph']),
... (w2id['computer'], w2id['graph']),
... (w2id['computer'], w2id['system'])
... ],
... [
... (w2id['computer'], w2id['graph']),
... (w2id['user'], w2id['graph']),
... (w2id['user'], w2id['computer'])]
... ]
>>> # create corpus
>>> corpus = [dictionary.doc2bow(text) for text in texts]
>>> accumulator = probability_estimation.p_boolean_sliding_window(texts, segmented_topics, dictionary, 2)
>>>
>>> (accumulator[w2id['computer']], accumulator[w2id['user']], accumulator[w2id['system']])
(1, 3, 4)
"""
top_ids = unique_ids_from_segments(segmented_topics)
if processes <= 1:
accumulator = WordOccurrenceAccumulator(top_ids, dictionary)
else:
accumulator = ParallelWordOccurrenceAccumulator(processes, top_ids, dictionary)
logger.info("using %s to estimate probabilities from sliding windows", accumulator)
return accumulator.accumulate(texts, window_size)
def p_word2vec(texts, segmented_topics, dictionary, window_size=None, processes=1, model=None):
"""Train word2vec model on `texts` if `model` is not None.
Parameters
----------
texts : iterable of iterable of str
Input text
segmented_topics : iterable of iterable of str
Output from the segmentation of topics. Could be simply topics too.
    dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
Gensim dictionary mapping of the tokens and ids.
window_size : int, optional
Size of the sliding window.
processes : int, optional
Number of processes to use.
model : :class:`~gensim.models.word2vec.Word2Vec` or :class:`~gensim.models.keyedvectors.KeyedVectors`, optional
If None, a new Word2Vec model is trained on the given text corpus. Otherwise,
        it should be pre-trained Word2Vec context vectors.
Returns
-------
:class:`~gensim.topic_coherence.text_analysis.WordVectorsAccumulator`
Text accumulator with trained context vectors.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.topic_coherence import probability_estimation
>>> from gensim.corpora.hashdictionary import HashDictionary
>>> from gensim.models import word2vec
>>>
>>> texts = [
... ['human', 'interface', 'computer'],
... ['eps', 'user', 'interface', 'system'],
... ['system', 'human', 'system', 'eps'],
... ['user', 'response', 'time'],
... ['trees'],
... ['graph', 'trees']
... ]
>>> dictionary = HashDictionary(texts)
>>> w2id = dictionary.token2id
>>>
>>> # create segmented_topics
>>> segmented_topics = [
... [
... (w2id['system'], w2id['graph']),
... (w2id['computer'], w2id['graph']),
... (w2id['computer'], w2id['system'])
... ],
... [
... (w2id['computer'], w2id['graph']),
... (w2id['user'], w2id['graph']),
... (w2id['user'], w2id['computer'])]
... ]
>>> # create corpus
>>> corpus = [dictionary.doc2bow(text) for text in texts]
>>> sentences = [
... ['human', 'interface', 'computer'],
... ['survey', 'user', 'computer', 'system', 'response', 'time']
... ]
>>> model = word2vec.Word2Vec(sentences, vector_size=100, min_count=1)
>>> accumulator = probability_estimation.p_word2vec(texts, segmented_topics, dictionary, 2, 1, model)
"""
top_ids = unique_ids_from_segments(segmented_topics)
accumulator = WordVectorsAccumulator(
top_ids, dictionary, model, window=window_size, workers=processes)
return accumulator.accumulate(texts, window_size)
def unique_ids_from_segments(segmented_topics):
"""Return the set of all unique ids in a list of segmented topics.
Parameters
----------
segmented_topics: list of (int, int).
Each tuple (word_id_set1, word_id_set2) is either a single integer, or a `numpy.ndarray` of integers.
Returns
-------
set
Set of unique ids across all topic segments.
Example
-------
.. sourcecode:: pycon
>>> from gensim.topic_coherence import probability_estimation
>>>
>>> segmentation = [[(1, 2)]]
>>> probability_estimation.unique_ids_from_segments(segmentation)
set([1, 2])
"""
unique_ids = set() # is a set of all the unique ids contained in topics.
for s_i in segmented_topics:
for word_id in itertools.chain.from_iterable(s_i):
if hasattr(word_id, '__iter__'):
unique_ids.update(word_id)
else:
unique_ids.add(word_id)
return unique_ids
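# A minimal sketch of the "virtual documents" behind the boolean sliding window,
# in plain Python: every window of `window_size` consecutive tokens is treated as
# one document for the boolean word counts. Runs only when executed directly.
if __name__ == '__main__':
    text = ['human', 'interface', 'computer', 'system']
    window_size = 2
    windows = [text[i:i + window_size] for i in range(len(text) - window_size + 1)]
    print(windows)  # [['human', 'interface'], ['interface', 'computer'], ['computer', 'system']]
    print(sum('computer' in w for w in windows) / len(windows))  # P('computer') = 2/3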
| 9,900
|
Python
|
.py
| 227
| 36.198238
| 120
| 0.596866
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,148
|
csvcorpus.py
|
piskvorky_gensim/gensim/corpora/csvcorpus.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Zygmunt Zając <zygmunt@fastml.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Corpus in CSV format."""
from __future__ import with_statement
import logging
import csv
import itertools
from gensim import interfaces, utils
logger = logging.getLogger(__name__)
class CsvCorpus(interfaces.CorpusABC):
"""Corpus in CSV format.
Notes
-----
The CSV delimiter, headers etc. are guessed automatically based on the file content.
All row values are expected to be ints/floats.
"""
def __init__(self, fname, labels):
"""
Parameters
----------
fname : str
Path to corpus.
labels : bool
If True - ignore first column (class labels).
"""
logger.info("loading corpus from %s", fname)
self.fname = fname
self.length = None
self.labels = labels
# load the first few lines, to guess the CSV dialect
with utils.open(self.fname, 'rb') as f:
            head = ''.join(utils.to_unicode(line) for line in itertools.islice(f, 5))  # decode bytes so the Sniffer gets text
self.headers = csv.Sniffer().has_header(head)
self.dialect = csv.Sniffer().sniff(head)
logger.info("sniffed CSV delimiter=%r, headers=%s", self.dialect.delimiter, self.headers)
def __iter__(self):
"""Iterate over the corpus, returning one BoW vector at a time.
Yields
------
list of (int, float)
Document in BoW format.
"""
with utils.open(self.fname, 'rb') as f:
            reader = csv.reader((utils.to_unicode(line) for line in f), self.dialect)  # the csv module expects text, not bytes
if self.headers:
next(reader) # skip the headers
line_no = -1
for line_no, line in enumerate(reader):
if self.labels:
line.pop(0) # ignore the first column = class label
yield list(enumerate(float(x) for x in line))
self.length = line_no + 1 # store the total number of CSV rows = documents
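# A minimal usage sketch (assumes the byte-decoding used above when sniffing and
# reading): write a tiny CSV to a temporary file and stream it back as BoW vectors.
# Runs only when this file is executed directly.
if __name__ == '__main__':
    import os
    import tempfile

    with tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False) as tmp:
        tmp.write("1.0,2.0,0.0\n0.5,0.0,3.0\n")
        path = tmp.name
    try:
        for vector in CsvCorpus(path, labels=False):
            print(vector)  # e.g. [(0, 1.0), (1, 2.0), (2, 0.0)]
    finally:
        os.remove(path)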
| 2,078
|
Python
|
.py
| 55
| 29.581818
| 97
| 0.599601
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,149
|
svmlightcorpus.py
|
piskvorky_gensim/gensim/corpora/svmlightcorpus.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Corpus in SVMlight format."""
from __future__ import with_statement
import logging
from gensim import utils
from gensim.corpora import IndexedCorpus
logger = logging.getLogger(__name__)
class SvmLightCorpus(IndexedCorpus):
"""Corpus in SVMlight format.
Quoting http://svmlight.joachims.org/:
The input file contains the training examples. The first lines may contain comments and are ignored
if they start with #. Each of the following lines represents one training example
and is of the following format::
<line> .=. <target> <feature>:<value> <feature>:<value> ... <feature>:<value> # <info>
<target> .=. +1 | -1 | 0 | <float>
<feature> .=. <integer> | "qid"
<value> .=. <float>
<info> .=. <string>
The "qid" feature (used for SVMlight ranking), if present, is ignored.
Notes
-----
    Although not mentioned in the specification above, SVMlight also expects its feature ids to be 1-based
    (counting starts at 1). We convert features to 0-based internally by decrementing all ids when loading an SVMlight
    input file, and increment them again when saving as SVMlight.
"""
def __init__(self, fname, store_labels=True):
"""
Parameters
----------
fname: str
Path to corpus.
store_labels : bool, optional
            Whether to store labels (~SVM target class). They currently have no application but are stored
in `self.labels` for convenience by default.
"""
IndexedCorpus.__init__(self, fname)
logger.info("loading corpus from %s", fname)
self.fname = fname # input file, see class doc for format
self.length = None
self.store_labels = store_labels
self.labels = []
def __iter__(self):
""" Iterate over the corpus, returning one sparse (BoW) vector at a time.
Yields
------
list of (int, float)
Document in BoW format.
"""
lineno = -1
self.labels = []
with utils.open(self.fname, 'rb') as fin:
for lineno, line in enumerate(fin):
doc = self.line2doc(line)
if doc is not None:
if self.store_labels:
self.labels.append(doc[1])
yield doc[0]
self.length = lineno + 1
@staticmethod
def save_corpus(fname, corpus, id2word=None, labels=False, metadata=False):
"""Save a corpus in the SVMlight format.
The SVMlight `<target>` class tag is taken from the `labels` array, or set to 0 for all documents
if `labels` is not supplied.
Parameters
----------
fname : str
Path to output file.
corpus : iterable of iterable of (int, float)
Corpus in BoW format.
id2word : dict of (str, str), optional
Mapping id -> word.
labels : list or False
An SVMlight `<target>` class tags or False if not present.
metadata : bool
ARGUMENT WILL BE IGNORED.
Returns
-------
list of int
Offsets for each line in file (in bytes).
"""
logger.info("converting corpus to SVMlight format: %s", fname)
if labels is not False:
# Cast any sequence (incl. a numpy array) to a list, to simplify the processing below.
labels = list(labels)
offsets = []
with utils.open(fname, 'wb') as fout:
for docno, doc in enumerate(corpus):
label = labels[docno] if labels else 0 # target class is 0 by default
offsets.append(fout.tell())
fout.write(utils.to_utf8(SvmLightCorpus.doc2line(doc, label)))
return offsets
def docbyoffset(self, offset):
"""Get the document stored at file position `offset`.
Parameters
----------
offset : int
Document's position.
Returns
-------
tuple of (int, float)
"""
with utils.open(self.fname, 'rb') as f:
f.seek(offset)
return self.line2doc(f.readline())[0]
    # TODO: this breaks if line2doc returns None
def line2doc(self, line):
"""Get a document from a single line in SVMlight format.
        This method is the inverse of :meth:`~gensim.corpora.svmlightcorpus.SvmLightCorpus.doc2line`.
Parameters
----------
line : str
Line in SVMLight format.
Returns
-------
(list of (int, float), str)
Document in BoW format and target class label.
"""
line = utils.to_unicode(line)
line = line[: line.find('#')].strip()
if not line:
return None # ignore comments and empty lines
parts = line.split()
if not parts:
raise ValueError('invalid line format in %s' % self.fname)
target, fields = parts[0], [part.rsplit(':', 1) for part in parts[1:]]
# ignore 'qid' features, convert 1-based feature ids to 0-based
doc = [(int(p1) - 1, float(p2)) for p1, p2 in fields if p1 != 'qid']
return doc, target
@staticmethod
def doc2line(doc, label=0):
Convert a document in BoW format to a line in SVMlight format.
This method is the inverse of :meth:`~gensim.corpora.svmlightcorpus.SvmLightCorpus.line2doc`.
Parameters
----------
doc : list of (int, float)
Document in BoW format.
label : int, optional
Document label (if provided).
Returns
-------
str
`doc` in SVMlight format.
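Examples
--------
A quick sketch of the conversion (note the shift to 1-based feature ids):
.. sourcecode:: pycon
>>> SvmLightCorpus.doc2line([(0, 1.0), (2, 3.0)], label=1)
'1 1:1.0 3:3.0\n'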
"""
pairs = ' '.join("%i:%s" % (termid + 1, termval) for termid, termval in doc) # +1 to convert 0-base to 1-base
return "%s %s\n" % (label, pairs)
| 6,083 | Python | .py | 149 | 31.530201 | 118 | 0.587886 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,150 | mmcorpus.py | piskvorky_gensim/gensim/corpora/mmcorpus.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Corpus in the `Matrix Market format <https://math.nist.gov/MatrixMarket/formats.html>`_."""
import logging
from gensim import matutils
from gensim.corpora import IndexedCorpus
logger = logging.getLogger(__name__)
class MmCorpus(matutils.MmReader, IndexedCorpus):
"""Corpus serialized using the `sparse coordinate Matrix Market format
<https://math.nist.gov/MatrixMarket/formats.html>`_.
Wrap a term-document matrix on disk (in matrix-market format), and present it
as an object which supports iteration over the matrix rows (~documents).
Notes
-----
The file is read into memory one document at a time, not the whole matrix at once,
unlike e.g. `scipy.io.mmread` and other implementations. This allows you to **process corpora which are larger
than the available RAM**, in a streamed manner.
Example
--------
.. sourcecode:: pycon
>>> from gensim.corpora.mmcorpus import MmCorpus
>>> from gensim.test.utils import datapath
>>>
>>> corpus = MmCorpus(datapath('test_mmcorpus_with_index.mm'))
>>> for document in corpus:
... pass
"""
def __init__(self, fname):
"""
Parameters
----------
fname : {str, file-like object}
Path to file in MM format or a file-like object that supports `seek()`
(e.g. a compressed file opened by `smart_open <https://github.com/RaRe-Technologies/smart_open>`_).
"""
# avoid calling super(), too confusing
IndexedCorpus.__init__(self, fname)
matutils.MmReader.__init__(self, fname)
def __iter__(self):
"""Iterate through all documents.
Yields
------
list of (int, numeric)
Document in the `sparse Gensim bag-of-words format <intro.rst#core-concepts>`__.
Notes
------
The total number of vectors returned is always equal to the number of rows specified in the header.
Empty documents are inserted and yielded where appropriate, even if they are not explicitly stored in the
(sparse) Matrix Market file.
"""
for doc_id, doc in super(MmCorpus, self).__iter__():
yield doc # get rid of doc id, return the sparse vector only
@staticmethod
def save_corpus(fname, corpus, id2word=None, progress_cnt=1000, metadata=False):
"""Save a corpus to disk in the sparse coordinate Matrix Market format.
Parameters
----------
fname : str
Path to file.
corpus : iterable of list of (int, number)
Corpus in BoW format.
id2word : dict of (int, str), optional
Mapping between word_id -> word. Used to retrieve the total vocabulary size if provided.
Otherwise, the total vocabulary size is estimated based on the highest feature id encountered in `corpus`.
progress_cnt : int, optional
How often to report (log) progress.
metadata : bool, optional
Writes out additional metadata?
Warnings
--------
This function is automatically called by :class:`~gensim.corpora.mmcorpus.MmCorpus.serialize`, don't
call it directly, call :class:`~gensim.corpora.mmcorpus.MmCorpus.serialize` instead.
Example
-------
.. sourcecode:: pycon
>>> from gensim.corpora.mmcorpus import MmCorpus
>>> from gensim.test.utils import datapath
>>>
>>> corpus = MmCorpus(datapath('test_mmcorpus_with_index.mm'))
>>>
>>> MmCorpus.save_corpus("random", corpus) # Do not do it, use `serialize` instead.
[97, 121, 169, 201, 225, 249, 258, 276, 303]
"""
logger.info("storing corpus in Matrix Market format to %s", fname)
num_terms = len(id2word) if id2word is not None else None
return matutils.MmWriter.write_corpus(
fname, corpus, num_terms=num_terms, index=True, progress_cnt=progress_cnt, metadata=metadata
)
| 4,260 | Python | .py | 91 | 38.472527 | 118 | 0.638118 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,151 | hashdictionary.py | piskvorky_gensim/gensim/corpora/hashdictionary.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Homer Strong, Radim Rehurek
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Implements the `"hashing trick" <https://en.wikipedia.org/wiki/Hashing-Trick>`_ -- a mapping between words
and their integer ids using a fixed, static mapping (hash function).
Notes
-----
The static mapping has a constant memory footprint, regardless of the number of word-types (features) in your corpus,
so it's suitable for processing extremely large corpora. The ids are computed as `hash(word) %% id_range`,
where `hash` is a user-configurable function (`zlib.adler32` by default).
Advantages:
* New words can be represented immediately, without an extra pass through the corpus
to collect all the ids first.
* Can be used with non-repeatable (once-only) streams of documents.
* Able to represent any token (not only those present in training documents)
Disadvantages:
* Multiple words may map to the same id, causing hash collisions. The word <-> id mapping is no longer a bijection.
"""
import logging
import itertools
import zlib
from gensim import utils
logger = logging.getLogger(__name__)
class HashDictionary(utils.SaveLoad, dict):
"""Mapping between words and their integer ids, using a hashing function.
Unlike :class:`~gensim.corpora.dictionary.Dictionary`,
building a :class:`~gensim.corpora.hashdictionary.HashDictionary` before using it **isn't a necessary step**.
You can start converting words to ids immediately, without training on a corpus.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import HashDictionary
>>>
>>> dct = HashDictionary(debug=False) # needs no training corpus!
>>>
>>> texts = [['human', 'interface', 'computer']]
>>> dct.doc2bow(texts[0])
[(10608, 1), (12466, 1), (31002, 1)]
"""
def __init__(self, documents=None, id_range=32000, myhash=zlib.adler32, debug=True):
"""
Parameters
----------
documents : iterable of iterable of str, optional
Iterable of documents. If given, used to collect additional corpus statistics.
:class:`~gensim.corpora.hashdictionary.HashDictionary` can work
without these statistics (optional parameter).
id_range : int, optional
Number of hash-values in table, used as `id = myhash(key) %% id_range`.
myhash : function, optional
Hash function, should support interface `myhash(str) -> int`, uses `zlib.adler32` by default.
debug : bool, optional
Store which tokens have mapped to a given id? **Will use a lot of RAM**.
If you find yourself running out of memory (or not sure that you really need raw tokens),
keep `debug=False`.
"""
self.myhash = myhash # hash fnc: string->integer
self.id_range = id_range # hash range: id = myhash(key) % id_range
self.debug = debug
# the following (potentially massive!) dictionaries are only formed if `debug` is True
self.token2id = {}
self.id2token = {} # reverse mapping int->set(words)
self.dfs = {} # token_id -> how many documents this token_id appeared in
self.dfs_debug = {} # token_string->how many documents this word appeared in
self.num_docs = 0 # number of documents processed
self.num_pos = 0 # total number of corpus positions
self.num_nnz = 0 # total number of non-zeroes in the BOW matrix
self.allow_update = True
if documents is not None:
self.add_documents(documents)
def __getitem__(self, tokenid):
"""Get all words that have mapped to the given id so far, as a set.
Warnings
--------
Works only if you initialized your :class:`~gensim.corpora.hashdictionary.HashDictionary` object
with `debug=True`.
Parameters
----------
tokenid : int
Token identifier (result of hashing).
Return
------
set of str
Set of all words that have mapped to this id.
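Examples
--------
A small sketch (requires `debug=True`; the exact integer id shown assumes the default hash function and `id_range`):
.. sourcecode:: pycon
>>> dct = HashDictionary(debug=True)
>>> dct.doc2bow(["human"])
[(31002, 1)]
>>> dct[31002]
{'human'}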
"""
return self.id2token.get(tokenid, set())
def restricted_hash(self, token):
"""Calculate id of the given token.
Also keep track of what words were mapped to what ids, if `debug=True` was set in the constructor.
Parameters
----------
token : str
Input token.
Return
------
int
Hash value of `token`.
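Examples
--------
A minimal sketch of the id computation, assuming the default `myhash=zlib.adler32` and `id_range=32000`:
.. sourcecode:: pycon
>>> import zlib
>>> dct = HashDictionary()
>>> dct.restricted_hash("human") == zlib.adler32(b"human") % 32000
True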
"""
h = self.myhash(utils.to_utf8(token)) % self.id_range
if self.debug:
self.token2id[token] = h
self.id2token.setdefault(h, set()).add(token)
return h
def __len__(self):
"""Get the number of distinct ids = the entire dictionary size."""
return self.id_range
def keys(self):
"""Get a list of all token ids."""
return range(len(self))
def __str__(self):
return "HashDictionary(%i id range)" % len(self)
@staticmethod
def from_documents(*args, **kwargs):
return HashDictionary(*args, **kwargs)
def add_documents(self, documents):
"""Collect corpus statistics from a corpus.
Warnings
--------
Useful only if `debug=True`, to build the reverse `id=>set(words)` mapping.
Notes
-----
This is only a convenience wrapper for calling `doc2bow` on each document with `allow_update=True`.
Parameters
----------
documents : iterable of list of str
Collection of documents.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import HashDictionary
>>>
>>> dct = HashDictionary(debug=True) # needs no training corpus!
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> "sparta" in dct.token2id
False
>>> dct.add_documents([["this", "is", "sparta"], ["just", "joking"]])
>>> "sparta" in dct.token2id
True
"""
for docno, document in enumerate(documents):
if docno % 10000 == 0:
logger.info("adding document #%i to %s", docno, self)
self.doc2bow(document, allow_update=True) # ignore the result, here we only care about updating token ids
logger.info(
"built %s from %i documents (total %i corpus positions)",
self, self.num_docs, self.num_pos
)
def doc2bow(self, document, allow_update=False, return_missing=False):
"""Convert a sequence of words `document` into the bag-of-words format of `[(word_id, word_count)]`
(e.g. `[(1, 4), (150, 1), (2005, 2)]`).
Notes
-----
Each word is assumed to be a **tokenized and normalized** string. No further preprocessing
is done on the words in `document`: you have to apply tokenization, stemming etc before calling this method.
If `allow_update` or `self.allow_update` is set, then also update the dictionary in the process: update overall
corpus statistics and document frequencies. For each id appearing in this document, increase its document
frequency (`self.dfs`) by one.
Parameters
----------
document : sequence of str
A sequence of word tokens = **tokenized and normalized** strings.
allow_update : bool, optional
Update corpus statistics and if `debug=True`, also the reverse id=>word mapping?
return_missing : bool, optional
Not used. Only here for compatibility with the Dictionary class.
Return
------
list of (int, int)
Document in Bag-of-words (BoW) format.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import HashDictionary
>>>
>>> dct = HashDictionary()
>>> dct.doc2bow(["this", "is", "máma"])
[(1721, 1), (5280, 1), (22493, 1)]
"""
result = {}
missing = {}
document = sorted(document) # convert the input to plain list (needed below)
for word_norm, group in itertools.groupby(document):
frequency = len(list(group)) # how many times does this word appear in the input document
tokenid = self.restricted_hash(word_norm)
result[tokenid] = result.get(tokenid, 0) + frequency
if self.debug:
# increment document count for each unique token that appeared in the document
self.dfs_debug[word_norm] = self.dfs_debug.get(word_norm, 0) + 1
if allow_update or self.allow_update:
self.num_docs += 1
self.num_pos += len(document)
self.num_nnz += len(result)
if self.debug:
# increment document count for each unique tokenid that appeared in the document
# done here, because several words may map to the same tokenid
for tokenid in result.keys():
self.dfs[tokenid] = self.dfs.get(tokenid, 0) + 1
# return tokenids, in ascending id order
result = sorted(result.items())
if return_missing:
return result, missing
else:
return result
def filter_extremes(self, no_below=5, no_above=0.5, keep_n=100000):
"""Filter tokens in the debug dictionary by their frequency.
Since :class:`~gensim.corpora.hashdictionary.HashDictionary` id range is fixed and doesn't depend on the number
of tokens seen, this doesn't really "remove" anything. It only clears some
internal corpus statistics, for easier debugging and a smaller RAM footprint.
Warnings
--------
Only makes sense when `debug=True`.
Parameters
----------
no_below : int, optional
Keep tokens which are contained in at least `no_below` documents.
no_above : float, optional
Keep tokens which are contained in no more than `no_above` documents
(fraction of total corpus size, not an absolute number).
keep_n : int, optional
Keep only the first `keep_n` most frequent tokens.
Notes
-----
Tokens are discarded from the debug statistics if they appear in:
#. Fewer than `no_below` documents (absolute number), or
#. More than `no_above` documents (fraction of the total corpus size, **not an absolute number**).
After (1) and (2), only the first `keep_n` most frequent tokens are kept (or all of them if `keep_n=None`).
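Examples
--------
A small sketch (requires `debug=True`, because the filtering operates on the debug statistics):
.. sourcecode:: pycon
>>> dct = HashDictionary(debug=True)
>>> dct.add_documents([["cat", "say", "meow"], ["dog", "say", "woof"]])
>>> dct.filter_extremes(no_below=2, no_above=1.0, keep_n=10)
>>> list(dct.dfs_debug)
['say']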
"""
no_above_abs = int(no_above * self.num_docs) # convert fractional threshold to absolute threshold
ok = [item for item in self.dfs_debug.items() if no_below <= item[1] <= no_above_abs]
ok = frozenset(word for word, freq in sorted(ok, key=lambda x: -x[1])[:keep_n])
self.dfs_debug = {word: freq for word, freq in self.dfs_debug.items() if word in ok}
self.token2id = {token: tokenid for token, tokenid in self.token2id.items() if token in self.dfs_debug}
self.id2token = {
tokenid: {token for token in tokens if token in self.dfs_debug}
for tokenid, tokens in self.id2token.items()
}
self.dfs = {tokenid: freq for tokenid, freq in self.dfs.items() if self.id2token.get(tokenid, False)}
# for word->document frequency
logger.info(
"kept statistics for which were in no less than %i and no more than %i (=%.1f%%) documents",
no_below, no_above_abs, 100.0 * no_above
)
def save_as_text(self, fname):
"""Save the debug token=>id mapping to a text file.
Warnings
--------
Only makes sense when `debug=True`, for debugging.
Parameters
----------
fname : str
Path to output file.
Notes
-----
The format is:
`id[TAB]document frequency of this id[TAB]tab-separated set of words in UTF8 that map to this id[NEWLINE]`.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import HashDictionary
>>> from gensim.test.utils import get_tmpfile
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> data = HashDictionary(corpus)
>>> data.save_as_text(get_tmpfile("dictionary_in_text_format"))
"""
logger.info("saving %s mapping to %s" % (self, fname))
with utils.open(fname, 'wb') as fout:
for tokenid in self.keys():
words = sorted(self[tokenid])
if words:
words_df = [(word, self.dfs_debug.get(word, 0)) for word in words]
words_df = ["%s(%i)" % item for item in sorted(words_df, key=lambda x: -x[1])]
words_df = '\t'.join(words_df)
fout.write(utils.to_utf8("%i\t%i\t%s\n" % (tokenid, self.dfs.get(tokenid, 0), words_df)))
| 13,219 | Python | .py | 277 | 38.086643 | 119 | 0.605644 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,152 | wikicorpus.py | piskvorky_gensim/gensim/corpora/wikicorpus.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Copyright (C) 2012 Lars Buitinck <larsmans@gmail.com>
# Copyright (C) 2018 Emmanouil Stergiadis <em.stergiadis@gmail.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Construct a corpus from a Wikipedia (or other MediaWiki-based) database dump.
Uses multiprocessing internally to parallelize the work and process the dump more quickly.
Notes
-----
See :mod:`gensim.scripts.make_wiki` for a canned (example) command-line script based on this module.
"""
import bz2
import logging
import multiprocessing
import re
import signal
from pickle import PicklingError
# LXML isn't faster, so let's go with the built-in solution
from xml.etree.ElementTree import iterparse
from gensim import utils
# cannot import whole gensim.corpora, because that imports wikicorpus...
from gensim.corpora.dictionary import Dictionary
from gensim.corpora.textcorpus import TextCorpus
logger = logging.getLogger(__name__)
ARTICLE_MIN_WORDS = 50
"""Ignore shorter articles (after full preprocessing)."""
# default thresholds for lengths of individual tokens
TOKEN_MIN_LEN = 2
TOKEN_MAX_LEN = 15
RE_P0 = re.compile(r'<!--.*?-->', re.DOTALL | re.UNICODE)
"""Comments."""
RE_P1 = re.compile(r'<ref([> ].*?)(</ref>|/>)', re.DOTALL | re.UNICODE)
"""Footnotes."""
RE_P2 = re.compile(r'(\n\[\[[a-z][a-z][\w-]*:[^:\]]+\]\])+$', re.UNICODE)
"""Links to languages."""
RE_P3 = re.compile(r'{{([^}{]*)}}', re.DOTALL | re.UNICODE)
"""Template."""
RE_P4 = re.compile(r'{{([^}]*)}}', re.DOTALL | re.UNICODE)
"""Template."""
RE_P5 = re.compile(r'\[(\w+):\/\/(.*?)(( (.*?))|())\]', re.UNICODE)
"""Remove URL, keep description."""
RE_P6 = re.compile(r'\[([^][]*)\|([^][]*)\]', re.DOTALL | re.UNICODE)
"""Simplify links, keep description."""
RE_P7 = re.compile(r'\n\[\[[iI]mage(.*?)(\|.*?)*\|(.*?)\]\]', re.UNICODE)
"""Keep description of images."""
RE_P8 = re.compile(r'\n\[\[[fF]ile(.*?)(\|.*?)*\|(.*?)\]\]', re.UNICODE)
"""Keep description of files."""
RE_P9 = re.compile(r'<nowiki([> ].*?)(</nowiki>|/>)', re.DOTALL | re.UNICODE)
"""External links."""
RE_P10 = re.compile(r'<math([> ].*?)(</math>|/>)', re.DOTALL | re.UNICODE)
"""Math content."""
RE_P11 = re.compile(r'<(.*?)>', re.DOTALL | re.UNICODE)
"""All other tags."""
RE_P12 = re.compile(r'(({\|)|(\|-(?!\d))|(\|}))(.*?)(?=\n)', re.UNICODE)
"""Table formatting."""
RE_P13 = re.compile(r'(?<=(\n[ ])|(\n\n)|([ ]{2})|(.\n)|(.\t))(\||\!)([^[\]\n]*?\|)*', re.UNICODE)
"""Table cell formatting."""
RE_P14 = re.compile(r'\[\[Category:[^][]*\]\]', re.UNICODE)
"""Categories."""
RE_P15 = re.compile(r'\[\[([fF]ile:|[iI]mage)[^]]*(\]\])', re.UNICODE)
"""Remove File and Image templates."""
RE_P16 = re.compile(r'\[{2}(.*?)\]{2}', re.UNICODE)
"""Capture interlinks text and article linked"""
RE_P17 = re.compile(
r'(\n.{0,4}((bgcolor)|(\d{0,1}[ ]?colspan)|(rowspan)|(style=)|(class=)|(align=)|(scope=))(.*))|'
r'(^.{0,2}((bgcolor)|(\d{0,1}[ ]?colspan)|(rowspan)|(style=)|(class=)|(align=))(.*))',
re.UNICODE
)
"""Table markup"""
IGNORED_NAMESPACES = [
'Wikipedia', 'Category', 'File', 'Portal', 'Template',
'MediaWiki', 'User', 'Help', 'Book', 'Draft', 'WikiProject',
'Special', 'Talk'
]
"""MediaWiki namespaces that ought to be ignored."""
def filter_example(elem, text, *args, **kwargs):
"""Example function for filtering arbitrary documents from wikipedia dump.
The custom filter function is called _before_ tokenisation and should work on
the raw text and/or XML element information.
The filter function gets the entire context of the XML element passed into it,
but you can of course choose not to use some or all parts of the context. Please
refer to :func:`gensim.corpora.wikicorpus.extract_pages` for the exact details
of the page context.
Parameters
----------
elem : etree.Element
XML etree element
text : str
The text of the XML node
namespace : str
XML namespace of the XML element
title : str
Page title
page_tag : str
XPath expression for page.
text_path : str
XPath expression for text.
title_path : str
XPath expression for title.
ns_path : str
XPath expression for namespace.
pageid_path : str
XPath expression for page id.
Example
-------
.. sourcecode:: pycon
>>> import gensim.corpora
>>> filter_func = gensim.corpora.wikicorpus.filter_example
>>> dewiki = gensim.corpora.WikiCorpus(
... './dewiki-20180520-pages-articles-multistream.xml.bz2',
... filter_articles=filter_func)
"""
# Filter German wikipedia dump for articles that are marked either as
# Lesenswert (featured) or Exzellent (excellent) by wikipedia editors.
# *********************
# regex is in the function call so that we do not pollute the wikicorpus
# namespace do not do this in production as this function is called for
# every element in the wiki dump
_regex_de_excellent = re.compile(r'.*\{\{(Exzellent.*?)\}\}[\s]*', flags=re.DOTALL)
_regex_de_featured = re.compile(r'.*\{\{(Lesenswert.*?)\}\}[\s]*', flags=re.DOTALL)
if text is None:
return False
if _regex_de_excellent.match(text) or _regex_de_featured.match(text):
return True
else:
return False
def find_interlinks(raw):
"""Find all interlinks to other articles in the dump.
Parameters
----------
raw : str
Unicode or utf-8 encoded string.
Returns
-------
list
List of tuples in format [(linked article, the actual text found), ...].
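Example
-------
A quick sketch of the extraction on a snippet of raw wiki markup:
.. sourcecode:: pycon
>>> from gensim.corpora.wikicorpus import find_interlinks
>>> find_interlinks("See [[political philosophy]] and [[Anarchism|anarchist]] thought.")
[('political philosophy', 'political philosophy'), ('Anarchism', 'anarchist')]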
"""
filtered = filter_wiki(raw, promote_remaining=False, simplify_links=False)
interlinks_raw = re.findall(RE_P16, filtered)
interlinks = []
for parts in [i.split('|') for i in interlinks_raw]:
actual_title = parts[0]
try:
interlink_text = parts[1]
except IndexError:
interlink_text = actual_title
interlink_tuple = (actual_title, interlink_text)
interlinks.append(interlink_tuple)
legit_interlinks = [(i, j) for i, j in interlinks if '[' not in i and ']' not in i]
return legit_interlinks
def filter_wiki(raw, promote_remaining=True, simplify_links=True):
"""Filter out wiki markup from `raw`, leaving only text.
Parameters
----------
raw : str
Unicode or utf-8 encoded string.
promote_remaining : bool
Whether uncaught markup should be promoted to plain text.
simplify_links : bool
Whether links should be simplified keeping only their description text.
Returns
-------
str
`raw` without markup.
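Example
-------
A minimal sketch (the markup parsing is heuristic, so treat the output as an approximation):
.. sourcecode:: pycon
>>> from gensim.corpora.wikicorpus import filter_wiki
>>> filter_wiki("He was a [[philosopher]] and an [[economist|economic]] thinker.")
'He was a philosopher and an economic thinker.'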
"""
# parsing of the wiki markup is not perfect, but sufficient for our purposes
# contributions to improving this code are welcome :)
text = utils.to_unicode(raw, 'utf8', errors='ignore')
text = utils.decode_htmlentities(text) # '&nbsp;' --> '\xa0'
return remove_markup(text, promote_remaining, simplify_links)
def remove_markup(text, promote_remaining=True, simplify_links=True):
"""Filter out wiki markup from `text`, leaving only text.
Parameters
----------
text : str
String containing markup.
promote_remaining : bool
Whether uncaught markup should be promoted to plain text.
simplify_links : bool
Whether links should be simplified keeping only their description text.
Returns
-------
str
`text` without markup.
"""
text = re.sub(RE_P2, '', text) # remove the last list (=languages)
# the wiki markup is recursive (markup inside markup etc)
# instead of writing a recursive grammar, here we deal with that by removing
# markup in a loop, starting with inner-most expressions and working outwards,
# for as long as something changes.
text = remove_template(text)
text = remove_file(text)
iters = 0
while True:
old, iters = text, iters + 1
text = re.sub(RE_P0, '', text) # remove comments
text = re.sub(RE_P1, '', text) # remove footnotes
text = re.sub(RE_P9, '', text) # remove outside links
text = re.sub(RE_P10, '', text) # remove math content
text = re.sub(RE_P11, '', text) # remove all remaining tags
text = re.sub(RE_P14, '', text) # remove categories
text = re.sub(RE_P5, '\\3', text) # remove urls, keep description
if simplify_links:
text = re.sub(RE_P6, '\\2', text) # simplify links, keep description only
# remove table markup
text = text.replace("!!", "\n|") # each table head cell on a separate line
text = text.replace("|-||", "\n|") # for cases where a cell is filled with '-'
text = re.sub(RE_P12, '\n', text) # remove formatting lines
text = text.replace('|||', '|\n|') # each table cell on a separate line(where |{{a|b}}||cell-content)
text = text.replace('||', '\n|') # each table cell on a separate line
text = re.sub(RE_P13, '\n', text) # leave only cell content
text = re.sub(RE_P17, '\n', text) # remove formatting lines
# remove empty mark-up
text = text.replace('[]', '')
# stop if nothing changed between two iterations or after a fixed number of iterations
if old == text or iters > 2:
break
if promote_remaining:
text = text.replace('[', '').replace(']', '') # promote all remaining markup to plain text
return text
def remove_template(s):
"""Remove template wikimedia markup.
Parameters
----------
s : str
String containing markup template.
Returns
-------
str
Copy of `s` with all the `wikimedia markup template <http://meta.wikimedia.org/wiki/Help:Template>`_ removed.
Notes
-----
Since templates can be nested, it is difficult to remove them using regular expressions.
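Example
-------
A quick sketch (the whole template, including the nested one, is removed, leaving two adjacent spaces):
.. sourcecode:: pycon
>>> from gensim.corpora.wikicorpus import remove_template
>>> remove_template("text {{outer {{inner}} template}} more text")
'text  more text'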
"""
# Find the start and end position of each template by finding the opening
# '{{' and closing '}}'
n_open, n_close = 0, 0
starts, ends = [], [-1]
in_template = False
prev_c = None
for i, c in enumerate(s):
if not in_template:
if c == '{' and c == prev_c:
starts.append(i - 1)
in_template = True
n_open = 1
if in_template:
if c == '{':
n_open += 1
elif c == '}':
n_close += 1
if n_open == n_close:
ends.append(i)
in_template = False
n_open, n_close = 0, 0
prev_c = c
# Remove all the templates
starts.append(None)
return ''.join(s[end + 1:start] for end, start in zip(ends, starts))
def remove_file(s):
"""Remove the 'File:' and 'Image:' markup, keeping the file caption.
Parameters
----------
s : str
String containing 'File:' and 'Image:' markup.
Returns
-------
str
Copy of `s` with all the 'File:' and 'Image:' markup replaced by their `corresponding captions
<http://www.mediawiki.org/wiki/Help:Images>`_.
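Example
-------
A quick sketch (only the last `|`-separated field, the caption, is kept):
.. sourcecode:: pycon
>>> from gensim.corpora.wikicorpus import remove_file
>>> remove_file("text [[File:Example.jpg|thumb|An example caption]] more text")
'text An example caption more text'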
"""
# The regex RE_P15 matches a 'File:' or 'Image:' markup block
for match in re.finditer(RE_P15, s):
m = match.group(0)
caption = m[:-2].split('|')[-1]
s = s.replace(m, caption, 1)
return s
def tokenize(content, token_min_len=TOKEN_MIN_LEN, token_max_len=TOKEN_MAX_LEN, lower=True):
"""Tokenize a piece of text from Wikipedia.
Set `token_min_len`, `token_max_len` as character length (not bytes!) thresholds for individual tokens.
Parameters
----------
content : str
String without markup (see :func:`~gensim.corpora.wikicorpus.filter_wiki`).
token_min_len : int
Minimal token length.
token_max_len : int
Maximal token length.
lower : bool
Convert `content` to lower case?
Returns
-------
list of str
List of tokens from `content`.
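Example
-------
A minimal sketch of the default behaviour (single-character tokens are dropped by `token_min_len=2`):
.. sourcecode:: pycon
>>> from gensim.corpora.wikicorpus import tokenize
>>> tokenize("Anarchism is a political philosophy")
['anarchism', 'is', 'political', 'philosophy']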
"""
# TODO maybe ignore tokens with non-latin characters? (no chinese, arabic, russian etc.)
return [
utils.to_unicode(token) for token in utils.tokenize(content, lower=lower, errors='ignore')
if token_min_len <= len(token) <= token_max_len and not token.startswith('_')
]
def get_namespace(tag):
"""Get the namespace of tag.
Parameters
----------
tag : str
Namespace or tag.
Returns
-------
str
Matched namespace or tag.
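Example
-------
A quick sketch (the schema version in the namespace URI varies between dumps):
.. sourcecode:: pycon
>>> get_namespace("{http://www.mediawiki.org/xml/export-0.10/}mediawiki")
'http://www.mediawiki.org/xml/export-0.10/'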
"""
m = re.match("^{(.*?)}", tag)
namespace = m.group(1) if m else ""
if not namespace.startswith("http://www.mediawiki.org/xml/export-"):
raise ValueError("%s not recognized as MediaWiki dump namespace" % namespace)
return namespace
_get_namespace = get_namespace
def extract_pages(f, filter_namespaces=False, filter_articles=None):
"""Extract pages from a MediaWiki database dump.
Parameters
----------
f : file
File-like object.
filter_namespaces : list of str or bool
Namespaces that will be extracted.
Yields
------
tuple of (str or None, str, str)
Title, text and page id.
"""
elems = (elem for _, elem in iterparse(f, events=("end",)))
# We can't rely on the namespace for database dumps, since it changes
# every time a small modification to the format is made. So, determine
# those from the first element we find, which will be part of the metadata,
# and construct element paths.
elem = next(elems)
namespace = get_namespace(elem.tag)
ns_mapping = {"ns": namespace}
page_tag = "{%(ns)s}page" % ns_mapping
text_path = "./{%(ns)s}revision/{%(ns)s}text" % ns_mapping
title_path = "./{%(ns)s}title" % ns_mapping
ns_path = "./{%(ns)s}ns" % ns_mapping
pageid_path = "./{%(ns)s}id" % ns_mapping
for elem in elems:
if elem.tag == page_tag:
title = elem.find(title_path).text
text = elem.find(text_path).text
if filter_namespaces:
ns = elem.find(ns_path).text
if ns not in filter_namespaces:
text = None
if filter_articles is not None:
if not filter_articles(
elem, namespace=namespace, title=title,
text=text, page_tag=page_tag,
text_path=text_path, title_path=title_path,
ns_path=ns_path, pageid_path=pageid_path):
text = None
pageid = elem.find(pageid_path).text
yield title, text or "", pageid # empty page will yield None
# Prune the element tree, as per
# http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
# except that we don't need to prune backlinks from the parent
# because we don't use LXML.
# We do this only for <page>s, since we need to inspect the
# ./revision/text element. The pages comprise the bulk of the
# file, so in practice we prune away enough.
elem.clear()
_extract_pages = extract_pages # for backward compatibility
def process_article(
args, tokenizer_func=tokenize, token_min_len=TOKEN_MIN_LEN,
token_max_len=TOKEN_MAX_LEN, lower=True,
):
"""Parse a Wikipedia article, extract all tokens.
Notes
-----
Set the `tokenizer_func` parameter (defaults to :func:`~gensim.corpora.wikicorpus.tokenize`) for languages
like Japanese or Thai to perform better tokenization.
The `tokenizer_func` needs to take 4 parameters: (text: str, token_min_len: int, token_max_len: int, lower: bool).
Parameters
----------
args : (str, str, int)
Article text, article title and page id.
tokenizer_func : function
Function for tokenization (defaults to :func:`~gensim.corpora.wikicorpus.tokenize`).
Needs to have interface:
tokenizer_func(text: str, token_min_len: int, token_max_len: int, lower: bool) -> list of str.
token_min_len : int
Minimal token length.
token_max_len : int
Maximal token length.
lower : bool
Convert article text to lower case?
Returns
-------
(list of str, str, int)
List of tokens from article, title and page id.
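Example
-------
A minimal sketch of the default pipeline (markup removal followed by tokenization):
.. sourcecode:: pycon
>>> from gensim.corpora.wikicorpus import process_article
>>> tokens, title, pageid = process_article(("'''Anarchism''' is a [[political philosophy]].", "Anarchism", 12))
>>> tokens
['anarchism', 'is', 'political', 'philosophy']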
"""
text, title, pageid = args
text = filter_wiki(text)
result = tokenizer_func(text, token_min_len, token_max_len, lower)
return result, title, pageid
def init_to_ignore_interrupt():
"""Enables interruption ignoring.
Warnings
--------
Should only be used when the master process is prepared to handle termination of
its child processes.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
def _process_article(args):
"""Same as :func:`~gensim.corpora.wikicorpus.process_article`, but with args in list format.
Parameters
----------
args : (str, str, int, (function, int, int, bool))
Article text, article title and page id, followed by a tuple of tokenization parameters:
tokenizer function, minimal token length, maximal token length and the lowercase flag.
Returns
-------
(list of str, str, int)
List of tokens from article, title and page id.
Warnings
--------
Should not be called explicitly. Use :func:`~gensim.corpora.wikicorpus.process_article` instead.
"""
tokenizer_func, token_min_len, token_max_len, lower = args[-1]
args = args[:-1]
return process_article(
args, tokenizer_func=tokenizer_func, token_min_len=token_min_len,
token_max_len=token_max_len, lower=lower,
)
class WikiCorpus(TextCorpus):
"""Treat a Wikipedia articles dump as a read-only, streamed, memory-efficient corpus.
Supported dump formats:
* <LANG>wiki-<YYYYMMDD>-pages-articles.xml.bz2
* <LANG>wiki-latest-pages-articles.xml.bz2
The documents are extracted on-the-fly, so that the whole (massive) dump can stay compressed on disk.
Notes
-----
Dumps for the English Wikipedia can be found at https://dumps.wikimedia.org/enwiki/.
Attributes
----------
metadata : bool
Whether to write articles titles to serialized corpus.
Warnings
--------
"Multistream" archives are *not* supported in Python 2 due to `limitations in the core bz2 library
<https://docs.python.org/2/library/bz2.html#de-compression-of-files>`_.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath, get_tmpfile
>>> from gensim.corpora import WikiCorpus, MmCorpus
>>>
>>> path_to_wiki_dump = datapath("enwiki-latest-pages-articles1.xml-p000000010p000030302-shortened.bz2")
>>> corpus_path = get_tmpfile("wiki-corpus.mm")
>>>
>>> wiki = WikiCorpus(path_to_wiki_dump) # create word->word_id mapping, ~8h on full wiki
>>> MmCorpus.serialize(corpus_path, wiki) # another 8h, creates a file in MatrixMarket format and mapping
"""
def __init__(
self, fname, processes=None, lemmatize=None, dictionary=None, metadata=False,
filter_namespaces=('0',), tokenizer_func=tokenize, article_min_tokens=ARTICLE_MIN_WORDS,
token_min_len=TOKEN_MIN_LEN, token_max_len=TOKEN_MAX_LEN, lower=True, filter_articles=None,
):
"""Initialize the corpus.
Unless a dictionary is provided, this scans the corpus once,
to determine its vocabulary.
Parameters
----------
fname : str
Path to the Wikipedia dump file.
processes : int, optional
Number of processes to run, defaults to `max(1, number of cpu - 1)`.
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`, optional
Dictionary, if not provided, this scans the corpus once, to determine its vocabulary
**IMPORTANT: this needs a really long time**.
filter_namespaces : tuple of str, optional
Namespaces to consider.
tokenizer_func : function, optional
Function that will be used for tokenization. By default, use :func:`~gensim.corpora.wikicorpus.tokenize`.
If you inject your own tokenizer, it must conform to this interface:
`tokenizer_func(text: str, token_min_len: int, token_max_len: int, lower: bool) -> list of str`
article_min_tokens : int, optional
Minimum tokens in article. Article will be ignored if number of tokens is less.
token_min_len : int, optional
Minimal token length.
token_max_len : int, optional
Maximal token length.
lower : bool, optional
If True - convert all text to lower case.
filter_articles: callable or None, optional
If set, each XML article element will be passed to this callable before being processed. Only articles
for which the callable returns a truthy value are processed; returning None (or any falsy value) filters out
the article, allowing you to apply customised selection rules.
metadata: bool
Have the `get_texts()` method yield `(content_tokens, (page_id, page_title))` tuples, instead
of just `content_tokens`.
Warnings
--------
Unless a dictionary is provided, this scans the corpus once, to determine its vocabulary.
"""
if lemmatize is not None:
raise NotImplementedError(
'The lemmatize parameter is no longer supported. '
'If you need to lemmatize, use e.g. <https://github.com/clips/pattern>. '
'Perform lemmatization as part of your tokenization function and '
'pass it as the tokenizer_func parameter to this initializer.'
)
self.fname = fname
self.filter_namespaces = filter_namespaces
self.filter_articles = filter_articles
self.metadata = metadata
if processes is None:
processes = max(1, multiprocessing.cpu_count() - 1)
self.processes = processes
self.tokenizer_func = tokenizer_func
self.article_min_tokens = article_min_tokens
self.token_min_len = token_min_len
self.token_max_len = token_max_len
self.lower = lower
if dictionary is None:
self.dictionary = Dictionary(self.get_texts())
else:
self.dictionary = dictionary
@property
def input(self):
return self.fname
def get_texts(self):
"""Iterate over the dump, yielding a list of tokens for each article that passed
the length and namespace filtering.
Uses multiprocessing internally to parallelize the work and process the dump more quickly.
Notes
-----
This iterates over the **texts**. If you want vectors, just use the standard corpus interface
instead of this method:
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> from gensim.corpora import WikiCorpus
>>>
>>> path_to_wiki_dump = datapath("enwiki-latest-pages-articles1.xml-p000000010p000030302-shortened.bz2")
>>>
>>> for vec in WikiCorpus(path_to_wiki_dump):
... pass
Yields
------
list of str
If `metadata` is False, yield only list of token extracted from the article.
(list of str, (int, str))
List of tokens (extracted from the article), page id and article title otherwise.
"""
articles, articles_all = 0, 0
positions, positions_all = 0, 0
tokenization_params = (self.tokenizer_func, self.token_min_len, self.token_max_len, self.lower)
texts = (
(text, title, pageid, tokenization_params)
for title, text, pageid
in extract_pages(bz2.BZ2File(self.fname), self.filter_namespaces, self.filter_articles)
)
pool = multiprocessing.Pool(self.processes, init_to_ignore_interrupt)
try:
# process the corpus in smaller chunks of docs, because multiprocessing.Pool
# is dumb and would load the entire input into RAM at once...
for group in utils.chunkize(texts, chunksize=10 * self.processes, maxsize=1):
for tokens, title, pageid in pool.imap(_process_article, group):
articles_all += 1
positions_all += len(tokens)
# article redirects and short stubs are pruned here
if len(tokens) < self.article_min_tokens or \
any(title.startswith(ignore + ':') for ignore in IGNORED_NAMESPACES):
continue
articles += 1
positions += len(tokens)
if self.metadata:
yield (tokens, (pageid, title))
else:
yield tokens
except KeyboardInterrupt:
logger.warning(
"user terminated iteration over Wikipedia corpus after %i documents with %i positions "
"(total %i articles, %i positions before pruning articles shorter than %i words)",
articles, positions, articles_all, positions_all, self.article_min_tokens
)
except PicklingError as exc:
raise PicklingError(
f'Can not send filtering function {self.filter_articles} to multiprocessing, '
'make sure the function can be pickled.'
) from exc
else:
logger.info(
"finished iterating over Wikipedia corpus of %i documents with %i positions "
"(total %i articles, %i positions before pruning articles shorter than %i words)",
articles, positions, articles_all, positions_all, self.article_min_tokens
)
self.length = articles # cache corpus length
finally:
pool.terminate()
| 26,219 | Python | .py | 601 | 35.933444 | 118 | 0.621945 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,153 | _mmreader.pyx | piskvorky_gensim/gensim/corpora/_mmreader.pyx |
# Copyright (C) 2018 Radim Rehurek <radimrehurek@seznam.cz>
# cython: embedsignature=True
# cython: language_level=3
"""Reader for corpus in the Matrix Market format."""
import logging
cimport cython
from libc.stdio cimport sscanf
from gensim import utils
logger = logging.getLogger(__name__)
cdef class MmReader():
"""Matrix market file reader (fast Cython version), used internally in :class:`~gensim.corpora.mmcorpus.MmCorpus`.
Wrap a term-document matrix on disk (in matrix-market format), and present it
as an object which supports iteration over the rows (~documents).
Attributes
----------
num_docs : int
Number of documents in the market matrix file.
num_terms : int
Number of terms.
num_nnz : int
Number of non-zero terms.
Notes
-----
Note that the file is read into memory one document at a time, not the whole matrix at once
(unlike e.g. `scipy.io.mmread` and other implementations).
This allows us to process corpora which are larger than the available RAM.
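Example
--------
A minimal sketch, reading gensim's bundled test file (the reader is normally used indirectly,
via :class:`~gensim.corpora.mmcorpus.MmCorpus`):
.. sourcecode:: pycon
>>> from gensim import matutils
>>> from gensim.test.utils import datapath
>>>
>>> reader = matutils.MmReader(datapath('test_mmcorpus_with_index.mm'))
>>> for doc_id, doc in reader:
...     pass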
"""
cdef public input
cdef public bint transposed
cdef public long long num_docs, num_terms, num_nnz
def __init__(self, input, transposed=True):
"""
Parameters
----------
input : {str, file-like object}
Path to the input file in MM format or a file-like object that supports `seek()`
(e.g. smart_open objects).
transposed : bool, optional
Do lines represent `doc_id, term_id, value`, instead of `term_id, doc_id, value`?
"""
logger.info("initializing cython corpus reader from %s", input)
self.input, self.transposed = input, transposed
with utils.open_file(self.input) as lines:
try:
header = utils.to_unicode(next(lines)).strip()
if not header.lower().startswith('%%matrixmarket matrix coordinate real general'):
raise ValueError(
"File %s not in Matrix Market format with coordinate real general; instead found: \n%s" %
(self.input, header)
)
except StopIteration:
pass
self.num_docs = self.num_terms = self.num_nnz = 0
for lineno, line in enumerate(lines):
line = utils.to_unicode(line)
if not line.startswith('%'):
self.num_docs, self.num_terms, self.num_nnz = (int(x) for x in line.split())
if not self.transposed:
self.num_docs, self.num_terms = self.num_terms, self.num_docs
break
logger.info(
"accepted corpus with %i documents, %i features, %i non-zero entries",
self.num_docs, self.num_terms, self.num_nnz
)
def __len__(self):
"""Get the corpus size: total number of documents."""
return self.num_docs
def __str__(self):
return ("MmCorpus(%i documents, %i features, %i non-zero entries)" %
(self.num_docs, self.num_terms, self.num_nnz))
def skip_headers(self, input_file):
"""Skip file headers that appear before the first document.
Parameters
----------
input_file : iterable of str
Iterable taken from file in MM format.
"""
for line in input_file:
if line.startswith(b'%'):
continue
break
def __iter__(self):
"""Iterate through all documents in the corpus.
Notes
------
Note that the total number of vectors returned is always equal to the number of rows specified
in the header: empty documents are inserted and yielded where appropriate, even if they are not explicitly
stored in the Matrix Market file.
Yields
------
(int, list of (int, number))
Document id and document in sparse bag-of-words format.
"""
cdef long long docid, termid, previd
cdef double val = 0
with utils.file_or_filename(self.input) as lines:
self.skip_headers(lines)
previd = -1
for line in lines:
if (sscanf(line, "%lld %lld %lg", &docid, &termid, &val) != 3):
raise ValueError("unable to parse line: {}".format(line))
if not self.transposed:
termid, docid = docid, termid
# -1 because matrix market indexes are 1-based => convert to 0-based
docid -= 1
termid -= 1
assert previd <= docid, "matrix columns must come in ascending order"
if docid != previd:
# change of document: return the document read so far (its id is prevId)
if previd >= 0:
yield previd, document # noqa:F821
# return implicit (empty) documents between previous id and new id
# too, to keep consistent document numbering and corpus length
for previd in range(previd + 1, docid):
yield previd, []
# from now on start adding fields to a new document, with a new id
previd = docid
document = []
document.append((termid, val,)) # add another field to the current document
# handle the last document, as a special case
if previd >= 0:
yield previd, document
# return empty documents between the last explicit document and the number
# of documents as specified in the header
for previd in range(previd + 1, self.num_docs):
yield previd, []
def docbyoffset(self, offset):
"""Get the document at file offset `offset` (in bytes).
Parameters
----------
offset : int
File offset, in bytes, of the desired document.
Returns
-------
list of (int, number)
Document in sparse bag-of-words format.
"""
# empty documents are not stored explicitly in MM format, so the index marks
# them with a special offset, -1.
cdef long long docid, termid, previd
cdef double val
if offset == -1:
return []
if isinstance(self.input, str):
fin, close_fin = utils.open(self.input, 'rb'), True
else:
fin, close_fin = self.input, False
fin.seek(offset) # works for gzip/bz2 input, too
previd, document = -1, []
for line in fin:
if (sscanf(line, "%lld %lld %lg", &docid, &termid, &val) != 3):
raise ValueError("unable to parse line: {}".format(line))
if not self.transposed:
termid, docid = docid, termid
# -1 because matrix market indexes are 1-based => convert to 0-based
docid -= 1
termid -= 1
assert previd <= docid, "matrix columns must come in ascending order"
if docid != previd:
if previd >= 0:
break
previd = docid
document.append((termid, val,)) # add another field to the current document
if close_fin:
fin.close()
return document
| 7,392 | Python | .py | 166 | 32.957831 | 118 | 0.573757 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,154 | sharded_corpus.py | piskvorky_gensim/gensim/corpora/sharded_corpus.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Original author: Jan Hajic jr.
# Copyright (C) 2015 Radim Rehurek and gensim team.
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""
This module implements a corpus class that stores its data in separate files called
"shards". This is a compromise between speed (keeping the whole dataset
in memory) and memory footprint (keeping the data on disk and reading from it
on demand).
The corpus is intended for situations where you need to use your data
as numpy arrays for some iterative processing (like training something
using SGD, which usually involves heavy matrix multiplication).
"""
import logging
import os
import math
import time
import numpy
import scipy.sparse as sparse
import gensim
from gensim.corpora import IndexedCorpus
from gensim.interfaces import TransformedCorpus
logger = logging.getLogger(__name__)
#: Specifies which dtype should be used for serializing the shards.
_default_dtype = float
try:
import theano
_default_dtype = theano.config.floatX
except ImportError:
logger.info('Could not import Theano, will use standard float for default ShardedCorpus dtype.')
class ShardedCorpus(IndexedCorpus):
"""
This corpus is designed for situations where you need to train a model
on matrices, with a large number of iterations. (It should be faster than
gensim's other IndexedCorpus implementations for this use case; check the
`benchmark_datasets.py` script. It should also serialize faster.)
The corpus stores its data in separate files called
"shards". This is a compromise between speed (keeping the whole dataset
in memory) and memory footprint (keeping the data on disk and reading from
it on demand). Persistence is done using the standard gensim load/save methods.
.. note::
The dataset is **read-only**, there is - as opposed to gensim's Similarity
class, which works similarly - no way of adding documents to the dataset
(for now).
You can use ShardedCorpus to serialize your data just like any other gensim
corpus that implements serialization. However, because the data is saved
as numpy 2-dimensional ndarrays (or scipy sparse matrices), you need to
supply the dimension of your data to the corpus. (The dimension of word
frequency vectors will typically be the size of the vocabulary, etc.)
.. sourcecode:: pycon
>>> corpus = gensim.utils.mock_data()
>>> output_prefix = 'mydata.shdat'
>>> ShardedCorpus.serialize(output_prefix, corpus, dim=1000)
The `output_prefix` tells the ShardedCorpus where to put the data.
Shards are saved as `output_prefix.0`, `output_prefix.1`, etc.
All shards must be of the same size. The shards can be re-sized (which
is essentially a re-serialization into new-size shards), but note that
this operation will temporarily take twice as much disk space, because
the old shards are not deleted until the new shards are safely in place.
After serializing the data, the corpus will then save itself to the file
`output_prefix`.
On further initialization with the same `output_prefix`, the corpus
will load the already built dataset unless the `overwrite` option is
given. (A new object is "cloned" from the one saved to `output_prefix`
previously.)
To retrieve data, you can load the corpus and use it like a list:
.. sourcecode:: pycon
>>> sh_corpus = ShardedCorpus.load(output_prefix)
>>> batch = sh_corpus[100:150]
This will retrieve a numpy 2-dimensional array of 50 rows and 1000
columns (1000 was the dimension of the data we supplied to the corpus).
To retrieve gensim-style sparse vectors, set the `gensim` property:
.. sourcecode:: pycon
>>> sh_corpus.gensim = True
>>> batch = sh_corpus[100:150]
The batch now will be a generator of gensim vectors.
Since the corpus needs the data serialized in order to be able to operate,
it will serialize data right away on initialization. Instead of calling
`ShardedCorpus.serialize()`, you can just initialize and use the corpus
right away:
.. sourcecode:: pycon
>>> corpus = ShardedCorpus(output_prefix, corpus, dim=1000)
>>> batch = corpus[100:150]
ShardedCorpus also supports working with scipy sparse matrices, both
during retrieval and during serialization. If you want to serialize your
data as sparse matrices, set the `sparse_serialization` flag. For
retrieving your data as sparse matrices, use the `sparse_retrieval`
flag. (You can also retrieve densely serialized data as sparse matrices,
for the sake of completeness, and vice versa.) By default, the corpus
will retrieve numpy ndarrays even if it was serialized into sparse
matrices.
.. sourcecode:: pycon
>>> sparse_prefix = 'mydata.sparse.shdat'
>>> ShardedCorpus.serialize(sparse_prefix, corpus, dim=1000, sparse_serialization=True)
>>> sparse_corpus = ShardedCorpus.load(sparse_prefix)
>>> batch = sparse_corpus[100:150]
>>> type(batch)
<type 'numpy.ndarray'>
>>> sparse_corpus.sparse_retrieval = True
>>> batch = sparse_corpus[100:150]
>>> type(batch)
<class 'scipy.sparse.csr.csr_matrix'>
While you *can* touch the `sparse_retrieval` attribute during the life
of a ShardedCorpus object, you should definitely not touch
`sparse_serialization`! Changing that attribute will not miraculously
re-serialize the data in the requested format.
The CSR format is used for sparse data throughout.
Internally, to retrieve data, the dataset keeps track of which shard is
currently open and on a `__getitem__` request, either returns an item from
the current shard, or opens a new one. The shard size is constant, except
for the last shard.
"""
def __init__(self, output_prefix, corpus, dim=None,
shardsize=4096, overwrite=False, sparse_serialization=False,
sparse_retrieval=False, gensim=False):
"""Initializes the dataset. If `output_prefix` is not found,
builds the shards.
:type output_prefix: str
:param output_prefix: The absolute path to the file from which shard
filenames should be derived. The individual shards will be saved
as `output_prefix.0`, `output_prefix.1`, etc.
The `output_prefix` path then works as the filename to which
the ShardedCorpus object itself will be automatically saved.
Normally, gensim corpora do not do this, but ShardedCorpus needs
to remember several serialization settings: namely the shard
size and whether it was serialized in dense or sparse format. By
saving automatically, any new ShardedCorpus with the same
`output_prefix` will be able to find the information about the
data serialized with the given prefix.
If you want to *overwrite* your data serialized with some output
prefix, set the `overwrite` flag to True.
Of course, you can save your corpus separately as well using
the `save()` method.
:type corpus: gensim.interfaces.CorpusABC
:param corpus: The source corpus from which to build the dataset.
:type dim: int
:param dim: Specify beforehand what the dimension of a dataset item
should be. This is useful when initializing from a corpus that
doesn't advertise its dimension, or when it does and you want to
check that the corpus matches the expected dimension. **If `dim`
is left unused and `corpus` does not provide its dimension in
an expected manner, initialization will fail.**
:type shardsize: int
:param shardsize: How many data points should be in one shard. More
data per shard means less shard reloading but higher memory usage
and vice versa.
:type overwrite: bool
:param overwrite: If set, will build dataset from given corpus even
if `output_prefix` already exists.
:type sparse_serialization: bool
:param sparse_serialization: If set, will save the data in a sparse
form (as csr matrices). This is to speed up retrieval when you
know you will be using sparse matrices.
..note::
This property **should not change** during the lifetime of
the dataset. (If you find out you need to change from a sparse
to a dense representation, the best practice is to create
another ShardedCorpus object.)
:type sparse_retrieval: bool
:param sparse_retrieval: If set, will retrieve data as sparse vectors
(scipy.sparse csr matrices). If unset, will return numpy ndarrays.
Note that retrieval speed for this option depends on how the dataset
was serialized. If `sparse_serialization` was set, then setting
`sparse_retrieval` will be faster. However, if the two settings
do not correspond, the conversion on the fly will slow the dataset
down.
:type gensim: bool
:param gensim: If set, will convert the output to gensim
sparse vectors (list of tuples (id, value)) to make it behave like
any other gensim corpus. This **will** slow the dataset down.
"""
self.output_prefix = output_prefix
self.shardsize = shardsize
self.n_docs = 0
self.offsets = []
self.n_shards = 0
self.dim = dim # This number may change during initialization/loading.
# Sparse vs. dense serialization and retrieval.
self.sparse_serialization = sparse_serialization
self.sparse_retrieval = sparse_retrieval
self.gensim = gensim
# The "state" of the dataset.
self.current_shard = None # The current shard itself (numpy ndarray)
self.current_shard_n = None # Current shard is the current_shard_n-th
self.current_offset = None # The index into the dataset which
# corresponds to index 0 of current shard
logger.info('Initializing sharded corpus with prefix %s', output_prefix)
if (not os.path.isfile(output_prefix)) or overwrite:
logger.info('Building from corpus...')
self.init_shards(output_prefix, corpus, shardsize)
# Save automatically, to facilitate re-loading
# and retain information about how the corpus
# was serialized.
logger.info('Saving ShardedCorpus object to %s', self.output_prefix)
self.save()
else:
logger.info('Cloning existing...')
self.init_by_clone()
def init_shards(self, output_prefix, corpus, shardsize=4096, dtype=_default_dtype):
"""Initialize shards from the corpus."""
is_corpus, corpus = gensim.utils.is_corpus(corpus)
if not is_corpus:
raise ValueError("Cannot initialize shards without a corpus to read from! Corpus type: %s" % type(corpus))
proposed_dim = self._guess_n_features(corpus)
if proposed_dim != self.dim:
if self.dim is None:
logger.info('Deriving dataset dimension from corpus: %d', proposed_dim)
else:
logger.warning(
"Dataset dimension derived from input corpus differs from initialization argument, "
"using corpus. (corpus %d, init arg %d)", proposed_dim, self.dim
)
self.dim = proposed_dim
self.offsets = [0]
start_time = time.perf_counter()
logger.info('Running init from corpus.')
for n, doc_chunk in enumerate(gensim.utils.grouper(corpus, chunksize=shardsize)):
logger.info('Chunk no. %d at %f s', n, time.perf_counter() - start_time)
current_shard = numpy.zeros((len(doc_chunk), self.dim), dtype=dtype)
logger.debug('Current chunk dimension: %d x %d', len(doc_chunk), self.dim)
for i, doc in enumerate(doc_chunk):
doc = dict(doc)
current_shard[i][list(doc)] = list(doc.values())
# Handles the updating as well.
if self.sparse_serialization:
current_shard = sparse.csr_matrix(current_shard)
self.save_shard(current_shard)
end_time = time.perf_counter()
logger.info('Built %d shards in %f s.', self.n_shards, end_time - start_time)
def init_by_clone(self):
"""
Initialize by copying over attributes of another ShardedCorpus
instance saved to the output_prefix given at __init__().
"""
temp = self.__class__.load(self.output_prefix)
self.n_shards = temp.n_shards
self.n_docs = temp.n_docs
self.offsets = temp.offsets
if temp.dim != self.dim:
if self.dim is None:
logger.info('Loaded dataset dimension: %d', temp.dim)
else:
logger.warning(
"Loaded dataset dimension differs from init arg dimension, "
"using loaded dim. (loaded %d, init %d)",
temp.dim, self.dim
)
self.dim = temp.dim # To be consistent with the loaded data!
def save_shard(self, shard, n=None, filename=None):
"""
Pickle the given shard. If `n` is not given, will consider the shard
a new one.
If `filename` is given, will use that file name instead of generating
one.
"""
new_shard = False
if n is None:
n = self.n_shards # Saving the *next* one by default.
new_shard = True
if not filename:
filename = self._shard_name(n)
gensim.utils.pickle(shard, filename)
if new_shard:
self.offsets.append(self.offsets[-1] + shard.shape[0])
self.n_docs += shard.shape[0]
self.n_shards += 1
def load_shard(self, n):
"""
Load (unpickle) the n-th shard as the "live" part of the dataset
into the Dataset object."""
# No-op if the shard is already open.
if self.current_shard_n == n:
return
filename = self._shard_name(n)
if not os.path.isfile(filename):
raise ValueError('Attempting to load nonexistent shard no. %s' % n)
shard = gensim.utils.unpickle(filename)
self.current_shard = shard
self.current_shard_n = n
self.current_offset = self.offsets[n]
def reset(self):
"""
Reset to no shard at all. Used for saving.
"""
self.current_shard = None
self.current_shard_n = None
self.current_offset = None
def shard_by_offset(self, offset):
"""
Determine which shard the given offset belongs to. If the offset
is greater than the number of available documents, raises a
`ValueError`.
Assumes that all shards have the same size.
"""
k = int(offset / self.shardsize)
if offset >= self.n_docs:
raise ValueError('Too high offset specified (%s), available docs: %s' % (offset, self.n_docs))
if offset < 0:
raise ValueError('Negative offset %s currently not supported.' % offset)
return k
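# Sketch of the offset -> shard arithmetic above (assumes shardsize=4096 and
# equally sized shards, which init_shards guarantees for all but the last one):
# >>> shardsize = 4096
# >>> int(10000 / shardsize)    # document no. 10000 lives in shard 2
# 2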
def in_current(self, offset):
"""
Determine whether the given offset falls within the current shard.
"""
return (self.current_offset <= offset) and (offset < self.offsets[self.current_shard_n + 1])
def in_next(self, offset):
"""
Determine whether the given offset falls within the next shard.
This is a very small speedup: typically, we will be iterating through
the data forward. Could save considerable time with a very large number
of smaller shards.
"""
if self.current_shard_n >= self.n_shards - 1:
return False  # There's no next shard: we're already in the last one.
return self.offsets[self.current_shard_n + 1] <= offset and offset < self.offsets[self.current_shard_n + 2]
def resize_shards(self, shardsize):
"""
Re-process the dataset to a new shard size. This may take a while.
Also, note that you need some space on disk for this one (we're
assuming there is enough disk space for double the size of the dataset
and that there is enough memory for old + new shardsize).
:type shardsize: int
:param shardsize: The new shard size.
"""
# Determine how many new shards there will be
n_new_shards = int(math.floor(self.n_docs / float(shardsize)))
if self.n_docs % shardsize != 0:
n_new_shards += 1
new_shard_names = []
new_offsets = [0]
for new_shard_idx in range(n_new_shards):
new_start = shardsize * new_shard_idx
new_stop = new_start + shardsize
# Last shard?
if new_stop > self.n_docs:
# Sanity check
assert new_shard_idx == n_new_shards - 1, \
'Shard no. %r that ends at %r over last document (%r) is not the last projected shard (%r)' % (
new_shard_idx, new_stop, self.n_docs, n_new_shards)
new_stop = self.n_docs
new_shard = self[new_start:new_stop]
new_shard_name = self._resized_shard_name(new_shard_idx)
new_shard_names.append(new_shard_name)
try:
self.save_shard(new_shard, new_shard_idx, new_shard_name)
except Exception:
# Clean up on unsuccessful resize.
for new_shard_name in new_shard_names:
os.remove(new_shard_name)
raise
new_offsets.append(new_stop)
# Move old shard files out, new ones in. Complicated due to possibility
# of exceptions.
old_shard_names = [self._shard_name(n) for n in range(self.n_shards)]
try:
for old_shard_n, old_shard_name in enumerate(old_shard_names):
os.remove(old_shard_name)
except Exception as e:
logger.exception(
'Error during old shard no. %d removal: %s.\nAttempting to at least move new shards in.',
old_shard_n, str(e),
)
finally:
# If something happens with cleaning up - try to at least get the
# new guys in.
try:
for shard_n, new_shard_name in enumerate(new_shard_names):
os.rename(new_shard_name, self._shard_name(shard_n))
# If something happens when we're in this stage, we're screwed.
except Exception as e:
logger.exception(e)
raise RuntimeError('Resizing completely failed. Sorry, dataset is probably ruined...')
finally:
# Sets the new shard stats.
self.n_shards = n_new_shards
self.offsets = new_offsets
self.shardsize = shardsize
self.reset()
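# Hedged usage sketch (names illustrative): re-shard an already serialized
# corpus to a smaller shard size; the shards are rewritten on disk and the
# in-memory state is reset afterwards.
# >>> sc.resize_shards(1024)
# >>> sc.shardsize, sc.n_shards    # new size, recomputed shard count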
def _shard_name(self, n):
"""Generate the name for the n-th shard."""
return self.output_prefix + '.' + str(n)
def _resized_shard_name(self, n):
"""
Generate the name for the n-th new shard temporary file when
resizing dataset. The file will then be re-named to standard shard name.
"""
return self.output_prefix + '.resize-temp.' + str(n)
def _guess_n_features(self, corpus):
"""Attempt to guess number of features in `corpus`."""
n_features = None
if hasattr(corpus, 'dim'):
# print 'Guessing from \'dim\' attribute.'
n_features = corpus.dim
elif hasattr(corpus, 'dictionary'):
# print 'Guessing from dictionary.'
n_features = len(corpus.dictionary)
elif hasattr(corpus, 'n_out'):
# print 'Guessing from \'n_out\' attribute.'
n_features = corpus.n_out
elif hasattr(corpus, 'num_terms'):
# print 'Guessing from \'num_terms\' attribute.'
n_features = corpus.num_terms
elif isinstance(corpus, TransformedCorpus):
# TransformedCorpus: first check if the transformer object
# defines some output dimension; if it doesn't, relegate guessing
# to the corpus that is being transformed. This may easily fail!
try:
return self._guess_n_features(corpus.obj)
except TypeError:
return self._guess_n_features(corpus.corpus)
else:
if not self.dim:
raise TypeError(
"Couldn't find number of features, refusing to guess. Dimension: %s, corpus: %s)" % (
self.dim, type(corpus),
)
)
logger.warning("Couldn't find number of features, trusting supplied dimension (%d)", self.dim)
n_features = self.dim
if self.dim and n_features != self.dim:
logger.warning(
"Discovered inconsistent dataset dim (%d) and feature count from corpus (%d). "
"Coercing to dimension given by argument.",
self.dim, n_features,
)
return n_features
def __len__(self):
return self.n_docs
def _ensure_shard(self, offset):
# No shard loaded
if self.current_shard is None:
shard_n = self.shard_by_offset(offset)
self.load_shard(shard_n)
# Find appropriate shard, if necessary
elif not self.in_current(offset):
if self.in_next(offset):
self.load_shard(self.current_shard_n + 1)
else:
shard_n = self.shard_by_offset(offset)
self.load_shard(shard_n)
def get_by_offset(self, offset):
"""As opposed to getitem, this one only accepts ints as offsets."""
self._ensure_shard(offset)
result = self.current_shard[offset - self.current_offset]
return result
def __getitem__(self, offset):
"""
Retrieve the given row of the dataset. Supports slice notation.
"""
if isinstance(offset, list):
# Handle all serialization & retrieval options.
if self.sparse_serialization:
l_result = sparse.vstack([self.get_by_offset(i)
for i in offset])
if self.gensim:
l_result = self._getitem_sparse2gensim(l_result)
elif not self.sparse_retrieval:
l_result = numpy.array(l_result.todense())
else:
l_result = numpy.array([self.get_by_offset(i) for i in offset])
if self.gensim:
l_result = self._getitem_dense2gensim(l_result)
elif self.sparse_retrieval:
l_result = sparse.csr_matrix(l_result)
return l_result
elif isinstance(offset, slice):
start = offset.start
stop = offset.stop
if stop > self.n_docs:
raise IndexError('Requested slice offset %s out of range (%s docs)' % (stop, self.n_docs))
# - get range of shards over which to iterate
first_shard = self.shard_by_offset(start)
last_shard = self.n_shards - 1
if stop != self.n_docs:
last_shard = self.shard_by_offset(stop)
# This fails on one-past
# slice indexing; that's why there's a code branch here.
self.load_shard(first_shard)
# The easy case: both in one shard.
if first_shard == last_shard:
s_result = self.current_shard[start - self.current_offset: stop - self.current_offset]
# Handle different sparsity settings:
s_result = self._getitem_format(s_result)
return s_result
# The hard case: the slice is distributed across multiple shards
# - initialize numpy.zeros()
s_result = numpy.zeros((stop - start, self.dim), dtype=self.current_shard.dtype)
if self.sparse_serialization:
s_result = sparse.csr_matrix((0, self.dim), dtype=self.current_shard.dtype)
# - gradually build it up. We will be using three sets of start:stop
# indexes:
# - into the dataset (these are the indexes the caller works with)
# - into the current shard
# - into the result
# Indexes into current result rows. These are always smaller than
# the dataset indexes by `start` (as we move over the shards,
# we're moving by the same number of rows through the result).
result_start = 0
result_stop = self.offsets[self.current_shard_n + 1] - start
# Indexes into current shard. These are trickiest:
# - if in starting shard, these are from (start - current_offset)
# to self.shardsize
# - if in intermediate shard, these are from 0 to self.shardsize
# - if in ending shard, these are from 0
# to (stop - current_offset)
shard_start = start - self.current_offset
shard_stop = self.offsets[self.current_shard_n + 1] - self.current_offset
# s_result[result_start:result_stop] = self.current_shard[
# shard_start:shard_stop]
s_result = self.__add_to_slice(s_result, result_start, result_stop, shard_start, shard_stop)
# The first and last shards get special treatment; this loop handles the intermediate shards in between.
for shard_n in range(first_shard + 1, last_shard):
self.load_shard(shard_n)
result_start = result_stop
result_stop += self.shardsize
shard_start = 0
shard_stop = self.shardsize
s_result = self.__add_to_slice(s_result, result_start, result_stop, shard_start, shard_stop)
# Last shard
self.load_shard(last_shard)
result_start = result_stop
result_stop += stop - self.current_offset
shard_start = 0
shard_stop = stop - self.current_offset
s_result = self.__add_to_slice(s_result, result_start, result_stop, shard_start, shard_stop)
s_result = self._getitem_format(s_result)
return s_result
else:
s_result = self.get_by_offset(offset)
s_result = self._getitem_format(s_result)
return s_result
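# Hedged retrieval sketch (illustrative): the return type of __getitem__
# depends on the sparse_serialization / sparse_retrieval / gensim flags given
# at construction time.
# >>> dense_rows = sc[0:2]                  # numpy ndarray with the default flags
# >>> sc_g = ShardedCorpus('/tmp/shards_g', corpus, dim=1000, gensim=True)
# >>> next(iter(sc_g[[0, 1]]))              # generator of (feature_id, value) docs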
def __add_to_slice(self, s_result, result_start, result_stop, start, stop):
"""
Add rows of the current shard from `start` to `stop`
into rows `result_start` to `result_stop` of `s_result`.
Operation is based on the ``self.sparse_serialization`` setting. If the shard
contents are dense, then s_result is assumed to be an ndarray that
already supports row indices `result_start:result_stop`. If the shard
contents are sparse, assumes that s_result has `result_start` rows
and we should add them up to `result_stop`.
Return the resulting ``s_result``.
"""
if (result_stop - result_start) != (stop - start):
raise ValueError(
'Result start/stop range different than stop/start range (%s - %s vs. %s - %s)' % (
result_start, result_stop, start, stop,
)
)
# Dense data: just copy using numpy's slice notation
if not self.sparse_serialization:
s_result[result_start:result_stop] = self.current_shard[start:stop]
return s_result
# A bit more difficult, we're using a different structure to build the
# result.
if s_result.shape != (result_start, self.dim):
raise ValueError(
'Assumption about sparse s_result shape invalid: %s expected rows, %s real rows.' % (
result_start, s_result.shape[0],
)
)
tmp_matrix = self.current_shard[start:stop]
s_result = sparse.vstack([s_result, tmp_matrix])
return s_result
def _getitem_format(self, s_result):
if self.sparse_serialization:
if self.gensim:
s_result = self._getitem_sparse2gensim(s_result)
elif not self.sparse_retrieval:
s_result = numpy.array(s_result.todense())
else:
if self.gensim:
s_result = self._getitem_dense2gensim(s_result)
elif self.sparse_retrieval:
s_result = sparse.csr_matrix(s_result)
return s_result
def _getitem_sparse2gensim(self, result):
"""
Change given sparse result matrix to gensim sparse vectors.
Uses the internals of the sparse matrix to make this fast.
"""
def row_sparse2gensim(row_idx, csr_matrix):
indices = csr_matrix.indices[csr_matrix.indptr[row_idx]:csr_matrix.indptr[row_idx + 1]]
g_row = [(col_idx, csr_matrix[row_idx, col_idx]) for col_idx in indices]
return g_row
output = (row_sparse2gensim(i, result) for i in range(result.shape[0]))
return output
def _getitem_dense2gensim(self, result):
"""Change given dense result matrix to gensim sparse vectors."""
if len(result.shape) == 1:
output = gensim.matutils.full2sparse(result)
else:
output = (gensim.matutils.full2sparse(result[i])
for i in range(result.shape[0]))
return output
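# Sketch of the dense -> gensim conversion used above: matutils.full2sparse
# keeps only the non-zero entries as (feature_id, value) tuples.
# >>> import numpy
# >>> from gensim import matutils
# >>> matutils.full2sparse(numpy.array([0.0, 2.0, 0.0, 1.0]))   # -> [(1, 2.0), (3, 1.0)]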
# Overriding the IndexedCorpus and other corpus superclass methods
def __iter__(self):
"""
Yield dataset items one by one (generator).
"""
for i in range(len(self)):
yield self[i]
def save(self, *args, **kwargs):
"""
Save itself (the wrapper) in clean state (after calling `reset()`)
to the output_prefix file. If you wish to save to a different file,
use the `fname` argument as the first positional arg.
"""
# Can we save to a different file than output_prefix? Well, why not?
if len(args) == 0:
args = (self.output_prefix,)
attrs_to_ignore = ['current_shard', 'current_shard_n', 'current_offset']
if 'ignore' in kwargs:
attrs_to_ignore.extend(kwargs['ignore'])
kwargs['ignore'] = frozenset(attrs_to_ignore)
super(ShardedCorpus, self).save(*args, **kwargs)
@classmethod
def load(cls, fname, mmap=None):
"""
Load itself in clean state. `mmap` has no effect here.
"""
return super(ShardedCorpus, cls).load(fname, mmap)
@staticmethod
def save_corpus(fname, corpus, id2word=None, progress_cnt=1000, metadata=False, **kwargs):
"""
Implement a serialization interface. Do not call directly;
use the `serialize` method instead.
Note that you might need some ShardedCorpus init parameters, most
likely the dimension (`dim`). Again, pass these as `kwargs` to the
`serialize` method.
All this thing does is initialize a ShardedCorpus from a corpus
with the `output_prefix` argument set to the `fname` parameter
of this method. The initialization of a ShardedCorpus takes care of
serializing the data (in dense form) to shards.
Ignore the parameters id2word, progress_cnt and metadata. They
currently do nothing and are here only to provide a compatible
method signature with superclass.
"""
ShardedCorpus(fname, corpus, **kwargs)
@classmethod
def serialize(serializer, fname, corpus, id2word=None, index_fname=None, progress_cnt=None,
labels=None, metadata=False, **kwargs):
"""
Iterate through the document stream `corpus`, saving the documents
as a ShardedCorpus to `fname`.
Use this method instead of calling `save_corpus` directly.
You may need to supply some kwargs that are used upon dataset creation
(namely: `dim`, unless the dataset can infer the dimension from the
given corpus).
Ignore the parameters id2word, index_fname, progress_cnt, labels
and metadata. They currently do nothing and are here only to
provide a compatible method signature with superclass.
"""
serializer.save_corpus(
fname, corpus, id2word=id2word, progress_cnt=progress_cnt, metadata=metadata, **kwargs,
)
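# Hedged sketch of the recommended entry point described above (the path and
# the dim/shardsize arguments are illustrative): serialize once, then load and index.
# >>> ShardedCorpus.serialize('/tmp/shards', corpus, dim=1000, shardsize=4096)
# >>> sc = ShardedCorpus.load('/tmp/shards')
# >>> len(sc)               # number of serialized documents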
| 33,071 | Python | .py | 656 | 39.699695 | 118 | 0.623694 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,155 | __init__.py | piskvorky_gensim/gensim/corpora/__init__.py |
"""
This package contains implementations of various streaming corpus I/O formats.
"""
# bring corpus classes directly into package namespace, to save some typing
from .indexedcorpus import IndexedCorpus # noqa:F401 must appear before the other classes
from .mmcorpus import MmCorpus # noqa:F401
from .bleicorpus import BleiCorpus # noqa:F401
from .svmlightcorpus import SvmLightCorpus # noqa:F401
from .lowcorpus import LowCorpus # noqa:F401
from .dictionary import Dictionary # noqa:F401
from .hashdictionary import HashDictionary # noqa:F401
from .wikicorpus import WikiCorpus # noqa:F401
from .textcorpus import TextCorpus, TextDirectoryCorpus # noqa:F401
from .ucicorpus import UciCorpus # noqa:F401
from .malletcorpus import MalletCorpus # noqa:F401
from .opinosiscorpus import OpinosisCorpus # noqa:F401
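# Hedged usage note: the re-exports above allow the short import paths used
# throughout the documentation, e.g.
# >>> from gensim.corpora import Dictionary, MmCorpus, TextCorpus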
| 824 | Python | .py | 16 | 50.375 | 90 | 0.818859 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,156 | lowcorpus.py | piskvorky_gensim/gensim/corpora/lowcorpus.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Corpus in `GibbsLda++ format <https://gibbslda.sourceforge.net/>`_."""
import logging
from collections import Counter
from gensim import utils
from gensim.corpora import IndexedCorpus
from gensim.parsing.preprocessing import split_on_space
logger = logging.getLogger(__name__)
class LowCorpus(IndexedCorpus):
"""Corpus handles input in `GibbsLda++ format <https://gibbslda.sourceforge.net/>`_.
**Format description**
Both data for training/estimating the model and new data (i.e., previously unseen data) have the same format
as follows ::
[M]
[document1]
[document2]
...
[documentM]
in which the first line is the total number of documents [M]. Each line after that is one document.
[documenti] is the ith document of the dataset that consists of a list of Ni words/terms ::
[documenti] = [wordi1] [wordi2] ... [wordiNi]
in which all [wordij] (i=1..M, j=1..Ni) are text strings and they are separated by the blank character.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import get_tmpfile, common_texts
>>> from gensim.corpora import LowCorpus
>>> from gensim.corpora import Dictionary
>>>
>>> # Prepare needed data
>>> dictionary = Dictionary(common_texts)
>>> corpus = [dictionary.doc2bow(doc) for doc in common_texts]
>>>
>>> # Write corpus in GibbsLda++ format to disk
>>> output_fname = get_tmpfile("corpus.low")
>>> LowCorpus.serialize(output_fname, corpus, dictionary)
>>>
>>> # Read corpus
>>> loaded_corpus = LowCorpus(output_fname)
"""
def __init__(self, fname, id2word=None, line2words=split_on_space):
"""
Parameters
----------
fname : str
Path to file in GibbsLda++ format.
id2word : {dict of (int, str), :class:`~gensim.corpora.dictionary.Dictionary`}, optional
Mapping between word_ids (integers) and words (strings).
If not provided, the mapping is constructed directly from `fname`.
line2words : callable, optional
Function which converts lines(str) into tokens(list of str),
using :func:`~gensim.parsing.preprocessing.split_on_space` as default.
"""
IndexedCorpus.__init__(self, fname)
logger.info("loading corpus from %s", fname)
self.fname = fname # input file, see class doc for format
self.line2words = line2words # how to translate lines into words (simply split on space by default)
self.num_docs = self._calculate_num_docs()
if not id2word:
# build a list of all word types in the corpus (distinct words)
logger.info("extracting vocabulary from the corpus")
all_terms = set()
self.use_wordids = False # return documents as (word, wordCount) 2-tuples
for doc in self:
all_terms.update(word for word, wordCnt in doc)
all_terms = sorted(all_terms) # sort the list of all words; rank in that list = word's integer id
# build a mapping of word id(int) -> word (string)
self.id2word = dict(zip(range(len(all_terms)), all_terms))
else:
logger.info("using provided word mapping (%i ids)", len(id2word))
self.id2word = id2word
self.num_terms = len(self.word2id)
self.use_wordids = True # return documents as (wordIndex, wordCount) 2-tuples
logger.info(
"loaded corpus with %i documents and %i terms from %s",
self.num_docs, self.num_terms, fname
)
def _calculate_num_docs(self):
"""Get number of documents in file.
Returns
-------
int
Number of documents.
"""
# the first line in input data is the number of documents (integer). throws exception on bad input.
with utils.open(self.fname, 'rb') as fin:
try:
result = int(next(fin))
except StopIteration:
result = 0
return result
def __len__(self):
return self.num_docs
def line2doc(self, line):
"""Covert line into document in BoW format.
Parameters
----------
line : str
Line from input file.
Returns
-------
list of (int, int)
Document in BoW format
"""
words = self.line2words(line)
if self.use_wordids:
# the following creates a unique list of words *in the same order*
# as they were in the input. when iterating over the documents,
# the (word, count) pairs will appear in the same order as they
# were in the input (bar duplicates), which looks better.
# if this was not needed, we might as well have used useWords = set(words)
use_words, counts = [], Counter()
for word in words:
if word not in self.word2id:
continue
if word not in counts:
use_words.append(word)
counts[word] += 1
# construct a list of (wordIndex, wordFrequency) 2-tuples
doc = [(self.word2id[w], counts[w]) for w in use_words]
else:
word_freqs = Counter(words)
# construct a list of (word, wordFrequency) 2-tuples
doc = list(word_freqs.items())
# return the document, then forget it and move on to the next one
# note that this way, only one doc is stored in memory at a time, not the whole corpus
return doc
def __iter__(self):
"""Iterate over the corpus.
Yields
------
list of (int, int)
Document in BoW format.
"""
with utils.open(self.fname, 'rb') as fin:
for lineno, line in enumerate(fin):
if lineno > 0: # ignore the first line = number of documents
yield self.line2doc(line)
@staticmethod
def save_corpus(fname, corpus, id2word=None, metadata=False):
"""Save a corpus in the GibbsLda++ format.
Warnings
--------
This function is automatically called by :meth:`gensim.corpora.lowcorpus.LowCorpus.serialize`,
don't call it directly, call :meth:`gensim.corpora.lowcorpus.LowCorpus.serialize` instead.
Parameters
----------
fname : str
Path to output file.
corpus : iterable of iterable of (int, int)
Corpus in BoW format.
id2word : {dict of (int, str), :class:`~gensim.corpora.dictionary.Dictionary`}, optional
Mapping between word_ids (integers) and words (strings).
If not provided, the mapping is constructed directly from `corpus`.
metadata : bool, optional
THIS PARAMETER WILL BE IGNORED.
Return
------
list of int
List of offsets in resulting file for each document (in bytes),
can be used for :meth:`~gensim.corpora.lowcorpus.LowCorpus.docbyoffset`
"""
if id2word is None:
logger.info("no word id mapping provided; initializing from corpus")
id2word = utils.dict_from_corpus(corpus)
logger.info("storing corpus in List-Of-Words format into %s" % fname)
truncated = 0
offsets = []
with utils.open(fname, 'wb') as fout:
fout.write(utils.to_utf8('%i\n' % len(corpus)))
for doc in corpus:
words = []
for wordid, value in doc:
if abs(int(value) - value) > 1e-6:
truncated += 1
words.extend([utils.to_unicode(id2word[wordid])] * int(value))
offsets.append(fout.tell())
fout.write(utils.to_utf8('%s\n' % ' '.join(words)))
if truncated:
logger.warning(
"List-of-words format can only save vectors with integer elements; "
"%i float entries were truncated to integer value", truncated
)
return offsets
def docbyoffset(self, offset):
"""Get the document stored in file by `offset` position.
Parameters
----------
offset : int
Offset (in bytes) to begin of document.
Returns
-------
list of (int, int)
Document in BoW format.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> from gensim.corpora import LowCorpus
>>>
>>> data = LowCorpus(datapath("testcorpus.low"))
>>> data.docbyoffset(1) # end of first line
[]
>>> data.docbyoffset(2) # start of second line
[(0, 1), (3, 1), (4, 1)]
"""
with utils.open(self.fname, 'rb') as f:
f.seek(offset)
return self.line2doc(f.readline())
@property
def id2word(self):
"""Get mapping between words and their ids."""
return self._id2word
@id2word.setter
def id2word(self, val):
self._id2word = val
self.word2id = utils.revdict(val)
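# Sketch of the setter above: assigning id2word keeps the reverse word2id
# mapping in sync (instance name and mapping values illustrative).
# >>> corpus.id2word = {0: 'human', 1: 'computer'}
# >>> corpus.word2id
# {'human': 0, 'computer': 1}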
| 9,535 | Python | .py | 221 | 32.886878 | 112 | 0.586356 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,157 | malletcorpus.py | piskvorky_gensim/gensim/corpora/malletcorpus.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Corpus in `Mallet format <http://mallet.cs.umass.edu/import.php>`_."""
from __future__ import with_statement
import logging
from gensim import utils
from gensim.corpora import LowCorpus
logger = logging.getLogger(__name__)
class MalletCorpus(LowCorpus):
"""Corpus handles input in `Mallet format <http://mallet.cs.umass.edu/import.php>`_.
**Format description**
One file, one instance per line; assume the data is in the following format ::
[URL] [language] [text of the page...]
Or, more generally, ::
[document #1 id] [label] [text of the document...]
[document #2 id] [label] [text of the document...]
...
[document #N id] [label] [text of the document...]
Note that the language/label field is *not* used by Gensim; it defaults to `__unknown__`.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import get_tmpfile, common_texts
>>> from gensim.corpora import MalletCorpus
>>> from gensim.corpora import Dictionary
>>>
>>> # Prepare needed data
>>> dictionary = Dictionary(common_texts)
>>> corpus = [dictionary.doc2bow(doc) for doc in common_texts]
>>>
>>> # Write corpus in Mallet format to disk
>>> output_fname = get_tmpfile("corpus.mallet")
>>> MalletCorpus.serialize(output_fname, corpus, dictionary)
>>>
>>> # Read corpus
>>> loaded_corpus = MalletCorpus(output_fname)
"""
def __init__(self, fname, id2word=None, metadata=False):
"""
Parameters
----------
fname : str
Path to file in Mallet format.
id2word : {dict of (int, str), :class:`~gensim.corpora.dictionary.Dictionary`}, optional
Mapping between word_ids (integers) and words (strings).
If not provided, the mapping is constructed directly from `fname`.
metadata : bool, optional
If True, return additional information ("document id" and "lang") when you call
:meth:`~gensim.corpora.malletcorpus.MalletCorpus.line2doc`,
:meth:`~gensim.corpora.malletcorpus.MalletCorpus.__iter__` or
:meth:`~gensim.corpora.malletcorpus.MalletCorpus.docbyoffset`
"""
self.metadata = metadata
LowCorpus.__init__(self, fname, id2word)
def _calculate_num_docs(self):
"""Get number of documents.
Returns
-------
int
Number of documents in file.
"""
with utils.open(self.fname, 'rb') as fin:
result = sum(1 for _ in fin)
return result
def __iter__(self):
"""Iterate over the corpus.
Yields
------
list of (int, int)
Document in BoW format (+"document_id" and "lang" if metadata=True).
"""
with utils.open(self.fname, 'rb') as f:
for line in f:
yield self.line2doc(line)
def line2doc(self, line):
"""Covert line into document in BoW format.
Parameters
----------
line : str
Line from input file.
Returns
-------
list of (int, int)
Document in BoW format (+"document_id" and "lang" if metadata=True).
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> from gensim.corpora import MalletCorpus
>>>
>>> corpus = MalletCorpus(datapath("testcorpus.mallet"))
>>> corpus.line2doc("en computer human interface")
[(3, 1), (4, 1)]
"""
split_line = utils.to_unicode(line).strip().split(None, 2)
docid, doclang = split_line[0], split_line[1]
words = split_line[2] if len(split_line) >= 3 else ''
doc = super(MalletCorpus, self).line2doc(words)
if self.metadata:
return doc, (docid, doclang)
else:
return doc
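# Hedged sketch (uses the `corpus` loaded from gensim's test data in the
# docstring example above; token ids depend on the dictionary built from that
# file): with metadata enabled, the parsed document id and language are
# returned alongside the BoW vector.
# >>> corpus.metadata = True
# >>> doc, (doc_id, lang) = corpus.line2doc("doc42 en computer human interface")
# >>> (doc_id, lang)
# ('doc42', 'en')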
@staticmethod
def save_corpus(fname, corpus, id2word=None, metadata=False):
"""Save a corpus in the Mallet format.
Warnings
--------
This function is automatically called by :meth:`gensim.corpora.malletcorpus.MalletCorpus.serialize`,
don't call it directly, call :meth:`gensim.corpora.malletcorpus.MalletCorpus.serialize` instead.
Parameters
----------
fname : str
Path to output file.
corpus : iterable of iterable of (int, int)
Corpus in BoW format.
id2word : {dict of (int, str), :class:`~gensim.corpora.dictionary.Dictionary`}, optional
Mapping between word_ids (integers) and words (strings).
If not provided, the mapping is constructed directly from `corpus`.
metadata : bool, optional
If True, each item of `corpus` is expected to be a `(doc, (doc_id, doc_lang))` tuple,
and the document id and language are written to the output file.
Return
------
list of int
List of offsets in resulting file for each document (in bytes),
can be used for :meth:`~gensim.corpora.malletcorpus.MalletCorpus.docbyoffset`.
Notes
-----
The document id will be generated by enumerating the corpus.
That is, it will range between 0 and number of documents in the corpus.
Since Mallet has a language field in the format, this defaults to the string '__unknown__'.
If the language needs to be saved, post-processing will be required.
"""
if id2word is None:
logger.info("no word id mapping provided; initializing from corpus")
id2word = utils.dict_from_corpus(corpus)
logger.info("storing corpus in Mallet format into %s", fname)
truncated = 0
offsets = []
with utils.open(fname, 'wb') as fout:
for doc_id, doc in enumerate(corpus):
if metadata:
doc_id, doc_lang = doc[1]
doc = doc[0]
else:
doc_lang = '__unknown__'
words = []
for wordid, value in doc:
if abs(int(value) - value) > 1e-6:
truncated += 1
words.extend([utils.to_unicode(id2word[wordid])] * int(value))
offsets.append(fout.tell())
fout.write(utils.to_utf8('%s %s %s\n' % (doc_id, doc_lang, ' '.join(words))))
if truncated:
logger.warning(
"Mallet format can only save vectors with integer elements; "
"%i float entries were truncated to integer value", truncated
)
return offsets
def docbyoffset(self, offset):
"""Get the document stored in file by `offset` position.
Parameters
----------
offset : int
Offset (in bytes) to begin of document.
Returns
-------
list of (int, int)
Document in BoW format (+"document_id" and "lang" if metadata=True).
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> from gensim.corpora import MalletCorpus
>>>
>>> data = MalletCorpus(datapath("testcorpus.mallet"))
>>> data.docbyoffset(1) # end of first line
[(3, 1), (4, 1)]
>>> data.docbyoffset(4) # start of second line
[(4, 1)]
"""
with utils.open(self.fname, 'rb') as f:
f.seek(offset)
return self.line2doc(f.readline())
| 7,714 | Python | .py | 185 | 31.475676 | 114 | 0.573415 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,158 | opinosiscorpus.py | piskvorky_gensim/gensim/corpora/opinosiscorpus.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Tobias B <proxima@sezanzeb.de>
# Copyright (C) 2021 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Creates a corpus and dictionary from the Opinosis dataset.
References
----------
.. [1] Ganesan, Kavita and Zhai, ChengXiang and Han, Jiawei. Opinosis: a graph-based approach to abstractive
summarization of highly redundant opinions [online]. In : Proceedings of the 23rd International Conference on
Computational Linguistics. 2010. p. 340-348. Available from: https://kavita-ganesan.com/opinosis/
"""
import os
import re
from gensim.corpora import Dictionary
from gensim.parsing.porter import PorterStemmer
from gensim.parsing.preprocessing import STOPWORDS
class OpinosisCorpus:
"""Creates a corpus and dictionary from the Opinosis dataset.
http://kavita-ganesan.com/opinosis-opinion-dataset/
This data is organized in folders, each folder containing a few short docs.
Data can be obtained quickly using the following commands in bash:
mkdir opinosis && cd opinosis
wget https://github.com/kavgan/opinosis/raw/master/OpinosisDataset1.0_0.zip
unzip OpinosisDataset1.0_0.zip
corpus and dictionary can be accessed by using the .corpus and .id2word members
"""
def __init__(self, path):
"""Load the downloaded corpus.
Parameters
----------
path : string
Path to the extracted zip file. If 'summaries-gold' is in a folder
called 'opinosis', then the Path parameter would be 'opinosis',
either relative to your current working directory or absolute.
"""
# citation
path = os.path.join(path, "summaries-gold")
dictionary = Dictionary()
corpus = []
stemmer = PorterStemmer()
for directory, _, filenames in os.walk(path):
# each subdirectory of path is one collection of reviews to a specific product
# now get the corpus/documents
for filename in filenames:
filepath = directory + os.sep + filename
# read and preprocess the document (no train/test split is performed here)
with open(filepath) as file:
doc = file.read()
preprocessed_doc = [
stemmer.stem(token) for token in re.findall(r'\w+', doc.lower())
if token not in STOPWORDS
]
dictionary.add_documents([preprocessed_doc])
corpus += [dictionary.doc2bow(preprocessed_doc)]
# and return the results the same way the other corpus generating functions do
self.corpus = corpus
self.id2word = dictionary
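# Hedged usage sketch (the 'opinosis' path is illustrative and must contain
# the extracted 'summaries-gold' folder):
# >>> oc = OpinosisCorpus('opinosis')
# >>> len(oc.corpus), len(oc.id2word)    # BoW documents and the learned Dictionary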
| 2,834 | Python | .py | 59 | 39.576271 | 116 | 0.666304 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,159 | dictionary.py | piskvorky_gensim/gensim/corpora/dictionary.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""This module implements the concept of a Dictionary -- a mapping between words and their integer ids."""
from collections import defaultdict
from collections.abc import Mapping
import logging
import itertools
from typing import Optional, List, Tuple
from gensim import utils
logger = logging.getLogger(__name__)
class Dictionary(utils.SaveLoad, Mapping):
"""Dictionary encapsulates the mapping between normalized words and their integer ids.
Notable instance attributes:
Attributes
----------
token2id : dict of (str, int)
token -> token_id. I.e. the reverse mapping to `self[token_id]`.
cfs : dict of (int, int)
Collection frequencies: token_id -> how many instances of this token are contained in the documents.
dfs : dict of (int, int)
Document frequencies: token_id -> how many documents contain this token.
num_docs : int
Number of documents processed.
num_pos : int
Total number of corpus positions (number of processed words).
num_nnz : int
Total number of non-zeroes in the BOW matrix (sum of the number of unique
words per document over the entire corpus).
"""
def __init__(self, documents=None, prune_at=2000000):
"""
Parameters
----------
documents : iterable of iterable of str, optional
Documents to be used to initialize the mapping and collect corpus statistics.
prune_at : int, optional
Dictionary will try to keep no more than `prune_at` words in its mapping, to limit its RAM
footprint; the correctness is not guaranteed.
Use :meth:`~gensim.corpora.dictionary.Dictionary.filter_extremes` to perform proper filtering.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>>
>>> texts = [['human', 'interface', 'computer']]
>>> dct = Dictionary(texts) # initialize a Dictionary
>>> dct.add_documents([["cat", "say", "meow"], ["dog"]]) # add more document (extend the vocabulary)
>>> dct.doc2bow(["dog", "computer", "non_existent_word"])
[(0, 1), (6, 1)]
"""
self.token2id = {}
self.id2token = {}
self.cfs = {}
self.dfs = {}
self.num_docs = 0
self.num_pos = 0
self.num_nnz = 0
if documents is not None:
self.add_documents(documents, prune_at=prune_at)
self.add_lifecycle_event(
"created",
msg=f"built {self} from {self.num_docs} documents (total {self.num_pos} corpus positions)",
)
def __getitem__(self, tokenid):
"""Get the string token that corresponds to `tokenid`.
Parameters
----------
tokenid : int
Id of token.
Returns
-------
str
Token corresponding to `tokenid`.
Raises
------
KeyError
If this Dictionary doesn't contain such `tokenid`.
"""
if len(self.id2token) != len(self.token2id):
# the word->id mapping has changed (presumably via add_documents);
# recompute id->word accordingly
self.id2token = utils.revdict(self.token2id)
return self.id2token[tokenid] # will throw for non-existent ids
def __iter__(self):
"""Iterate over all tokens."""
return iter(self.keys())
# restore Py2-style dict API
iterkeys = __iter__
def iteritems(self):
return self.items()
def itervalues(self):
return self.values()
def keys(self):
"""Get all stored ids.
Returns
-------
list of int
List of all token ids.
"""
return list(self.token2id.values())
def __len__(self):
"""Get number of stored tokens.
Returns
-------
int
Number of stored tokens.
"""
return len(self.token2id)
def __str__(self):
some_keys = list(itertools.islice(self.token2id.keys(), 5))
return "%s<%i unique tokens: %s%s>" % (
self.__class__.__name__, len(self), some_keys, '...' if len(self) > 5 else ''
)
@staticmethod
def from_documents(documents):
"""Create :class:`~gensim.corpora.dictionary.Dictionary` from `documents`.
Equivalent to `Dictionary(documents=documents)`.
Parameters
----------
documents : iterable of iterable of str
Input corpus.
Returns
-------
:class:`~gensim.corpora.dictionary.Dictionary`
Dictionary initialized from `documents`.
"""
return Dictionary(documents=documents)
def add_documents(self, documents, prune_at=2000000):
"""Update dictionary from a collection of `documents`.
Parameters
----------
documents : iterable of iterable of str
Input corpus. All tokens should be already **tokenized and normalized**.
prune_at : int, optional
Dictionary will try to keep no more than `prune_at` words in its mapping, to limit its RAM
footprint; the correctness is not guaranteed.
Use :meth:`~gensim.corpora.dictionary.Dictionary.filter_extremes` to perform proper filtering.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>>
>>> corpus = ["máma mele maso".split(), "ema má máma".split()]
>>> dct = Dictionary(corpus)
>>> len(dct)
5
>>> dct.add_documents([["this", "is", "sparta"], ["just", "joking"]])
>>> len(dct)
10
"""
for docno, document in enumerate(documents):
# log progress & run a regular check for pruning, once every 10k docs
if docno % 10000 == 0:
if prune_at is not None and len(self) > prune_at:
self.filter_extremes(no_below=0, no_above=1.0, keep_n=prune_at)
logger.info("adding document #%i to %s", docno, self)
# update Dictionary with the document
self.doc2bow(document, allow_update=True) # ignore the result, here we only care about updating token ids
logger.info("built %s from %i documents (total %i corpus positions)", self, self.num_docs, self.num_pos)
def doc2bow(self, document, allow_update=False, return_missing=False):
"""Convert `document` into the bag-of-words (BoW) format = list of `(token_id, token_count)` tuples.
Parameters
----------
document : list of str
Input document.
allow_update : bool, optional
Update self, by adding new tokens from `document` and updating internal corpus statistics.
return_missing : bool, optional
Return missing tokens (tokens present in `document` but not in self) with frequencies?
Return
------
list of (int, int)
BoW representation of `document`.
list of (int, int), dict of (str, int)
If `return_missing` is True, return BoW representation of `document` + dictionary with missing
tokens and their frequencies.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>> dct = Dictionary(["máma mele maso".split(), "ema má máma".split()])
>>> dct.doc2bow(["this", "is", "máma"])
[(2, 1)]
>>> dct.doc2bow(["this", "is", "máma"], return_missing=True)
([(2, 1)], {u'this': 1, u'is': 1})
"""
if isinstance(document, str):
raise TypeError("doc2bow expects an array of unicode tokens on input, not a single string")
# Construct (word, frequency) mapping.
counter = defaultdict(int)
for w in document:
counter[w if isinstance(w, str) else str(w, 'utf-8')] += 1
token2id = self.token2id
if allow_update or return_missing:
missing = sorted(x for x in counter.items() if x[0] not in token2id)
if allow_update:
for w, _ in missing:
# new id = number of ids made so far;
# NOTE this assumes there are no gaps in the id sequence!
token2id[w] = len(token2id)
result = {token2id[w]: freq for w, freq in counter.items() if w in token2id}
if allow_update:
self.num_docs += 1
self.num_pos += sum(counter.values())
self.num_nnz += len(result)
# keep track of document and collection frequencies
for tokenid, freq in result.items():
self.cfs[tokenid] = self.cfs.get(tokenid, 0) + freq
self.dfs[tokenid] = self.dfs.get(tokenid, 0) + 1
# return tokenids, in ascending id order
result = sorted(result.items())
if return_missing:
return result, dict(missing)
else:
return result
def doc2idx(self, document, unknown_word_index=-1):
"""Convert `document` (a list of words) into a list of indexes = list of `token_id`.
Replace all unknown words i.e, words not in the dictionary with the index as set via `unknown_word_index`.
Parameters
----------
document : list of str
Input document
unknown_word_index : int, optional
Index to use for words not in the dictionary.
Returns
-------
list of int
Token ids for tokens in `document`, in the same order.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>>
>>> corpus = [["a", "a", "b"], ["a", "c"]]
>>> dct = Dictionary(corpus)
>>> dct.doc2idx(["a", "a", "c", "not_in_dictionary", "c"])
[0, 0, 2, -1, 2]
"""
if isinstance(document, str):
raise TypeError("doc2idx expects an array of unicode tokens on input, not a single string")
document = [word if isinstance(word, str) else str(word, 'utf-8') for word in document]
return [self.token2id.get(word, unknown_word_index) for word in document]
def filter_extremes(self, no_below=5, no_above=0.5, keep_n=100000, keep_tokens=None):
"""Filter out tokens in the dictionary by their frequency.
Parameters
----------
no_below : int, optional
Keep tokens which are contained in at least `no_below` documents.
no_above : float, optional
Keep tokens which are contained in no more than `no_above` documents
(fraction of total corpus size, not an absolute number).
keep_n : int, optional
Keep only the first `keep_n` most frequent tokens.
keep_tokens : iterable of str
Iterable of tokens that **must** stay in dictionary after filtering.
Notes
-----
This removes all tokens in the dictionary that are:
#. Less frequent than `no_below` documents (absolute number, e.g. `5`) or \n
#. More frequent than `no_above` documents (fraction of the total corpus size, e.g. `0.3`).
#. After (1) and (2), keep only the first `keep_n` most frequent tokens (or keep all if `keep_n=None`).
After the pruning, resulting gaps in word ids are shrunk.
Due to this gap shrinking, **the same word may have a different word id before and after the call
to this function!** See :class:`gensim.models.VocabTransform` and the
`dedicated FAQ entry <https://github.com/RaRe-Technologies/gensim/wiki/Recipes-&-FAQ#q8-how-can-i-filter-a-saved-corpus-and-its-corresponding-dictionary>`_ on how # noqa
to transform a corpus built with a dictionary before pruning.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = Dictionary(corpus)
>>> len(dct)
5
>>> dct.filter_extremes(no_below=1, no_above=0.5, keep_n=1)
>>> len(dct)
1
"""
no_above_abs = int(no_above * self.num_docs) # convert fractional threshold to absolute threshold
# determine which tokens to keep
if keep_tokens:
keep_ids = {self.token2id[v] for v in keep_tokens if v in self.token2id}
good_ids = [
v for v in self.token2id.values()
if no_below <= self.dfs.get(v, 0) <= no_above_abs or v in keep_ids
]
good_ids.sort(key=lambda x: self.num_docs if x in keep_ids else self.dfs.get(x, 0), reverse=True)
else:
good_ids = [
v for v in self.token2id.values()
if no_below <= self.dfs.get(v, 0) <= no_above_abs
]
good_ids.sort(key=self.dfs.get, reverse=True)
if keep_n is not None:
good_ids = good_ids[:keep_n]
bad_words = [(self[idx], self.dfs.get(idx, 0)) for idx in set(self).difference(good_ids)]
logger.info("discarding %i tokens: %s...", len(self) - len(good_ids), bad_words[:10])
logger.info(
"keeping %i tokens which were in no less than %i and no more than %i (=%.1f%%) documents",
len(good_ids), no_below, no_above_abs, 100.0 * no_above
)
# do the actual filtering, then rebuild dictionary to remove gaps in ids
self.filter_tokens(good_ids=good_ids)
logger.info("resulting dictionary: %s", self)
def filter_n_most_frequent(self, remove_n):
"""Filter out the 'remove_n' most frequent tokens that appear in the documents.
Parameters
----------
remove_n : int
Number of the most frequent tokens that will be removed.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = Dictionary(corpus)
>>> len(dct)
5
>>> dct.filter_n_most_frequent(2)
>>> len(dct)
3
"""
# determine which tokens to keep
most_frequent_ids = (v for v in self.token2id.values())
most_frequent_ids = sorted(most_frequent_ids, key=self.dfs.get, reverse=True)
most_frequent_ids = most_frequent_ids[:remove_n]
# do the actual filtering, then rebuild dictionary to remove gaps in ids
most_frequent_words = [(self[idx], self.dfs.get(idx, 0)) for idx in most_frequent_ids]
logger.info("discarding %i tokens: %s...", len(most_frequent_ids), most_frequent_words[:10])
self.filter_tokens(bad_ids=most_frequent_ids)
logger.info("resulting dictionary: %s", self)
def filter_tokens(self, bad_ids=None, good_ids=None):
"""Remove the selected `bad_ids` tokens from :class:`~gensim.corpora.dictionary.Dictionary`.
Alternatively, keep selected `good_ids` in :class:`~gensim.corpora.dictionary.Dictionary` and remove the rest.
Parameters
----------
bad_ids : iterable of int, optional
Collection of word ids to be removed.
good_ids : collection of int, optional
Keep selected collection of word ids and remove the rest.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = Dictionary(corpus)
>>> 'ema' in dct.token2id
True
>>> dct.filter_tokens(bad_ids=[dct.token2id['ema']])
>>> 'ema' in dct.token2id
False
>>> len(dct)
4
>>> dct.filter_tokens(good_ids=[dct.token2id['maso']])
>>> len(dct)
1
"""
if bad_ids is not None:
bad_ids = set(bad_ids)
self.token2id = {token: tokenid for token, tokenid in self.token2id.items() if tokenid not in bad_ids}
self.cfs = {tokenid: freq for tokenid, freq in self.cfs.items() if tokenid not in bad_ids}
self.dfs = {tokenid: freq for tokenid, freq in self.dfs.items() if tokenid not in bad_ids}
if good_ids is not None:
good_ids = set(good_ids)
self.token2id = {token: tokenid for token, tokenid in self.token2id.items() if tokenid in good_ids}
self.cfs = {tokenid: freq for tokenid, freq in self.cfs.items() if tokenid in good_ids}
self.dfs = {tokenid: freq for tokenid, freq in self.dfs.items() if tokenid in good_ids}
self.compactify()
def compactify(self):
"""Assign new word ids to all words, shrinking any gaps."""
logger.debug("rebuilding dictionary, shrinking gaps")
# build mapping from old id -> new id
idmap = dict(zip(sorted(self.token2id.values()), range(len(self.token2id))))
# reassign mappings to new ids
self.token2id = {token: idmap[tokenid] for token, tokenid in self.token2id.items()}
self.id2token = {}
self.dfs = {idmap[tokenid]: freq for tokenid, freq in self.dfs.items()}
self.cfs = {idmap[tokenid]: freq for tokenid, freq in self.cfs.items()}
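# Illustrative sketch of the gap-shrinking remap built above: surviving old
# ids are renumbered consecutively in increasing order (old ids assumed).
# >>> old_ids = [0, 2, 5]
# >>> dict(zip(sorted(old_ids), range(len(old_ids))))
# {0: 0, 2: 1, 5: 2}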
def save_as_text(self, fname, sort_by_word=True):
"""Save :class:`~gensim.corpora.dictionary.Dictionary` to a text file.
Parameters
----------
fname : str
Path to output file.
sort_by_word : bool, optional
Sort words in lexicographical order before writing them out?
Notes
-----
Format::
num_docs
id_1[TAB]word_1[TAB]document_frequency_1[NEWLINE]
id_2[TAB]word_2[TAB]document_frequency_2[NEWLINE]
....
id_k[TAB]word_k[TAB]document_frequency_k[NEWLINE]
This text format is great for corpus inspection and debugging. As plaintext, it's also easily portable
to other tools and frameworks. For better performance and to store the entire object state,
including collected corpus statistics, use :meth:`~gensim.corpora.dictionary.Dictionary.save` and
:meth:`~gensim.corpora.dictionary.Dictionary.load` instead.
See Also
--------
:meth:`~gensim.corpora.dictionary.Dictionary.load_from_text`
Load :class:`~gensim.corpora.dictionary.Dictionary` from text file.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>> from gensim.test.utils import get_tmpfile
>>>
>>> tmp_fname = get_tmpfile("dictionary")
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>>
>>> dct = Dictionary(corpus)
>>> dct.save_as_text(tmp_fname)
>>>
>>> loaded_dct = Dictionary.load_from_text(tmp_fname)
>>> assert dct.token2id == loaded_dct.token2id
"""
logger.info("saving dictionary mapping to %s", fname)
with utils.open(fname, 'wb') as fout:
numdocs_line = "%d\n" % self.num_docs
fout.write(utils.to_utf8(numdocs_line))
if sort_by_word:
for token, tokenid in sorted(self.token2id.items()):
line = "%i\t%s\t%i\n" % (tokenid, token, self.dfs.get(tokenid, 0))
fout.write(utils.to_utf8(line))
else:
for tokenid, freq in sorted(self.dfs.items(), key=lambda item: -item[1]):
line = "%i\t%s\t%i\n" % (tokenid, self[tokenid], freq)
fout.write(utils.to_utf8(line))
def merge_with(self, other):
"""Merge another dictionary into this dictionary, mapping the same tokens to the same ids
and new tokens to new ids.
Notes
-----
The purpose is to merge two corpora created using two different dictionaries: `self` and `other`.
`other` can be any id=>word mapping (a dict, a Dictionary object, ...).
Return a transformation object which, when accessed as `result[doc_from_other_corpus]`, will convert documents
from a corpus built using the `other` dictionary into a document using the new, merged dictionary.
Parameters
----------
other : {dict, :class:`~gensim.corpora.dictionary.Dictionary`}
Other dictionary.
Return
------
:class:`gensim.models.VocabTransform`
Transformation object.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>>
>>> corpus_1, corpus_2 = [["a", "b", "c"]], [["a", "f", "f"]]
>>> dct_1, dct_2 = Dictionary(corpus_1), Dictionary(corpus_2)
>>> dct_1.doc2bow(corpus_2[0])
[(0, 1)]
>>> transformer = dct_1.merge_with(dct_2)
>>> dct_1.doc2bow(corpus_2[0])
[(0, 1), (3, 2)]
"""
old2new = {}
for other_id, other_token in other.items():
if other_token in self.token2id:
new_id = self.token2id[other_token]
else:
new_id = len(self.token2id)
self.token2id[other_token] = new_id
self.dfs[new_id] = 0
old2new[other_id] = new_id
try:
self.dfs[new_id] += other.dfs[other_id]
except Exception:
# `other` isn't a Dictionary (probably just a dict) => ignore dfs, keep going
pass
try:
self.num_docs += other.num_docs
self.num_nnz += other.num_nnz
self.num_pos += other.num_pos
except Exception:
pass
import gensim.models
return gensim.models.VocabTransform(old2new)
def patch_with_special_tokens(self, special_token_dict):
"""Patch token2id and id2token using a dictionary of special tokens.
**Usecase:** when doing sequence modeling (e.g. named entity recognition), one may want to specify
special tokens that behave differently than others.
One example is the "unknown" token, and another is the padding token.
It is usual to set the padding token to have index `0`, and patching the dictionary with `{'<PAD>': 0}`
would be one way to specify this.
Parameters
----------
special_token_dict : dict of (str, int)
dict containing the special tokens as keys and their wanted indices as values.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = Dictionary(corpus)
>>>
>>> special_tokens = {'pad': 0, 'space': 1}
>>> print(dct.token2id)
{'maso': 0, 'mele': 1, 'máma': 2, 'ema': 3, 'má': 4}
>>>
>>> dct.patch_with_special_tokens(special_tokens)
>>> print(dct.token2id)
{'maso': 6, 'mele': 7, 'máma': 2, 'ema': 3, 'má': 4, 'pad': 0, 'space': 1}
"""
possible_ids = []
for token, idx in special_token_dict.items():
if token in self.token2id and self.token2id[token] == idx:
continue
if token in self.token2id and self.token2id[token] != idx:
possible_ids.append(self.token2id[token])
del self.token2id[token]
old_token = self[idx]
self.token2id[token] = idx
self.token2id[old_token] = possible_ids.pop() if \
len(possible_ids) > 0 else len(self.token2id) - 1
self.id2token = {} # Make sure that id2token is updated according to special tokens.
@staticmethod
def load_from_text(fname):
"""Load a previously stored :class:`~gensim.corpora.dictionary.Dictionary` from a text file.
Mirror function to :meth:`~gensim.corpora.dictionary.Dictionary.save_as_text`.
Parameters
----------
fname: str
Path to a file produced by :meth:`~gensim.corpora.dictionary.Dictionary.save_as_text`.
See Also
--------
:meth:`~gensim.corpora.dictionary.Dictionary.save_as_text`
Save :class:`~gensim.corpora.dictionary.Dictionary` to text file.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>> from gensim.test.utils import get_tmpfile
>>>
>>> tmp_fname = get_tmpfile("dictionary")
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>>
>>> dct = Dictionary(corpus)
>>> dct.save_as_text(tmp_fname)
>>>
>>> loaded_dct = Dictionary.load_from_text(tmp_fname)
>>> assert dct.token2id == loaded_dct.token2id
"""
result = Dictionary()
with utils.open(fname, 'rb') as f:
for lineno, line in enumerate(f):
line = utils.to_unicode(line)
if lineno == 0:
if line.strip().isdigit():
# Older versions of save_as_text may not write num_docs on first line.
result.num_docs = int(line.strip())
continue
else:
logging.warning("Text does not contain num_docs on the first line.")
try:
wordid, word, docfreq = line[:-1].split('\t')
except Exception:
raise ValueError("invalid line in dictionary file %s: %s"
% (fname, line.strip()))
wordid = int(wordid)
if word in result.token2id:
raise KeyError('token %s is defined as ID %d and as ID %d' % (word, wordid, result.token2id[word]))
result.token2id[word] = wordid
result.dfs[wordid] = int(docfreq)
return result
def most_common(self, n: Optional[int] = None) -> List[Tuple[str, int]]:
"""Return a list of the n most common words and their counts from the most common to the least.
Words with equal counts are ordered in the increasing order of their ids.
Parameters
----------
n : int or None, optional
The number of most common words to be returned. If `None`, all words in the dictionary
will be returned. Default is `None`.
Returns
-------
most_common : list of (str, int)
The n most common words and their counts from the most common to the least.
"""
most_common = [
(self[word], count)
for word, count
in sorted(self.cfs.items(), key=lambda x: (-x[1], x[0]))[:n]
]
return most_common
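# Hedged sketch (collection frequencies follow from the toy corpus below):
# >>> dct = Dictionary([["a", "a", "b"], ["a", "c"]])
# >>> dct.most_common(2)      # 'a' occurs 3 times, 'b' and 'c' once each
# [('a', 3), ('b', 1)]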
@staticmethod
def from_corpus(corpus, id2word=None):
"""Create :class:`~gensim.corpora.dictionary.Dictionary` from an existing corpus.
Parameters
----------
corpus : iterable of iterable of (int, number)
Corpus in BoW format.
id2word : dict of (int, object)
Mapping id -> word. If None, the mapping `id2word[word_id] = str(word_id)` will be used.
Notes
-----
This can be useful if you only have a term-document BOW matrix (represented by `corpus`), but not the original
text corpus. This method will scan the term-document count matrix for all word ids that appear in it,
then construct :class:`~gensim.corpora.dictionary.Dictionary` which maps each `word_id -> id2word[word_id]`.
`id2word` is an optional dictionary that maps the `word_id` to a token.
In case `id2word` isn't specified the mapping `id2word[word_id] = str(word_id)` will be used.
Returns
-------
:class:`~gensim.corpora.dictionary.Dictionary`
Inferred dictionary from corpus.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>>
>>> corpus = [[(1, 1.0)], [], [(0, 5.0), (2, 1.0)], []]
>>> dct = Dictionary.from_corpus(corpus)
>>> len(dct)
3
"""
result = Dictionary()
max_id = -1
for docno, document in enumerate(corpus):
if docno % 10000 == 0:
logger.info("adding document #%i to %s", docno, result)
result.num_docs += 1
result.num_nnz += len(document)
for wordid, word_freq in document:
max_id = max(wordid, max_id)
result.num_pos += word_freq
result.dfs[wordid] = result.dfs.get(wordid, 0) + 1
if id2word is None:
# make sure length(result) == get_max_id(corpus) + 1
result.token2id = {str(i): i for i in range(max_id + 1)}
else:
# id=>word mapping given: simply copy it
result.token2id = {utils.to_unicode(token): idx for idx, token in id2word.items()}
for idx in result.token2id.values():
# make sure all token ids have a valid `dfs` entry
result.dfs[idx] = result.dfs.get(idx, 0)
logger.info(
"built %s from %i documents (total %i corpus positions)",
result, result.num_docs, result.num_pos
)
return result
| 30,256 | Python | .py | 649 | 35.465331 | 178 | 0.57008 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,160 | bleicorpus.py | piskvorky_gensim/gensim/corpora/bleicorpus.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Сorpus in Blei's LDA-C format."""
from __future__ import with_statement
from os import path
import logging
from gensim import utils
from gensim.corpora import IndexedCorpus
logger = logging.getLogger(__name__)
class BleiCorpus(IndexedCorpus):
"""Corpus in Blei's LDA-C format.
The corpus is represented as two files: one describing the documents, and another
describing the mapping between words and their ids.
Each document is one line::
N fieldId1:fieldValue1 fieldId2:fieldValue2 ... fieldIdN:fieldValueN
The vocabulary is a file with words, one word per line; word at line K has an implicit `id=K`.
"""
def __init__(self, fname, fname_vocab=None):
"""
Parameters
----------
fname : str
Path to corpus.
fname_vocab : str, optional
Vocabulary file. If `fname_vocab` is None, the following variants are searched:
* `fname`.vocab
* `fname`/vocab.txt
* `fname_without_ext`.vocab
* `fname_folder`/vocab.txt
Raises
------
IOError
If vocabulary file doesn't exist.
"""
IndexedCorpus.__init__(self, fname)
logger.info("loading corpus from %s", fname)
if fname_vocab is None:
fname_base, _ = path.splitext(fname)
fname_dir = path.dirname(fname)
for fname_vocab in [
utils.smart_extension(fname, '.vocab'),
utils.smart_extension(fname, '/vocab.txt'),
utils.smart_extension(fname_base, '.vocab'),
utils.smart_extension(fname_dir, '/vocab.txt'),
]:
if path.exists(fname_vocab):
break
else:
raise IOError('BleiCorpus: could not find vocabulary file')
self.fname = fname
with utils.open(fname_vocab, 'rb') as fin:
words = [utils.to_unicode(word).rstrip() for word in fin]
self.id2word = dict(enumerate(words))
def __iter__(self):
"""Iterate over the corpus, returning one sparse (BoW) vector at a time.
Yields
------
list of (int, float)
Document's BoW representation.
"""
lineno = -1
with utils.open(self.fname, 'rb') as fin:
for lineno, line in enumerate(fin):
yield self.line2doc(line)
self.length = lineno + 1
def line2doc(self, line):
"""Convert line in Blei LDA-C format to document (BoW representation).
Parameters
----------
line : str
Line in Blei's LDA-C format.
Returns
-------
list of (int, float)
Document's BoW representation.
"""
parts = utils.to_unicode(line).split()
if int(parts[0]) != len(parts) - 1:
raise ValueError("invalid format in %s: %s" % (self.fname, repr(line)))
doc = [part.rsplit(':', 1) for part in parts[1:]]
doc = [(int(p1), float(p2)) for p1, p2 in doc]
return doc
@staticmethod
def save_corpus(fname, corpus, id2word=None, metadata=False):
"""Save a corpus in the LDA-C format.
Notes
-----
There are actually two files saved: `fname` and `fname.vocab`, where `fname.vocab` is the vocabulary file.
Parameters
----------
fname : str
Path to output file.
corpus : iterable of iterable of (int, float)
Input corpus in BoW format.
id2word : dict of (str, str), optional
Mapping id -> word for `corpus`.
metadata : bool, optional
THIS PARAMETER WILL BE IGNORED.
Returns
-------
list of int
Offsets for each line in file (in bytes).
"""
if id2word is None:
logger.info("no word id mapping provided; initializing from corpus")
id2word = utils.dict_from_corpus(corpus)
num_terms = len(id2word)
elif id2word:
num_terms = 1 + max(id2word)
else:
num_terms = 0
logger.info("storing corpus in Blei's LDA-C format into %s", fname)
with utils.open(fname, 'wb') as fout:
offsets = []
for doc in corpus:
doc = list(doc)
offsets.append(fout.tell())
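# skip terms with near-zero weight (|weight| <= 1e-7): LDA-C stores only explicitly non-zero entries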
parts = ["%i:%g" % p for p in doc if abs(p[1]) > 1e-7]
fout.write(utils.to_utf8("%i %s\n" % (len(doc), ' '.join(parts))))
# write out vocabulary, in a format compatible with Blei's topics.py script
fname_vocab = utils.smart_extension(fname, '.vocab')
logger.info("saving vocabulary of %i words to %s", num_terms, fname_vocab)
with utils.open(fname_vocab, 'wb') as fout:
for featureid in range(num_terms):
fout.write(utils.to_utf8("%s\n" % id2word.get(featureid, '---')))
return offsets
def docbyoffset(self, offset):
"""Get document corresponding to `offset`.
Offset can be given from :meth:`~gensim.corpora.bleicorpus.BleiCorpus.save_corpus`.
Parameters
----------
offset : int
Position of the document in the file (in bytes).
Returns
-------
list of (int, float)
Document in BoW format.
"""
with utils.open(self.fname, 'rb') as f:
f.seek(offset)
return self.line2doc(f.readline())
| 5,769 | Python | .py | 144 | 29.805556 | 114 | 0.565723 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,161 | indexedcorpus.py | piskvorky_gensim/gensim/corpora/indexedcorpus.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Base Indexed Corpus class."""
import logging
import numpy
from gensim import interfaces, utils
logger = logging.getLogger(__name__)
class IndexedCorpus(interfaces.CorpusABC):
"""Indexed corpus is a mechanism for random-accessing corpora.
While the standard corpus interface in gensim allows iterating over corpus,
we'll show it with :class:`~gensim.corpora.mmcorpus.MmCorpus`.
.. sourcecode:: pycon
>>> from gensim.corpora import MmCorpus
>>> from gensim.test.utils import datapath
>>>
>>> corpus = MmCorpus(datapath('testcorpus.mm'))
>>> for doc in corpus:
... pass
:class:`~gensim.corpora.indexedcorpus.IndexedCorpus` allows accessing the documents with index
in :math:`{O}(1)` look-up time.
.. sourcecode:: pycon
>>> document_index = 3
>>> doc = corpus[document_index]
Notes
-----
This functionality is achieved by storing an extra file (named `fname.index` by default)
that stores the byte offset of the beginning of each document.
"""
def __init__(self, fname, index_fname=None):
"""
Parameters
----------
fname : str
Path to corpus.
index_fname : str, optional
Path to the index file. If not provided, `fname.index` is used.
"""
try:
if index_fname is None:
index_fname = utils.smart_extension(fname, '.index')
self.index = utils.unpickle(index_fname)
# change self.index into a numpy.ndarray to support fancy indexing
self.index = numpy.asarray(self.index)
logger.info("loaded corpus index from %s", index_fname)
except Exception:
self.index = None
self.length = None
@classmethod
def serialize(serializer, fname, corpus, id2word=None, index_fname=None,
progress_cnt=None, labels=None, metadata=False):
"""Serialize corpus with offset metadata, allows to use direct indexes after loading.
Parameters
----------
fname : str
Path to output file.
corpus : iterable of iterable of (int, float)
Corpus in BoW format.
id2word : dict of (str, str), optional
Mapping id -> word.
index_fname : str, optional
Where to save the resulting index. If None, the index is stored to `fname`.index.
progress_cnt : int, optional
Number of documents after which progress info is printed.
labels : bool, optional
If True - ignore first column (class labels).
metadata : bool, optional
If True - ensure that serialize will write out article titles to a pickle file.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import MmCorpus
>>> from gensim.test.utils import get_tmpfile
>>>
>>> corpus = [[(1, 0.3), (2, 0.1)], [(1, 0.1)], [(2, 0.3)]]
>>> output_fname = get_tmpfile("test.mm")
>>>
>>> MmCorpus.serialize(output_fname, corpus)
>>> mm = MmCorpus(output_fname) # `mm` document stream now has random access
>>> print(mm[1])  # random access: retrieve document no. 1
[(1, 0.1)]
"""
if getattr(corpus, 'fname', None) == fname:
raise ValueError("identical input vs. output corpus filename, refusing to serialize: %s" % fname)
if index_fname is None:
index_fname = utils.smart_extension(fname, '.index')
kwargs = {'metadata': metadata}
if progress_cnt is not None:
kwargs['progress_cnt'] = progress_cnt
if labels is not None:
kwargs['labels'] = labels
offsets = serializer.save_corpus(fname, corpus, id2word, **kwargs)
if offsets is None:
raise NotImplementedError(
"Called serialize on class %s which doesn't support indexing!" % serializer.__name__
)
# store offsets persistently, using pickle
# we shouldn't have to worry about self.index being a numpy.ndarray as the serializer will return
# the offsets that are actually stored on disk - we're not storing self.index in any case, the
# load just needs to turn whatever is loaded from disk back into a ndarray - this should also ensure
# backwards compatibility
logger.info("saving %s index to %s", serializer.__name__, index_fname)
utils.pickle(offsets, index_fname)
def __len__(self):
"""Get the index length.
Notes
-----
If the corpus is not indexed, also count corpus length and cache this value.
Returns
-------
int
Length of index.
"""
if self.index is not None:
return len(self.index)
if self.length is None:
logger.info("caching corpus length")
self.length = sum(1 for _ in self)
return self.length
def __getitem__(self, docno):
"""Get document by `docno` index.
Parameters
----------
docno : {int, iterable of int}
Document number, or an iterable of numbers (e.g. a list of int or a numpy array).
Returns
-------
list of (int, float)
If `docno` is int - return document in BoW format.
:class:`~gensim.utils.SlicedCorpus`
If `docno` is iterable of int - return several documents in BoW format
wrapped to :class:`~gensim.utils.SlicedCorpus`.
Raises
------
RuntimeError
If the index doesn't exist.
"""
if self.index is None:
raise RuntimeError("Cannot call corpus[docid] without an index")
if isinstance(docno, (slice, list, numpy.ndarray)):
return utils.SlicedCorpus(self, docno)
elif isinstance(docno, (int, numpy.integer,)):
return self.docbyoffset(self.index[docno])
# TODO: no `docbyoffset` method, should be defined in this class
else:
raise ValueError('Unrecognised value for docno, use either a single integer, a slice or a numpy.ndarray')
| 6,453 | Python | .py | 148 | 33.97973 | 117 | 0.603512 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,162 | ucicorpus.py | piskvorky_gensim/gensim/corpora/ucicorpus.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Jonathan Esterhazy <jonathan.esterhazy at gmail.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Corpus in `UCI format <http://archive.ics.uci.edu/ml/datasets/Bag+of+Words>`_."""
import logging
from collections import defaultdict
from gensim import utils
from gensim.corpora import Dictionary
from gensim.corpora import IndexedCorpus
from gensim.matutils import MmReader
from gensim.matutils import MmWriter
logger = logging.getLogger(__name__)
class UciReader(MmReader):
"""Reader of UCI format for :class:`gensim.corpora.ucicorpus.UciCorpus`."""
def __init__(self, input):
"""
Parameters
----------
input : str
Path to file in UCI format.
"""
logger.info('Initializing corpus reader from %s', input)
self.input = input
with utils.open(self.input, 'rb') as fin:
self.num_docs = self.num_terms = self.num_nnz = 0
try:
self.num_docs = int(next(fin).strip())
self.num_terms = int(next(fin).strip())
self.num_nnz = int(next(fin).strip())
except StopIteration:
pass
logger.info(
"accepted corpus with %i documents, %i features, %i non-zero entries",
self.num_docs, self.num_terms, self.num_nnz
)
def skip_headers(self, input_file):
"""Skip headers in `input_file`.
Parameters
----------
input_file : file
File object.
"""
for lineno, _ in enumerate(input_file):
if lineno == 2:
break
class UciWriter(MmWriter):
"""Writer of UCI format for :class:`gensim.corpora.ucicorpus.UciCorpus`.
Notes
---------
This corpus format is identical to the `Matrix Market format <http://math.nist.gov/MatrixMarket/formats.html>`_,
except for different file headers. There is no format line, and the first three lines of the file
contain `num_docs`, `num_terms`, and `num_nnz`, one value per line.
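For example, the header and first body lines of a file with 3 documents, 5 terms and 7 non-zero
entries might look as follows (illustrative values; each body line is `docID termID count`, with
both ids 1-based as in Matrix Market)::
    3
    5
    7
    1 1 2
    1 3 1
    2 2 4
    ...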
"""
MAX_HEADER_LENGTH = 20 # reserve 20 bytes per header value
FAKE_HEADER = utils.to_utf8(' ' * MAX_HEADER_LENGTH + '\n')
def write_headers(self):
"""Write blank header lines. Will be updated later, once corpus stats are known."""
for _ in range(3):
self.fout.write(self.FAKE_HEADER)
self.last_docno = -1
self.headers_written = True
def update_headers(self, num_docs, num_terms, num_nnz):
"""Update headers with actual values."""
offset = 0
values = [utils.to_utf8(str(n)) for n in [num_docs, num_terms, num_nnz]]
for value in values:
if len(value) > len(self.FAKE_HEADER):
raise ValueError('Invalid header: value too large!')
self.fout.seek(offset)
self.fout.write(value)
offset += len(self.FAKE_HEADER)
@staticmethod
def write_corpus(fname, corpus, progress_cnt=1000, index=False):
"""Write corpus in file.
Parameters
----------
fname : str
Path to output file.
corpus: iterable of list of (int, int)
Corpus in BoW format.
progress_cnt : int, optional
Progress counter, write log message each `progress_cnt` documents.
index : bool, optional
If True - return offsets, otherwise - nothing.
Return
------
list of int
Sequence of offsets to documents (in bytes), only if index=True.
"""
writer = UciWriter(fname)
writer.write_headers()
num_terms, num_nnz = 0, 0
docno, poslast = -1, -1
offsets = []
for docno, bow in enumerate(corpus):
if docno % progress_cnt == 0:
logger.info("PROGRESS: saving document #%i", docno)
if index:
posnow = writer.fout.tell()
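# if the previous document was empty it wrote no bytes and would share this offset;
# mark it with -1 so that readers can recognize it as an empty document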
if posnow == poslast:
offsets[-1] = -1
offsets.append(posnow)
poslast = posnow
vector = [(x, int(y)) for (x, y) in bow if int(y) != 0] # integer count, not floating weights
max_id, veclen = writer.write_vector(docno, vector)
num_terms = max(num_terms, 1 + max_id)
num_nnz += veclen
num_docs = docno + 1
if num_docs * num_terms != 0:
logger.info(
"saved %ix%i matrix, density=%.3f%% (%i/%i)",
num_docs, num_terms, 100.0 * num_nnz / (num_docs * num_terms),
num_nnz, num_docs * num_terms
)
# now write proper headers, by seeking and overwriting the spaces written earlier
writer.update_headers(num_docs, num_terms, num_nnz)
writer.close()
if index:
return offsets
class UciCorpus(UciReader, IndexedCorpus):
"""Corpus in the UCI bag-of-words format."""
def __init__(self, fname, fname_vocab=None):
"""
Parameters
----------
fname : str
Path to corpus in UCI format.
fname_vocab : str, optional
Path to the vocabulary file. If None, `fname.vocab` is used.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import UciCorpus
>>> from gensim.test.utils import datapath
>>>
>>> corpus = UciCorpus(datapath('testcorpus.uci'))
>>> for document in corpus:
... pass
"""
IndexedCorpus.__init__(self, fname)
UciReader.__init__(self, fname)
if fname_vocab is None:
fname_vocab = utils.smart_extension(fname, '.vocab')
self.fname = fname
with utils.open(fname_vocab, 'rb') as fin:
words = [word.strip() for word in fin]
self.id2word = dict(enumerate(words))
self.transposed = True
def __iter__(self):
"""Iterate over the corpus.
Yields
------
list of (int, int)
Document in BoW format.
"""
for docId, doc in super(UciCorpus, self).__iter__():
yield doc # get rid of docId, return the sparse vector only
def create_dictionary(self):
"""Generate :class:`gensim.corpora.dictionary.Dictionary` directly from the corpus and vocabulary data.
Return
------
:class:`gensim.corpora.dictionary.Dictionary`
Dictionary, based on corpus.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora.ucicorpus import UciCorpus
>>> from gensim.test.utils import datapath
>>> ucc = UciCorpus(datapath('testcorpus.uci'))
>>> dictionary = ucc.create_dictionary()
"""
dictionary = Dictionary()
# replace dfs with defaultdict to avoid downstream KeyErrors
# uci vocabularies may contain terms that are not used in the document data
dictionary.dfs = defaultdict(int)
dictionary.id2token = self.id2word
dictionary.token2id = utils.revdict(self.id2word)
dictionary.num_docs = self.num_docs
dictionary.num_nnz = self.num_nnz
for docno, doc in enumerate(self):
if docno % 10000 == 0:
logger.info('PROGRESS: processing document %i of %i', docno, self.num_docs)
for word, count in doc:
dictionary.dfs[word] += 1
dictionary.num_pos += count
return dictionary
@staticmethod
def save_corpus(fname, corpus, id2word=None, progress_cnt=10000, metadata=False):
"""Save a corpus in the UCI Bag-of-Words format.
Warnings
--------
This function is automatically called by :meth:`gensim.corpora.ucicorpus.UciCorpus.serialize`;
don't call it directly, call :meth:`gensim.corpora.ucicorpus.UciCorpus.serialize` instead.
Parameters
----------
fname : str
Path to output file.
corpus: iterable of iterable of (int, int)
Corpus in BoW format.
id2word : {dict of (int, str), :class:`gensim.corpora.dictionary.Dictionary`}, optional
Mapping between words and their ids. If None - will be inferred from `corpus`.
progress_cnt : int, optional
Progress counter, write log message each `progress_cnt` documents.
metadata : bool, optional
THIS PARAMETER WILL BE IGNORED.
Notes
-----
There are actually two files saved: `fname` and `fname.vocab`, where `fname.vocab` is the vocabulary file.
"""
if id2word is None:
logger.info("no word id mapping provided; initializing from corpus")
id2word = utils.dict_from_corpus(corpus)
num_terms = len(id2word)
elif id2word:
num_terms = 1 + max(id2word)
else:
num_terms = 0
# write out vocabulary
fname_vocab = utils.smart_extension(fname, '.vocab')
logger.info("saving vocabulary of %i words to %s", num_terms, fname_vocab)
with utils.open(fname_vocab, 'wb') as fout:
for featureid in range(num_terms):
fout.write(utils.to_utf8("%s\n" % id2word.get(featureid, '---')))
logger.info("storing corpus in UCI Bag-of-Words format: %s", fname)
return UciWriter.write_corpus(fname, corpus, index=True, progress_cnt=progress_cnt)
| 9,563 | Python | .py | 227 | 32.132159 | 114 | 0.588673 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,163 | textcorpus.py | piskvorky_gensim/gensim/corpora/textcorpus.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Module provides some code scaffolding to simplify use of built dictionary for constructing BoW vectors.
Notes
-----
Text corpora usually reside on disk, as text files in one format or another. In a common scenario,
we need to build a dictionary (a `word->integer id` mapping), which is then used to construct sparse bag-of-word vectors
(= iterable of `(word_id, word_weight)`).
This module provides some code scaffolding to simplify this pipeline. For example, given a corpus where each document
is a separate line in file on disk, you would override the :meth:`gensim.corpora.textcorpus.TextCorpus.get_texts`
to read one line=document at a time, process it (lowercase, tokenize, whatever) and yield it as a sequence of words.
Overriding :meth:`gensim.corpora.textcorpus.TextCorpus.get_texts` is enough, you can then initialize the corpus
with e.g. `MyTextCorpus("mycorpus.txt.bz2")` and it will behave correctly like a corpus of sparse vectors.
The :meth:`~gensim.corpora.textcorpus.TextCorpus.__iter__` method is automatically set up,
and dictionary is automatically populated with all `word->id` mappings.
The resulting object can be used as input to some of gensim models (:class:`~gensim.models.tfidfmodel.TfidfModel`,
:class:`~gensim.models.lsimodel.LsiModel`, :class:`~gensim.models.ldamodel.LdaModel`, ...), serialized with any format
(`Matrix Market <http://math.nist.gov/MatrixMarket/formats.html>`_,
`SvmLight <http://svmlight.joachims.org/>`_, `Blei's LDA-C format <https://github.com/blei-lab/lda-c>`_, etc).
See Also
--------
:class:`gensim.test.test_miislita.CorpusMiislita`
Good simple example.
"""
from __future__ import with_statement
import logging
import os
import random
import re
import sys
from gensim import interfaces, utils
from gensim.corpora.dictionary import Dictionary
from gensim.parsing.preprocessing import (
remove_stopword_tokens, remove_short_tokens,
lower_to_unicode, strip_multiple_whitespaces,
)
from gensim.utils import deaccent, simple_tokenize
from smart_open import open
logger = logging.getLogger(__name__)
class TextCorpus(interfaces.CorpusABC):
"""Helper class to simplify the pipeline of getting BoW vectors from plain text.
Notes
-----
This is an abstract base class: override the :meth:`~gensim.corpora.textcorpus.TextCorpus.get_texts` and
:meth:`~gensim.corpora.textcorpus.TextCorpus.__len__` methods to match your particular input.
Given a filename (or a file-like object) in constructor, the corpus object will be automatically initialized
with a dictionary in `self.dictionary` and will support the :meth:`~gensim.corpora.textcorpus.TextCorpus.__iter__`
corpus method. You have a few different ways of utilizing this class via subclassing or by construction with
different preprocessing arguments.
The :meth:`~gensim.corpora.textcorpus.TextCorpus.__iter__` method converts the lists of tokens produced by
:meth:`~gensim.corpora.textcorpus.TextCorpus.get_texts` to BoW format using
:meth:`gensim.corpora.dictionary.Dictionary.doc2bow`.
:meth:`~gensim.corpora.textcorpus.TextCorpus.get_texts` does the following:
#. Calls :meth:`~gensim.corpora.textcorpus.TextCorpus.getstream` to get a generator over the texts.
It yields each document in turn from the underlying text file or files.
#. For each document from the stream, calls :meth:`~gensim.corpora.textcorpus.TextCorpus.preprocess_text` to produce
a list of tokens. If metadata=True, it yields a 2-`tuple` with the document number as the second element.
Preprocessing consists of 0+ `character_filters`, a `tokenizer`, and 0+ `token_filters`.
The preprocessing consists of calling each filter in `character_filters` with the document text.
Unicode is not guaranteed, and if desired, the first filter should convert to unicode.
The output of each character filter should be another string. The output from the final filter is fed
to the `tokenizer`, which should split the string into a list of tokens (strings).
Afterwards, the list of tokens is fed through each filter in `token_filters`. The final output returned from
:meth:`~gensim.corpora.textcorpus.TextCorpus.preprocess_text` is the output from the final token filter.
So to use this class, you can either pass in different preprocessing functions using the
`character_filters`, `tokenizer`, and `token_filters` arguments, or you can subclass it.
If subclassing: override :meth:`~gensim.corpora.textcorpus.TextCorpus.getstream` to take text from different input
sources in different formats.
Override :meth:`~gensim.corpora.textcorpus.TextCorpus.preprocess_text` if you must provide different initial
preprocessing, then call the :meth:`~gensim.corpora.textcorpus.TextCorpus.preprocess_text` method to apply
the normal preprocessing.
You can also override :meth:`~gensim.corpora.textcorpus.TextCorpus.get_texts` in order to tag the documents
(token lists) with different metadata.
The default preprocessing consists of:
#. :func:`~gensim.parsing.preprocessing.lower_to_unicode` - lowercase and convert to unicode (assumes utf8 encoding)
#. :func:`~gensim.utils.deaccent` - deaccent (asciifolding)
#. :func:`~gensim.parsing.preprocessing.strip_multiple_whitespaces` - collapse multiple whitespaces into one
#. :func:`~gensim.utils.simple_tokenize` - tokenize by splitting on whitespace
#. :func:`~gensim.parsing.preprocessing.remove_short_tokens` - remove words less than 3 characters long
#. :func:`~gensim.parsing.preprocessing.remove_stopword_tokens` - remove stopwords
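For example, the defaults can be overridden at construction time (a minimal sketch; the custom
token filter below is purely illustrative):
.. sourcecode:: pycon
>>> from gensim.corpora.textcorpus import TextCorpus
>>> from gensim.test.utils import datapath
>>>
>>> def keep_longer_tokens(tokens):  # illustrative custom token filter
...     return [token for token in tokens if len(token) > 4]
>>>
>>> corpus = TextCorpus(datapath('head500.noblanks.cor.bz2'), token_filters=[keep_longer_tokens])
>>> tokens = next(iter(corpus.get_texts()))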
"""
def __init__(self, input=None, dictionary=None, metadata=False, character_filters=None,
tokenizer=None, token_filters=None):
"""
Parameters
----------
input : str, optional
Path to top-level directory (file) to traverse for corpus documents.
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`, optional
If a dictionary is provided, it will not be updated with the given corpus on initialization.
If None - new dictionary will be built for the given corpus.
If `input` is None, the dictionary will remain uninitialized.
metadata : bool, optional
If True - yield metadata with each document.
character_filters : iterable of callable, optional
Each will be applied to the text of each document in order, and should return a single string with
the modified text. For Python 2, the original text will not be unicode, so it may be useful to
convert to unicode as the first character filter.
If None - using :func:`~gensim.parsing.preprocessing.lower_to_unicode`,
:func:`~gensim.utils.deaccent` and :func:`~gensim.parsing.preprocessing.strip_multiple_whitespaces`.
tokenizer : callable, optional
Tokenizer for document, if None - using :func:`~gensim.utils.simple_tokenize`.
token_filters : iterable of callable, optional
Each will be applied to the iterable of tokens in order, and should return another iterable of tokens.
These filters can add, remove, or replace tokens, or do nothing at all.
If None - using :func:`~gensim.parsing.preprocessing.remove_short_tokens` and
:func:`~gensim.parsing.preprocessing.remove_stopword_tokens`.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora.textcorpus import TextCorpus
>>> from gensim.test.utils import datapath
>>> from gensim import utils
>>>
>>>
>>> class CorpusMiislita(TextCorpus):
... stopwords = set('for a of the and to in on'.split())
...
... def get_texts(self):
... for doc in self.getstream():
... yield [word for word in utils.to_unicode(doc).lower().split() if word not in self.stopwords]
...
... def __len__(self):
... self.length = sum(1 for _ in self.get_texts())
... return self.length
>>>
>>>
>>> corpus = CorpusMiislita(datapath('head500.noblanks.cor.bz2'))
>>> len(corpus)
250
>>> document = next(iter(corpus.get_texts()))
"""
self.input = input
self.metadata = metadata
self.character_filters = character_filters
if self.character_filters is None:
self.character_filters = [lower_to_unicode, deaccent, strip_multiple_whitespaces]
self.tokenizer = tokenizer
if self.tokenizer is None:
self.tokenizer = simple_tokenize
self.token_filters = token_filters
if self.token_filters is None:
self.token_filters = [remove_short_tokens, remove_stopword_tokens]
self.length = None
self.dictionary = None
self.init_dictionary(dictionary)
def init_dictionary(self, dictionary):
"""Initialize/update dictionary.
Parameters
----------
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`, optional
If a dictionary is provided, it will not be updated with the given corpus on initialization.
If None - new dictionary will be built for the given corpus.
Notes
-----
If `self.input` is None, this does nothing.
"""
self.dictionary = dictionary if dictionary is not None else Dictionary()
if self.input is not None:
if dictionary is None:
logger.info("Initializing dictionary")
metadata_setting = self.metadata
self.metadata = False
self.dictionary.add_documents(self.get_texts())
self.metadata = metadata_setting
else:
logger.info("Input stream provided but dictionary already initialized")
else:
logger.warning("No input document stream provided; assuming dictionary will be initialized some other way.")
def __iter__(self):
"""Iterate over the corpus.
Yields
------
list of (int, int)
Document in BoW format (+ metadata if self.metadata).
"""
if self.metadata:
for text, metadata in self.get_texts():
yield self.dictionary.doc2bow(text, allow_update=False), metadata
else:
for text in self.get_texts():
yield self.dictionary.doc2bow(text, allow_update=False)
def getstream(self):
"""Generate documents from the underlying plain text collection (of one or more files).
Yields
------
str
Document read from plain-text file.
Notes
-----
After the generator is exhausted, the `self.length` attribute is set.
"""
num_texts = 0
with utils.file_or_filename(self.input) as f:
for line in f:
yield line
num_texts += 1
self.length = num_texts
def preprocess_text(self, text):
"""Apply `self.character_filters`, `self.tokenizer`, `self.token_filters` to a single text document.
Parameters
----------
text : str
Document read from plain-text file.
Return
------
list of str
List of tokens extracted from `text`.
"""
for character_filter in self.character_filters:
text = character_filter(text)
tokens = self.tokenizer(text)
for token_filter in self.token_filters:
tokens = token_filter(tokens)
return tokens
def step_through_preprocess(self, text):
"""Apply preprocessor one by one and generate result.
Warnings
--------
This is useful for debugging issues with the corpus preprocessing pipeline.
Parameters
----------
text : str
Document text read from plain-text file.
Yields
------
(callable, object)
Pre-processor, output from pre-processor (based on `text`)
"""
for character_filter in self.character_filters:
text = character_filter(text)
yield (character_filter, text)
tokens = self.tokenizer(text)
yield (self.tokenizer, tokens)
for token_filter in self.token_filters:
yield (token_filter, token_filter(tokens))
def get_texts(self):
"""Generate documents from corpus.
Yields
------
list of str
Document as sequence of tokens (+ lineno if self.metadata)
"""
lines = self.getstream()
if self.metadata:
for lineno, line in enumerate(lines):
yield self.preprocess_text(line), (lineno,)
else:
for line in lines:
yield self.preprocess_text(line)
def sample_texts(self, n, seed=None, length=None):
"""Generate `n` random documents from the corpus without replacement.
Parameters
----------
n : int
Number of documents we want to sample.
seed : int, optional
If specified, use it as a seed for local random generator.
length : int, optional
If specified, this value is used as the corpus length (calculating the corpus length can be a costly operation).
If not specified, `__len__` is called.
Raises
------
ValueError
If `n` is less than zero or greater than the corpus size.
Notes
-----
Given the number of remaining documents in a corpus, we need to choose n elements.
The probability for the current element to be chosen is `n` / remaining. If we choose it, we just decrease
`n` and move on to the next element.
Yields
------
list of str
Sampled document as sequence of tokens.
"""
random_generator = random if seed is None else random.Random(seed)
if length is None:
length = len(self)
if not n <= length:
raise ValueError("n {0:d} is larger/equal than length of corpus {1:d}.".format(n, length))
if not 0 <= n:
raise ValueError("Negative sample size n {0:d}.".format(n))
i = 0
for i, sample in enumerate(self.getstream()):
if i == length:
break
remaining_in_corpus = length - i
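# sequential selection sampling: pick the current document with probability n / remaining_in_corpus,
# which yields exactly `n` documents over a single pass through the stream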
chance = random_generator.randint(1, remaining_in_corpus)
if chance <= n:
n -= 1
if self.metadata:
yield self.preprocess_text(sample[0]), sample[1]
else:
yield self.preprocess_text(sample)
if n != 0:
# This means that length was set to be greater than number of items in corpus
# and we were not able to sample enough documents before the stream ended.
raise ValueError("length {0:d} greater than number of documents in corpus {1:d}".format(length, i + 1))
def __len__(self):
"""Get length of corpus
Warnings
--------
If self.length is None, the whole corpus is read through
:meth:`~gensim.corpora.textcorpus.TextCorpus.getstream` to calculate this attribute.
Returns
-------
int
Length of corpus.
"""
if self.length is None:
# cache the corpus length
self.length = sum(1 for _ in self.getstream())
return self.length
class TextDirectoryCorpus(TextCorpus):
"""Read documents recursively from a directory.
Each file/line (depends on `lines_are_documents`) is interpreted as a plain text document.
"""
def __init__(self, input, dictionary=None, metadata=False, min_depth=0, max_depth=None,
pattern=None, exclude_pattern=None, lines_are_documents=False, encoding='utf-8', **kwargs):
"""
Parameters
----------
input : str
Path to input file/folder.
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`, optional
If a dictionary is provided, it will not be updated with the given corpus on initialization.
If None - new dictionary will be built for the given corpus.
If `input` is None, the dictionary will remain uninitialized.
metadata : bool, optional
If True - yield metadata with each document.
min_depth : int, optional
Minimum depth in directory tree at which to begin searching for files.
max_depth : int, optional
Max depth in directory tree at which files will no longer be considered.
If None - not limited.
pattern : str, optional
Regex to use for file name inclusion, all those files *not* matching this pattern will be ignored.
exclude_pattern : str, optional
Regex to use for file name exclusion, all files matching this pattern will be ignored.
lines_are_documents : bool, optional
If True - each line is considered a document, otherwise - each file is one document.
encoding : str, optional
Encoding used to read the specified file or files in the specified directory.
kwargs: keyword arguments passed through to the `TextCorpus` constructor.
See the :meth:`gensim.corpora.textcorpus.TextCorpus.__init__` docstring for more details on these.
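Examples
--------
A minimal sketch (the temporary directory and the single file created below are illustrative):
.. sourcecode:: pycon
>>> import os, tempfile
>>> from gensim.corpora.textcorpus import TextDirectoryCorpus
>>>
>>> dirname = tempfile.mkdtemp()
>>> with open(os.path.join(dirname, 'doc1.txt'), 'w') as fout:
...     characters_written = fout.write('human interface computer')
>>>
>>> corpus = TextDirectoryCorpus(dirname, pattern='.*[.]txt$', lines_are_documents=True)
>>> len(corpus)
1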
"""
self._min_depth = min_depth
self._max_depth = sys.maxsize if max_depth is None else max_depth
self.pattern = pattern
self.exclude_pattern = exclude_pattern
self.lines_are_documents = lines_are_documents
self.encoding = encoding
super(TextDirectoryCorpus, self).__init__(input, dictionary, metadata, **kwargs)
@property
def lines_are_documents(self):
return self._lines_are_documents
@lines_are_documents.setter
def lines_are_documents(self, lines_are_documents):
self._lines_are_documents = lines_are_documents
self.length = None
@property
def pattern(self):
return self._pattern
@pattern.setter
def pattern(self, pattern):
self._pattern = None if pattern is None else re.compile(pattern)
self.length = None
@property
def exclude_pattern(self):
return self._exclude_pattern
@exclude_pattern.setter
def exclude_pattern(self, pattern):
self._exclude_pattern = None if pattern is None else re.compile(pattern)
self.length = None
@property
def min_depth(self):
return self._min_depth
@min_depth.setter
def min_depth(self, min_depth):
self._min_depth = min_depth
self.length = None
@property
def max_depth(self):
return self._max_depth
@max_depth.setter
def max_depth(self, max_depth):
self._max_depth = max_depth
self.length = None
def iter_filepaths(self):
"""Generate (lazily) paths to each file in the directory structure within the specified range of depths.
If a filename pattern to match was given, further filter to only those filenames that match.
Yields
------
str
Path to file
"""
for depth, dirpath, dirnames, filenames in walk(self.input):
if self.min_depth <= depth <= self.max_depth:
if self.pattern is not None:
filenames = (n for n in filenames if self.pattern.match(n) is not None)
if self.exclude_pattern is not None:
filenames = (n for n in filenames if self.exclude_pattern.match(n) is None)
for name in filenames:
yield os.path.join(dirpath, name)
def getstream(self):
"""Generate documents from the underlying plain text collection (of one or more files).
Yields
------
str
One line of a file if `lines_are_documents` is True, otherwise the whole content of one file.
"""
num_texts = 0
for path in self.iter_filepaths():
with open(path, 'rt', encoding=self.encoding) as f:
if self.lines_are_documents:
for line in f:
yield line.strip()
num_texts += 1
else:
yield f.read().strip()
num_texts += 1
self.length = num_texts
def __len__(self):
"""Get length of corpus.
Returns
-------
int
Length of corpus.
"""
if self.length is None:
self._cache_corpus_length()
return self.length
def _cache_corpus_length(self):
"""Calculate length of corpus and cache it to `self.length`."""
if not self.lines_are_documents:
self.length = sum(1 for _ in self.iter_filepaths())
else:
self.length = sum(1 for _ in self.getstream())
def walk(top, topdown=True, onerror=None, followlinks=False, depth=0):
"""Generate the file names in a directory tree by walking the tree either top-down or bottom-up.
For each directory in the tree rooted at directory top (including top itself), it yields a 4-tuple
(depth, dirpath, dirnames, filenames).
Parameters
----------
top : str
Root directory.
topdown : bool, optional
If True - you can modify dirnames in-place.
onerror : function, optional
Some function, will be called with one argument, an OSError instance.
It can report the error to continue with the walk, or raise the exception to abort the walk.
Note that the filename is available as the filename attribute of the exception object.
followlinks : bool, optional
If True - visit directories pointed to by symlinks, on systems that support them.
depth : int, optional
Height of the file tree. Don't pass it manually, it is used as an accumulator during recursion.
Notes
-----
This is a mostly copied version of `os.walk` from the Python 2 source code.
The only difference is that it returns the depth in the directory tree structure
at which each yield is taking place.
Yields
------
(int, str, list of str, list of str)
Depth, current path, visited directories, visited non-directories.
See Also
--------
`os.walk documentation <https://docs.python.org/2/library/os.html#os.walk>`_
"""
islink, join, isdir = os.path.islink, os.path.join, os.path.isdir
try:
# Should be O(1) since it's probably just reading your filesystem journal
names = os.listdir(top)
except OSError as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
# O(n) where n = number of files in the directory
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield depth, top, dirs, nondirs
# Again O(n), where n = number of directories in the directory
for name in dirs:
new_path = join(top, name)
if followlinks or not islink(new_path):
# Generator so besides the recursive `walk()` call, no additional cost here.
for x in walk(new_path, topdown, onerror, followlinks, depth + 1):
yield x
if not topdown:
yield depth, top, dirs, nondirs
| 23,920 | Python | .py | 497 | 38.945674 | 120 | 0.645597 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,164 | annoy.py | piskvorky_gensim/gensim/similarities/annoy.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""
This module integrates Spotify's `Annoy <https://github.com/spotify/annoy>`_ (Approximate Nearest Neighbors Oh Yeah)
library with Gensim's :class:`~gensim.models.word2vec.Word2Vec`, :class:`~gensim.models.doc2vec.Doc2Vec`,
:class:`~gensim.models.fasttext.FastText` and :class:`~gensim.models.keyedvectors.KeyedVectors` word embeddings.
.. Important::
To use this module, you must have the ``annoy`` library installed.
To install it, run ``pip install annoy``.
"""
# Avoid import collisions on py2: this module has the same name as the actual Annoy library.
from __future__ import absolute_import
import os
try:
import cPickle as _pickle
except ImportError:
import pickle as _pickle
from gensim import utils
from gensim.models.doc2vec import Doc2Vec
from gensim.models.word2vec import Word2Vec
from gensim.models.fasttext import FastText
from gensim.models import KeyedVectors
_NOANNOY = ImportError("Annoy not installed. To use the Annoy indexer, please run `pip install annoy`.")
class AnnoyIndexer():
"""This class allows the use of `Annoy <https://github.com/spotify/annoy>`_ for fast (approximate)
vector retrieval in `most_similar()` calls of
:class:`~gensim.models.word2vec.Word2Vec`, :class:`~gensim.models.doc2vec.Doc2Vec`,
:class:`~gensim.models.fasttext.FastText` and :class:`~gensim.models.keyedvectors.Word2VecKeyedVectors` models.
"""
def __init__(self, model=None, num_trees=None):
"""
Parameters
----------
model : trained model, optional
Use vectors from this model as the source for the index.
num_trees : int, optional
Number of trees for Annoy indexer.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.similarities.annoy import AnnoyIndexer
>>> from gensim.models import Word2Vec
>>>
>>> sentences = [['cute', 'cat', 'say', 'meow'], ['cute', 'dog', 'say', 'woof']]
>>> model = Word2Vec(sentences, min_count=1, seed=1)
>>>
>>> indexer = AnnoyIndexer(model, 2)
>>> model.most_similar("cat", topn=2, indexer=indexer)
[('cat', 1.0), ('dog', 0.32011348009109497)]
"""
self.index = None
self.labels = None
self.model = model
self.num_trees = num_trees
if model and num_trees:
# Extract the KeyedVectors object from whatever model we were given.
if isinstance(self.model, Doc2Vec):
kv = self.model.dv
elif isinstance(self.model, (Word2Vec, FastText)):
kv = self.model.wv
elif isinstance(self.model, (KeyedVectors,)):
kv = self.model
else:
raise ValueError("Only a Word2Vec, Doc2Vec, FastText or KeyedVectors instance can be used")
self._build_from_model(kv.get_normed_vectors(), kv.index_to_key, kv.vector_size)
def save(self, fname, protocol=utils.PICKLE_PROTOCOL):
"""Save AnnoyIndexer instance to disk.
Parameters
----------
fname : str
Path to output. Save will produce 2 files:
`fname`: Annoy index itself.
`fname.dict`: Index metadata.
protocol : int, optional
Protocol for pickle.
Notes
-----
This method saves **only the index**. The trained model isn't preserved.
"""
self.index.save(fname)
d = {'f': self.model.vector_size, 'num_trees': self.num_trees, 'labels': self.labels}
with utils.open(fname + '.dict', 'wb') as fout:
_pickle.dump(d, fout, protocol=protocol)
def load(self, fname):
"""Load an AnnoyIndexer instance from disk.
Parameters
----------
fname : str
The path as previously used by ``save()``.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.similarities.annoy import AnnoyIndexer
>>> from gensim.models import Word2Vec
>>> from tempfile import mkstemp
>>>
>>> sentences = [['cute', 'cat', 'say', 'meow'], ['cute', 'dog', 'say', 'woof']]
>>> model = Word2Vec(sentences, min_count=1, seed=1, epochs=10)
>>>
>>> indexer = AnnoyIndexer(model, 2)
>>> _, temp_fn = mkstemp()
>>> indexer.save(temp_fn)
>>>
>>> new_indexer = AnnoyIndexer()
>>> new_indexer.load(temp_fn)
>>> new_indexer.model = model
"""
fname_dict = fname + '.dict'
if not (os.path.exists(fname) and os.path.exists(fname_dict)):
raise IOError(
f"Can't find index files '{fname}' and '{fname_dict}' - unable to restore AnnoyIndexer state."
)
try:
from annoy import AnnoyIndex
except ImportError:
raise _NOANNOY
with utils.open(fname_dict, 'rb') as f:
d = _pickle.loads(f.read())
self.num_trees = d['num_trees']
self.index = AnnoyIndex(d['f'], metric='angular')
self.index.load(fname)
self.labels = d['labels']
def _build_from_model(self, vectors, labels, num_features):
try:
from annoy import AnnoyIndex
except ImportError:
raise _NOANNOY
index = AnnoyIndex(num_features, metric='angular')
for vector_num, vector in enumerate(vectors):
index.add_item(vector_num, vector)
index.build(self.num_trees)
self.index = index
self.labels = labels
def most_similar(self, vector, num_neighbors):
"""Find `num_neighbors` most similar items.
Parameters
----------
vector : numpy.array
Vector for word/document.
num_neighbors : int
Number of most similar items
Returns
-------
list of (str, float)
List of most similar items in format [(`item`, `cosine_distance`), ... ]
"""
ids, distances = self.index.get_nns_by_vector(
vector, num_neighbors, include_distances=True)
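# Annoy's angular distance equals sqrt(2 * (1 - cosine_similarity)),
# so convert each returned distance d back to a cosine similarity as 1 - d**2 / 2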
return [(self.labels[ids[i]], 1 - distances[i] ** 2 / 2) for i in range(len(ids))]
| 6,503 | Python | .py | 152 | 33.572368 | 116 | 0.597625 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,165 | levenshtein.py | piskvorky_gensim/gensim/similarities/levenshtein.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Vit Novotny <witiko@mail.muni.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""
This module allows fast fuzzy search between strings, using kNN queries with Levenshtein similarity.
"""
import logging
from gensim.similarities.termsim import TermSimilarityIndex
from gensim import utils
try:
from gensim.similarities.fastss import FastSS, editdist # noqa:F401
except ImportError:
raise utils.NO_CYTHON
logger = logging.getLogger(__name__)
class LevenshteinSimilarityIndex(TermSimilarityIndex):
r"""
Retrieve the most similar terms from a static set of terms ("dictionary")
given a query term, using Levenshtein similarity.
"Levenshtein similarity" is a modification of the Levenshtein (edit) distance,
defined in [charletetal17]_.
This implementation uses the :class:`~gensim.similarities.fastss.FastSS` algorithm
for fast kNN nearest-neighbor retrieval.
Parameters
----------
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
A dictionary that specifies the considered terms.
alpha : float, optional
Multiplicative factor `alpha` for the Levenshtein similarity. See [charletetal17]_.
beta : float, optional
The exponential factor `beta` for the Levenshtein similarity. See [charletetal17]_.
max_distance : int, optional
Do not consider terms with Levenshtein distance larger than this as
"similar". This is done for performance reasons: keep this value below 3
for reasonable retrieval performance. Default is 1.
See Also
--------
:class:`~gensim.similarities.termsim.WordEmbeddingSimilarityIndex`
Retrieve most similar terms for a given term using the cosine
similarity over word embeddings.
:class:`~gensim.similarities.termsim.SparseTermSimilarityMatrix`
Build a term similarity matrix and compute the Soft Cosine Measure.
References
----------
.. [charletetal17] Delphine Charlet and Geraldine Damnati, "SimBow at SemEval-2017 Task 3:
Soft-Cosine Semantic Similarity between Questions for Community Question Answering", 2017,
https://www.aclweb.org/anthology/S17-2051/.
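Examples
--------
A minimal usage sketch (the tiny dictionary below is illustrative):
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>> from gensim.similarities.levenshtein import LevenshteinSimilarityIndex
>>>
>>> dictionary = Dictionary([["graph", "trees", "minors"]])
>>> index = LevenshteinSimilarityIndex(dictionary, max_distance=2)
>>> most_similar = index.most_similar("tree", topn=2)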
"""
def __init__(self, dictionary, alpha=1.8, beta=5.0, max_distance=2):
self.dictionary = dictionary
self.alpha = alpha
self.beta = beta
self.max_distance = max_distance
logger.info("creating FastSS index from %s", dictionary)
self.index = FastSS(words=self.dictionary.values(), max_dist=max_distance)
super(LevenshteinSimilarityIndex, self).__init__()
def levsim(self, t1, t2, distance):
"""Calculate the Levenshtein similarity between two terms given their Levenshtein distance."""
max_lengths = max(len(t1), len(t2)) or 1
return self.alpha * (1.0 - distance * 1.0 / max_lengths)**self.beta
def most_similar(self, t1, topn=10):
"""kNN fuzzy search: find the `topn` most similar terms from `self.dictionary` to `t1`."""
result = {} # map of {dictionary term => its levenshtein similarity to t1}
if self.max_distance > 0:
effective_topn = topn + 1 if t1 in self.dictionary.token2id else topn
effective_topn = min(len(self.dictionary), effective_topn)
# Implement a "distance backoff" algorithm:
# Start with max_distance=1, for performance. And if that doesn't return enough results,
# continue with max_distance=2 etc, all the way until self.max_distance which
# is a hard cutoff.
# At that point stop searching, even if we don't have topn results yet.
#
# We use the backoff algo to speed up queries for short terms. These return enough results already
# with max_distance=1.
#
# See the discussion at https://github.com/RaRe-Technologies/gensim/pull/3146
for distance in range(1, self.max_distance + 1):
for t2 in self.index.query(t1, distance).get(distance, []):
if t1 == t2:
continue
similarity = self.levsim(t1, t2, distance)
if similarity > 0:
result[t2] = similarity
if len(result) >= effective_topn:
break
return sorted(result.items(), key=lambda x: (-x[1], x[0]))[:topn]
| 4,526 | Python | .py | 87 | 43.712644 | 110 | 0.672246 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,166 | fastss.pyx | piskvorky_gensim/gensim/similarities/fastss.pyx |
#!/usr/bin/env cython
# cython: language_level=3
# cython: boundscheck=False
# cython: wraparound=False
# coding: utf-8
#
# Copyright (C) 2021 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# Code adapted from TinyFastSS (public domain), https://github.com/fujimotos/TinyFastSS
"""Fast approximate string similarity search using the FastSS algorithm."""
import itertools
from cpython.ref cimport PyObject
DEF MAX_WORD_LENGTH = 1000 # Maximum allowed word length, in characters. Must fit in the C `int` range.
cdef extern from *:
"""
#define WIDTH int
#define MAX_WORD_LENGTH 1000
int ceditdist(PyObject * s1, PyObject * s2, WIDTH maximum) {
WIDTH row1[MAX_WORD_LENGTH + 1];
WIDTH row2[MAX_WORD_LENGTH + 1];
WIDTH * CYTHON_RESTRICT pos_new;
WIDTH * CYTHON_RESTRICT pos_old;
int row_flip = 1; /* Does pos_new represent row1 or row2? */
int kind1 = PyUnicode_KIND(s1); /* How many bytes per unicode codepoint? */
int kind2 = PyUnicode_KIND(s2);
WIDTH len_s1 = (WIDTH)PyUnicode_GET_LENGTH(s1);
WIDTH len_s2 = (WIDTH)PyUnicode_GET_LENGTH(s2);
if (len_s1 > len_s2) {
PyObject * tmp = s1; s1 = s2; s2 = tmp;
const WIDTH tmpi = len_s1; len_s1 = len_s2; len_s2 = tmpi;
}
if (len_s2 - len_s1 > maximum) return maximum + 1;
if (len_s2 > MAX_WORD_LENGTH) return -1;
void * s1_data = PyUnicode_DATA(s1);
void * s2_data = PyUnicode_DATA(s2);
WIDTH tmpi;
for (tmpi = 0; tmpi <= len_s1; tmpi++) row2[tmpi] = tmpi;
WIDTH i2;
for (i2 = 0; i2 < len_s2; i2++) {
int all_bad = i2 >= maximum;
const Py_UCS4 ch = PyUnicode_READ(kind2, s2_data, i2);
row_flip = 1 - row_flip;
if (row_flip) {
pos_new = row2; pos_old = row1;
} else {
pos_new = row1; pos_old = row2;
}
*pos_new = i2 + 1;
WIDTH i1;
for (i1 = 0; i1 < len_s1; i1++) {
WIDTH val = *(pos_old++);
if (ch != PyUnicode_READ(kind1, s1_data, i1)) {
const WIDTH _val1 = *pos_old;
const WIDTH _val2 = *pos_new;
if (_val1 < val) val = _val1;
if (_val2 < val) val = _val2;
val += 1;
}
*(++pos_new) = val;
if (all_bad && val <= maximum) all_bad = 0;
}
if (all_bad) return maximum + 1;
}
return row_flip ? row2[len_s1] : row1[len_s1];
}
"""
int ceditdist(PyObject *s1, PyObject *s2, int maximum)
def editdist(s1: str, s2: str, max_dist=None):
"""
Return the Levenshtein distance between two strings.
Use `max_dist` to control the maximum distance you care about. If the actual distance is larger
than `max_dist`, editdist will return early, with the value `max_dist+1`.
This is a performance optimization – for example if anything above distance 2 is uninteresting
to your application, call editdist with `max_dist=2` and ignore any return value greater than 2.
Leave `max_dist=None` (default) to always return the full Levenshtein distance (slower).
"""
if s1 == s2:
return 0
result = ceditdist(<PyObject *>s1, <PyObject *>s2, MAX_WORD_LENGTH if max_dist is None else int(max_dist))
if result >= 0:
return result
elif result == -1:
raise ValueError(f"editdist doesn't support strings longer than {MAX_WORD_LENGTH} characters")
else:
raise ValueError(f"editdist returned an error: {result}")
def indexkeys(word, max_dist):
"""Return the set of index keys ("variants") of a word.
>>> indexkeys('aiu', 1)
{'aiu', 'iu', 'au', 'ai'}
"""
res = set()
wordlen = len(word)
limit = min(max_dist, wordlen) + 1
for dist in range(limit):
for variant in itertools.combinations(word, wordlen - dist):
res.add(''.join(variant))
return res
def set2bytes(s):
"""Serialize a set of unicode strings into bytes.
>>> set2bytes({u'a', u'b', u'c'})
b'a\x00b\x00c'
"""
return '\x00'.join(s).encode('utf8')
def bytes2set(b):
"""Deserialize bytes into a set of unicode strings.
>>> bytes2set(b'a\x00b\x00c')
{u'a', u'b', u'c'}
"""
return set(b.decode('utf8').split('\x00')) if b else set()
class FastSS:
"""
Fast implementation of FastSS (Fast Similarity Search): https://fastss.csg.uzh.ch/
FastSS enables fuzzy search of a dynamic query (a word, string) against a static
dictionary (a set of words, strings). The "fuzziness" is configurable by means
of a maximum edit distance (Levenshtein) between the query string and any of the
dictionary words.
"""
def __init__(self, words=None, max_dist=2):
"""
Create a FastSS index. The index will contain encoded variants of all
indexed words, allowing fast "fuzzy string similarity" queries.
max_dist: maximum allowed edit distance of an indexed word to a query word. Keep
max_dist<=3 for sane performance.
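Example (a minimal sketch; the word list is illustrative):
>>> fastss = FastSS(words=["kitten", "sitting", "cat"], max_dist=2)
>>> "kitten" in fastss
True
>>> fastss.query("sitten")
{0: [], 1: ['kitten'], 2: ['sitting']}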
"""
self.db = {}
self.max_dist = max_dist
if words:
for word in words:
self.add(word)
def __str__(self):
return "%s<max_dist=%s, db_size=%i>" % (self.__class__.__name__, self.max_dist, len(self.db), )
def __contains__(self, word):
bkey = word.encode('utf8')
if bkey in self.db:
return word in bytes2set(self.db[bkey])
return False
def add(self, word):
"""Add a string to the index."""
for key in indexkeys(word, self.max_dist):
bkey = key.encode('utf8')
wordset = {word}
if bkey in self.db:
wordset |= bytes2set(self.db[bkey])
self.db[bkey] = set2bytes(wordset)
def query(self, word, max_dist=None):
"""Find all words from the index that are within max_dist of `word`."""
if max_dist is None:
max_dist = self.max_dist
if max_dist > self.max_dist:
raise ValueError(
f"query max_dist={max_dist} cannot be greater than max_dist={self.max_dist} from the constructor"
)
res = {d: [] for d in range(max_dist + 1)}
cands = set()
for key in indexkeys(word, max_dist):
bkey = key.encode('utf8')
if bkey in self.db:
cands.update(bytes2set(self.db[bkey]))
for cand in cands:
dist = editdist(word, cand, max_dist=max_dist)
if dist <= max_dist:
res[dist].append(cand)
return res
| 6,866 | Python | .py | 163 | 33.392638 | 113 | 0.587077 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,167 | docsim.py | piskvorky_gensim/gensim/similarities/docsim.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Compute similarities across a collection of documents in the Vector Space Model.
The main class is :class:`~gensim.similarities.docsim.Similarity`, which builds an index for a given set of documents.
Once the index is built, you can perform efficient queries like "Tell me how similar is this query document to each
document in the index?". The result is a vector of numbers as large as the size of the initial set of documents,
that is, one float for each index document. Alternatively, you can also request only the top-N most
similar index documents to the query.
How It Works
------------
The :class:`~gensim.similarities.docsim.Similarity` class splits the index into several smaller sub-indexes ("shards"),
which are disk-based. If your entire index fits in memory (~one million documents per 1GB of RAM),
you can also use the :class:`~gensim.similarities.docsim.MatrixSimilarity`
or :class:`~gensim.similarities.docsim.SparseMatrixSimilarity` classes directly.
These are simpler but do not scale as well: they keep the entire index in RAM, with no sharding. They also do not
support adding new documents to the index dynamically.
Once the index has been initialized, you can query for document similarity simply by
.. sourcecode:: pycon
>>> from gensim.similarities import Similarity
>>> from gensim.test.utils import common_corpus, common_dictionary, get_tmpfile
>>>
>>> index_tmpfile = get_tmpfile("index")
>>> query = [(1, 2), (6, 1), (7, 2)]
>>>
>>> index = Similarity(index_tmpfile, common_corpus, num_features=len(common_dictionary)) # build the index
>>> similarities = index[query] # get similarities between the query and all index documents
If you have more query documents, you can submit them all at once, in a batch
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus, common_dictionary, get_tmpfile
>>>
>>> index_tmpfile = get_tmpfile("index")
>>> batch_of_documents = common_corpus[:] # only as example
>>> index = Similarity(index_tmpfile, common_corpus, num_features=len(common_dictionary)) # build the index
>>>
>>> # the batch is simply an iterable of documents, aka gensim corpus:
>>> for similarities in index[batch_of_documents]:
... pass
The benefit of this batch (aka "chunked") querying is much better performance.
To see the speed-up on your machine, run ``python -m gensim.test.simspeed``
(compare to my results `here <https://groups.google.com/g/gensim/c/9rg5zqoWyDQ/m/yk-ehhoXb08J>`_).
There is also a special syntax for when you need similarity of documents in the index
to the index itself (i.e. queries = the indexed documents themselves). This special syntax
uses the faster, batch queries internally and **is ideal for all-vs-all pairwise similarities**:
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus, common_dictionary, get_tmpfile
>>>
>>> index_tmpfile = get_tmpfile("index")
>>> index = Similarity(index_tmpfile, common_corpus, num_features=len(common_dictionary)) # build the index
>>>
>>> for similarities in index: # yield similarities of the 1st indexed document, then 2nd...
... pass
"""
import logging
import itertools
import os
import heapq
import warnings
import numpy
import scipy.sparse
from gensim import interfaces, utils, matutils
logger = logging.getLogger(__name__)
PARALLEL_SHARDS = False
try:
import multiprocessing
# by default, don't parallelize queries. uncomment the following line if you want that.
# PARALLEL_SHARDS = multiprocessing.cpu_count() # use #parallel processes = #CPus
except ImportError:
pass
class Shard(utils.SaveLoad):
"""A proxy that represents a single shard instance within :class:`~gensim.similarity.docsim.Similarity` index.
Basically just wraps :class:`~gensim.similarities.docsim.MatrixSimilarity`,
:class:`~gensim.similarities.docsim.SparseMatrixSimilarity`, etc, so that it mmaps from disk on request (query).
"""
def __init__(self, fname, index):
"""
Parameters
----------
fname : str
Path to top-level directory (file) to traverse for corpus documents.
index : :class:`~gensim.interfaces.SimilarityABC`
Index object.
"""
self.dirname, self.fname = os.path.split(fname)
self.length = len(index)
self.cls = index.__class__
logger.info("saving index shard to %s", self.fullname())
index.save(self.fullname())
self.index = self.get_index()
def fullname(self):
"""Get full path to shard file.
Return
------
str
Path to shard instance.
"""
return os.path.join(self.dirname, self.fname)
def __len__(self):
"""Get length."""
return self.length
def __getstate__(self):
"""Special handler for pickle.
Returns
-------
dict
Object that contains state of current instance without `index`.
"""
result = self.__dict__.copy()
# (S)MS objects must be loaded via load() because of mmap (simple pickle.load won't do)
if 'index' in result:
del result['index']
return result
def __str__(self):
return "%s<%i documents in %s>" % (self.cls.__name__, len(self), self.fullname())
def get_index(self):
"""Load & get index.
Returns
-------
:class:`~gensim.interfaces.SimilarityABC`
Index instance.
"""
if not hasattr(self, 'index'):
logger.debug("mmaping index from %s", self.fullname())
self.index = self.cls.load(self.fullname(), mmap='r')
return self.index
def get_document_id(self, pos):
"""Get index vector at position `pos`.
Parameters
----------
pos : int
Vector position.
Return
------
{:class:`scipy.sparse.csr_matrix`, :class:`numpy.ndarray`}
Index vector. Type depends on underlying index.
Notes
-----
        The vector is of the same type as the underlying index (i.e., dense for
        :class:`~gensim.similarities.docsim.MatrixSimilarity` and scipy.sparse for
        :class:`~gensim.similarities.docsim.SparseMatrixSimilarity`).
"""
assert 0 <= pos < len(self), "requested position out of range"
return self.get_index().index[pos]
def __getitem__(self, query):
"""Get similarities of document (or corpus) `query` to all documents in the corpus.
Parameters
----------
        query : {iterable of list of (int, number), list of (int, number)}
Document or corpus.
Returns
-------
:class:`numpy.ndarray`
Similarities of document/corpus if index is :class:`~gensim.similarities.docsim.MatrixSimilarity` **or**
:class:`scipy.sparse.csr_matrix`
for case if index is :class:`~gensim.similarities.docsim.SparseMatrixSimilarity`.
"""
index = self.get_index()
try:
index.num_best = self.num_best
index.normalize = self.normalize
except Exception:
raise ValueError("num_best and normalize have to be set before querying a proxy Shard object")
return index[query]
def query_shard(args):
"""Helper for request query from shard, same as shard[query].
Parameters
---------
args : (list of (int, number), :class:`~gensim.interfaces.SimilarityABC`)
Query and Shard instances
Returns
-------
:class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
Similarities of the query against documents indexed in this shard.
"""
query, shard = args # simulate starmap (not part of multiprocessing in older Pythons)
logger.debug("querying shard %s num_best=%s in process %s", shard, shard.num_best, os.getpid())
result = shard[query]
logger.debug("finished querying shard %s in process %s", shard, os.getpid())
return result
def _nlargest(n, iterable):
"""Helper for extracting n documents with maximum similarity.
Parameters
----------
n : int
Number of elements to be extracted
iterable : iterable of list of (int, float)
Iterable containing documents with computed similarities
Returns
-------
:class:`list`
List with the n largest elements from the dataset defined by iterable.
Notes
-----
    Elements are compared by the absolute value of their similarity, because a negative similarity value
    does not imply dissimilarity.
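    Examples
    --------
    A minimal illustration of the absolute-value ordering (with made-up shard results):
    .. sourcecode:: pycon
        >>> shard_results = [[(0, 0.3), (1, -0.9)], [(2, 0.5)]]
        >>> _nlargest(2, shard_results)
        [(1, -0.9), (2, 0.5)]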
"""
return heapq.nlargest(n, itertools.chain(*iterable), key=lambda item: abs(item[1]))
class Similarity(interfaces.SimilarityABC):
"""Compute cosine similarity of a dynamic query against a corpus of documents ('the index').
The index supports adding new documents dynamically.
Notes
-----
    Scalability is achieved by sharding the index into smaller pieces, each of which fits into core memory.
The shards themselves are simply stored as files to disk and mmap'ed back as needed.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora.textcorpus import TextCorpus
>>> from gensim.test.utils import datapath, get_tmpfile
>>> from gensim.similarities import Similarity
>>>
>>> corpus = TextCorpus(datapath('testcorpus.mm'))
>>> index_temp = get_tmpfile("index")
>>> index = Similarity(index_temp, corpus, num_features=400) # create index
>>>
>>> query = next(iter(corpus))
>>> result = index[query] # search similar to `query` in index
>>>
>>> for sims in index[corpus]: # if you have more query documents, you can submit them all at once, in a batch
... pass
>>>
>>> # There is also a special syntax for when you need similarity of documents in the index
>>> # to the index itself (i.e. queries=indexed documents themselves). This special syntax
>>> # uses the faster, batch queries internally and **is ideal for all-vs-all pairwise similarities**:
>>> for similarities in index: # yield similarities of the 1st indexed document, then 2nd...
... pass
See Also
--------
:class:`~gensim.similarities.docsim.MatrixSimilarity`
Index similarity (dense with cosine distance).
:class:`~gensim.similarities.docsim.SparseMatrixSimilarity`
Index similarity (sparse with cosine distance).
:class:`~gensim.similarities.docsim.WmdSimilarity`
Index similarity (with word-mover distance).
"""
def __init__(self, output_prefix, corpus, num_features, num_best=None, chunksize=256, shardsize=32768, norm='l2'):
"""
Parameters
----------
output_prefix : str
Prefix for shard filename. If None, a random filename in temp will be used.
corpus : iterable of list of (int, number)
Corpus in streamed Gensim bag-of-words format.
num_features : int
Size of the dictionary (number of features).
num_best : int, optional
If set, return only the `num_best` most similar documents, always leaving out documents with similarity = 0.
Otherwise, return a full vector with one float for every document in the index.
chunksize : int, optional
Size of query chunks. Used internally when the query is an entire corpus.
shardsize : int, optional
Maximum shard size, in documents. Choose a value so that a `shardsize x chunksize` matrix of floats fits
comfortably into your RAM.
norm : {'l1', 'l2'}, optional
Normalization to use.
Notes
-----
Documents are split (internally, transparently) into shards of `shardsize` documents each, and each shard
converted to a matrix, for faster BLAS calls. Each shard is stored to disk under `output_prefix.shard_number`.
If you don't specify an output prefix, a random filename in temp will be used.
If your entire index fits in memory (~1 million documents per 1GB of RAM), you can also use the
:class:`~gensim.similarities.docsim.MatrixSimilarity` or
:class:`~gensim.similarities.docsim.SparseMatrixSimilarity` classes directly.
        These are simpler but do not scale as well (they keep the entire index in RAM, with no sharding).
        They also do not support adding new documents dynamically.
"""
if output_prefix is None:
# undocumented feature: set output_prefix=None to create the server in temp
self.output_prefix = utils.randfname(prefix='simserver')
else:
self.output_prefix = output_prefix
logger.info("starting similarity index under %s", self.output_prefix)
self.num_features = num_features
self.num_best = num_best
self.norm = norm
self.chunksize = int(chunksize)
self.shardsize = shardsize
self.shards = []
self.fresh_docs, self.fresh_nnz = [], 0
if corpus is not None:
self.add_documents(corpus)
def __len__(self):
"""Get length of index."""
return len(self.fresh_docs) + sum(len(shard) for shard in self.shards)
def __str__(self):
return "%s<%i documents in %i shards stored under %s>" % (
self.__class__.__name__, len(self), len(self.shards), self.output_prefix
)
def add_documents(self, corpus):
"""Extend the index with new documents.
Parameters
----------
corpus : iterable of list of (int, number)
Corpus in BoW format.
Notes
-----
Internally, documents are buffered and then spilled to disk when there's `self.shardsize` of them
(or when a query is issued).
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora.textcorpus import TextCorpus
>>> from gensim.test.utils import datapath, get_tmpfile
>>> from gensim.similarities import Similarity
>>>
>>> corpus = TextCorpus(datapath('testcorpus.mm'))
>>> index_temp = get_tmpfile("index")
>>> index = Similarity(index_temp, corpus, num_features=400) # create index
>>>
>>> one_more_corpus = TextCorpus(datapath('testcorpus.txt'))
>>> index.add_documents(one_more_corpus) # add more documents in corpus
"""
min_ratio = 1.0 # 0.5 to only reopen shards that are <50% complete
if self.shards and len(self.shards[-1]) < min_ratio * self.shardsize:
            # the last shard was incomplete (< min_ratio * shardsize); load it back and add
            # the new documents to it, instead of starting a new shard
self.reopen_shard()
for doc in corpus:
if isinstance(doc, numpy.ndarray):
doclen = len(doc)
elif scipy.sparse.issparse(doc):
doclen = doc.nnz
else:
doclen = len(doc)
if doclen < 0.3 * self.num_features:
doc = matutils.unitvec(matutils.corpus2csc([doc], self.num_features).T, self.norm)
else:
doc = matutils.unitvec(matutils.sparse2full(doc, self.num_features), self.norm)
self.fresh_docs.append(doc)
self.fresh_nnz += doclen
if len(self.fresh_docs) >= self.shardsize:
self.close_shard()
if len(self.fresh_docs) % 10000 == 0:
logger.info("PROGRESS: fresh_shard size=%i", len(self.fresh_docs))
def shardid2filename(self, shardid):
"""Get shard file by `shardid`.
Parameters
----------
shardid : int
Shard index.
Return
------
str
Path to shard file.
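        Examples
        --------
        A minimal sketch, assuming an index was created with ``output_prefix='/tmp/index'``:
        .. sourcecode:: pycon
            >>> index.shardid2filename(0)
            '/tmp/index.0'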
"""
if self.output_prefix.endswith('.'):
return "%s%s" % (self.output_prefix, shardid)
else:
return "%s.%s" % (self.output_prefix, shardid)
def close_shard(self):
"""Force the latest shard to close (be converted to a matrix and stored to disk).
Do nothing if no new documents added since last call.
Notes
-----
The shard is closed even if it is not full yet (its size is smaller than `self.shardsize`).
        If documents are added later via :meth:`~gensim.similarities.docsim.Similarity.add_documents`,
        this incomplete shard will be loaded again and completed.
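        For example (illustrative numbers only): with `num_features=1000`, 100 buffered documents and 20,000
        non-zero entries in total, the density is 20,000 / (100 * 1000) = 0.2 < 0.3, so the shard is stored
        as a sparse :class:`~gensim.similarities.docsim.SparseMatrixSimilarity`.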
"""
if not self.fresh_docs:
return
shardid = len(self.shards)
# consider the shard sparse if its density is < 30%
issparse = 0.3 > 1.0 * self.fresh_nnz / (len(self.fresh_docs) * self.num_features)
if issparse:
index = SparseMatrixSimilarity(
self.fresh_docs, num_terms=self.num_features, num_docs=len(self.fresh_docs), num_nnz=self.fresh_nnz
)
else:
index = MatrixSimilarity(self.fresh_docs, num_features=self.num_features)
logger.info("creating %s shard #%s", 'sparse' if issparse else 'dense', shardid)
shard = Shard(self.shardid2filename(shardid), index)
shard.num_best = self.num_best
shard.num_nnz = self.fresh_nnz
self.shards.append(shard)
self.fresh_docs, self.fresh_nnz = [], 0
def reopen_shard(self):
"""Reopen an incomplete shard."""
assert self.shards
if self.fresh_docs:
raise ValueError("cannot reopen a shard with fresh documents in index")
last_shard = self.shards[-1]
last_index = last_shard.get_index()
logger.info("reopening an incomplete shard of %i documents", len(last_shard))
self.fresh_docs = list(last_index.index)
self.fresh_nnz = last_shard.num_nnz
del self.shards[-1] # remove the shard from index, *but its file on disk is not deleted*
logger.debug("reopen complete")
def query_shards(self, query):
"""Apply shard[query] to each shard in `self.shards`. Used internally.
Parameters
----------
        query : {iterable of list of (int, number), list of (int, number)}
Document in BoW format or corpus of documents.
Returns
-------
(None, list of individual shard query results)
Query results.
"""
args = zip([query] * len(self.shards), self.shards)
if PARALLEL_SHARDS and PARALLEL_SHARDS > 1:
logger.debug("spawning %i query processes", PARALLEL_SHARDS)
pool = multiprocessing.Pool(PARALLEL_SHARDS)
            result = pool.imap(query_shard, args, chunksize=1 + len(self.shards) // PARALLEL_SHARDS)
else:
# serial processing, one shard after another
pool = None
result = map(query_shard, args)
return pool, result
def __getitem__(self, query):
"""Get similarities of the document (or corpus) `query` to all documents in the corpus.
Parameters
----------
        query : {iterable of list of (int, number), list of (int, number)}
A single document in bag-of-words format, or a corpus (iterable) of such documents.
Return
------
:class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
Similarities of the query against this index.
Notes
-----
If `query` is a corpus (iterable of documents), return a matrix of similarities of
        all query documents vs. all corpus documents. This batch query is more efficient than computing the similarities
one document after another.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora.textcorpus import TextCorpus
>>> from gensim.test.utils import datapath
>>> from gensim.similarities import Similarity
>>>
>>> corpus = TextCorpus(datapath('testcorpus.txt'))
>>> index = Similarity('temp', corpus, num_features=400)
>>> result = index[corpus] # pairwise similarities of each document against each document
"""
self.close_shard() # no-op if no documents added to index since last query
# reset num_best and normalize parameters, in case they were changed dynamically
for shard in self.shards:
shard.num_best = self.num_best
shard.normalize = self.norm
# there are 4 distinct code paths, depending on whether input `query` is
# a corpus (or numpy/scipy matrix) or a single document, and whether the
# similarity result should be a full array or only num_best most similar
# documents.
pool, shard_results = self.query_shards(query)
if self.num_best is None:
# user asked for all documents => just stack the sub-results into a single matrix
# (works for both corpus / single doc query)
result = numpy.hstack(list(shard_results))
else:
# the following uses a lot of lazy evaluation and (optionally) parallel
# processing, to improve query latency and minimize memory footprint.
offsets = numpy.cumsum([0] + [len(shard) for shard in self.shards])
def convert(shard_no, doc):
return [(doc_index + offsets[shard_no], sim) for doc_index, sim in doc]
is_corpus, query = utils.is_corpus(query)
is_corpus = is_corpus or hasattr(query, 'ndim') and query.ndim > 1 and query.shape[0] > 1
if not is_corpus:
# user asked for num_best most similar and query is a single doc
results = (convert(shard_no, result) for shard_no, result in enumerate(shard_results))
result = _nlargest(self.num_best, results)
else:
# the trickiest combination: returning num_best results when query was a corpus
results = []
for shard_no, result in enumerate(shard_results):
shard_result = [convert(shard_no, doc) for doc in result]
results.append(shard_result)
result = []
for parts in zip(*results):
merged = _nlargest(self.num_best, parts)
result.append(merged)
if pool:
# gc doesn't seem to collect the Pools, eventually leading to
# "IOError 24: too many open files". so let's terminate it manually.
pool.terminate()
return result
def vector_by_id(self, docpos):
"""Get the indexed vector corresponding to the document at position `docpos`.
Parameters
----------
docpos : int
Document position
Return
------
:class:`scipy.sparse.csr_matrix`
Indexed vector.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora.textcorpus import TextCorpus
>>> from gensim.test.utils import datapath
>>> from gensim.similarities import Similarity
>>>
>>> # Create index:
>>> corpus = TextCorpus(datapath('testcorpus.txt'))
>>> index = Similarity('temp', corpus, num_features=400)
>>> vector = index.vector_by_id(1)
"""
self.close_shard() # no-op if no documents added to index since last query
pos = 0
for shard in self.shards:
pos += len(shard)
if docpos < pos:
break
if not self.shards or docpos < 0 or docpos >= pos:
raise ValueError("invalid document position: %s (must be 0 <= x < %s)" % (docpos, len(self)))
result = shard.get_document_id(docpos - pos + len(shard))
return result
def similarity_by_id(self, docpos):
"""Get similarity of a document specified by its index position `docpos`.
Parameters
----------
docpos : int
Document position in the index.
Return
------
:class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
Similarities of the given document against this index.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora.textcorpus import TextCorpus
>>> from gensim.test.utils import datapath
>>> from gensim.similarities import Similarity
>>>
>>> corpus = TextCorpus(datapath('testcorpus.txt'))
>>> index = Similarity('temp', corpus, num_features=400)
>>> similarities = index.similarity_by_id(1)
"""
query = self.vector_by_id(docpos)
norm, self.norm = self.norm, False
result = self[query]
self.norm = norm
return result
def __iter__(self):
"""For each index document in index, compute cosine similarity against all other documents in the index.
Uses :meth:`~gensim.similarities.docsim.Similarity.iter_chunks` internally.
Yields
------
:class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
Similarities of each document in turn against the index.
"""
# turn off query normalization (vectors in the index are already normalized, save some CPU)
norm, self.norm = self.norm, False
for chunk in self.iter_chunks():
if chunk.shape[0] > 1:
for sim in self[chunk]:
yield sim
else:
yield self[chunk]
self.norm = norm # restore normalization
def iter_chunks(self, chunksize=None):
"""Iteratively yield the index as chunks of document vectors, each of size <= chunksize.
Parameters
----------
chunksize : int, optional
            Size of chunk; if None, `self.chunksize` will be used.
Yields
------
:class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
Chunks of the index as 2D arrays. The arrays are either dense or sparse, depending on
whether the shard was storing dense or sparse vectors.
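        Examples
        --------
        A minimal sketch, reusing the `index` built in the class-level example above:
        .. sourcecode:: pycon
            >>> for chunk in index.iter_chunks(chunksize=100):
            ...     pass  # each `chunk` is a 2D array of up to 100 indexed document vectors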
"""
self.close_shard()
if chunksize is None:
# if not explicitly specified, use the chunksize from the constructor
chunksize = self.chunksize
for shard in self.shards:
query = shard.get_index().index
for chunk_start in range(0, query.shape[0], chunksize):
# scipy.sparse doesn't allow slicing beyond real size of the matrix
# (unlike numpy). so, clip the end of the chunk explicitly to make
# scipy.sparse happy
chunk_end = min(query.shape[0], chunk_start + chunksize)
chunk = query[chunk_start: chunk_end] # create a view
yield chunk
def check_moved(self):
"""Update shard locations, for case where the server prefix location changed on the filesystem."""
dirname = os.path.dirname(self.output_prefix)
for shard in self.shards:
shard.dirname = dirname
def save(self, fname=None, *args, **kwargs):
"""Save the index object via pickling under `fname`. See also :meth:`~gensim.docsim.Similarity.load()`.
Parameters
----------
fname : str, optional
Path for save index, if not provided - will be saved to `self.output_prefix`.
*args : object
Arguments, see :meth:`gensim.utils.SaveLoad.save`.
**kwargs : object
Keyword arguments, see :meth:`gensim.utils.SaveLoad.save`.
Notes
-----
Will call :meth:`~gensim.similarities.Similarity.close_shard` internally to spill
any unfinished shards to disk first.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora.textcorpus import TextCorpus
>>> from gensim.test.utils import datapath, get_tmpfile
>>> from gensim.similarities import Similarity
>>>
>>> temp_fname = get_tmpfile("index")
>>> output_fname = get_tmpfile("saved_index")
>>>
>>> corpus = TextCorpus(datapath('testcorpus.txt'))
>>> index = Similarity(output_fname, corpus, num_features=400)
>>>
>>> index.save(output_fname)
>>> loaded_index = index.load(output_fname)
"""
self.close_shard()
if fname is None:
fname = self.output_prefix
super(Similarity, self).save(fname, *args, **kwargs)
def destroy(self):
"""Delete all files under self.output_prefix Index is not usable anymore after calling this method."""
import glob
for fname in glob.glob(self.output_prefix + '*'):
logger.info("deleting %s", fname)
os.remove(fname)
class MatrixSimilarity(interfaces.SimilarityABC):
"""Compute cosine similarity against a corpus of documents by storing the index matrix in memory.
Unless the entire matrix fits into main memory, use :class:`~gensim.similarities.docsim.Similarity` instead.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.similarities import MatrixSimilarity
>>>
>>> query = [(1, 2), (5, 4)]
>>> index = MatrixSimilarity(common_corpus, num_features=len(common_dictionary))
>>> sims = index[query]
"""
def __init__(self, corpus, num_best=None, dtype=numpy.float32, num_features=None, chunksize=256, corpus_len=None):
"""
Parameters
----------
corpus : iterable of list of (int, number)
Corpus in streamed Gensim bag-of-words format.
num_best : int, optional
If set, return only the `num_best` most similar documents, always leaving out documents with similarity = 0.
Otherwise, return a full vector with one float for every document in the index.
num_features : int
Size of the dictionary (number of features).
corpus_len : int, optional
            Number of documents in `corpus`. If not specified, `len(corpus)` will be used to determine the matrix size.
chunksize : int, optional
Size of query chunks. Used internally when the query is an entire corpus.
dtype : numpy.dtype, optional
Datatype to store the internal matrix in.
"""
if num_features is None:
logger.warning(
"scanning corpus to determine the number of features (consider setting `num_features` explicitly)"
)
num_features = 1 + utils.get_max_id(corpus)
self.num_features = num_features
self.num_best = num_best
self.normalize = True
self.chunksize = chunksize
if corpus_len is None:
corpus_len = len(corpus)
if corpus is not None:
if self.num_features <= 0:
raise ValueError(
"cannot index a corpus with zero features (you must specify either `num_features` "
"or a non-empty corpus in the constructor)"
)
logger.info("creating matrix with %i documents and %i features", corpus_len, num_features)
self.index = numpy.empty(shape=(corpus_len, num_features), dtype=dtype)
# iterate over corpus, populating the numpy index matrix with (normalized)
# document vectors
for docno, vector in enumerate(corpus):
if docno % 1000 == 0:
logger.debug("PROGRESS: at document #%i/%i", docno, corpus_len)
                # individual documents may in fact be in numpy or scipy.sparse format as well.
                # this isn't documented because it's not fully supported throughout.
                # the user had better know what they're doing (no normalization, must
                # explicitly supply num_features etc).
if isinstance(vector, numpy.ndarray):
pass
elif scipy.sparse.issparse(vector):
vector = vector.toarray().flatten()
else:
vector = matutils.unitvec(matutils.sparse2full(vector, num_features))
self.index[docno] = vector
def __len__(self):
return self.index.shape[0]
def get_similarities(self, query):
"""Get similarity between `query` and this index.
Warnings
--------
Do not use this function directly, use the :class:`~gensim.similarities.docsim.MatrixSimilarity.__getitem__`
instead.
Parameters
----------
query : {list of (int, number), iterable of list of (int, number), :class:`scipy.sparse.csr_matrix`}
Document or collection of documents.
Return
------
:class:`numpy.ndarray`
Similarity matrix.
"""
is_corpus, query = utils.is_corpus(query)
if is_corpus:
query = numpy.asarray(
[matutils.sparse2full(vec, self.num_features) for vec in query],
dtype=self.index.dtype
)
else:
if scipy.sparse.issparse(query):
query = query.toarray() # convert sparse to dense
elif isinstance(query, numpy.ndarray):
pass
else:
# default case: query is a single vector in sparse gensim format
query = matutils.sparse2full(query, self.num_features)
query = numpy.asarray(query, dtype=self.index.dtype)
# do a little transposition dance to stop numpy from making a copy of
# self.index internally in numpy.dot (very slow).
result = numpy.dot(self.index, query.T).T # return #queries x #index
return result # XXX: removed casting the result from array to list; does anyone care?
def __str__(self):
return "%s<%i docs, %i features>" % (self.__class__.__name__, len(self), self.index.shape[1])
class SoftCosineSimilarity(interfaces.SimilarityABC):
"""Compute soft cosine similarity against a corpus of documents by storing the index matrix in memory.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import common_texts
>>> from gensim.corpora import Dictionary
>>> from gensim.models import Word2Vec
>>> from gensim.similarities import SoftCosineSimilarity, SparseTermSimilarityMatrix
>>> from gensim.similarities import WordEmbeddingSimilarityIndex
>>>
>>> model = Word2Vec(common_texts, vector_size=20, min_count=1) # train word-vectors
>>> termsim_index = WordEmbeddingSimilarityIndex(model.wv)
>>> dictionary = Dictionary(common_texts)
>>> bow_corpus = [dictionary.doc2bow(document) for document in common_texts]
>>> similarity_matrix = SparseTermSimilarityMatrix(termsim_index, dictionary) # construct similarity matrix
>>> docsim_index = SoftCosineSimilarity(bow_corpus, similarity_matrix, num_best=10)
>>>
>>> query = 'graph trees computer'.split() # make a query
>>> sims = docsim_index[dictionary.doc2bow(query)] # calculate similarity of query to each doc from bow_corpus
Check out `the Gallery <https://radimrehurek.com/gensim/auto_examples/tutorials/run_scm.html>`__
for more examples.
"""
def __init__(self, corpus, similarity_matrix, num_best=None, chunksize=256, normalized=None,
normalize_queries=True, normalize_documents=True):
"""
Parameters
----------
corpus: iterable of list of (int, float)
A list of documents in the BoW format.
similarity_matrix : :class:`gensim.similarities.SparseTermSimilarityMatrix`
A term similarity matrix.
num_best : int, optional
The number of results to retrieve for a query, if None - return similarities with all elements from corpus.
chunksize: int, optional
Size of one corpus chunk.
normalized : tuple of {True, False, 'maintain', None}, optional
A deprecated alias for `(normalize_queries, normalize_documents)`. If None, use
`normalize_queries` and `normalize_documents`. Default is None.
normalize_queries : {True, False, 'maintain'}, optional
Whether the query vector in the inner product will be L2-normalized (True; corresponds
to the soft cosine similarity measure; default), maintain their L2-norm during change of
            basis ('maintain'; corresponds to query expansion with partial membership), or kept as-is
(False; corresponds to query expansion).
normalize_documents : {True, False, 'maintain'}, optional
Whether the document vector in the inner product will be L2-normalized (True; corresponds
to the soft cosine similarity measure; default), maintain their L2-norm during change of
            basis ('maintain'; corresponds to query expansion with partial membership), or kept as-is
(False; corresponds to query expansion).
See Also
--------
:class:`~gensim.similarities.termsim.SparseTermSimilarityMatrix`
A sparse term similarity matrix built using a term similarity index.
:class:`~gensim.similarities.termsim.LevenshteinSimilarityIndex`
A term similarity index that computes Levenshtein similarities between terms.
:class:`~gensim.similarities.termsim.WordEmbeddingSimilarityIndex`
A term similarity index that computes cosine similarities between word embeddings.
"""
self.similarity_matrix = similarity_matrix
self.corpus = list(corpus)
self.num_best = num_best
self.chunksize = chunksize
if normalized is not None:
warnings.warn(
'Parameter normalized will be removed in 5.0.0, use normalize_queries and normalize_documents instead',
category=DeprecationWarning,
)
self.normalized = normalized
else:
self.normalized = (normalize_queries, normalize_documents)
# Normalization of features is undesirable, since soft cosine similarity requires special
# normalization using the similarity matrix. Therefore, we would just be normalizing twice,
# increasing the numerical error.
self.normalize = False
# index is simply an array from 0 to size of corpus.
self.index = numpy.arange(len(corpus))
def __len__(self):
return len(self.corpus)
def get_similarities(self, query):
"""Get similarity between `query` and this index.
Warnings
--------
Do not use this function directly; use the `self[query]` syntax instead.
Parameters
----------
query : {list of (int, number), iterable of list of (int, number)}
Document or collection of documents.
Return
------
:class:`numpy.ndarray`
Similarity matrix.
"""
if not self.corpus:
            return numpy.array([])
is_corpus, query = utils.is_corpus(query)
if not is_corpus and isinstance(query, numpy.ndarray):
query = [self.corpus[i] for i in query] # convert document indexes to actual documents
result = self.similarity_matrix.inner_product(query, self.corpus, normalized=self.normalized)
if scipy.sparse.issparse(result):
return numpy.asarray(result.todense())
if numpy.isscalar(result):
return numpy.array(result)
return numpy.asarray(result)[0]
def __str__(self):
return "%s<%i docs, %i features>" % (self.__class__.__name__, len(self), self.similarity_matrix.shape[0])
class WmdSimilarity(interfaces.SimilarityABC):
"""Compute negative WMD similarity against a corpus of documents.
Check out `the Gallery <https://radimrehurek.com/gensim/auto_examples/tutorials/run_wmd.html>`__
for more examples.
When using this code, please consider citing the following papers:
* `Rémi Flamary et al. "POT: Python Optimal Transport"
<https://jmlr.org/papers/v22/20-451.html>`_
* `Matt Kusner et al. "From Word Embeddings To Document Distances"
<http://proceedings.mlr.press/v37/kusnerb15.pdf>`_
Example
-------
.. sourcecode:: pycon
>>> from gensim.test.utils import common_texts
>>> from gensim.models import Word2Vec
>>> from gensim.similarities import WmdSimilarity
>>>
>>> model = Word2Vec(common_texts, vector_size=20, min_count=1) # train word-vectors
>>>
>>> index = WmdSimilarity(common_texts, model.wv)
>>> # Make query.
>>> query = ['trees']
>>> sims = index[query]
"""
def __init__(self, corpus, kv_model, num_best=None, chunksize=256):
"""
Parameters
----------
corpus: iterable of list of str
A list of documents, each of which is a list of tokens.
kv_model: :class:`~gensim.models.keyedvectors.KeyedVectors`
A set of KeyedVectors
num_best: int, optional
Number of results to retrieve.
chunksize : int, optional
Size of chunk.
"""
self.corpus = corpus
self.wv = kv_model
self.num_best = num_best
self.chunksize = chunksize
# Normalization of features is not possible, as corpus is a list (of lists) of strings.
self.normalize = False
# index is simply an array from 0 to size of corpus.
self.index = numpy.arange(len(corpus))
def __len__(self):
"""Get size of corpus."""
return len(self.corpus)
def get_similarities(self, query):
"""Get similarity between `query` and this index.
Warnings
--------
Do not use this function directly; use the `self[query]` syntax instead.
Parameters
----------
query : {list of str, iterable of list of str}
Document or collection of documents.
Return
------
:class:`numpy.ndarray`
Similarity matrix.
"""
if isinstance(query, numpy.ndarray):
# Convert document indexes to actual documents.
query = [self.corpus[i] for i in query]
if not query or not isinstance(query[0], list):
query = [query]
n_queries = len(query)
result = []
for qidx in range(n_queries):
# Compute similarity for each query.
qresult = [self.wv.wmdistance(document, query[qidx]) for document in self.corpus]
qresult = numpy.array(qresult)
            qresult = 1. / (1. + qresult)  # Convert distance to similarity: smaller distance => higher similarity.
# Append single query result to list of all results.
result.append(qresult)
if len(result) == 1:
# Only one query.
result = result[0]
else:
result = numpy.array(result)
return result
def __str__(self):
return "%s<%i docs, %i features>" % (self.__class__.__name__, len(self), self.wv.vectors.shape[1])
class SparseMatrixSimilarity(interfaces.SimilarityABC):
"""Compute cosine similarity against a corpus of documents by storing the index matrix in memory.
Examples
--------
Here is how you would index and query a corpus of documents in the bag-of-words format using the
cosine similarity:
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>> from gensim.similarities import SparseMatrixSimilarity
>>> from gensim.test.utils import common_texts as corpus
>>>
>>> dictionary = Dictionary(corpus) # fit dictionary
>>> bow_corpus = [dictionary.doc2bow(line) for line in corpus] # convert corpus to BoW format
        >>> index = SparseMatrixSimilarity(bow_corpus, num_docs=len(corpus), num_terms=len(dictionary))
>>>
>>> query = 'graph trees computer'.split() # make a query
>>> bow_query = dictionary.doc2bow(query)
>>> similarities = index[bow_query] # calculate similarity of query to each doc from bow_corpus
Here is how you would index and query a corpus of documents using the Okapi BM25 scoring
function:
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>> from gensim.models import TfidfModel, OkapiBM25Model
>>> from gensim.similarities import SparseMatrixSimilarity
>>> from gensim.test.utils import common_texts as corpus
>>>
>>> dictionary = Dictionary(corpus) # fit dictionary
>>> query_model = TfidfModel(dictionary=dictionary, smartirs='bnn') # enforce binary weights
>>> document_model = OkapiBM25Model(dictionary=dictionary) # fit bm25 model
>>>
>>> bow_corpus = [dictionary.doc2bow(line) for line in corpus] # convert corpus to BoW format
>>> bm25_corpus = document_model[bow_corpus]
>>> index = SparseMatrixSimilarity(bm25_corpus, num_docs=len(corpus), num_terms=len(dictionary),
... normalize_queries=False, normalize_documents=False)
>>>
>>>
>>> query = 'graph trees computer'.split() # make a query
>>> bow_query = dictionary.doc2bow(query)
>>> bm25_query = query_model[bow_query]
>>> similarities = index[bm25_query] # calculate similarity of query to each doc from bow_corpus
Notes
-----
Use this if your input corpus contains sparse vectors (such as TF-IDF documents) and fits into RAM.
The matrix is internally stored as a :class:`scipy.sparse.csr_matrix` matrix. Unless the entire
matrix fits into main memory, use :class:`~gensim.similarities.docsim.Similarity` instead.
    Takes an optional `maintain_sparsity` argument; setting it to True causes
    `get_similarities` to return a sparse matrix instead of a dense representation, if possible.
See also
--------
:class:`~gensim.similarities.docsim.Similarity`
Index similarity (wrapper for other inheritors of :class:`~gensim.interfaces.SimilarityABC`).
:class:`~gensim.similarities.docsim.MatrixSimilarity`
Index similarity (dense with cosine distance).
"""
def __init__(self, corpus, num_features=None, num_terms=None, num_docs=None, num_nnz=None,
num_best=None, chunksize=500, dtype=numpy.float32, maintain_sparsity=False,
normalize_queries=True, normalize_documents=True):
"""
Parameters
----------
corpus: iterable of list of (int, float)
A list of documents in the BoW format.
num_features : int, optional
Size of the dictionary. Must be either specified, or present in `corpus.num_terms`.
num_terms : int, optional
Alias for `num_features`, you can use either.
num_docs : int, optional
Number of documents in `corpus`. Will be calculated if not provided.
num_nnz : int, optional
Number of non-zero elements in `corpus`. Will be calculated if not provided.
num_best : int, optional
If set, return only the `num_best` most similar documents, always leaving out documents with similarity = 0.
Otherwise, return a full vector with one float for every document in the index.
chunksize : int, optional
Size of query chunks. Used internally when the query is an entire corpus.
dtype : numpy.dtype, optional
Data type of the internal matrix.
maintain_sparsity : bool, optional
Return sparse arrays from :meth:`~gensim.similarities.docsim.SparseMatrixSimilarity.get_similarities`?
        normalize_queries : bool, optional
            If queries are in the bag-of-words (int, float) format, as opposed to sparse or dense
            2D arrays, they will be L2-normalized. Default is True.
        normalize_documents : bool, optional
            If `corpus` is in the bag-of-words (int, float) format, as opposed to sparse or dense
            2D arrays, it will be L2-normalized. Default is True.
"""
self.num_best = num_best
self.normalize = normalize_queries
self.chunksize = chunksize
self.maintain_sparsity = maintain_sparsity
if corpus is not None:
logger.info("creating sparse index")
# iterate over input corpus, populating the sparse index matrix
try:
# use the more efficient corpus generation version, if the input
# `corpus` is MmCorpus-like (knows its shape and number of non-zeroes).
num_terms, num_docs, num_nnz = corpus.num_terms, corpus.num_docs, corpus.num_nnz
logger.debug("using efficient sparse index creation")
except AttributeError:
# no MmCorpus, use the slower version (or maybe user supplied the
# num_* params in constructor)
pass
if num_features is not None:
# num_terms is just an alias for num_features, for compatibility with MatrixSimilarity
num_terms = num_features
if num_terms is None:
raise ValueError("refusing to guess the number of sparse features: specify num_features explicitly")
corpus = (matutils.scipy2sparse(v) if scipy.sparse.issparse(v) else
(matutils.full2sparse(v) if isinstance(v, numpy.ndarray) else
matutils.unitvec(v) if normalize_documents else v) for v in corpus)
self.index = matutils.corpus2csc(
corpus, num_terms=num_terms, num_docs=num_docs, num_nnz=num_nnz,
dtype=dtype, printprogress=10000,
).T
# convert to Compressed Sparse Row for efficient row slicing and multiplications
self.index = self.index.tocsr() # currently no-op, CSC.T is already CSR
logger.info("created %r", self.index)
def __len__(self):
"""Get size of index."""
return self.index.shape[0]
def get_similarities(self, query):
"""Get similarity between `query` and this index.
Warnings
--------
Do not use this function directly; use the `self[query]` syntax instead.
Parameters
----------
query : {list of (int, number), iterable of list of (int, number), :class:`scipy.sparse.csr_matrix`}
Document or collection of documents.
Return
------
:class:`numpy.ndarray`
Similarity matrix (if maintain_sparsity=False) **OR**
            :class:`scipy.sparse.csc_matrix`
otherwise
"""
is_corpus, query = utils.is_corpus(query)
if is_corpus:
query = matutils.corpus2csc(query, self.index.shape[1], dtype=self.index.dtype)
else:
if scipy.sparse.issparse(query):
query = query.T # convert documents=rows to documents=columns
elif isinstance(query, numpy.ndarray):
if query.ndim == 1:
query.shape = (1, len(query))
query = scipy.sparse.csr_matrix(query, dtype=self.index.dtype).T
else:
# default case: query is a single vector, in sparse gensim format
query = matutils.corpus2csc([query], self.index.shape[1], dtype=self.index.dtype)
# compute cosine similarity against every other document in the collection
result = self.index * query.tocsc() # N x T * T x C = N x C
if result.shape[1] == 1 and not is_corpus:
# for queries of one document, return a 1d array
result = result.toarray().flatten()
elif self.maintain_sparsity:
# avoid converting to dense array if maintaining sparsity
result = result.T
else:
# otherwise, return a 2d matrix (#queries x #index)
result = result.toarray().T
return result
# ===== piskvorky_gensim/gensim/similarities/termsim.py =====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Vit Novotny <witiko@mail.muni.cz>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""
This module provides classes that deal with term similarities.
"""
from array import array
from itertools import chain
import logging
from math import sqrt
import numpy as np
from scipy import sparse
from gensim.matutils import corpus2csc
from gensim.utils import SaveLoad, is_corpus
logger = logging.getLogger(__name__)
NON_NEGATIVE_NORM_ASSERTION_MESSAGE = (
u"sparse documents must not contain any explicit "
u"zero entries and the similarity matrix S must satisfy x^T * S * x >= 0 for any "
u"nonzero bag-of-words vector x."
)
class TermSimilarityIndex(SaveLoad):
"""
Base class = common interface for retrieving the most similar terms for a given term.
See Also
--------
:class:`~gensim.similarities.termsim.SparseTermSimilarityMatrix`
A sparse term similarity matrix built using a term similarity index.
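    Examples
    --------
    A minimal sketch of a custom index (hypothetical class, for illustration only); a subclass
    only needs to implement :meth:`most_similar`:
    >>> class NoopTermSimilarityIndex(TermSimilarityIndex):
    ...     def most_similar(self, term, topn=10):
    ...         return iter([])  # pretend no term is similar to any other term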
"""
def most_similar(self, term, topn=10):
"""Get most similar terms for a given term.
Return the most similar terms for a given term along with their similarities.
Parameters
----------
term : str
The term for which we are retrieving `topn` most similar terms.
topn : int, optional
The maximum number of most similar terms to `term` that will be retrieved.
Returns
-------
iterable of (str, float)
Most similar terms along with their similarities to `term`. Only terms distinct from
`term` must be returned.
"""
raise NotImplementedError
def __str__(self):
members = ', '.join('%s=%s' % pair for pair in vars(self).items())
return '%s<%s>' % (self.__class__.__name__, members)
class UniformTermSimilarityIndex(TermSimilarityIndex):
"""
Retrieves most similar terms for a given term under the hypothesis that the similarities between
distinct terms are uniform.
Parameters
----------
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
A dictionary that specifies the considered terms.
term_similarity : float, optional
The uniform similarity between distinct terms.
See Also
--------
:class:`~gensim.similarities.termsim.SparseTermSimilarityMatrix`
A sparse term similarity matrix built using a term similarity index.
Notes
-----
This class is mainly intended for testing SparseTermSimilarityMatrix and other classes that
depend on the TermSimilarityIndex.
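    Examples
    --------
    A minimal sketch: every term other than the query term receives the same fixed similarity.
    >>> from gensim.corpora import Dictionary
    >>> from gensim.similarities.termsim import UniformTermSimilarityIndex
    >>>
    >>> dictionary = Dictionary([['graph', 'trees', 'computer']])
    >>> index = UniformTermSimilarityIndex(dictionary, term_similarity=0.5)
    >>> sims = list(index.most_similar('graph', topn=2))  # the two other terms, each with similarity 0.5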
"""
def __init__(self, dictionary, term_similarity=0.5):
self.dictionary = sorted(dictionary.items())
self.term_similarity = term_similarity
def most_similar(self, t1, topn=10):
for __, (t2_index, t2) in zip(range(topn), (
(t2_index, t2) for t2_index, t2 in self.dictionary if t2 != t1)):
yield (t2, self.term_similarity)
class WordEmbeddingSimilarityIndex(TermSimilarityIndex):
"""
Computes cosine similarities between word embeddings and retrieves most
similar terms for a given term.
Notes
-----
By fitting the word embeddings to a vocabulary that you will be using, you
can eliminate all out-of-vocabulary (OOV) words that you would otherwise
receive from the `most_similar` method. In subword models such as fastText,
this procedure will also infer word-vectors for words from your vocabulary
that previously had no word-vector.
>>> from gensim.test.utils import common_texts, datapath
>>> from gensim.corpora import Dictionary
>>> from gensim.models import FastText
>>> from gensim.models.word2vec import LineSentence
>>> from gensim.similarities import WordEmbeddingSimilarityIndex
>>>
>>> model = FastText(common_texts, vector_size=20, min_count=1) # train word-vectors on a corpus
>>> different_corpus = LineSentence(datapath('lee_background.cor'))
>>> dictionary = Dictionary(different_corpus) # construct a vocabulary on a different corpus
>>> words = [word for word, count in dictionary.most_common()]
>>> word_vectors = model.wv.vectors_for_all(words) # remove OOV word-vectors and infer word-vectors for new words
>>> assert len(dictionary) == len(word_vectors) # all words from our vocabulary received their word-vectors
>>> termsim_index = WordEmbeddingSimilarityIndex(word_vectors)
Parameters
----------
keyedvectors : :class:`~gensim.models.keyedvectors.KeyedVectors`
The word embeddings.
threshold : float, optional
Only embeddings more similar than `threshold` are considered when retrieving word embeddings
closest to a given word embedding.
exponent : float, optional
Take the word embedding similarities larger than `threshold` to the power of `exponent`.
kwargs : dict or None
A dict with keyword arguments that will be passed to the
:meth:`~gensim.models.keyedvectors.KeyedVectors.most_similar` method
when retrieving the word embeddings closest to a given word embedding.
See Also
--------
:class:`~gensim.similarities.levenshtein.LevenshteinSimilarityIndex`
Retrieve most similar terms for a given term using the Levenshtein distance.
:class:`~gensim.similarities.termsim.SparseTermSimilarityMatrix`
Build a term similarity matrix and compute the Soft Cosine Measure.
"""
def __init__(self, keyedvectors, threshold=0.0, exponent=2.0, kwargs=None):
self.keyedvectors = keyedvectors
self.threshold = threshold
self.exponent = exponent
self.kwargs = kwargs or {}
super(WordEmbeddingSimilarityIndex, self).__init__()
def most_similar(self, t1, topn=10):
if t1 not in self.keyedvectors:
logger.debug('an out-of-dictionary term "%s"', t1)
else:
most_similar = self.keyedvectors.most_similar(positive=[t1], topn=topn, **self.kwargs)
for t2, similarity in most_similar:
if similarity > self.threshold:
yield (t2, similarity**self.exponent)
def _shortest_uint_dtype(max_value):
"""Get the shortest unsingned integer data-type required for representing values up to a given
maximum value.
Returns the shortest unsingned integer data-type required for representing values up to a given
maximum value.
Parameters
----------
max_value : int
The maximum value we wish to represent.
Returns
-------
data-type
The shortest unsigned integer data-type required for representing values up to a given
maximum value.
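    Examples
    --------
    An illustrative call: 300 does not fit into ``numpy.uint8`` (max 255), so the next wider type is returned.
    >>> _shortest_uint_dtype(300)
    <class 'numpy.uint16'>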
"""
if max_value < 2**8:
return np.uint8
elif max_value < 2**16:
return np.uint16
elif max_value < 2**32:
return np.uint32
return np.uint64
def _create_source(index, dictionary, tfidf, symmetric, dominant, nonzero_limit, dtype):
"""Build a sparse term similarity matrix using a term similarity index.
Returns
-------
matrix : :class:`scipy.sparse.coo_matrix`
The sparse term similarity matrix.
"""
assert isinstance(index, TermSimilarityIndex)
assert dictionary is not None
matrix_order = len(dictionary)
if matrix_order == 0:
raise ValueError('Dictionary provided to SparseTermSimilarityMatrix must not be empty')
logger.info("constructing a sparse term similarity matrix using %s", index)
if nonzero_limit is None:
nonzero_limit = matrix_order
def tfidf_sort_key(term_index):
if isinstance(term_index, tuple):
term_index, *_ = term_index
term_idf = tfidf.idfs[term_index]
return (-term_idf, term_index)
if tfidf is None:
columns = sorted(dictionary.keys())
logger.info("iterating over %i columns in dictionary order", len(columns))
else:
assert max(tfidf.idfs) == matrix_order - 1
columns = sorted(tfidf.idfs.keys(), key=tfidf_sort_key)
logger.info("iterating over %i columns in tf-idf order", len(columns))
nonzero_counter_dtype = _shortest_uint_dtype(nonzero_limit)
column_nonzero = np.array([0] * matrix_order, dtype=nonzero_counter_dtype)
if dominant:
column_sum = np.zeros(matrix_order, dtype=dtype)
if symmetric:
assigned_cells = set()
row_buffer = array('Q')
column_buffer = array('Q')
if dtype is np.float16 or dtype is np.float32:
data_buffer = array('f')
elif dtype is np.float64:
data_buffer = array('d')
else:
raise ValueError('Dtype %s is unsupported, use numpy.float16, float32, or float64.' % dtype)
def cell_full(t1_index, t2_index, similarity):
if dominant and column_sum[t1_index] + abs(similarity) >= 1.0:
return True # after adding the similarity, the matrix would cease to be strongly diagonally dominant
assert column_nonzero[t1_index] <= nonzero_limit
if column_nonzero[t1_index] == nonzero_limit:
return True # after adding the similarity, the column would contain more than nonzero_limit elements
if symmetric and (t1_index, t2_index) in assigned_cells:
return True # a similarity has already been assigned to this cell
return False
def populate_buffers(t1_index, t2_index, similarity):
column_buffer.append(t1_index)
row_buffer.append(t2_index)
data_buffer.append(similarity)
column_nonzero[t1_index] += 1
if symmetric:
assigned_cells.add((t1_index, t2_index))
if dominant:
column_sum[t1_index] += abs(similarity)
try:
from tqdm import tqdm as progress_bar
except ImportError:
def progress_bar(iterable):
return iterable
for column_number, t1_index in enumerate(progress_bar(columns)):
column_buffer.append(column_number)
row_buffer.append(column_number)
data_buffer.append(1.0)
if nonzero_limit <= 0:
continue
t1 = dictionary[t1_index]
num_nonzero = column_nonzero[t1_index]
num_rows = nonzero_limit - num_nonzero
most_similar = [
(dictionary.token2id[term], similarity)
for term, similarity in index.most_similar(t1, topn=num_rows)
if term in dictionary.token2id
] if num_rows > 0 else []
if tfidf is None:
rows = sorted(most_similar)
else:
rows = sorted(most_similar, key=tfidf_sort_key)
for t2_index, similarity in rows:
if cell_full(t1_index, t2_index, similarity):
continue
if not symmetric:
populate_buffers(t1_index, t2_index, similarity)
elif not cell_full(t2_index, t1_index, similarity):
populate_buffers(t1_index, t2_index, similarity)
populate_buffers(t2_index, t1_index, similarity)
data_buffer = np.frombuffer(data_buffer, dtype=dtype)
row_buffer = np.frombuffer(row_buffer, dtype=np.uint64)
column_buffer = np.frombuffer(column_buffer, dtype=np.uint64)
matrix = sparse.coo_matrix((data_buffer, (row_buffer, column_buffer)), shape=(matrix_order, matrix_order))
logger.info(
"constructed a sparse term similarity matrix with %0.06f%% density",
100.0 * matrix.getnnz() / matrix_order**2,
)
return matrix
def _normalize_dense_vector(vector, matrix, normalization):
"""Normalize a dense vector after a change of basis.
Parameters
----------
vector : 1xN ndarray
A dense vector.
matrix : NxN ndarray
A change-of-basis matrix.
normalization : {True, False, 'maintain'}
Whether the vector will be L2-normalized (True; corresponds to the soft
cosine measure), maintain its L2-norm during the change of basis
('maintain'; corresponds to query expansion with partial membership),
or kept as-is (False; corresponds to query expansion).
Returns
-------
vector : ndarray
The normalized dense vector.
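    Examples
    --------
    A minimal sketch: with the identity change-of-basis matrix, this reduces to plain L2 normalization.
    >>> import numpy as np
    >>> unit = _normalize_dense_vector(np.array([[3.0], [4.0]]), np.eye(2), True)  # column vector [0.6, 0.8]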
"""
if not normalization:
return vector
vector_norm = vector.T.dot(matrix).dot(vector)[0, 0]
assert vector_norm >= 0.0, NON_NEGATIVE_NORM_ASSERTION_MESSAGE
if normalization == 'maintain' and vector_norm > 0.0:
vector_norm /= vector.T.dot(vector)
vector_norm = sqrt(vector_norm)
normalized_vector = vector
if vector_norm > 0.0:
normalized_vector /= vector_norm
return normalized_vector
def _normalize_dense_corpus(corpus, matrix, normalization):
"""Normalize a dense corpus after a change of basis.
Parameters
----------
corpus : MxN ndarray
A dense corpus.
matrix : NxN ndarray
A change-of-basis matrix.
normalization : {True, False, 'maintain'}
Whether the vector will be L2-normalized (True; corresponds to the soft
cosine measure), maintain its L2-norm during the change of basis
('maintain'; corresponds to query expansion with partial membership),
or kept as-is (False; corresponds to query expansion).
Returns
-------
normalized_corpus : ndarray
The normalized dense corpus.
"""
if not normalization:
return corpus
# use the following equality: np.diag(A.T.dot(B).dot(A)) == A.T.dot(B).multiply(A.T).sum(axis=1).T
corpus_norm = np.multiply(corpus.T.dot(matrix), corpus.T).sum(axis=1).T
assert corpus_norm.min() >= 0.0, NON_NEGATIVE_NORM_ASSERTION_MESSAGE
if normalization == 'maintain':
corpus_norm /= np.multiply(corpus.T, corpus.T).sum(axis=1).T
corpus_norm = np.sqrt(corpus_norm)
normalized_corpus = np.multiply(corpus, 1.0 / corpus_norm)
normalized_corpus = np.nan_to_num(normalized_corpus) # account for division by zero
return normalized_corpus
def _normalize_sparse_corpus(corpus, matrix, normalization):
"""Normalize a sparse corpus after a change of basis.
Parameters
----------
corpus : MxN :class:`scipy.sparse.csc_matrix`
A sparse corpus.
matrix : NxN :class:`scipy.sparse.csc_matrix`
A change-of-basis matrix.
normalization : {True, False, 'maintain'}
Whether the vector will be L2-normalized (True; corresponds to the soft
cosine measure), maintain its L2-norm during the change of basis
('maintain'; corresponds to query expansion with partial membership),
or kept as-is (False; corresponds to query expansion).
Returns
-------
normalized_corpus : :class:`scipy.sparse.csc_matrix`
The normalized sparse corpus.
"""
if not normalization:
return corpus
# use the following equality: np.diag(A.T.dot(B).dot(A)) == A.T.dot(B).multiply(A.T).sum(axis=1).T
corpus_norm = corpus.T.dot(matrix).multiply(corpus.T).sum(axis=1).T
assert corpus_norm.min() >= 0.0, NON_NEGATIVE_NORM_ASSERTION_MESSAGE
if normalization == 'maintain':
corpus_norm /= corpus.T.multiply(corpus.T).sum(axis=1).T
corpus_norm = np.sqrt(corpus_norm)
normalized_corpus = corpus.multiply(sparse.csr_matrix(1.0 / corpus_norm))
normalized_corpus[normalized_corpus == np.inf] = 0 # account for division by zero
return normalized_corpus
class SparseTermSimilarityMatrix(SaveLoad):
"""
Builds a sparse term similarity matrix using a term similarity index.
Examples
--------
>>> from gensim.test.utils import common_texts as corpus, datapath
>>> from gensim.corpora import Dictionary
    >>> from gensim.models import TfidfModel, Word2Vec
>>> from gensim.similarities import SoftCosineSimilarity, SparseTermSimilarityMatrix, WordEmbeddingSimilarityIndex
>>> from gensim.similarities.index import AnnoyIndexer
>>>
>>> model_corpus_file = datapath('lee_background.cor')
>>> model = Word2Vec(corpus_file=model_corpus_file, vector_size=20, min_count=1) # train word-vectors
>>>
>>> dictionary = Dictionary(corpus)
>>> tfidf = TfidfModel(dictionary=dictionary)
>>> words = [word for word, count in dictionary.most_common()]
>>> word_vectors = model.wv.vectors_for_all(words, allow_inference=False) # produce vectors for words in corpus
>>>
>>> indexer = AnnoyIndexer(word_vectors, num_trees=2) # use Annoy for faster word similarity lookups
>>> termsim_index = WordEmbeddingSimilarityIndex(word_vectors, kwargs={'indexer': indexer})
>>> similarity_matrix = SparseTermSimilarityMatrix(termsim_index, dictionary, tfidf) # compute word similarities
>>>
    >>> tfidf_corpus = tfidf[[dictionary.doc2bow(document) for document in corpus]]
>>> docsim_index = SoftCosineSimilarity(tfidf_corpus, similarity_matrix, num_best=10) # index tfidf_corpus
>>>
>>> query = 'graph trees computer'.split() # make a query
>>> sims = docsim_index[dictionary.doc2bow(query)] # find the ten closest documents from tfidf_corpus
Check out `the Gallery <https://radimrehurek.com/gensim/auto_examples/tutorials/run_scm.html>`_
for more examples.
Parameters
----------
source : :class:`~gensim.similarities.termsim.TermSimilarityIndex` or :class:`scipy.sparse.spmatrix`
The source of the term similarity. Either a term similarity index that will be used for
building the term similarity matrix, or an existing sparse term similarity matrix that will
be encapsulated and stored in the matrix attribute. When a matrix is specified as the
source, any other parameters will be ignored.
dictionary : :class:`~gensim.corpora.dictionary.Dictionary` or None, optional
A dictionary that specifies a mapping between terms and the indices of rows and columns
of the resulting term similarity matrix. The dictionary may only be None when source is
a :class:`scipy.sparse.spmatrix`.
tfidf : :class:`gensim.models.tfidfmodel.TfidfModel` or None, optional
A model that specifies the relative importance of the terms in the dictionary. The columns
of the term similarity matrix will be build in a decreasing order of importance of
terms, or in the order of term identifiers if None.
symmetric : bool, optional
Whether the symmetry of the term similarity matrix will be enforced. Symmetry is a necessary
precondition for positive definiteness, which is necessary if you later wish to derive a
unique change-of-basis matrix from the term similarity matrix using Cholesky factorization.
Setting symmetric to False will significantly reduce memory usage during matrix construction.
dominant: bool, optional
Whether the strict column diagonal dominance of the term similarity matrix will be enforced.
Strict diagonal dominance and symmetry are sufficient preconditions for positive
definiteness, which is necessary if you later wish to derive a change-of-basis matrix from
the term similarity matrix using Cholesky factorization.
nonzero_limit : int or None, optional
The maximum number of non-zero elements outside the diagonal in a single column of the
sparse term similarity matrix. If None, then no limit will be imposed.
dtype : numpy.dtype, optional
The data type of the sparse term similarity matrix.
Attributes
----------
matrix : :class:`scipy.sparse.csc_matrix`
The encapsulated sparse term similarity matrix.
Raises
------
ValueError
If `dictionary` is empty.
See Also
--------
:class:`~gensim.similarities.docsim.SoftCosineSimilarity`
A document similarity index using the soft cosine similarity over the term similarity matrix.
:class:`~gensim.similarities.termsim.LevenshteinSimilarityIndex`
A term similarity index that computes Levenshtein similarities between terms.
:class:`~gensim.similarities.termsim.WordEmbeddingSimilarityIndex`
A term similarity index that computes cosine similarities between word embeddings.
"""
def __init__(self, source, dictionary=None, tfidf=None, symmetric=True, dominant=False,
nonzero_limit=100, dtype=np.float32):
if not sparse.issparse(source):
index = source
args = (index, dictionary, tfidf, symmetric, dominant, nonzero_limit, dtype)
source = _create_source(*args)
assert sparse.issparse(source)
self.matrix = source.tocsc()
def inner_product(self, X, Y, normalized=(False, False)):
"""Get the inner product(s) between real vectors / corpora X and Y.
        Return the inner product(s) between real vectors / corpora X and Y expressed in a
non-orthogonal normalized basis, where the dot product between the basis vectors is given by
the sparse term similarity matrix.
Parameters
----------
vec1 : list of (int, float) or iterable of list of (int, float)
A query vector / corpus in the sparse bag-of-words format.
vec2 : list of (int, float) or iterable of list of (int, float)
A document vector / corpus in the sparse bag-of-words format.
normalized : tuple of {True, False, 'maintain'}, optional
First/second value specifies whether the query/document vectors in the inner product
will be L2-normalized (True; corresponds to the soft cosine measure), maintain their
L2-norm during change of basis ('maintain'; corresponds to query expansion with partial
membership), or kept as-is (False; corresponds to query expansion; default).
Returns
-------
`self.matrix.dtype`, `scipy.sparse.csr_matrix`, or :class:`numpy.matrix`
The inner product(s) between `X` and `Y`.
References
----------
The soft cosine measure was perhaps first described by [sidorovetal14]_.
Further notes on the efficient implementation of the soft cosine measure are described by
[novotny18]_.
.. [sidorovetal14] Grigori Sidorov et al., "Soft Similarity and Soft Cosine Measure: Similarity
of Features in Vector Space Model", 2014, http://www.cys.cic.ipn.mx/ojs/index.php/CyS/article/view/2043/1921.
.. [novotny18] Vít Novotný, "Implementation Notes for the Soft Cosine Measure", 2018,
http://dx.doi.org/10.1145/3269206.3269317.
"""
if not X or not Y:
return self.matrix.dtype.type(0.0)
normalized_X, normalized_Y = normalized
valid_normalized_values = (True, False, 'maintain')
if normalized_X not in valid_normalized_values:
raise ValueError('{} is not a valid value of normalize'.format(normalized_X))
if normalized_Y not in valid_normalized_values:
raise ValueError('{} is not a valid value of normalize'.format(normalized_Y))
is_corpus_X, X = is_corpus(X)
is_corpus_Y, Y = is_corpus(Y)
if not is_corpus_X and not is_corpus_Y:
X = dict(X)
Y = dict(Y)
word_indices = np.array(sorted(set(chain(X, Y))))
dtype = self.matrix.dtype
X = np.array([X[i] if i in X else 0 for i in word_indices], dtype=dtype)
Y = np.array([Y[i] if i in Y else 0 for i in word_indices], dtype=dtype)
matrix = self.matrix[word_indices[:, None], word_indices].todense()
X = _normalize_dense_vector(X, matrix, normalized_X)
Y = _normalize_dense_vector(Y, matrix, normalized_Y)
result = X.T.dot(matrix).dot(Y)
if normalized_X is True and normalized_Y is True:
result = np.clip(result, -1.0, 1.0)
return result[0, 0]
elif not is_corpus_X or not is_corpus_Y:
if is_corpus_X and not is_corpus_Y:
X, Y = Y, X # make Y the corpus
is_corpus_X, is_corpus_Y = is_corpus_Y, is_corpus_X
normalized_X, normalized_Y = normalized_Y, normalized_X
transposed = True
else:
transposed = False
dtype = self.matrix.dtype
expanded_X = corpus2csc([X], num_terms=self.matrix.shape[0], dtype=dtype).T.dot(self.matrix)
word_indices = np.array(sorted(expanded_X.nonzero()[1]))
del expanded_X
X = dict(X)
X = np.array([X[i] if i in X else 0 for i in word_indices], dtype=dtype)
Y = corpus2csc(Y, num_terms=self.matrix.shape[0], dtype=dtype)[word_indices, :].todense()
matrix = self.matrix[word_indices[:, None], word_indices].todense()
X = _normalize_dense_vector(X, matrix, normalized_X)
Y = _normalize_dense_corpus(Y, matrix, normalized_Y)
result = X.dot(matrix).dot(Y)
if normalized_X is True and normalized_Y is True:
result = np.clip(result, -1.0, 1.0)
if transposed:
result = result.T
return result
else: # if is_corpus_X and is_corpus_Y:
dtype = self.matrix.dtype
X = corpus2csc(X if is_corpus_X else [X], num_terms=self.matrix.shape[0], dtype=dtype)
Y = corpus2csc(Y if is_corpus_Y else [Y], num_terms=self.matrix.shape[0], dtype=dtype)
matrix = self.matrix
X = _normalize_sparse_corpus(X, matrix, normalized_X)
Y = _normalize_sparse_corpus(Y, matrix, normalized_Y)
result = X.T.dot(matrix).dot(Y)
if normalized_X is True and normalized_Y is True:
result.data = np.clip(result.data, -1.0, 1.0)
return result
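# --- Illustrative sketch (editor's addition, not part of the original module). ---
# A minimal end-to-end use of `inner_product`, assuming gensim is installed. The toy corpus,
# the uniform term similarity of 0.2 and the helper name `_inner_product_demo` are hypothetical.
def _inner_product_demo():
    from gensim.corpora import Dictionary
    from gensim.similarities.termsim import UniformTermSimilarityIndex

    documents = [['graph', 'trees'], ['graph', 'minors'], ['human', 'interface']]
    dictionary = Dictionary(documents)
    # Every pair of distinct terms gets similarity 0.2; the diagonal stays 1.0.
    index = UniformTermSimilarityIndex(dictionary, term_similarity=0.2)
    matrix = SparseTermSimilarityMatrix(index, dictionary)

    query = dictionary.doc2bow(['graph', 'trees'])
    document = dictionary.doc2bow(['graph', 'minors'])
    # normalized=(True, True) yields the soft cosine measure, clipped to [-1.0, 1.0].
    return matrix.inner_product(query, document, normalized=(True, True))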
| 25,808 | Python | .py | 515 | 42.18835 | 120 | 0.672399 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,169 | __init__.py | piskvorky_gensim/gensim/similarities/__init__.py |
"""
This package contains implementations of pairwise similarity queries.
"""
# bring classes directly into package namespace, to save some typing
from .levenshtein import LevenshteinSimilarityIndex # noqa:F401
from .docsim import ( # noqa:F401
Similarity,
MatrixSimilarity,
SparseMatrixSimilarity,
SoftCosineSimilarity,
WmdSimilarity)
from .termsim import ( # noqa:F401
TermSimilarityIndex,
UniformTermSimilarityIndex,
WordEmbeddingSimilarityIndex,
SparseTermSimilarityMatrix)
| 518 | Python | .py | 16 | 29.0625 | 69 | 0.800399 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,170 | nmslib.py | piskvorky_gensim/gensim/similarities/nmslib.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Radim Rehurek <me@radimrehurek.com>
# Copyright (C) 2019 Masahiro Kazama <kazama.masa@gmail.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""
This module integrates `NMSLIB <https://github.com/nmslib/nmslib>`_ fast similarity
search with Gensim's :class:`~gensim.models.word2vec.Word2Vec`, :class:`~gensim.models.doc2vec.Doc2Vec`,
:class:`~gensim.models.fasttext.FastText` and :class:`~gensim.models.keyedvectors.KeyedVectors`
vector embeddings.
.. Important::
To use this module, you must have the external ``nmslib`` library installed.
To install it, run ``pip install nmslib``.
To use the integration, instantiate a :class:`~gensim.similarities.nmslib.NmslibIndexer` class
and pass the instance as the `indexer` parameter to your model's `model.most_similar()` method.
Example usage
-------------
.. sourcecode:: pycon
>>> from gensim.similarities.nmslib import NmslibIndexer
>>> from gensim.models import Word2Vec
>>>
>>> sentences = [['cute', 'cat', 'say', 'meow'], ['cute', 'dog', 'say', 'woof']]
>>> model = Word2Vec(sentences, min_count=1, epochs=10, seed=2)
>>>
>>> indexer = NmslibIndexer(model)
>>> model.wv.most_similar("cat", topn=2, indexer=indexer)
[('cat', 1.0), ('meow', 0.16398882865905762)]
Load and save example
---------------------
.. sourcecode:: pycon
>>> from gensim.similarities.nmslib import NmslibIndexer
>>> from gensim.models import Word2Vec
>>> from tempfile import mkstemp
>>>
>>> sentences = [['cute', 'cat', 'say', 'meow'], ['cute', 'dog', 'say', 'woof']]
>>> model = Word2Vec(sentences, min_count=1, seed=2, epochs=10)
>>>
>>> indexer = NmslibIndexer(model)
>>> _, temp_fn = mkstemp()
>>> indexer.save(temp_fn)
>>>
>>> new_indexer = NmslibIndexer.load(temp_fn)
>>> model.wv.most_similar("cat", topn=2, indexer=new_indexer)
[('cat', 1.0), ('meow', 0.5595494508743286)]
What is NMSLIB
--------------
Non-Metric Space Library (NMSLIB) is an efficient cross-platform similarity search library and a toolkit
for evaluation of similarity search methods. The core-library does not have any third-party dependencies.
More information about NMSLIB: `github repository <https://github.com/nmslib/nmslib>`_.
Why use NMSLIB?
---------------
Gensim's native :py:class:`~gensim.similarities.Similarity` for finding the `k` nearest neighbors to a vector
uses brute force and has linear complexity, albeit with extremely low constant factors.
The retrieved results are exact, which is an overkill in many applications:
approximate results retrieved in sub-linear time may be enough.
NMSLIB can find approximate nearest neighbors much faster, similar to Spotify's Annoy library.
Compared to :py:class:`~gensim.similarities.annoy.Annoy`, NMSLIB has more parameters to
control the build and query time and accuracy. NMSLIB often achieves faster and more accurate
nearest neighbors search than Annoy.
"""
# Avoid import collisions on py2: this module has the same name as the actual NMSLIB library.
from __future__ import absolute_import
import pickle as _pickle
from smart_open import open
try:
import nmslib
except ImportError:
raise ImportError("NMSLIB not installed. To use the NMSLIB indexer, please run `pip install nmslib`.")
from gensim import utils
from gensim.models.doc2vec import Doc2Vec
from gensim.models.word2vec import Word2Vec
from gensim.models.fasttext import FastText
from gensim.models import KeyedVectors
class NmslibIndexer():
"""This class allows to use `NMSLIB <https://github.com/nmslib/nmslib>`_ as indexer for `most_similar` method
from :class:`~gensim.models.word2vec.Word2Vec`, :class:`~gensim.models.doc2vec.Doc2Vec`,
:class:`~gensim.models.fasttext.FastText` and :class:`~gensim.models.keyedvectors.Word2VecKeyedVectors` classes.
"""
def __init__(self, model, index_params=None, query_time_params=None):
"""
Parameters
----------
model : :class:`~gensim.models.base_any2vec.BaseWordEmbeddingsModel`
            Model that will be used as the source for the index.
index_params : dict, optional
Indexing parameters passed through to NMSLIB:
https://github.com/nmslib/nmslib/blob/master/manual/methods.md#graph-based-search-methods-sw-graph-and-hnsw
If not specified, defaults to `{'M': 100, 'indexThreadQty': 1, 'efConstruction': 100, 'post': 0}`.
query_time_params : dict, optional
query_time_params for NMSLIB indexer.
If not specified, defaults to `{'efSearch': 100}`.
"""
if index_params is None:
index_params = {'M': 100, 'indexThreadQty': 1, 'efConstruction': 100, 'post': 0}
if query_time_params is None:
query_time_params = {'efSearch': 100}
self.index = None
self.labels = None
self.model = model
self.index_params = index_params
self.query_time_params = query_time_params
#
# In the main use case, the user will pass us a non-None model, and we use that model
# to initialize the index and labels. In a separate (completely internal) use case, the
# NsmlibIndexer.load function handles the index and label initialization separately,
# so it passes us None as the model.
#
if model:
if isinstance(self.model, Doc2Vec):
self._build_from_doc2vec()
elif isinstance(self.model, (Word2Vec, FastText)):
self._build_from_word2vec()
elif isinstance(self.model, (KeyedVectors,)):
self._build_from_keyedvectors()
else:
raise ValueError("model must be a Word2Vec, Doc2Vec, FastText or KeyedVectors instance")
def save(self, fname, protocol=utils.PICKLE_PROTOCOL):
"""Save this NmslibIndexer instance to a file.
Parameters
----------
fname : str
            Path to the output file,
            will produce 2 files: `fname` - the serialized NMSLIB index, and `fname`.d - the index parameters and labels.
protocol : int, optional
Protocol for pickle.
Notes
-----
This method saves **only** the index (**the model isn't preserved**).
"""
fname_dict = fname + '.d'
self.index.saveIndex(fname)
d = {'index_params': self.index_params, 'query_time_params': self.query_time_params, 'labels': self.labels}
with open(fname_dict, 'wb') as fout:
_pickle.dump(d, fout, protocol=protocol)
@classmethod
def load(cls, fname):
"""Load a NmslibIndexer instance from a file.
Parameters
----------
fname : str
Path previously used in `save()`.
"""
fname_dict = fname + '.d'
with open(fname_dict, 'rb') as f:
d = _pickle.load(f)
index_params = d['index_params']
query_time_params = d['query_time_params']
nmslib_instance = cls(model=None, index_params=index_params, query_time_params=query_time_params)
index = nmslib.init(method='hnsw', space='cosinesimil')
index.loadIndex(fname)
nmslib_instance.index = index
nmslib_instance.labels = d['labels']
return nmslib_instance
def _build_from_word2vec(self):
"""Build an NMSLIB index using word vectors from a Word2Vec model."""
self._build_from_model(self.model.wv.get_normed_vectors(), self.model.wv.index_to_key)
def _build_from_doc2vec(self):
"""Build an NMSLIB index using document vectors from a Doc2Vec model."""
docvecs = self.model.dv
labels = docvecs.index_to_key
self._build_from_model(docvecs.get_normed_vectors(), labels)
def _build_from_keyedvectors(self):
"""Build an NMSLIB index using word vectors from a KeyedVectors model."""
self._build_from_model(self.model.get_normed_vectors(), self.model.index_to_key)
def _build_from_model(self, vectors, labels):
index = nmslib.init(method='hnsw', space='cosinesimil')
index.addDataPointBatch(vectors)
index.createIndex(self.index_params, print_progress=True)
nmslib.setQueryTimeParams(index, self.query_time_params)
self.index = index
self.labels = labels
def most_similar(self, vector, num_neighbors):
"""Find the approximate `num_neighbors` most similar items.
Parameters
----------
vector : numpy.array
Vector for a word or document.
num_neighbors : int
            The number of most similar items to retrieve.
Returns
-------
list of (str, float)
List of most similar items in the format `[(item, cosine_similarity), ... ]`.
"""
ids, distances = self.index.knnQueryBatch(vector.reshape(1, -1), k=num_neighbors)[0]
# NMSLIB returns cosine distance (not similarity), which is simply `dist = 1 - cossim`.
# So, convert back to similarities here.
return [(self.labels[id_], 1.0 - distance) for id_, distance in zip(ids, distances)]
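# --- Illustrative sketch (editor's addition, not part of the original module). ---
# Shows how the `index_params` / `query_time_params` documented in `__init__` trade build and
# query time for recall. The toy sentences and the parameter values below are hypothetical.
def _tuned_indexer_demo():
    from gensim.models import Word2Vec

    sentences = [['cute', 'cat', 'say', 'meow'], ['cute', 'dog', 'say', 'woof']]
    model = Word2Vec(sentences, min_count=1, epochs=10, seed=2)
    # Larger M / efConstruction -> slower index build but better recall;
    # larger efSearch -> slower queries but better recall.
    indexer = NmslibIndexer(
        model,
        index_params={'M': 200, 'indexThreadQty': 4, 'efConstruction': 200, 'post': 0},
        query_time_params={'efSearch': 200},
    )
    return model.wv.most_similar('cat', topn=2, indexer=indexer)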
| 9,211 | Python | .py | 188 | 41.989362 | 119 | 0.663845 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,171 | make_wiki_online_nodebug.py | piskvorky_gensim/gensim/scripts/make_wiki_online_nodebug.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Copyright (C) 2012 Lars Buitinck <larsmans@gmail.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""
USAGE: %(program)s WIKI_XML_DUMP OUTPUT_PREFIX [VOCABULARY_SIZE]
Convert articles from a Wikipedia dump to (sparse) vectors. The input is a
bz2-compressed dump of Wikipedia articles, in XML format.
This actually creates several files:
* `OUTPUT_PREFIX_wordids.txt`: mapping between words and their integer ids
* `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation, in
  Matrix Market format
* `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation
* `OUTPUT_PREFIX.tfidf_model`: TF-IDF model dump
The output Matrix Market files can then be compressed (e.g., by bzip2) to save
disk space; gensim's corpus iterators can work with compressed input, too.
`VOCABULARY_SIZE` controls how many of the most frequent words to keep (after
removing tokens that appear in more than 10%% of all documents). Defaults to
100,000.
If you have the `pattern` package installed, this script will use a fancy
lemmatization to get a lemma of each token (instead of plain alphabetic
tokenizer). The package is available at https://github.com/clips/pattern .
Example:
python -m gensim.scripts.make_wikicorpus ~/gensim/results/enwiki-latest-pages-articles.xml.bz2 ~/gensim/results/wiki
"""
import logging
import os.path
import sys
from gensim.corpora import Dictionary, HashDictionary, MmCorpus, WikiCorpus
from gensim.models import TfidfModel
# Wiki is first scanned for all distinct word types (~7M). The types that
# appear in more than 10% of articles are removed and from the rest, the
# DEFAULT_DICT_SIZE most frequent types are kept.
DEFAULT_DICT_SIZE = 100000
if __name__ == '__main__':
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s", ' '.join(sys.argv))
# check and process input arguments
if len(sys.argv) < 3:
print(globals()['__doc__'] % locals())
sys.exit(1)
inp, outp = sys.argv[1:3]
if not os.path.isdir(os.path.dirname(outp)):
raise SystemExit("Error: The output directory does not exist. Create the directory and try again.")
if len(sys.argv) > 3:
keep_words = int(sys.argv[3])
else:
keep_words = DEFAULT_DICT_SIZE
online = 'online' in program
lemmatize = 'lemma' in program
debug = 'nodebug' not in program
if online:
dictionary = HashDictionary(id_range=keep_words, debug=debug)
dictionary.allow_update = True # start collecting document frequencies
wiki = WikiCorpus(inp, lemmatize=lemmatize, dictionary=dictionary)
# ~4h on my macbook pro without lemmatization, 3.1m articles (august 2012)
MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000)
# with HashDictionary, the token->id mapping is only fully instantiated now, after `serialize`
dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
dictionary.save_as_text(outp + '_wordids.txt.bz2')
wiki.save(outp + '_corpus.pkl.bz2')
dictionary.allow_update = False
else:
wiki = WikiCorpus(inp, lemmatize=lemmatize) # takes about 9h on a macbook pro, for 3.5m articles (june 2011)
# only keep the most frequent words (out of total ~8.2m unique tokens)
wiki.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
# save dictionary and bag-of-words (term-document frequency matrix)
MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000) # another ~9h
wiki.dictionary.save_as_text(outp + '_wordids.txt.bz2')
# load back the id->word mapping directly from file
# this seems to save more memory, compared to keeping the wiki.dictionary object from above
dictionary = Dictionary.load_from_text(outp + '_wordids.txt.bz2')
del wiki
# initialize corpus reader and word->id mapping
mm = MmCorpus(outp + '_bow.mm')
# build tfidf, ~50min
tfidf = TfidfModel(mm, id2word=dictionary, normalize=True)
tfidf.save(outp + '.tfidf_model')
# save tfidf vectors in matrix market format
# ~4h; result file is 15GB! bzip2'ed down to 4.5GB
MmCorpus.serialize(outp + '_tfidf.mm', tfidf[mm], progress_cnt=10000)
logger.info("finished running %s", program)
| 4,603 | Python | .py | 87 | 48.172414 | 118 | 0.719216 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,172 | segment_wiki.py | piskvorky_gensim/gensim/scripts/segment_wiki.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Jayant Jain <jayant@rare-technologies.com>
# Copyright (C) 2016 RaRe Technologies
"""This script using for extracting plain text out of a raw Wikipedia dump. Input is an xml.bz2 file provided
by MediaWiki that looks like <LANG>wiki-<YYYYMMDD>-pages-articles.xml.bz2 or <LANG>wiki-latest-pages-articles.xml.bz2
(e.g. 14 GB of https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2).
It streams through all the XML articles using multiple cores (#cores - 1, by default),
decompressing on the fly and extracting plain text from the articles and their sections.
For each extracted article, it prints its title, section names and plain text section contents, in json-line format.
How to use
----------
#. Process Wikipedia dump with this script ::
python -m gensim.scripts.segment_wiki -i -f enwiki-latest-pages-articles.xml.bz2 -o enwiki-latest.json.gz
#. Read output in simple way:
.. sourcecode:: pycon
>>> from gensim import utils
>>> import json
>>>
>>> # iterate over the plain text data we just created
>>> with utils.open('enwiki-latest.json.gz', 'rb') as f:
>>> for line in f:
>>> # decode each JSON line into a Python dictionary object
>>> article = json.loads(line)
>>>
>>> # each article has a "title", a mapping of interlinks and a list of "section_titles" and
>>> # "section_texts".
>>> print("Article title: %s" % article['title'])
>>> print("Interlinks: %s" + article['interlinks'])
>>> for section_title, section_text in zip(article['section_titles'], article['section_texts']):
>>> print("Section title: %s" % section_title)
>>> print("Section text: %s" % section_text)
Notes
-----
Processing the entire English Wikipedia dump takes 1.7 hours (about 3 million articles per hour,
or 10 MB of XML per second) on an 8 core Intel i7-7700 @3.60GHz.
Command line arguments
----------------------
.. program-output:: python -m gensim.scripts.segment_wiki --help
:ellipsis: 0, -10
"""
import argparse
import json
import logging
import multiprocessing
import re
import sys
from xml.etree import ElementTree
from functools import partial
from gensim.corpora.wikicorpus import IGNORED_NAMESPACES, WikiCorpus, filter_wiki, find_interlinks, get_namespace, utils
import gensim.utils
logger = logging.getLogger(__name__)
def segment_all_articles(file_path, min_article_character=200, workers=None, include_interlinks=False):
"""Extract article titles and sections from a MediaWiki bz2 database dump.
Parameters
----------
file_path : str
Path to MediaWiki dump, typical filename is <LANG>wiki-<YYYYMMDD>-pages-articles.xml.bz2
or <LANG>wiki-latest-pages-articles.xml.bz2.
min_article_character : int, optional
        Minimal number of characters for an article (excluding titles and leading gaps).
workers: int or None
Number of parallel workers, max(1, multiprocessing.cpu_count() - 1) if None.
include_interlinks: bool
Whether or not interlinks should be included in the output
Yields
------
(str, list of (str, str), (Optionally) list of (str, str))
Structure contains (title, [(section_heading, section_content), ...],
(Optionally) [(interlink_article, interlink_text), ...]).
"""
with gensim.utils.open(file_path, 'rb') as xml_fileobj:
wiki_sections_corpus = _WikiSectionsCorpus(
xml_fileobj, min_article_character=min_article_character, processes=workers,
include_interlinks=include_interlinks)
wiki_sections_corpus.metadata = True
wiki_sections_text = wiki_sections_corpus.get_texts_with_sections()
for article in wiki_sections_text:
yield article
def segment_and_write_all_articles(file_path, output_file, min_article_character=200, workers=None,
include_interlinks=False):
"""Write article title and sections to `output_file` (or stdout, if output_file is None).
The output format is one article per line, in json-line format with 4 fields::
'title' - title of article,
'section_titles' - list of titles of sections,
'section_texts' - list of content from sections,
(Optional) 'section_interlinks' - list of interlinks in the article.
Parameters
----------
file_path : str
Path to MediaWiki dump, typical filename is <LANG>wiki-<YYYYMMDD>-pages-articles.xml.bz2
or <LANG>wiki-latest-pages-articles.xml.bz2.
output_file : str or None
Path to output file in json-lines format, or None for printing to stdout.
min_article_character : int, optional
        Minimal number of characters for an article (excluding titles and leading gaps).
workers: int or None
Number of parallel workers, max(1, multiprocessing.cpu_count() - 1) if None.
include_interlinks: bool
Whether or not interlinks should be included in the output
"""
if output_file is None:
        outfile = getattr(sys.stdout, 'buffer', sys.stdout)  # we want to write bytes, so for py3 we use 'buffer'
else:
outfile = gensim.utils.open(output_file, 'wb')
try:
article_stream = segment_all_articles(file_path, min_article_character, workers=workers,
include_interlinks=include_interlinks)
for idx, article in enumerate(article_stream):
article_title, article_sections = article[0], article[1]
if include_interlinks:
interlinks = article[2]
output_data = {
"title": article_title,
"section_titles": [],
"section_texts": [],
}
if include_interlinks:
output_data["interlinks"] = interlinks
for section_heading, section_content in article_sections:
output_data["section_titles"].append(section_heading)
output_data["section_texts"].append(section_content)
if (idx + 1) % 100000 == 0:
logger.info("processed #%d articles (at %r now)", idx + 1, article_title)
outfile.write((json.dumps(output_data) + "\n").encode('utf-8'))
finally:
if output_file is not None:
outfile.close()
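# --- Illustrative sketch (editor's addition, not part of the original script). ---
# The same segmentation as the command-line entry point, but driven from Python. The dump and
# output paths below are hypothetical placeholders.
def _segment_dump_to_jsonl(dump_path='enwiki-latest-pages-articles.xml.bz2',
                           output_path='enwiki-latest.json.gz'):
    segment_and_write_all_articles(
        dump_path, output_path,
        min_article_character=200,   # skip stubs shorter than this
        workers=None,                # default: max(1, cpu_count() - 1)
        include_interlinks=False,    # set True to also emit the "interlinks" field
    )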
def extract_page_xmls(f):
"""Extract pages from a MediaWiki database dump.
Parameters
----------
f : file
File descriptor of MediaWiki dump.
Yields
------
str
XML strings for page tags.
"""
elems = (elem for _, elem in ElementTree.iterparse(f, events=("end",)))
elem = next(elems)
namespace = get_namespace(elem.tag)
ns_mapping = {"ns": namespace}
page_tag = "{%(ns)s}page" % ns_mapping
for elem in elems:
if elem.tag == page_tag:
yield ElementTree.tostring(elem)
# Prune the element tree, as per
# http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
# except that we don't need to prune backlinks from the parent
# because we don't use LXML.
# We do this only for <page>s, since we need to inspect the
# ./revision/text element. The pages comprise the bulk of the
# file, so in practice we prune away enough.
elem.clear()
def segment(page_xml, include_interlinks=False):
"""Parse the content inside a page tag
Parameters
----------
page_xml : str
Content from page tag.
include_interlinks : bool
Whether or not interlinks should be parsed.
Returns
-------
(str, list of (str, str), (Optionally) list of (str, str))
Structure contains (title, [(section_heading, section_content), ...],
(Optionally) [(interlink_article, interlink_text), ...]).
"""
elem = ElementTree.fromstring(page_xml)
filter_namespaces = ('0',)
namespace = get_namespace(elem.tag)
ns_mapping = {"ns": namespace}
text_path = "./{%(ns)s}revision/{%(ns)s}text" % ns_mapping
title_path = "./{%(ns)s}title" % ns_mapping
ns_path = "./{%(ns)s}ns" % ns_mapping
lead_section_heading = "Introduction"
top_level_heading_regex = r"\n==[^=].*[^=]==\n"
top_level_heading_regex_capture = r"\n==([^=].*[^=])==\n"
title = elem.find(title_path).text
text = elem.find(text_path).text
ns = elem.find(ns_path).text
if ns not in filter_namespaces:
text = None
if text is not None:
if include_interlinks:
interlinks = find_interlinks(text)
section_contents = re.split(top_level_heading_regex, text)
section_headings = [lead_section_heading] + re.findall(top_level_heading_regex_capture, text)
section_headings = [heading.strip() for heading in section_headings]
assert len(section_contents) == len(section_headings)
else:
interlinks = []
section_contents = []
section_headings = []
section_contents = [filter_wiki(section_content) for section_content in section_contents]
sections = list(zip(section_headings, section_contents))
if include_interlinks:
return title, sections, interlinks
else:
return title, sections
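# --- Illustrative sketch (editor's addition, not part of the original script). ---
# Demonstrates, on hypothetical toy wikitext, how the two top-level heading regexes used in
# `segment` split an article into (heading, content) pairs, with "Introduction" as the lead heading.
def _demo_section_split():
    heading_pattern = r"\n==[^=].*[^=]==\n"
    heading_capture = r"\n==([^=].*[^=])==\n"
    text = "Lead paragraph.\n== History ==\nEarly days.\n== Usage ==\nHow it is used.\n"
    headings = ["Introduction"] + [h.strip() for h in re.findall(heading_capture, text)]
    contents = re.split(heading_pattern, text)
    # [('Introduction', 'Lead paragraph.'), ('History', 'Early days.'), ('Usage', 'How it is used.\n')]
    return list(zip(headings, contents))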
class _WikiSectionsCorpus(WikiCorpus):
"""Treat a wikipedia articles dump (<LANG>wiki-<YYYYMMDD>-pages-articles.xml.bz2
or <LANG>wiki-latest-pages-articles.xml.bz2) as a (read-only) corpus.
The documents are extracted on-the-fly, so that the whole (massive) dump can stay compressed on disk.
"""
def __init__(self, fileobj, min_article_character=200, processes=None,
lemmatize=None, filter_namespaces=('0',), include_interlinks=False):
"""
Parameters
----------
fileobj : file
File descriptor of MediaWiki dump.
min_article_character : int, optional
            Minimal number of characters for an article (excluding titles and leading gaps).
processes : int, optional
Number of processes, max(1, multiprocessing.cpu_count() - 1) if None.
filter_namespaces : tuple of int, optional
Enumeration of namespaces that will be ignored.
include_interlinks: bool
Whether or not interlinks should be included in the output
"""
if lemmatize is not None:
raise NotImplementedError(
'The lemmatize parameter is no longer supported since Gensim 4.0.0. '
'If you need to lemmatize, use e.g. https://github.com/clips/pattern '
'to preprocess your corpus before submitting it to Gensim.'
)
self.fileobj = fileobj
self.filter_namespaces = filter_namespaces
self.metadata = False
if processes is None:
processes = max(1, multiprocessing.cpu_count() - 1)
self.processes = processes
self.min_article_character = min_article_character
self.include_interlinks = include_interlinks
def get_texts_with_sections(self):
"""Iterate over the dump, returning titles and text versions of all sections of articles.
Notes
-----
Only articles of sufficient length are returned (short articles & redirects
etc are ignored).
Note that this iterates over the **texts**; if you want vectors, just use
the standard corpus interface instead of this function:
.. sourcecode:: pycon
>>> for vec in wiki_corpus:
>>> print(vec)
Yields
------
(str, list of (str, str), list of (str, str))
Structure contains (title, [(section_heading, section_content), ...],
(Optionally)[(interlink_article, interlink_text), ...]).
"""
skipped_namespace, skipped_length, skipped_redirect = 0, 0, 0
total_articles, total_sections = 0, 0
page_xmls = extract_page_xmls(self.fileobj)
pool = multiprocessing.Pool(self.processes)
# process the corpus in smaller chunks of docs, because multiprocessing.Pool
# is dumb and would load the entire input into RAM at once...
for group in utils.chunkize(page_xmls, chunksize=10 * self.processes, maxsize=1):
for article in pool.imap(partial(segment, include_interlinks=self.include_interlinks),
group): # chunksize=10): partial(merge_names, b='Sons')
article_title, sections = article[0], article[1]
# article redirects are pruned here
if any(article_title.startswith(ignore + ':') for ignore in IGNORED_NAMESPACES): # filter non-articles
skipped_namespace += 1
continue
if not sections or sections[0][1].lstrip().lower().startswith("#redirect"): # filter redirect
skipped_redirect += 1
continue
if sum(len(body.strip()) for (_, body) in sections) < self.min_article_character:
# filter stubs (incomplete, very short articles)
skipped_length += 1
continue
total_articles += 1
total_sections += len(sections)
if self.include_interlinks:
interlinks = article[2]
yield (article_title, sections, interlinks)
else:
yield (article_title, sections)
logger.info(
"finished processing %i articles with %i sections (skipped %i redirects, %i stubs, %i ignored namespaces)",
total_articles, total_sections, skipped_redirect, skipped_length, skipped_namespace)
pool.terminate()
self.length = total_articles # cache corpus length
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s - %(module)s - %(levelname)s - %(message)s', level=logging.INFO)
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description=__doc__[:-136])
default_workers = max(1, multiprocessing.cpu_count() - 1)
parser.add_argument('-f', '--file', help='Path to MediaWiki database dump (read-only).', required=True)
parser.add_argument(
'-o', '--output',
help='Path to output file (stdout if not specified). If ends in .gz or .bz2, '
'the output file will be automatically compressed (recommended!).')
parser.add_argument(
'-w', '--workers',
help='Number of parallel workers for multi-core systems. Default: %(default)s.',
type=int,
default=default_workers
)
parser.add_argument(
'-m', '--min-article-character',
help="Ignore articles with fewer characters than this (article stubs). Default: %(default)s.",
type=int,
default=200
)
parser.add_argument(
'-i', '--include-interlinks',
help='Include a mapping for interlinks to other articles in the dump. The mappings format is: '
'"interlinks": [("article_title_1", "interlink_text_1"), ("article_title_2", "interlink_text_2"), ...]',
action='store_true'
)
args = parser.parse_args()
logger.info("running %s", " ".join(sys.argv))
segment_and_write_all_articles(
args.file, args.output,
min_article_character=args.min_article_character,
workers=args.workers,
include_interlinks=args.include_interlinks
)
logger.info("finished running %s", sys.argv[0])
| 15,638 | Python | .py | 323 | 39.866873 | 120 | 0.637133 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,173 | glove2word2vec.py | piskvorky_gensim/gensim/scripts/glove2word2vec.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <radimrehurek@seznam.cz>
# Copyright (C) 2016 Manas Ranjan Kar <manasrkar91@gmail.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""This script allows to convert GloVe vectors into the word2vec. Both files are
presented in text format and almost identical except that word2vec includes
number of vectors and its dimension which is only difference regard to GloVe.
Notes
-----
GloVe format (a real example can be found on the
`Stanford site <https://nlp.stanford.edu/projects/glove/>`_) ::
word1 0.123 0.134 0.532 0.152
word2 0.934 0.412 0.532 0.159
word3 0.334 0.241 0.324 0.188
...
word9 0.334 0.241 0.324 0.188
Word2Vec format (a real example can be found in the
`old w2v repository <https://code.google.com/archive/p/word2vec/>`_) ::
9 4
word1 0.123 0.134 0.532 0.152
word2 0.934 0.412 0.532 0.159
word3 0.334 0.241 0.324 0.188
...
word9 0.334 0.241 0.324 0.188
How to use
----------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath, get_tmpfile
>>> from gensim.models import KeyedVectors
>>> from gensim.scripts.glove2word2vec import glove2word2vec
>>>
>>> glove_file = datapath('test_glove.txt')
>>> tmp_file = get_tmpfile("test_word2vec.txt")
>>>
>>> _ = glove2word2vec(glove_file, tmp_file)
>>>
>>> model = KeyedVectors.load_word2vec_format(tmp_file)
Command line arguments
----------------------
.. program-output:: python -m gensim.scripts.glove2word2vec --help
:ellipsis: 0, -5
"""
import sys
import logging
import argparse
from gensim import utils
from gensim.utils import deprecated
from gensim.models.keyedvectors import KeyedVectors
logger = logging.getLogger(__name__)
def get_glove_info(glove_file_name):
"""Get number of vectors in provided `glove_file_name` and dimension of vectors.
Parameters
----------
glove_file_name : str
Path to file in GloVe format.
Returns
-------
(int, int)
Number of vectors (lines) of input file and its dimension.
"""
with utils.open(glove_file_name, 'rb') as f:
num_lines = sum(1 for _ in f)
with utils.open(glove_file_name, 'rb') as f:
num_dims = len(f.readline().split()) - 1
return num_lines, num_dims
@deprecated("KeyedVectors.load_word2vec_format(.., binary=False, no_header=True) loads GLoVE text vectors.")
def glove2word2vec(glove_input_file, word2vec_output_file):
"""Convert `glove_input_file` in GloVe format to word2vec format and write it to `word2vec_output_file`.
Parameters
----------
glove_input_file : str
Path to file in GloVe format.
word2vec_output_file: str
Path to output file.
Returns
-------
(int, int)
Number of vectors (lines) of input file and its dimension.
"""
glovekv = KeyedVectors.load_word2vec_format(glove_input_file, binary=False, no_header=True)
num_lines, num_dims = len(glovekv), glovekv.vector_size
logger.info("converting %i vectors from %s to %s", num_lines, glove_input_file, word2vec_output_file)
glovekv.save_word2vec_format(word2vec_output_file, binary=False)
return num_lines, num_dims
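# --- Illustrative sketch (editor's addition, not part of the original script). ---
# As the deprecation notice above suggests, current gensim can read GloVe text vectors directly,
# without a conversion step. The file path below is a hypothetical placeholder.
def _load_glove_directly(glove_path='glove.6B.50d.txt'):
    # `no_header=True` tells gensim the file lacks the leading "<count> <dimensions>" line.
    return KeyedVectors.load_word2vec_format(glove_path, binary=False, no_header=True)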
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s - %(module)s - %(levelname)s - %(message)s', level=logging.INFO)
parser = argparse.ArgumentParser(description=__doc__[:-135], formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-i", "--input", required=True, help="Path to input file in GloVe format")
parser.add_argument("-o", "--output", required=True, help="Path to output file")
args = parser.parse_args()
logger.info("running %s", ' '.join(sys.argv))
num_lines, num_dims = glove2word2vec(args.input, args.output)
logger.info('Converted model with %i vectors and %i dimensions', num_lines, num_dims)
| 3,985 | Python | .py | 95 | 37.726316 | 118 | 0.690853 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,174 | word2vec_standalone.py | piskvorky_gensim/gensim/scripts/word2vec_standalone.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""
USAGE: %(program)s -train CORPUS -output VECTORS -size SIZE -window WINDOW
-cbow CBOW -sample SAMPLE -hs HS -negative NEGATIVE -threads THREADS -iter ITER
-min_count MIN-COUNT -alpha ALPHA -binary BINARY -accuracy FILE
Trains a neural embedding model on text file CORPUS.
Parameters essentially reproduce those used by the original C tool
(see https://code.google.com/archive/p/word2vec/).
Parameters for training:
-train <file>
Use text data from <file> to train the model
-output <file>
Use <file> to save the resulting word vectors / word clusters
-size <int>
Set size of word vectors; default is 100
-window <int>
Set max skip length between words; default is 5
-sample <float>
Set threshold for occurrence of words. Those that appear with higher frequency in the training data
will be randomly down-sampled; default is 1e-3, useful range is (0, 1e-5)
-hs <int>
Use Hierarchical Softmax; default is 0 (not used)
-negative <int>
Number of negative examples; default is 5, common values are 3 - 10 (0 = not used)
-threads <int>
Use <int> threads (default 3)
-iter <int>
Run more training iterations (default 5)
-min_count <int>
This will discard words that appear less than <int> times; default is 5
-alpha <float>
Set the starting learning rate; default is 0.025 for skip-gram and 0.05 for CBOW
-binary <int>
        Save the resulting vectors in binary mode; default is 0 (off)
-cbow <int>
Use the continuous bag of words model; default is 1 (use 0 for skip-gram model)
-accuracy <file>
Compute accuracy of the resulting model analogical inference power on questions file <file>
See an example of questions file
at https://code.google.com/p/word2vec/source/browse/trunk/questions-words.txt
Example: python -m gensim.scripts.word2vec_standalone -train data.txt \
-output vec.txt -size 200 -sample 1e-4 -binary 0 -iter 3
"""
import logging
import os.path
import sys
import argparse
from numpy import seterr
from gensim.models.word2vec import Word2Vec, LineSentence # avoid referencing __main__ in pickle
logger = logging.getLogger(__name__)
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s', level=logging.INFO)
logger.info("running %s", " ".join(sys.argv))
seterr(all='raise') # don't ignore numpy errors
parser = argparse.ArgumentParser()
parser.add_argument("-train", help="Use text data from file TRAIN to train the model", required=True)
parser.add_argument("-output", help="Use file OUTPUT to save the resulting word vectors")
parser.add_argument("-window", help="Set max skip length WINDOW between words; default is 5", type=int, default=5)
parser.add_argument("-size", help="Set size of word vectors; default is 100", type=int, default=100)
parser.add_argument(
"-sample",
help="Set threshold for occurrence of words. "
"Those that appear with higher frequency in the training data will be randomly down-sampled; "
"default is 1e-3, useful range is (0, 1e-5)",
type=float, default=1e-3)
parser.add_argument(
"-hs", help="Use Hierarchical Softmax; default is 0 (not used)",
type=int, default=0, choices=[0, 1]
)
parser.add_argument(
"-negative", help="Number of negative examples; default is 5, common values are 3 - 10 (0 = not used)",
type=int, default=5
)
parser.add_argument("-threads", help="Use THREADS threads (default 3)", type=int, default=3)
parser.add_argument("-iter", help="Run more training iterations (default 5)", type=int, default=5)
parser.add_argument(
"-min_count", help="This will discard words that appear less than MIN_COUNT times; default is 5",
type=int, default=5
)
parser.add_argument(
"-alpha", help="Set the starting learning rate; default is 0.025 for skip-gram and 0.05 for CBOW",
type=float
)
parser.add_argument(
"-cbow", help="Use the continuous bag of words model; default is 1 (use 0 for skip-gram model)",
type=int, default=1, choices=[0, 1]
)
parser.add_argument(
"-binary", help="Save the resulting vectors in binary mode; default is 0 (off)",
type=int, default=0, choices=[0, 1]
)
parser.add_argument("-accuracy", help="Use questions from file ACCURACY to evaluate the model")
args = parser.parse_args()
if args.cbow == 0:
skipgram = 1
if not args.alpha:
args.alpha = 0.025
else:
skipgram = 0
if not args.alpha:
args.alpha = 0.05
corpus = LineSentence(args.train)
model = Word2Vec(
corpus, vector_size=args.size, min_count=args.min_count, workers=args.threads,
window=args.window, sample=args.sample, alpha=args.alpha, sg=skipgram,
hs=args.hs, negative=args.negative, cbow_mean=1, epochs=args.iter,
)
if args.output:
outfile = args.output
model.wv.save_word2vec_format(outfile, binary=args.binary)
else:
outfile = args.train.split('.')[0]
model.save(outfile + '.model')
if args.binary == 1:
model.wv.save_word2vec_format(outfile + '.model.bin', binary=True)
else:
model.wv.save_word2vec_format(outfile + '.model.txt', binary=False)
if args.accuracy:
questions_file = args.accuracy
        model.wv.evaluate_word_analogies(questions_file)  # analogy evaluation (Gensim 4.x API)
logger.info("finished running %s", os.path.basename(sys.argv[0]))
| 5,997 | Python | .py | 124 | 40.41129 | 118 | 0.65152 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,175 | make_wiki_online.py | piskvorky_gensim/gensim/scripts/make_wiki_online.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Copyright (C) 2012 Lars Buitinck <larsmans@gmail.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""
USAGE: %(program)s WIKI_XML_DUMP OUTPUT_PREFIX [VOCABULARY_SIZE]
Convert articles from a Wikipedia dump to (sparse) vectors. The input is a
bz2-compressed dump of Wikipedia articles, in XML format.
This actually creates several files:
* `OUTPUT_PREFIX_wordids.txt`: mapping between words and their integer ids
* `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation, in
Matrix Market format
* `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation
* `OUTPUT_PREFIX.tfidf_model`: TF-IDF model dump
The output Matrix Market files can then be compressed (e.g., by bzip2) to save
disk space; gensim's corpus iterators can work with compressed input, too.
`VOCABULARY_SIZE` controls how many of the most frequent words to keep (after
removing tokens that appear in more than 10%% of all documents). Defaults to
100,000.
If you have the `pattern` package installed, this script will use a fancy
lemmatization to get a lemma of each token (instead of plain alphabetic
tokenizer). The package is available at https://github.com/clips/pattern .
Example:
python -m gensim.scripts.make_wikicorpus ~/gensim/results/enwiki-latest-pages-articles.xml.bz2 ~/gensim/results/wiki
"""
import logging
import os.path
import sys
from gensim.corpora import Dictionary, HashDictionary, MmCorpus, WikiCorpus
from gensim.models import TfidfModel
# Wiki is first scanned for all distinct word types (~7M). The types that
# appear in more than 10% of articles are removed and from the rest, the
# DEFAULT_DICT_SIZE most frequent types are kept.
DEFAULT_DICT_SIZE = 100000
if __name__ == '__main__':
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s", ' '.join(sys.argv))
# check and process input arguments
if len(sys.argv) < 3:
print(globals()['__doc__'] % locals())
sys.exit(1)
inp, outp = sys.argv[1:3]
if not os.path.isdir(os.path.dirname(outp)):
raise SystemExit("Error: The output directory does not exist. Create the directory and try again.")
if len(sys.argv) > 3:
keep_words = int(sys.argv[3])
else:
keep_words = DEFAULT_DICT_SIZE
online = 'online' in program
lemmatize = 'lemma' in program
debug = 'nodebug' not in program
if online:
dictionary = HashDictionary(id_range=keep_words, debug=debug)
dictionary.allow_update = True # start collecting document frequencies
wiki = WikiCorpus(inp, lemmatize=lemmatize, dictionary=dictionary)
# ~4h on my macbook pro without lemmatization, 3.1m articles (august 2012)
MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000)
# with HashDictionary, the token->id mapping is only fully instantiated now, after `serialize`
dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
dictionary.save_as_text(outp + '_wordids.txt.bz2')
wiki.save(outp + '_corpus.pkl.bz2')
dictionary.allow_update = False
else:
wiki = WikiCorpus(inp, lemmatize=lemmatize) # takes about 9h on a macbook pro, for 3.5m articles (june 2011)
# only keep the most frequent words (out of total ~8.2m unique tokens)
wiki.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
# save dictionary and bag-of-words (term-document frequency matrix)
MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000) # another ~9h
wiki.dictionary.save_as_text(outp + '_wordids.txt.bz2')
# load back the id->word mapping directly from file
# this seems to save more memory, compared to keeping the wiki.dictionary object from above
dictionary = Dictionary.load_from_text(outp + '_wordids.txt.bz2')
del wiki
# initialize corpus reader and word->id mapping
mm = MmCorpus(outp + '_bow.mm')
# build tfidf, ~50min
tfidf = TfidfModel(mm, id2word=dictionary, normalize=True)
tfidf.save(outp + '.tfidf_model')
# save tfidf vectors in matrix market format
# ~4h; result file is 15GB! bzip2'ed down to 4.5GB
MmCorpus.serialize(outp + '_tfidf.mm', tfidf[mm], progress_cnt=10000)
logger.info("finished running %s", program)
| 4,600 | Python | .py | 87 | 48.172414 | 118 | 0.719216 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,176 | benchmark.py | piskvorky_gensim/gensim/scripts/benchmark.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Radim Rehurek <me@radimrehurek.com>
"""
Help script (template) for benchmarking. Run with:
/usr/bin/time --format "%E elapsed\n%Mk peak RAM" python -m gensim.scripts.benchmark ~/gensim-data/text9/text9.txt
"""
import logging
import sys
from gensim.models.word2vec import Text8Corpus, LineSentence # noqa: F401
from gensim.models import FastText, Word2Vec, Doc2Vec, Phrases # noqa: F401
from gensim import __version__
logger = logging.getLogger(__name__)
if __name__ == "__main__":
logging.basicConfig(
format='%(asctime)s [%(processName)s/%(process)d] [%(levelname)s] %(name)s:%(lineno)d: %(message)s',
level=logging.INFO,
)
if len(sys.argv) < 2:
print(globals()['__doc__'] % locals())
sys.exit(1)
corpus = Text8Corpus(sys.argv[1]) # text8/text9 format from https://mattmahoney.net/dc/textdata.html
cls = FastText
cls(corpus, workers=12, epochs=1).save(f'/tmp/{cls.__name__}.gensim{__version__}')
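# --- Illustrative sketch (editor's addition, not part of the original script). ---
# A rough in-process alternative to the /usr/bin/time invocation above, timing several model
# classes on the same corpus. The corpus path and the settings below are hypothetical.
def _benchmark_models(corpus_path='/tmp/text9.txt'):
    import time
    corpus = Text8Corpus(corpus_path)
    for model_cls in (Word2Vec, FastText):
        start = time.perf_counter()
        model_cls(corpus, workers=12, epochs=1)
        print(f'{model_cls.__name__}: {time.perf_counter() - start:.1f}s elapsed')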
| 1,037 | Python | .py | 25 | 37.76 | 116 | 0.676647 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,177 | make_wiki.py | piskvorky_gensim/gensim/scripts/make_wiki.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Copyright (C) 2012 Lars Buitinck <larsmans@gmail.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""
USAGE: %(program)s WIKI_XML_DUMP OUTPUT_PREFIX [VOCABULARY_SIZE]
Convert articles from a Wikipedia dump to (sparse) vectors. The input is a
bz2-compressed dump of Wikipedia articles, in XML format.
This actually creates several files:
* `OUTPUT_PREFIX_wordids.txt.bz2`: mapping between words and their integer ids
* `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation in Matrix Market format
* `OUTPUT_PREFIX_bow.mm.index`: index for `OUTPUT_PREFIX_bow.mm`
* `OUTPUT_PREFIX_bow.mm.metadata.cpickle`: titles of documents
* `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation in Matrix Market format
* `OUTPUT_PREFIX_tfidf.mm.index`: index for `OUTPUT_PREFIX_tfidf.mm`
* `OUTPUT_PREFIX.tfidf_model`: TF-IDF model
The output Matrix Market files can then be compressed (e.g., by bzip2) to save
disk space; gensim's corpus iterators can work with compressed input, too.
`VOCABULARY_SIZE` controls how many of the most frequent words to keep (after
removing tokens that appear in more than 10%% of all documents). Defaults to
100,000.
Example:
python -m gensim.scripts.make_wikicorpus ~/gensim/results/enwiki-latest-pages-articles.xml.bz2 ~/gensim/results/wiki
"""
import logging
import os.path
import sys
from gensim.corpora import Dictionary, HashDictionary, MmCorpus, WikiCorpus
from gensim.models import TfidfModel
# Wiki is first scanned for all distinct word types (~7M). The types that
# appear in more than 10% of articles are removed and from the rest, the
# DEFAULT_DICT_SIZE most frequent types are kept.
DEFAULT_DICT_SIZE = 100000
if __name__ == '__main__':
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s", ' '.join(sys.argv))
# check and process input arguments
if len(sys.argv) < 3:
print(globals()['__doc__'] % locals())
sys.exit(1)
inp, outp = sys.argv[1:3]
if not os.path.isdir(os.path.dirname(outp)):
raise SystemExit("Error: The output directory does not exist. Create the directory and try again.")
if len(sys.argv) > 3:
keep_words = int(sys.argv[3])
else:
keep_words = DEFAULT_DICT_SIZE
online = 'online' in program
debug = 'nodebug' not in program
if online:
dictionary = HashDictionary(id_range=keep_words, debug=debug)
dictionary.allow_update = True # start collecting document frequencies
wiki = WikiCorpus(inp, dictionary=dictionary)
# ~4h on my macbook pro without lemmatization, 3.1m articles (august 2012)
MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000, metadata=True)
# with HashDictionary, the token->id mapping is only fully instantiated now, after `serialize`
dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
dictionary.save_as_text(outp + '_wordids.txt.bz2')
wiki.save(outp + '_corpus.pkl.bz2')
dictionary.allow_update = False
else:
wiki = WikiCorpus(inp) # takes about 9h on a macbook pro, for 3.5m articles (june 2011)
# only keep the most frequent words (out of total ~8.2m unique tokens)
wiki.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
# save dictionary and bag-of-words (term-document frequency matrix)
MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000, metadata=True) # another ~9h
wiki.dictionary.save_as_text(outp + '_wordids.txt.bz2')
# load back the id->word mapping directly from file
# this seems to save more memory, compared to keeping the wiki.dictionary object from above
dictionary = Dictionary.load_from_text(outp + '_wordids.txt.bz2')
del wiki
# initialize corpus reader and word->id mapping
mm = MmCorpus(outp + '_bow.mm')
# build tfidf, ~50min
tfidf = TfidfModel(mm, id2word=dictionary, normalize=True)
tfidf.save(outp + '.tfidf_model')
# save tfidf vectors in matrix market format
# ~4h; result file is 15GB! bzip2'ed down to 4.5GB
MmCorpus.serialize(outp + '_tfidf.mm', tfidf[mm], progress_cnt=10000)
logger.info("finished running %s", program)
| 4,553 | Python | .py | 85 | 48.823529 | 118 | 0.717372 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,178 | make_wikicorpus.py | piskvorky_gensim/gensim/scripts/make_wikicorpus.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Copyright (C) 2012 Lars Buitinck <larsmans@gmail.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""
USAGE: %(program)s WIKI_XML_DUMP OUTPUT_PREFIX [VOCABULARY_SIZE]
Convert articles from a Wikipedia dump to (sparse) vectors. The input is a
bz2-compressed dump of Wikipedia articles, in XML format.
This actually creates several files:
* `OUTPUT_PREFIX_wordids.txt.bz2`: mapping between words and their integer ids
* `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation in Matrix Market format
* `OUTPUT_PREFIX_bow.mm.index`: index for `OUTPUT_PREFIX_bow.mm`
* `OUTPUT_PREFIX_bow.mm.metadata.cpickle`: titles of documents
* `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation in Matrix Market format
* `OUTPUT_PREFIX_tfidf.mm.index`: index for `OUTPUT_PREFIX_tfidf.mm`
* `OUTPUT_PREFIX.tfidf_model`: TF-IDF model
The output Matrix Market files can then be compressed (e.g., by bzip2) to save
disk space; gensim's corpus iterators can work with compressed input, too.
`VOCABULARY_SIZE` controls how many of the most frequent words to keep (after
removing tokens that appear in more than 10%% of all documents). Defaults to
100,000.
Example:
python -m gensim.scripts.make_wikicorpus ~/gensim/results/enwiki-latest-pages-articles.xml.bz2 ~/gensim/results/wiki
"""
import logging
import os.path
import sys
from gensim.corpora import Dictionary, HashDictionary, MmCorpus, WikiCorpus
from gensim.models import TfidfModel
# Wiki is first scanned for all distinct word types (~7M). The types that
# appear in more than 10% of articles are removed and from the rest, the
# DEFAULT_DICT_SIZE most frequent types are kept.
DEFAULT_DICT_SIZE = 100000
if __name__ == '__main__':
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s", ' '.join(sys.argv))
# check and process input arguments
if len(sys.argv) < 3:
print(globals()['__doc__'] % locals())
sys.exit(1)
inp, outp = sys.argv[1:3]
if not os.path.isdir(os.path.dirname(outp)):
raise SystemExit("Error: The output directory does not exist. Create the directory and try again.")
if len(sys.argv) > 3:
keep_words = int(sys.argv[3])
else:
keep_words = DEFAULT_DICT_SIZE
online = 'online' in program
debug = 'nodebug' not in program
if online:
dictionary = HashDictionary(id_range=keep_words, debug=debug)
dictionary.allow_update = True # start collecting document frequencies
wiki = WikiCorpus(inp, dictionary=dictionary)
# ~4h on my macbook pro without lemmatization, 3.1m articles (august 2012)
MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000, metadata=True)
# with HashDictionary, the token->id mapping is only fully instantiated now, after `serialize`
dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
dictionary.save_as_text(outp + '_wordids.txt.bz2')
wiki.save(outp + '_corpus.pkl.bz2')
dictionary.allow_update = False
else:
wiki = WikiCorpus(inp) # takes about 9h on a macbook pro, for 3.5m articles (june 2011)
# only keep the most frequent words (out of total ~8.2m unique tokens)
wiki.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
# save dictionary and bag-of-words (term-document frequency matrix)
MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000, metadata=True) # another ~9h
wiki.dictionary.save_as_text(outp + '_wordids.txt.bz2')
# load back the id->word mapping directly from file
# this seems to save more memory, compared to keeping the wiki.dictionary object from above
dictionary = Dictionary.load_from_text(outp + '_wordids.txt.bz2')
del wiki
# initialize corpus reader and word->id mapping
mm = MmCorpus(outp + '_bow.mm')
# build tfidf, ~50min
tfidf = TfidfModel(mm, id2word=dictionary, normalize=True)
tfidf.save(outp + '.tfidf_model')
# save tfidf vectors in matrix market format
# ~4h; result file is 15GB! bzip2'ed down to 4.5GB
MmCorpus.serialize(outp + '_tfidf.mm', tfidf[mm], progress_cnt=10000)
logger.info("finished running %s", program)
| 4,553 | Python | .py | 85 | 48.823529 | 118 | 0.717372 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,179 | package_info.py | piskvorky_gensim/gensim/scripts/package_info.py |
"""Get basic information about gensim & dependencies (useful for bug-reporting).
Examples
--------
You can use it through python
.. sourcecode:: pycon
>>> from gensim.scripts.package_info import package_info
>>>
>>> info = package_info()
or using CLI interface
::
python -m gensim.scripts.package_info --info
.. program-output:: python -m gensim.scripts.package_info --help
:ellipsis: 0, -4
"""
import argparse
import platform
import sys
import os
import numpy
import scipy
import gensim
def package_info():
"""Get the versions of Gensim and its dependencies,
the location where Gensim is installed and platform on which the system is running.
Returns
-------
dict of (str, str)
Dictionary containing the versions of Gensim, Python, NumPy, SciPy and platform information.
"""
return {
"Platform": platform.platform(),
"Python": sys.version.replace("\n", ', '),
"NumPy": numpy.__version__,
"SciPy": scipy.__version__,
"Gensim": gensim.__version__,
"Location": os.path.abspath(__file__),
}
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__[:-65], formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--info", help="Information about Gensim package", action="store_true")
args = parser.parse_args()
if args.info:
print("Gensim installation information\n")
for (k, v) in sorted(package_info().items()):
print("{}: {}".format(k, v))
| 1,546 | Python | .py | 45 | 29.666667 | 117 | 0.666218 | piskvorky/gensim | 15,546 | 4,374 | 408 | LGPL-2.1 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,180 | word2vec2tensor.py | piskvorky_gensim/gensim/scripts/word2vec2tensor.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Vimig Socrates <vimig.socrates@gmail.com>
# Copyright (C) 2016 Loreto Parisi <loretoparisi@gmail.com>
# Copyright (C) 2016 Silvio Olivastri <silvio.olivastri@gmail.com>
# Copyright (C) 2016 Radim Rehurek <radim@rare-technologies.com>
"""This script allows converting word-vectors from word2vec format into Tensorflow 2D tensor and metadata format.
This script used for word-vector visualization on `Embedding Visualization <http://projector.tensorflow.org/>`_.
How to use
----------
#. Convert your word-vector with this script (for example, we'll use model from
`gensim-data <https://rare-technologies.com/new-download-api-for-pretrained-nlp-models-and-datasets-in-gensim/>`_) ::
python -m gensim.downloader -d glove-wiki-gigaword-50 # download model in word2vec format
python -m gensim.scripts.word2vec2tensor -i ~/gensim-data/glove-wiki-gigaword-50/glove-wiki-gigaword-50.gz \
-o /tmp/my_model_prefix
#. Open http://projector.tensorflow.org/
#. Click "Load Data" button from the left menu.
#. Select "Choose file" in "Load a TSV file of vectors." and choose "/tmp/my_model_prefix_tensor.tsv" file.
#. Select "Choose file" in "Load a TSV file of metadata." and choose "/tmp/my_model_prefix_metadata.tsv" file.
#. ???
#. PROFIT!
For more information about TensorBoard TSV format please visit:
https://www.tensorflow.org/versions/master/how_tos/embedding_viz/
Command line arguments
----------------------
.. program-output:: python -m gensim.scripts.word2vec2tensor --help
:ellipsis: 0, -7
"""
import os
import sys
import logging
import argparse
import gensim
from gensim import utils
logger = logging.getLogger(__name__)
def word2vec2tensor(word2vec_model_path, tensor_filename, binary=False):
"""Convert file in Word2Vec format and writes two files 2D tensor TSV file.
File "tensor_filename"_tensor.tsv contains word-vectors, "tensor_filename"_metadata.tsv contains words.
Parameters
----------
word2vec_model_path : str
Path to file in Word2Vec format.
tensor_filename : str
Prefix for output files.
binary : bool, optional
True if the input file is in binary format.
"""
model = gensim.models.KeyedVectors.load_word2vec_format(word2vec_model_path, binary=binary)
outfiletsv = tensor_filename + '_tensor.tsv'
outfiletsvmeta = tensor_filename + '_metadata.tsv'
with utils.open(outfiletsv, 'wb') as file_vector, utils.open(outfiletsvmeta, 'wb') as file_metadata:
for word in model.index_to_key:
file_metadata.write(gensim.utils.to_utf8(word) + gensim.utils.to_utf8('\n'))
vector_row = '\t'.join(str(x) for x in model[word])
file_vector.write(gensim.utils.to_utf8(vector_row) + gensim.utils.to_utf8('\n'))
logger.info("2D tensor file saved to %s", outfiletsv)
logger.info("Tensor metadata file saved to %s", outfiletsvmeta)
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s - %(module)s - %(levelname)s - %(message)s', level=logging.INFO)
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__[:-138])
parser.add_argument("-i", "--input", required=True, help="Path to input file in word2vec format")
parser.add_argument("-o", "--output", required=True, help="Prefix path for output files")
parser.add_argument(
"-b", "--binary", action='store_const', const=True, default=False,
help="Set this flag if word2vec model in binary format (default: %(default)s)"
)
args = parser.parse_args()
logger.info("running %s", ' '.join(sys.argv))
word2vec2tensor(args.input, args.output, args.binary)
logger.info("finished running %s", os.path.basename(sys.argv[0]))
| 3,848
|
Python
|
.py
| 71
| 49.492958
| 120
| 0.707967
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,181
|
installwheel.py
|
piskvorky_gensim/.github/workflows/installwheel.py
|
"""Install a wheel for the current platform."""
import os
import platform
import subprocess
import sys
def main():
subdir = sys.argv[1]
vi = sys.version_info
if platform.system() in ('Linux', 'Darwin'):
arch = 'x86_64'
else:
arch = 'amd64'
want = f'-cp{vi.major}{vi.minor}-'
suffix = f'_{arch}.whl'
files = sorted(os.listdir(subdir))
for f in files:
if want in f and f.endswith(suffix):
command = [sys.executable, '-m', 'pip', 'install', os.path.join(subdir, f)]
subprocess.check_call(command)
return 0
print(f'no matches for {want} / {suffix} in {subdir}:')
print('\n'.join(files))
return 1
if __name__ == '__main__':
sys.exit(main())
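#
# Usage sketch (assumed invocation, not part of the original script; "dist/"
# is a placeholder for the directory holding the built wheels):
#
#   python .github/workflows/installwheel.py dist/
#
# The script installs the first wheel that matches the running interpreter's
# cpXY tag and the platform's architecture suffix, and exits non-zero if no
# wheel matches.
#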
| 754
|
Python
|
.py
| 25
| 24.44
| 87
| 0.595271
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,182
|
update_index.py
|
piskvorky_gensim/.github/workflows/update_index.py
|
"""Update index.html for the bucket listing
http://gensim-wheels.s3-website-us-east-1.amazonaws.com/
We do this ourselves as opposed to using wheelhouse_uploader because it's
much faster this way (seconds as compared to nearly an hour).
"""
import sys
import boto3
def main():
bucket = sys.argv[1]
prefix = sys.argv[2]
client = boto3.client('s3')
print("<html><body><ul>")
paginator = client.get_paginator('list_objects_v2')
for page in paginator.paginate(Bucket=bucket, Delimiter='/', Prefix=prefix):
for content in page.get('Contents', []):
key = content['Key']
#
# NB. use double quotes in href because that's what
# wheelhouse_uploader expects.
#
# https://github.com/ogrisel/wheelhouse-uploader/blob/eb32a7bb410769bb4212a9aa7fb3bfa3cef1aaec/wheelhouse_uploader/fetch.py#L15
#
print(f"""<li><a href="{key}">{key}</a></li>""")
print("</ul></body></html>")
if __name__ == '__main__':
main()
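#
# Usage sketch (assumed invocation; bucket and prefix values are
# placeholders):
#
#   python .github/workflows/update_index.py <bucket> <prefix> > index.html
#
# The generated index.html can then be uploaded to the bucket so the static
# website listing stays in sync with the uploaded wheels.
#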
| 1,038
|
Python
|
.py
| 26
| 33.346154
| 139
| 0.639083
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,183
|
test_wheel.py
|
piskvorky_gensim/.github/workflows/test_wheel.py
|
#!/usr/bin/env python
"""Test a Gensim wheel stored on S3.
Downloads the wheel, installs it into a fresh working environment, and then runs gensim tests.
usage:
python test_wheel.py <url> $(which python3.10)
where the URL comes from http://gensim-wheels.s3-website-us-east-1.amazonaws.com/
"""
import argparse
import io
import os
import subprocess
import tempfile
import urllib.parse
import urllib.request
import shutil
import sys
curr_dir = os.path.dirname(os.path.abspath(__file__))
def run(*command, **kwargs):
print("-" * 70, file=sys.stderr)
print(" ".join(command), file=sys.stderr)
print("-" * 70, file=sys.stderr)
subprocess.check_call(command, **kwargs)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("wheel_path", help="The location of the wheel. May be a URL or local path")
parser.add_argument("python", help="Which python binary to use to test the wheel")
parser.add_argument("--gensim-path", default=os.path.expanduser("~/git/gensim"), help="Where the gensim repo lives")
parser.add_argument("--keep", action="store_true", help="Do not delete the sandbox after testing")
parser.add_argument("--test", default="test", help="Specify which tests to run")
args = parser.parse_args()
_, python_version = subprocess.check_output([args.python, "--version"]).decode().strip().split(" ", 1)
try:
tmpdir = tempfile.mkdtemp(prefix=f"test_wheel-py{python_version}-")
tmp_test_path = os.path.join(tmpdir, "test")
shutil.copytree(os.path.join(args.gensim_path, "gensim/test"), tmp_test_path)
if args.wheel_path.startswith("http://") or args.wheel_path.startswith("https://"):
parsed = urllib.parse.urlparse(args.wheel_path)
filename = parsed.path.split('/')[-1]
wheel_path = os.path.join(tmpdir, filename)
urllib.request.urlretrieve(args.wheel_path, wheel_path)
else:
wheel_path = args.wheel_path
env_path = os.path.join(tmpdir, "env")
run("virtualenv", "-p", args.python, env_path)
python_exe = os.path.join(tmpdir, "env/bin/python")
run(python_exe, "-m", "pip", "install", wheel_path)
run(python_exe, "-m", "pip", "install", "mock", "pytest", "testfixtures")
pytest_exe = os.path.join(tmpdir, "env/bin/pytest")
run(pytest_exe, "-vvv", args.test, "--durations", "0", cwd=tmpdir)
finally:
if args.keep:
print(f"keeping {tmpdir}, remove it yourself when done")
else:
shutil.rmtree(tmpdir)
if __name__ == "__main__":
main()
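#
# Additional usage sketch (assumption; the URL and test path are
# placeholders): a single test module can be exercised by pointing --test at
# a path relative to the sandbox, and --keep preserves the virtualenv for
# post-mortem inspection:
#
#   python test_wheel.py <wheel-url> $(which python3.10) \
#       --test test/test_word2vec.py --keep
#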
| 2,623
|
Python
|
.py
| 56
| 40.839286
| 120
| 0.661563
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,184
|
check_wheels.py
|
piskvorky_gensim/release/check_wheels.py
|
# -*- coding: utf-8 -*-
#
# Authors: Michael Penkov <m@penkov.dev>
# Copyright (C) 2019 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Check that our wheels are all there."""
import os
import os.path
import re
import sys
#
# We expect this to be set as part of the release process.
#
release = os.environ['RELEASE']
assert re.match(r'^\d+\.\d+\.\d+', release), 'expected %r to be in major.minor.bugfix format' % release
dist_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'dist')
dist_path = os.path.abspath(dist_path)
assert os.path.isdir(dist_path), 'expected %r to be an existing subdirectory' % dist_path
expected = [
'gensim-%(release)s-cp310-cp310-macosx_10_9_x86_64.whl',
'gensim-%(release)s-cp310-cp310-macosx_11_0_arm64.whl',
'gensim-%(release)s-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl',
'gensim-%(release)s-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl',
'gensim-%(release)s-cp310-cp310-win_amd64.whl',
'gensim-%(release)s-cp310-cp310-win_arm64.whl',
'gensim-%(release)s-cp311-cp311-macosx_10_9_x86_64.whl',
'gensim-%(release)s-cp311-cp311-macosx_11_0_arm64.whl',
'gensim-%(release)s-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl',
'gensim-%(release)s-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl',
'gensim-%(release)s-cp311-cp311-win_amd64.whl',
'gensim-%(release)s-cp311-cp311-win_arm64.whl',
'gensim-%(release)s-cp38-cp38-macosx_10_9_x86_64.whl',
'gensim-%(release)s-cp38-cp38-macosx_11_0_arm64.whl',
'gensim-%(release)s-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl',
'gensim-%(release)s-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl',
'gensim-%(release)s-cp38-cp38-win_amd64.whl',
'gensim-%(release)s-cp39-cp39-macosx_10_9_x86_64.whl',
'gensim-%(release)s-cp39-cp39-macosx_11_0_arm64.whl',
'gensim-%(release)s-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl',
'gensim-%(release)s-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl',
'gensim-%(release)s-cp39-cp39-win_amd64.whl',
'gensim-%(release)s-cp39-cp39-win_arm64.whl',
'gensim-%(release)s.tar.gz',
]
fail = False
for f in expected:
wheel_path = os.path.join(dist_path, f % dict(release=release))
if not os.path.isfile(wheel_path):
print('FAIL: %s' % wheel_path)
fail = True
if not fail:
print('OK')
sys.exit(1 if fail else 0)
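#
# Usage sketch (assumed invocation; the version number is a placeholder):
#
#   RELEASE=4.3.3 python release/check_wheels.py
#
# The script prints a FAIL line for every expected wheel missing from dist/
# and exits with status 1; it prints OK and exits with 0 when everything is
# in place.
#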
| 2,530
|
Python
|
.py
| 53
| 44.283019
| 95
| 0.705787
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,185
|
annotate_pr.py
|
piskvorky_gensim/release/annotate_pr.py
|
"""Helper script for including change log entries in an open PR.
Automatically constructs the change log entry from the PR title.
Copies the entry to the window manager clipboard.
Opens the change log belonging to the specific PR in a browser window.
All you have to do is paste and click "commit changes".
"""
import json
import sys
import webbrowser
import smart_open
def copy_to_clipboard(text):
try:
import pyperclip
except ImportError:
print('pyperclip <https://pypi.org/project/pyperclip/> is missing.', file=sys.stderr)
print('copy-paste the following text manually:', file=sys.stderr)
print('\t', text, file=sys.stderr)
else:
pyperclip.copy(text)
prid = int(sys.argv[1])
url = "https://api.github.com/repos/RaRe-Technologies/gensim/pulls/%d" % prid
with smart_open.open(url) as fin:
prinfo = json.load(fin)
prinfo['user_login'] = prinfo['user']['login']
prinfo['user_html_url'] = prinfo['user']['html_url']
text = '[#%(number)s](%(html_url)s): %(title)s, by [@%(user_login)s](%(user_html_url)s)' % prinfo
copy_to_clipboard(text)
prinfo['head_repo_html_url'] = prinfo['head']['repo']['html_url']
prinfo['head_ref'] = prinfo['head']['ref']
edit_url = '%(head_repo_html_url)s/edit/%(head_ref)s/CHANGELOG.md' % prinfo
webbrowser.open(edit_url)
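#
# Usage sketch (assumed invocation; 1234 is a placeholder PR number):
#
#   python release/annotate_pr.py 1234
#
# This copies a Markdown change log line for PR #1234 to the clipboard (via
# pyperclip, if installed) and opens the contributor's CHANGELOG.md in a
# browser tab, ready for pasting.
#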
| 1,312
|
Python
|
.py
| 31
| 39.258065
| 97
| 0.709348
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,186
|
update_changelog.py
|
piskvorky_gensim/release/update_changelog.py
|
"""Updates the changelog with PRs merged since the last version."""
import datetime
import json
import os.path
import sys
import requests
URL = 'https://api.github.com/repos/RaRe-Technologies/gensim'
def summarize_prs(since_version):
"""Go through all closed PRs, summarize those merged after the previous release.
Yields one-line summaries of each relevant PR as a string.
"""
releases = requests.get(URL + '/releases').json()
most_recent_release = releases[0]['tag_name']
assert most_recent_release == since_version, 'unexpected most_recent_release: %r' % most_recent_release
published_at = releases[0]['published_at']
pulls = requests.get(URL + '/pulls', params={'state': 'closed'}).json()
for pr in pulls:
merged_at = pr['merged_at']
if merged_at is None or merged_at < published_at:
continue
summary = "* {msg} (__[{author}]({author_url})__, [#{pr}]({purl}))".format(
msg=pr['title'],
author=pr['user']['login'],
author_url=pr['user']['html_url'],
pr=pr['number'],
purl=pr['html_url'],
)
print(summary)
yield summary
def main():
root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
previous_version, new_version = sys.argv[1:3]
path = os.path.join(root, 'CHANGELOG.md')
with open(path) as fin:
contents = fin.read().split('\n')
header, contents = contents[:2], contents[2:]
header.append('## %s, %s\n' % (new_version, datetime.date.today().isoformat()))
header.append("""
### :star2: New Features
### :red_circle: Bug fixes
### :books: Tutorial and doc improvements
### :+1: Improvements
### :warning: Deprecations (will be removed in the next major release)
**COPY-PASTE DEPRECATIONS FROM THE PREVIOUS RELEASE HERE**
Please organize the PR summaries from below into the above sections.
You may remove empty sections. Be sure to include all deprecations.
""")
header += list(summarize_prs(previous_version))
with open(path, 'w') as fout:
fout.write('\n'.join(header + contents))
if __name__ == '__main__':
main()
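#
# Usage sketch (assumed invocation; version numbers are placeholders):
#
#   python release/update_changelog.py 4.3.2 4.3.3
#
# The first argument must match the tag of the most recent release, the
# second is the version being prepared; the script prepends a new section to
# CHANGELOG.md listing all PRs merged since that release.
#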
| 2,171
|
Python
|
.py
| 52
| 36.230769
| 108
| 0.648855
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,187
|
generate_changelog.py
|
piskvorky_gensim/release/generate_changelog.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Gensim Contributors
# Copyright (C) 2020 RaRe Technologies s.r.o.
# Licensed under the GNU LGPL v2.1 - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
"""Generate changelog entries for all PRs merged since the last release."""
import re
import requests
import sys
import time
def throttle_get(*args, seconds=10, **kwargs):
print(args, kwargs, file=sys.stderr)
result = requests.get(*args, **kwargs)
result.raise_for_status()
# Avoid Github API throttling; see https://github.com/RaRe-Technologies/gensim/pull/3203#issuecomment-887453109
time.sleep(seconds)
return result
#
# The releases get sorted in reverse chronological order, so the first release
# in the list is the most recent.
#
get = throttle_get('https://api.github.com/repos/RaRe-Technologies/gensim/releases')
most_recent_release = get.json()[0]
release_timestamp = most_recent_release['published_at']
def iter_merged_prs(since=release_timestamp):
page = 1
while True:
get = throttle_get(
'https://api.github.com/repos/RaRe-Technologies/gensim/pulls',
params={'state': 'closed', 'page': page},
)
pulls = get.json()
count = 0
for i, pr in enumerate(pulls):
if pr['merged_at'] and pr['merged_at'] > since:
count += 1
yield pr
if count == 0:
break
page += 1
def iter_closed_issues(since=release_timestamp):
page = 1
while True:
get = throttle_get(
'https://api.github.com/repos/RaRe-Technologies/gensim/issues',
params={'state': 'closed', 'page': page, 'since': since},
)
issues = get.json()
if not issues:
break
count = 0
for i, issue in enumerate(issues):
#
# In the github API, all pull requests are issues, but not vice versa.
#
if 'pull_request' not in issue and issue['closed_at'] > since:
count += 1
yield issue
if count == 0:
break
page += 1
fixed_issue_numbers = set()
for pr in iter_merged_prs(since=release_timestamp):
pr['user_login'] = pr['user']['login']
pr['user_html_url'] = pr['user']['html_url']
print('* [#%(number)d](%(html_url)s): %(title)s, by [@%(user_login)s](%(user_html_url)s)' % pr)
#
# Unfortunately, the GitHub API doesn't link PRs to issues that they fix,
# so we have to do it ourselves.
#
if pr['body'] is None:
#
# Weird edge case, PR with no body
#
continue
for match in re.finditer(r'fix(es)? #(?P<number>\d+)\b', pr['body'], flags=re.IGNORECASE):
fixed_issue_numbers.add(int(match.group('number')))
print()
print('### :question: Closed issues')
print()
print('TODO: move each issue to its appropriate section or delete if irrelevant')
print()
for issue in iter_closed_issues(since=release_timestamp):
if 'pull_request' in issue or issue['number'] in fixed_issue_numbers:
continue
print('* [#%(number)d](%(html_url)s): %(title)s' % issue)
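#
# Usage sketch (assumed invocation): the script writes Markdown to stdout, so
# a typical run redirects it into a scratch file for manual editing, e.g.
#
#   python release/generate_changelog.py > changelog_draft.md
#
# Progress messages from throttle_get go to stderr, so they do not pollute
# the redirected output.
#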
| 3,185
|
Python
|
.py
| 87
| 30.034483
| 115
| 0.622844
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,188
|
bump_version.py
|
piskvorky_gensim/release/bump_version.py
|
"""
Bump the version of Gensim in all the required places.
Usage: python3 bump_version.py <OLD_VERSION> <NEW_VERSION>
Example:
python3 bump_version.py "4.0.0beta" "4.0.0rc1"
"""
import os.path
import re
import sys
def bump(path, pattern, repl, check=True):
with open(path) as fin:
contents = fin.read()
new_contents = pattern.sub(repl, contents)
if check and new_contents == contents:
print('*' * 79)
print('WARNING: contents of %r unchanged after version bump' % path)
print('*' * 79)
with open(path, 'w') as fout:
fout.write(new_contents)
def bump_setup_py(root, previous_version, new_version):
path = os.path.join(root, 'setup.py')
pattern = re.compile("^ version='%s',$" % previous_version, re.MULTILINE)
repl = " version='%s'," % new_version
bump(path, pattern, repl)
def bump_docs_src_conf_py(root, previous_version, new_version):
path = os.path.join(root, 'docs', 'src', 'conf.py')
short_previous_version = '.'.join(previous_version.split('.')[:2])
short_new_version = new_version # '.'.join(new_version.split('.')[:2])
pattern = re.compile("^version = '%s'$" % short_previous_version, re.MULTILINE)
repl = "version = '%s'" % short_new_version
bump(path, pattern, repl, check=False) # short version won't always change
pattern = re.compile("^release = '%s'$" % previous_version, re.MULTILINE)
repl = "release = '%s'" % new_version
bump(path, pattern, repl)
def bump_gensim_init_py(root, previous_version, new_version):
path = os.path.join(root, 'gensim', '__init__.py')
pattern = re.compile("__version__ = '%s'$" % previous_version, re.MULTILINE)
repl = "__version__ = '%s'" % new_version
bump(path, pattern, repl)
def main():
root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
previous_version, new_version = sys.argv[1:3]
bump_setup_py(root, previous_version, new_version)
bump_docs_src_conf_py(root, previous_version, new_version)
bump_gensim_init_py(root, previous_version, new_version)
if __name__ == '__main__':
main()
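#
# Behaviour sketch (illustrative; file contents are made up): for a
# gensim/__init__.py containing the line
#
#   __version__ = '4.0.0beta'
#
# running `python3 bump_version.py "4.0.0beta" "4.0.0rc1"` rewrites it to
#
#   __version__ = '4.0.0rc1'
#
# and prints a loud warning whenever a file is left unchanged, i.e. the old
# version string was not found where it was expected.
#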
| 2,132
|
Python
|
.py
| 47
| 40.680851
| 83
| 0.652132
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,189
|
hijack_pr.py
|
piskvorky_gensim/release/hijack_pr.py
|
#!/usr/bin/env python
"""Hijack a PR to add commits as a maintainer.
This is a two-step process:
1. Add a git remote that points to the contributor's repo
2. Check out the actual contribution by reference
As a maintainer, you can add changes by making new commits and pushing them
back to the remote.
An example session:
$ release/hijack_pr.py 1234
$ git merge upstream/develop # or any other changes you want to make
$ release/hijack_pr.py push
The above commands check out the PR's code, apply your changes, and push them back.
Obviously, this requires the PR to be writable, but most gensim PRs are.
If they aren't, then leave it up to the PR author to make the required changes.
Sometimes, we'll make upstream changes that we want to merge into existing PRs.
This is particularly useful when some nagging build problem is affecting multiple PRs.
We can achieve this with:
$ release/hijack_pr.py merge-upstream-into 1234
This hijacks the PR and merges upstream/develop into it.
"""
import json
import subprocess
import sys
import smart_open
def check_output(command):
return subprocess.check_output(command).strip().decode('utf-8')
def push():
command = "git rev-parse --abbrev-ref HEAD@{upstream}".split()
remote, remote_branch = check_output(command).split('/')
current_branch = check_output(['git', 'branch', '--show-current'])
subprocess.check_call(['git', 'push', remote, f'{current_branch}:{remote_branch}'])
#
# Cleanup to prevent remotes and branches from piling up
#
subprocess.check_call(['git', 'checkout', 'develop'])
subprocess.check_call(['git', 'branch', '--delete', current_branch])
subprocess.check_call(['git', 'remote', 'remove', remote])
def hijack(prid):
url = f"https://api.github.com/repos/RaRe-Technologies/gensim/pulls/{prid}"
with smart_open.open(url) as fin:
prinfo = json.load(fin)
user = prinfo['head']['user']['login']
ssh_url = prinfo['head']['repo']['ssh_url']
remotes = check_output(['git', 'remote']).split('\n')
if user not in remotes:
subprocess.check_call(['git', 'remote', 'add', user, ssh_url])
subprocess.check_call(['git', 'fetch', user])
ref = prinfo['head']['ref']
subprocess.check_call(['git', 'checkout', f'{user}/{ref}'])
#
# Prefix the local branch name with the user to avoid naming clashes with
# existing branches, e.g. develop
#
subprocess.check_call(['git', 'switch', '-c', f'{user}_{ref}'])
#
# Set the upstream so we can push back to it more easily
#
subprocess.check_call(['git', 'branch', '--set-upstream-to', f'{user}/{ref}'])
def main():
if sys.argv[1] == "push":
push()
elif sys.argv[1] == 'merge-upstream-into':
prid = int(sys.argv[2])
hijack(prid)
subprocess.check_call(['git', 'fetch', 'upstream'])
subprocess.check_call(['git', 'merge', 'upstream/develop', '--no-edit'])
push()
else:
prid = int(sys.argv[1])
hijack(prid)
if __name__ == '__main__':
main()
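#
# Behaviour sketch (illustrative; 1234 is a placeholder PR number):
# `release/hijack_pr.py 1234` adds a git remote named after the PR author,
# checks the PR branch out locally as <user>_<branch>, and sets its upstream;
# `release/hijack_pr.py push` then pushes the commits back and deletes the
# temporary branch and remote so nothing piles up in the local clone.
#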
| 3,099
|
Python
|
.py
| 72
| 38.388889
| 97
| 0.674
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,190
|
to_python.py
|
piskvorky_gensim/docs/src/tools/to_python.py
|
"""Convert a Jupyter notebook to Python source in Sphinx Gallery format.
How to use:
$ pip install m2r
$ cat tutorial.ipynb | python to_python.py > tutorial.py
That will do the bulk of the conversion for you.
Stuff that you'll need to change yourself:
* Replace the placeholder with a unique RST label,
* Replace the placeholder with a decent tutorial title, and
* Little tweaks to make Sphinx happy.
YMMV ;)
"""
import json
import sys
import m2r
def write_docstring(fout):
fout.write('''r"""
Autogenerated docstring
=======================
Please replace me.
"""
''')
def process_markdown(source, fout):
def gen():
for markdown_line in source:
rst_lines = m2r.convert(markdown_line).split('\n')
skip_flag = True
for line in rst_lines:
if line == '' and skip_flag:
#
# Suppress empty lines at the start of each section, they
# are not needed.
#
continue
yield line
skip_flag = bool(line)
for line in gen():
fout.write('# %s\n' % line)
def output_cell(cell, fout):
if cell['cell_type'] == 'code':
for line in cell['source']:
fout.write(line.replace('%time ', ''))
elif cell['cell_type'] == 'markdown':
fout.write('#' * 79 + '\n')
process_markdown(cell['source'], fout)
fout.write('\n\n')
def main():
write_docstring(sys.stdout)
notebook = json.load(sys.stdin)
for cell in notebook['cells']:
output_cell(cell, sys.stdout)
if __name__ == '__main__':
main()
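#
# Behaviour sketch (illustrative): each markdown cell is converted to RST via
# m2r and emitted as '# '-prefixed comment lines, preceded by a row of 79 '#'
# characters, while code cells are copied through verbatim with any '%time '
# magics stripped.
#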
| 1,677
|
Python
|
.pyt
| 52
| 25.230769
| 77
| 0.589152
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,191
|
ldamodel_python_3_5.id2word
|
piskvorky_gensim/gensim/test/test_data/ldamodel_python_3_5.id2word
|
[binary pickle payload omitted: gensim.corpora.dictionary.Dictionary test fixture (id2token/token2id maps and document frequencies for the toy corpus: human, interface, computer, survey, system, response, time, user, eps, trees, graph, minors); the raw bytes are not valid UTF-8 text]
| 430
|
Python
|
.pyt
| 8
| 52.875
| 124
| 0.598109
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,192
|
ldamodel_python_2_7.state
|
piskvorky_gensim/gensim/test/test_data/ldamodel_python_2_7.state
|
[binary pickle payload omitted: gensim.models.ldamodel.LdaState test fixture saved under Python 2.7 (sufficient-statistics and eta numpy arrays); the raw bytes are not valid UTF-8 text]
| 588
|
Python
|
.pyt
| 15
| 38.266667
| 184
| 0.559233
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,193
|
ldamodel_python_3_5.state
|
piskvorky_gensim/gensim/test/test_data/ldamodel_python_3_5.state
|
[binary pickle payload omitted: gensim.models.ldamodel.LdaState test fixture saved under Python 3.5 (sufficient-statistics and eta numpy arrays); the raw bytes are not valid UTF-8 text]
| 825
|
Python
|
.pyt
| 17
| 47.588235
| 326
| 0.467244
|
piskvorky/gensim
| 15,546
| 4,374
| 408
|
LGPL-2.1
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,194
|
ldamodel_python_3_5
|
piskvorky_gensim/gensim/test/test_data/ldamodel_python_3_5
|
[binary pickle payload omitted: gensim.models.ldamodel.LdaModel test fixture saved under Python 3.5 (model hyperparameters, eta array and MT19937 random state); the raw bytes are not valid UTF-8 text and are truncated here]