repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
nmap-generation | nmap-generation-main/generators/utils.py | import enum
from math import ceil
from tokenize import Number
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
def show_images(images, max_row=None):
    """Display a list of images in a matplotlib figure.

    Parameters:
        images: list of images; an entry may also be an [image, cmap] pair,
            where cmap is a matplotlib LinearSegmentedColormap to use for
            that image.
        max_row (int, optional): maximum number of columns per row; when
            given, the grid wraps into ceil(len(images) / max_row) rows.
    """
    num_images = len(images)
    if max_row:
        fig, axes = plt.subplots(ncols=max_row, nrows=ceil(num_images / max_row))
    else:
        fig, axes = plt.subplots(ncols=num_images)
    # plt.subplots returns a bare Axes (not an array) for a 1x1 grid, and a
    # 2-D array for a multi-row grid; np.atleast_1d + ravel makes axes[i]
    # safe in every case (the original crashed on a single image and
    # mis-indexed the multi-row grid).
    axes = np.atleast_1d(axes).ravel()
    for i in range(num_images):
        cmap = None
        image = images[i]
        if isinstance(images[i][1], matplotlib.colors.LinearSegmentedColormap):
            cmap = images[i][1]
            image = images[i][0]
        axes[i].imshow(image, cmap=cmap)
    # Hide any surplus axes created by the grid wrap.
    for ax in axes[num_images:]:
        ax.set_visible(False)
    fig.tight_layout()
    plt.show()
def overwrite_channel(a, b, ch=0):
    """Copy one channel of image `b` into channel `ch` of image `a`, in place.

    `b` may be a single-channel (H, W) image, which is copied as-is, or a
    multi-channel (H, W, C) image, in which case its own channel `ch` is used.
    """
    # A scalar at b[0, 0] means b carries no channel axis.
    source = b[:, :] if np.isscalar(b[0, 0]) else b[:, :, ch]
    a[:, :, ch] = source
def normalize(vec):
    """Return `vec` scaled to unit Euclidean norm, as a numpy array.

    A zero vector (norm == 0) is returned unchanged (as an array) to avoid
    division by zero.
    """
    vec = np.array(vec)
    norm = np.linalg.norm(vec)
    # Compute the norm once (the original recomputed it for the division).
    return vec / norm if norm > 0 else vec
def normalize_img(img):
    """Normalize every pixel vector of `img` to unit norm, in place.

    Returns the same image object to allow call chaining.
    """
    for row_idx, row in enumerate(img):
        for col_idx, _ in enumerate(row):
            img[row_idx][col_idx] = normalize(img[row_idx][col_idx])
    return img
def rescale_0to1(img):
    """Linearly rescale `img` so its values span [0, 1].

    Returns a new float array; the input is not modified.  A constant image
    rescales to all zeros.
    """
    # Work on a float copy: the in-place division below raised TypeError on
    # integer input arrays in the original.
    _img = np.array(img, dtype=float)
    # Shifting by the minimum covers both signs; the original branched on
    # the sign of the minimum, which is equivalent.
    _img -= np.amin(_img)
    _max = np.amax(_img)
    if _max != 0:
        _img /= _max
    return _img
| 1,405 | 21.677419 | 79 | py |
nmap-generation | nmap-generation-main/generators/multiangle_generator.py | from tkinter import image_names
from skimage.io import imread
from skimage.color import rgba2rgb, rgb2gray
import numpy as np
from generator import Generator
from utils import rescale_0to1, show_images
class MultiangleGenerator(Generator):
    """Normal-map generator that combines four directionally-lit renders
    (up/down/left/right) of the same sprite into one normal map.

    Pipeline: load the four images -> grayscale -> encode each direction
    into R/G channel halves -> overlay-blend the two halves -> fix channel
    orientation -> save/plot via the Generator base class.
    """

    def __init__(self):
        super().__init__(
            folder='D:/git/nmap-generation/assets/4ilum',
            img_file='knight.png',
            normal_file_suffix='_multiangle_normal'
        )

    def _load_imgs(self):
        """Load the four per-direction images as a dict keyed by direction.

        File names are derived from the base image name, e.g.
        knight.png -> knight_up.png, knight_down.png, ...
        """
        f_name, f_ext = self.IMG_FILE.split('.')
        _format = lambda suff: f'{f_name}_{suff}.{f_ext}'
        input_file_names = {
            'up': _format('up'),
            'down': _format('down'),
            'left': _format('left'),
            'right': _format('right')
        }
        input_files = {}
        for k, input_name in input_file_names.items():
            input_files[k] = imread(f'{self.FOLDER}/{input_name}')
        return input_files

    def _apply_filters(self, imgs, normal_intensity=1):
        """Combine the four directional images into an RGB normal map.

        NOTE(review): `normal_intensity` is accepted but never used in this
        override — confirm whether it should scale the gradients here.
        """
        in_shape = imgs['up'].shape
        out_shape = (in_shape[0], in_shape[1], 3)
        imgs_gray = {}
        for k, img in imgs.items():
            # remove alpha before grayscale conversion (rgb2gray expects RGB)
            if img.shape[2] == 3:
                imgs_gray[k] = rgb2gray(img)
            else:
                imgs_gray[k] = rgb2gray(rgba2rgb(img))
            self._register_img_to_plot([img, 'gray'])
        # above-green | left-red: encode 'up'/'left' into the lower half
        # (0 .. 0.5) of the green/red channels, then invert around 0.5.
        img_al = np.zeros(out_shape)
        img_al[:, :, 1] = self._rescale_channel(imgs_gray['up'], 0, 0.5)
        img_al[:, :, 0] = self._rescale_channel(imgs_gray['left'], 0, 0.5)
        img_al = -1 * img_al + 0.5
        self._register_img_to_plot(img_al)
        # below-green | right-red: encode 'down'/'right' into the upper half
        # (0.51 .. 1) of the green/red channels.
        img_br = np.zeros(out_shape)
        img_br[:, :, 1] = self._rescale_channel(imgs_gray['down'], 0.51, 1)
        img_br[:, :, 0] = self._rescale_channel(imgs_gray['right'], 0.51, 1)
        self._register_img_to_plot(img_br)
        # overlay blend of the two encodings, per pixel: multiply for dark
        # upper-layer pixels, screen-like formula otherwise.
        # NOTE(review): standard overlay branches on the *lower* layer; this
        # branches on the mean of the upper layer — confirm intent.
        output_img = np.zeros(out_shape)
        for i in range(img_al.shape[0]):
            for j in range(img_al.shape[1]):
                upper_layer_px = img_br[i, j]
                lower_layer_px = img_al[i, j]
                if np.sum(upper_layer_px) / 3.0 < 0.5:
                    output_img[i, j] = 2 * lower_layer_px * upper_layer_px
                else:
                    output_img[i, j] = 1 - 2 * (1 - lower_layer_px) * (1 - upper_layer_px)
        self._register_img_to_plot(np.copy(output_img))
        # revert green channel to fix normal direction (green axis points
        # the other way in the target normal-map convention)
        output_img[:, :, 1] = 1 - output_img[:, :, 1]
        # set blue channel to a constant "facing the camera" component
        output_img[:, :, 2] = 0.9
        self._register_img_to_plot(output_img)
        return output_img

    def _rescale_channel(self, img, min, max):
        """Rescale `img` to [0, 1], then map linearly into [min, max]."""
        img_01 = rescale_0to1(img)
        return img_01 * (max - min) + min

    def run(self, normal_intensity=0.5):
        """Full pipeline: load, filter, plot intermediates, save, render.

        Parameters:
            normal_intensity (float): 0 .. 1; inverted and clipped before
                being passed to the filter stage.
        """
        imgs = self._load_imgs()
        img_normal = self._apply_filters(
            imgs=imgs,
            normal_intensity=np.clip(1 - normal_intensity, 0, 1)
        )
        show_images(self._imgs_to_plot, max_row=2)
        self._save_img(img_normal)
        print(f'{self.FOLDER}/{self.IMG_FILE} {self.FOLDER}/{self.NORMAL_FILE}')
        self._run_renderer()
if __name__ == '__main__':
    # Script entry point: run with a high normal intensity (0.9).
    MultiangleGenerator().run(.9)
| 3,343 | 31.466019 | 91 | py |
nmap-generation | nmap-generation-main/generators/sobel_filter.py | from utils import overwrite_channel, normalize_img, rescale_0to1
import numpy as np
from scipy.signal import convolve2d as conv2d
def merge_gradients(sobel_v, sobel_h, intensity=1.0):
    """Merge two Sobel gradient images into an RGB normal image.

    The red channel receives `sobel_h` / 2, the green channel `sobel_v` / 2,
    and the blue channel is filled uniformly with `intensity`.  The result
    is per-pixel normalized and rescaled into the 0..1 range.
    """
    height, width = sobel_v.shape
    # Blue channel starts out uniformly at `intensity`.
    merged = np.full((height, width, 3), intensity, dtype=np.float64)
    overwrite_channel(merged, sobel_h / 2, 0)  # red   <- sobel_h
    overwrite_channel(merged, sobel_v / 2, 1)  # green <- sobel_v
    normalized = normalize_img(merged)
    return rescale_0to1(normalized)
def partial_gradients(img_grayscale):
    """Return the (vertical, horizontal) Sobel responses of a grayscale image.

    Both responses use 'same'-sized 2-D convolution and are halved.
    """
    # Negated classic Sobel kernels, written out explicitly.
    kernel_v = np.array([[-1, -2, -1],
                         [0, 0, 0],
                         [1, 2, 1]])
    kernel_h = np.array([[1, 0, -1],
                         [2, 0, -2],
                         [1, 0, -1]])
    grad_v = conv2d(img_grayscale, kernel_v, mode='same') / 2
    grad_h = conv2d(img_grayscale, kernel_h, mode='same') / 2
    return grad_v, grad_h
def sobel_filter(img_grayscale, intensity=1.0):
    """Run the Sobel filter and merge both gradients into a normal image.

    Returns:
        tuple:
            img_merged_sobel: range 0 .. 1
            img_sobel_vert: raw vertical gradient
            img_sobel_hor: raw horizontal gradient
    """
    grad_v, grad_h = partial_gradients(img_grayscale)
    merged = merge_gradients(grad_v, grad_h, intensity)
    return merged, grad_v, grad_h
def normalized_sobel_filter(img_grayscale, intensity=1.0):
    """Run the Sobel filter and merge both gradients into a normal image.

    NOTE(review): this function was a byte-for-byte duplicate of
    sobel_filter — despite its name it performed no extra normalization —
    so it now delegates to sobel_filter to keep the two in sync.  Confirm
    whether a distinct normalized variant was intended.

    Returns:
        tuple:
            img_merged_sobel: range 0 .. 1
            img_sobel_vert, img_sobel_hor: the raw partial gradients
    """
    return sobel_filter(img_grayscale, intensity)
def sobel_gradient(img_sobel_v, img_sobel_h):
    """Return the element-wise gradient magnitude sqrt(v**2 + h**2)."""
    return np.sqrt(img_sobel_v ** 2 + img_sobel_h ** 2)
| 1,946 | 36.442308 | 96 | py |
nmap-generation | nmap-generation-main/generators/generator.py | from skimage.io import imread, imsave
import matplotlib.pyplot as plt
import numpy as np
import os
from utils import show_images
GODOT_EXECUTABLE = 'Godot_v3.5.1-stable_win64.exe'
REPOSITORY_PATH = 'D:/github/nmap-generation'
class Generator:
    """Base pipeline for generating a normal map from a single input image.

    Stages (each overridable by subclasses): load image -> apply filters ->
    plot registered intermediates -> save the normal map -> optionally run
    the Godot renderer for a preview.
    """

    def __init__(self,
                 folder='D:/git/nmap-generation/assets',
                 img_file='knight.png',
                 normal_file_suffix='_normal'):
        # Input/output locations; NORMAL_FILE is derived from the input name.
        self.FOLDER = folder
        self.IMG_FILE = img_file
        self.IMG_NAME, self.IMG_EXT = self.IMG_FILE.split('.')
        self.NORMAL_FILE = f'{self.IMG_NAME}{normal_file_suffix}.{self.IMG_EXT}'
        # Intermediate images collected for the final show_images() call.
        self._imgs_to_plot = []

    def _register_img_to_plot(self, *args):
        """Queue images for plotting.

        Each arg is either an image or an [image, 'gray'] pair requesting a
        gray colormap; bare 2-D images also default to gray.
        NOTE(review): `arg[1]` on a bare 2-D image reads its second row, not
        a flag — this relies on that row never equaling 'gray'.
        """
        for arg in args:
            if isinstance(arg[1], str) and arg[1] == 'gray':
                self._imgs_to_plot.append([arg[0], plt.cm.get_cmap('gray')])
            elif len(np.array(arg).shape) == 2:  # default single channel to gray
                self._imgs_to_plot.append([arg, plt.cm.get_cmap('gray')])
            else:
                self._imgs_to_plot.append(arg)

    # ----- STAGES ----- to be overridden
    def _load_img(self):
        """Read the input image from disk."""
        return imread(f'{self.FOLDER}/{self.IMG_FILE}')

    def _apply_filters(self, img, normal_intensity=1.0):
        """Identity filter in the base class; subclasses produce the map."""
        self._register_img_to_plot(img)
        return img

    def _save_img(self, img_normal):
        """Write the generated normal map next to the input image."""
        imsave(f'{self.FOLDER}/{self.NORMAL_FILE}', img_normal)

    def _run_renderer(self):
        # run godot renderer (currently disabled: early return below)
        print('skipping renderer')
        return  # TODO: Fix Scene not running
        cmdline_str = f'{GODOT_EXECUTABLE} --path {REPOSITORY_PATH}/renderer {REPOSITORY_PATH}/renderer/renderer3d/Renderer3D.tscn {self.FOLDER}/{self.IMG_FILE} {self.FOLDER}/{self.NORMAL_FILE}'
        print(cmdline_str)
        os.system(cmdline_str)

    # ----- RUN -----
    def run(self, normal_intensity=0.5):
        """
        Parameters:
            normal_instensity (float): 0 ... 1; inverted and clipped before
                being handed to the filter stage.
        """
        img = self._load_img()
        img_normal = self._apply_filters(
            img=img,
            normal_intensity=np.clip(1 - normal_intensity, 0, 1)
        )
        show_images(self._imgs_to_plot, max_row=2)
        self._save_img(img_normal)
        self._run_renderer()
if __name__ == '__main__':
    # Script entry point: run the base (identity) pipeline.
    Generator().run()
| 2,345 | 30.702703 | 194 | py |
poincare_glove | poincare_glove-master/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Run with:
sudo python ./setup.py install
"""
import os
import sys
import warnings
import ez_setup
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
# Fail fast on unsupported interpreters: gensim needs 2.7 or >= 3.5.
# BUGFIX: the second clause previously compared a tuple to an int
# (sys.version_info[:1] == 3), which is always False, so the Python 3
# minimum-version check never fired; compare the major version instead.
if sys.version_info[:2] < (2, 7) or (sys.version_info[0] == 3 and sys.version_info[:2] < (3, 5)):
    raise Exception('This version of gensim needs Python 2.7, 3.5 or later.')

# Bootstrap setuptools if it is missing or too old (see ez_setup.py).
ez_setup.use_setuptools()
# the following code is adapted from tornado's setup.py:
# https://github.com/tornadoweb/tornado/blob/master/setup.py
# to support installing without the extension on platforms where
# no compiler is available.
class custom_build_ext(build_ext):
    """Allow C extension building to fail.

    The C extension speeds up word2vec and doc2vec training, but is not essential.
    Any compiler error is downgraded to a warning so `setup.py install` still
    succeeds on platforms without a working C toolchain.
    """

    warning_message = """
********************************************************************
WARNING: %s could not
be compiled. No C extensions are essential for gensim to run,
although they do result in significant speed improvements for some modules.
%s

Here are some hints for popular operating systems:

If you are seeing this message on Linux you probably need to
install GCC and/or the Python development package for your
version of Python.

Debian and Ubuntu users should issue the following command:

    $ sudo apt-get install build-essential python-dev

RedHat, CentOS, and Fedora users should issue the following command:

    $ sudo yum install gcc python-devel

If you are seeing this message on OSX please read the documentation
here:

http://api.mongodb.org/python/current/installation.html#osx
********************************************************************
"""

    def run(self):
        # Catch any failure of the whole build_ext step and warn instead.
        try:
            build_ext.run(self)
        except Exception:
            e = sys.exc_info()[1]
            sys.stdout.write('%s\n' % str(e))
            warnings.warn(
                self.warning_message +
                "Extension modules" +
                "There was an issue with your platform configuration - see above.")

    def build_extension(self, ext):
        # Same downgrade-to-warning treatment, but per individual extension.
        name = ext.name
        try:
            build_ext.build_extension(self, ext)
        except Exception:
            e = sys.exc_info()[1]
            sys.stdout.write('%s\n' % str(e))
            warnings.warn(
                self.warning_message +
                "The %s extension module" % (name,) +
                "The output above this warning shows how the compilation failed.")

    # the following is needed to be able to add numpy's include dirs... without
    # importing numpy directly in this script, before it's actually installed!
    # http://stackoverflow.com/questions/19919905/how-to-bootstrap-numpy-installation-in-setup-py
    def finalize_options(self):
        build_ext.finalize_options(self)
        # Prevent numpy from thinking it is still in its setup process:
        # https://docs.python.org/2/library/__builtin__.html#module-__builtin__
        if isinstance(__builtins__, dict):
            __builtins__["__NUMPY_SETUP__"] = False
        else:
            __builtins__.__NUMPY_SETUP__ = False
        import numpy
        self.include_dirs.append(numpy.get_include())
# Include directories handed to the C extensions below.
model_dir = os.path.join(os.path.dirname(__file__), 'gensim', 'models')
gensim_dir = os.path.join(os.path.dirname(__file__), 'gensim')

cmdclass = {'build_ext': custom_build_ext}

# Hook up wheelhouse-uploader subcommands when invoked with one of them.
WHEELHOUSE_UPLOADER_COMMANDS = {'fetch_artifacts', 'upload_all'}
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
    import wheelhouse_uploader.cmd
    cmdclass.update(vars(wheelhouse_uploader.cmd))

# Compiler optimization flag applied to every C extension.
OPT_LEVEL = "-O3"
LONG_DESCRIPTION = u"""
==============================================
gensim -- Topic Modelling in Python
==============================================
|Travis|_
|Wheel|_
.. |Travis| image:: https://img.shields.io/travis/RaRe-Technologies/gensim/develop.svg
.. |Wheel| image:: https://img.shields.io/pypi/wheel/gensim.svg
.. _Travis: https://travis-ci.org/RaRe-Technologies/gensim
.. _Downloads: https://pypi.python.org/pypi/gensim
.. _License: http://radimrehurek.com/gensim/about.html
.. _Wheel: https://pypi.python.org/pypi/gensim
Gensim is a Python library for *topic modelling*, *document indexing* and *similarity retrieval* with large corpora.
Target audience is the *natural language processing* (NLP) and *information retrieval* (IR) community.
Features
---------
* All algorithms are **memory-independent** w.r.t. the corpus size (can process input larger than RAM, streamed, out-of-core),
* **Intuitive interfaces**
* easy to plug in your own input corpus/datastream (trivial streaming API)
* easy to extend with other Vector Space algorithms (trivial transformation API)
* Efficient multicore implementations of popular algorithms, such as online **Latent Semantic Analysis (LSA/LSI/SVD)**,
**Latent Dirichlet Allocation (LDA)**, **Random Projections (RP)**, **Hierarchical Dirichlet Process (HDP)** or **word2vec deep learning**.
* **Distributed computing**: can run *Latent Semantic Analysis* and *Latent Dirichlet Allocation* on a cluster of computers.
* Extensive `documentation and Jupyter Notebook tutorials <https://github.com/RaRe-Technologies/gensim/#documentation>`_.
If this feature list left you scratching your head, you can first read more about the `Vector
Space Model <http://en.wikipedia.org/wiki/Vector_space_model>`_ and `unsupervised
document analysis <http://en.wikipedia.org/wiki/Latent_semantic_indexing>`_ on Wikipedia.
Installation
------------
This software depends on `NumPy and Scipy <http://www.scipy.org/Download>`_, two Python packages for scientific computing.
You must have them installed prior to installing `gensim`.
It is also recommended you install a fast BLAS library before installing NumPy. This is optional, but using an optimized BLAS such as `ATLAS <http://math-atlas.sourceforge.net/>`_ or `OpenBLAS <http://xianyi.github.io/OpenBLAS/>`_ is known to improve performance by as much as an order of magnitude. On OS X, NumPy picks up the BLAS that comes with it automatically, so you don't need to do anything special.
The simple way to install `gensim` is::
pip install -U gensim
Or, if you have instead downloaded and unzipped the `source tar.gz <http://pypi.python.org/pypi/gensim>`_ package,
you'd run::
python setup.py test
python setup.py install
For alternative modes of installation (without root privileges, development
installation, optional install features), see the `install documentation <http://radimrehurek.com/gensim/install.html>`_.
This version has been tested under Python 2.7, 3.5 and 3.6. Support for Python 2.6, 3.3 and 3.4 was dropped in gensim 1.0.0. Install gensim 0.13.4 if you *must* use Python 2.6, 3.3 or 3.4. Support for Python 2.5 was dropped in gensim 0.10.0; install gensim 0.9.1 if you *must* use Python 2.5). Gensim's github repo is hooked against `Travis CI for automated testing <https://travis-ci.org/RaRe-Technologies/gensim>`_ on every commit push and pull request.
How come gensim is so fast and memory efficient? Isn't it pure Python, and isn't Python slow and greedy?
--------------------------------------------------------------------------------------------------------
Many scientific algorithms can be expressed in terms of large matrix operations (see the BLAS note above). Gensim taps into these low-level BLAS libraries, by means of its dependency on NumPy. So while gensim-the-top-level-code is pure Python, it actually executes highly optimized Fortran/C under the hood, including multithreading (if your BLAS is so configured).
Memory-wise, gensim makes heavy use of Python's built-in generators and iterators for streamed data processing. Memory efficiency was one of gensim's `design goals <http://radimrehurek.com/gensim/about.html>`_, and is a central feature of gensim, rather than something bolted on as an afterthought.
Documentation
-------------
* `QuickStart`_
* `Tutorials`_
* `Tutorial Videos`_
* `Official Documentation and Walkthrough`_
Citing gensim
-------------
When `citing gensim in academic papers and theses <https://scholar.google.cz/citations?view_op=view_citation&hl=en&user=9vG_kV0AAAAJ&citation_for_view=9vG_kV0AAAAJ:u-x6o8ySG0sC>`_, please use this BibTeX entry::
@inproceedings{rehurek_lrec,
title = {{Software Framework for Topic Modelling with Large Corpora}},
author = {Radim {\\v R}eh{\\r u}{\\v r}ek and Petr Sojka},
booktitle = {{Proceedings of the LREC 2010 Workshop on New
Challenges for NLP Frameworks}},
pages = {45--50},
year = 2010,
month = May,
day = 22,
publisher = {ELRA},
address = {Valletta, Malta},
language={English}
}
----------------
Gensim is open source software released under the `GNU LGPLv2.1 license <http://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html>`_.
Copyright (c) 2009-now Radim Rehurek
|Analytics|_
.. |Analytics| image:: https://ga-beacon.appspot.com/UA-24066335-5/your-repo/page-name
.. _Analytics: https://github.com/igrigorik/ga-beacon
.. _Official Documentation and Walkthrough: http://radimrehurek.com/gensim/
.. _Tutorials: https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#tutorials
.. _Tutorial Videos: https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#videos
.. _QuickStart: https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/gensim%20Quick%20Start.ipynb
"""
# Extra dependency sets, grouped by installation scenario.
distributed_env = ['Pyro4 >= 4.27']

win_testenv = [
    'pytest',
    'pytest-rerunfailures',
    'mock',
    'cython',
    'pyemd',
    'testfixtures',
    'scikit-learn',
    'Morfessor==2.0.2a4',
]

# Linux CI additionally exercises the deep-learning integrations.
linux_testenv = win_testenv + [
    'annoy',
    'tensorflow <= 1.3.0',
    'keras >= 2.0.4',
]

setup(
    name='gensim',
    version='3.4.0',
    description='Python framework for fast Vector Space Modelling',
    long_description=LONG_DESCRIPTION,
    # C extensions are built from pre-generated .c files, so cython is not
    # required at install time; failures are tolerated via custom_build_ext.
    ext_modules=[
        Extension('gensim.models.word2vec_inner',
                  sources=['./gensim/models/word2vec_inner.c'],
                  extra_compile_args=[OPT_LEVEL],
                  include_dirs=[model_dir]),
        Extension('gensim.models.doc2vec_inner',
                  sources=['./gensim/models/doc2vec_inner.c'],
                  extra_compile_args=[OPT_LEVEL],
                  include_dirs=[model_dir]),
        Extension('gensim.corpora._mmreader',
                  extra_compile_args=[OPT_LEVEL],
                  sources=['./gensim/corpora/_mmreader.c']),
        Extension('gensim.models.fasttext_inner',
                  sources=['./gensim/models/fasttext_inner.c'],
                  extra_compile_args=[OPT_LEVEL],
                  include_dirs=[model_dir]),
        Extension('gensim.models._utils_any2vec',
                  sources=['./gensim/models/_utils_any2vec.c'],
                  extra_compile_args=[OPT_LEVEL],
                  include_dirs=[model_dir]),
        Extension('gensim._matutils',
                  extra_compile_args=[OPT_LEVEL],
                  sources=['./gensim/_matutils.c']),
    ],
    cmdclass=cmdclass,
    packages=find_packages(),
    author=u'Radim Rehurek',
    author_email='me@radimrehurek.com',
    url='http://radimrehurek.com/gensim',
    download_url='http://pypi.python.org/pypi/gensim',
    license='LGPLv2.1',
    keywords='Singular Value Decomposition, SVD, Latent Semantic Indexing, '
             'LSA, LSI, Latent Dirichlet Allocation, LDA, '
             'Hierarchical Dirichlet Process, HDP, Random Projections, '
             'TFIDF, word2vec',
    platforms='any',
    zip_safe=False,
    classifiers=[  # from http://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Text Processing :: Linguistic',
    ],
    test_suite="gensim.test",
    # numpy is needed at build time for the include dirs (see custom_build_ext).
    setup_requires=[
        'numpy >= 1.11.3'
    ],
    install_requires=[
        'numpy >= 1.11.3',
        'scipy >= 0.18.1',
        'six >= 1.5.0',
        'smart_open >= 1.2.1',
    ],
    tests_require=linux_testenv,
    extras_require={
        'distributed': distributed_env,
        'test-win': win_testenv,
        'test': linux_testenv,
        'docs': linux_testenv + distributed_env + ['sphinx', 'sphinxcontrib-napoleon', 'plotly', 'pattern', 'sphinxcontrib.programoutput'],
    },
    include_package_data=True,
)
| 12,941 | 38.218182 | 455 | py |
poincare_glove | poincare_glove-master/ez_setup.py | #!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import shutil
import sys
import tempfile
import tarfile
import optparse
import subprocess
import platform
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
DEFAULT_VERSION = "1.3.2"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
def _python_cmd(*args):
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _check_call_py24(cmd, *args, **kwargs):
res = subprocess.call(cmd, *args, **kwargs)
class CalledProcessError(Exception):
pass
if not res == 0:
msg = "Command '%s' return non-zero exit status %d" % (cmd, res)
raise CalledProcessError(msg)
vars(subprocess).setdefault('check_call', _check_call_py24)
def _install(tarball, install_args=()):
    """Unpack `tarball` into a temp dir and run `setup.py install` there.

    Returns 2 when the install step fails (used as the process exit code);
    the temp dir is removed in all cases.
    """
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()

        # going in the directory (the single top-level dir of the tarball)
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)

        # installing
        log.warn('Installing Setuptools')
        if not _python_cmd('setup.py', 'install', *install_args):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
            # exitcode will be 2
            return 2
    finally:
        # Always restore the working directory and clean up the temp dir.
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
def _build_egg(egg, tarball, to_dir):
    """Unpack `tarball` and build a setuptools egg into `to_dir`.

    Raises IOError if the expected egg file does not exist afterwards.
    """
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()

        # going in the directory (the single top-level dir of the tarball)
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)

        # building an egg
        log.warn('Building a Setuptools egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    finally:
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
    """Ensure a setuptools egg exists in `to_dir`, then import from it.

    Downloads and builds the egg when missing, prepends it to sys.path and
    marks it as the bootstrap source for subsequent installs.
    """
    egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        tarball = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, tarball, to_dir)
    sys.path.insert(0, egg)

    # Remove previously-imported pkg_resources if present (see
    # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
    if 'pkg_resources' in sys.modules:
        del sys.modules['pkg_resources']

    import setuptools
    setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15):
    """Ensure setuptools >= `version` is importable, bootstrapping if needed.

    Downloads into `to_dir` when setuptools is missing or too old; exits
    with status 2 if an incompatible setuptools was already imported and
    therefore cannot be replaced in this process.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        import pkg_resources
    except ImportError:
        return _do_download(version, download_base, to_dir, download_delay)
    try:
        pkg_resources.require("setuptools>=" + version)
        return
    except pkg_resources.VersionConflict:
        e = sys.exc_info()[1]
        if was_imported:
            # Too late to swap setuptools inside a running process.
            sys.stderr.write(
                "The required version of setuptools (>=%s) is not available,\n"
                "and can't be installed while this script is running. Please\n"
                "install a more recent version first, using\n"
                "'easy_install -U setuptools'."
                "\n\n(Currently using %r)\n" % (version, e.args[0]))
            sys.exit(2)
        else:
            del pkg_resources, sys.modules['pkg_resources']  # reload ok
            return _do_download(version, download_base, to_dir,
                                download_delay)
    except pkg_resources.DistributionNotFound:
        return _do_download(version, download_base, to_dir,
                            download_delay)
def _clean_check(cmd, target):
    """
    Run the command to download target. If the command fails, clean up before
    re-raising the error.
    """
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        # Remove a partially-written download so a retry starts clean.
        if os.access(target, os.F_OK):
            os.unlink(target)
        raise
def download_file_powershell(url, target):
    """
    Download the file at url to target using Powershell (which will validate
    trust). Raise an exception if the command cannot complete.
    """
    target = os.path.abspath(target)
    cmd = [
        'powershell',
        '-Command',
        "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" % vars(),
    ]
    _clean_check(cmd, target)
def has_powershell():
    """Return True when a working `powershell` is available (Windows only)."""
    if platform.system() != 'Windows':
        return False
    cmd = ['powershell', '-Command', 'echo test']
    devnull = open(os.path.devnull, 'wb')
    try:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        except Exception:
            return False
    finally:
        devnull.close()
    return True
# Each downloader advertises its availability via a `.viable` callable.
download_file_powershell.viable = has_powershell
def download_file_curl(url, target):
    """Download `url` to `target` with curl; clean up `target` on failure."""
    cmd = ['curl', url, '--silent', '--output', target]
    _clean_check(cmd, target)
def has_curl():
    """Return True when a working `curl` executable is on PATH."""
    cmd = ['curl', '--version']
    devnull = open(os.path.devnull, 'wb')
    try:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        except Exception:
            return False
    finally:
        devnull.close()
    return True
download_file_curl.viable = has_curl
def download_file_wget(url, target):
    """Download `url` to `target` with wget; clean up `target` on failure."""
    cmd = ['wget', url, '--quiet', '--output-document', target]
    _clean_check(cmd, target)
def has_wget():
    """Return True when a working `wget` executable is on PATH."""
    cmd = ['wget', '--version']
    devnull = open(os.path.devnull, 'wb')
    try:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        except Exception:
            return False
    finally:
        devnull.close()
    return True
download_file_wget.viable = has_wget
def download_file_insecure(url, target):
    """
    Use Python to download the file, even though it cannot authenticate the
    connection.
    """
    # Python 2/3 compatible urlopen import.
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    src = dst = None
    try:
        src = urlopen(url)
        # Read/write all in one block, so we don't create a corrupt file
        # if the download is interrupted.
        data = src.read()
        dst = open(target, "wb")
        dst.write(data)
    finally:
        if src:
            src.close()
        if dst:
            dst.close()
# Always available, but last resort (no TLS validation).
download_file_insecure.viable = lambda: True
def get_best_downloader():
    """Return the first viable downloader, in order of preference.

    Preference: powershell, curl, wget, then the insecure pure-Python
    fallback (whose `.viable` is always true).
    """
    candidates = (
        download_file_powershell,
        download_file_curl,
        download_file_wget,
        download_file_insecure,
    )
    for candidate in candidates:
        if candidate.viable():
            return candidate
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15,
                        downloader_factory=get_best_downloader):
    """Download setuptools from a specified location and return its filename

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.

    ``downloader_factory`` should be a function taking no arguments and
    returning a function for downloading a URL to a target.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    tgz_name = "setuptools-%s.tar.gz" % version
    url = download_base + tgz_name
    saveto = os.path.join(to_dir, tgz_name)
    if not os.path.exists(saveto):  # Avoid repeated downloads
        log.warn("Downloading %s", url)
        downloader = downloader_factory()
        downloader(url, saveto)
    return os.path.realpath(saveto)
def _extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().

    (Backport of TarFile.extractall for old Pythons; `self` is a TarFile.)
    """
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448  # decimal for oct 0700
        self.extract(tarinfo, path)
    # Reverse sort directories so children are fixed up before parents.
    if sys.version_info < (2, 4):
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)  # noqa:F821
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)

    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the setuptools package
"""
install_args = []
if options.user_install:
if sys.version_info < (2, 6):
log.warn("--user requires Python 2.6 or later")
raise SystemExit(1)
install_args.append('--user')
return install_args
def _parse_args():
    """
    Parse the command line for options

    Recognized flags: --user (user-site install), --download-base (mirror
    URL), --insecure (force the non-validating downloader).  Positional
    arguments are ignored.
    """
    parser = optparse.OptionParser()
    parser.add_option(
        '--user', dest='user_install', action='store_true', default=False,
        help='install in user site package (requires Python 2.6 or later)')
    parser.add_option(
        '--download-base', dest='download_base', metavar="URL",
        default=DEFAULT_URL,
        help='alternative URL from where to download the setuptools package')
    parser.add_option(
        '--insecure', dest='downloader_factory', action='store_const',
        const=lambda: download_file_insecure, default=get_best_downloader,
        help='Use internal, non-validating downloader'
    )
    options, args = parser.parse_args()
    # positional arguments are ignored
    return options
def main(version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall

    Returns the exit code from the install step (see _install).
    """
    options = _parse_args()
    tarball = download_setuptools(download_base=options.download_base,
                                  downloader_factory=options.downloader_factory)
    return _install(tarball, _build_install_args(options))
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit code.
    sys.exit(main())
| 12,221 | 29.103448 | 87 | py |
poincare_glove | poincare_glove-master/gensim/nosy.py | #!/usr/bin/env python
"""
A simple testrunner for nose (or anything else).
Watch for changes in all file types specified in 'EXTENSIONS'.
If changes, run test executable in 'EXECUTABLE', with default
arguments 'DEFAULTARGS'.
The --with-color option needs the "rudolf" nose plugin. See:
http://pypi.python.org/pypi/rudolf/
Originally by Jeff Winkler, http://jeffwinkler.net
Forked from wkral http://github.com/wkral/Nosy
"""
import os
import stat
import time
import datetime
import sys
import fnmatch
EXTENSIONS = ['*.py']
EXECUTABLE = 'nosetests test/'
DEFAULTARGS = '--with-color -exe' # -w tests'
def check_sum():
    """
    Return a long which can be used to know if any .py files have changed.
    """
    total = 0
    for root, dirs, files in os.walk(os.getcwd()):
        for pattern in EXTENSIONS:
            for name in fnmatch.filter(files, pattern):
                st = os.stat(os.path.join(root, name))
                # Combine size and mtime so either kind of change is detected.
                total += st[stat.ST_SIZE] + st[stat.ST_MTIME]
    return total
if __name__ == '__main__':
    # Poll once a second; re-run the test command whenever the checksum of
    # the watched files changes.  Extra CLI args are forwarded to the runner.
    val = 0
    try:
        while True:
            if check_sum() != val:
                val = check_sum()
                os.system('%s %s %s' % (EXECUTABLE, DEFAULTARGS, ' '.join(sys.argv[1:])))
                print(datetime.datetime.now().__str__())
                print('=' * 77)
            time.sleep(1)
    except KeyboardInterrupt:
        print('Goodbye')
| 1,407 | 24.6 | 89 | py |
poincare_glove | poincare_glove-master/gensim/downloader.py | """
This module is an API for downloading, getting information and loading datasets/models.
Give information about available models/datasets:
>>> import gensim.downloader as api
>>>
>>> api.info() # return dict with info about available models/datasets
>>> api.info("text8") # return dict with info about "text8" dataset
Model example:
>>> import gensim.downloader as api
>>>
>>> model = api.load("glove-twitter-25") # load glove vectors
>>> model.most_similar("cat") # show words that similar to word 'cat'
Dataset example:
>>> import gensim.downloader as api
>>> from gensim.models import Word2Vec
>>>
>>> dataset = api.load("text8") # load dataset as iterable
>>> model = Word2Vec(dataset) # train w2v model
Also, this API available via CLI::
python -m gensim.downloader --info <dataname> # same as api.info(dataname)
python -m gensim.downloader --download <dataname> # same as api.load(dataname, return_path=True)
"""
from __future__ import absolute_import
import argparse
import os
import json
import logging
import sys
import errno
import hashlib
import math
import shutil
import tempfile
from functools import partial
if sys.version_info[0] == 2:
import urllib
from urllib2 import urlopen
else:
import urllib.request as urllib
from urllib.request import urlopen
# All datasets/models are cached under ~/gensim-data.
user_dir = os.path.expanduser('~')
base_dir = os.path.join(user_dir, 'gensim-data')
logger = logging.getLogger('gensim.api')

# Master index of available datasets/models, and the release endpoint data is fetched from.
DATA_LIST_URL = "https://raw.githubusercontent.com/RaRe-Technologies/gensim-data/master/list.json"
DOWNLOAD_BASE_URL = "https://github.com/RaRe-Technologies/gensim-data/releases/download"
def _progress(chunks_downloaded, chunk_size, total_size, part=1, total_parts=1):
    """Reporthook for :func:`urllib.urlretrieve`, code from [1]_.

    Renders a 50-character progress bar to stdout; for multipart downloads
    the bar is prefixed with " Part <k>/<n>".

    Parameters
    ----------
    chunks_downloaded : int
        Number of chunks of data that have been downloaded.
    chunk_size : int
        Size of each chunk of data.
    total_size : int
        Total size of the dataset/model.
    part : int, optional
        Number of current part, used only if `no_parts` > 1.
    total_parts : int, optional
        Total number of parts.

    References
    ----------
    [1] https://gist.github.com/vladignatyev/06860ec2040cb497f0f3

    """
    bar_len = 50
    size_downloaded = float(chunks_downloaded * chunk_size)
    filled_len = int(math.floor((bar_len * size_downloaded) / total_size))
    percent_downloaded = round(((size_downloaded * 100) / total_size), 1)
    bar = '=' * filled_len + '-' * (bar_len - filled_len)
    # Both single-part and multipart messages share this tail; only the
    # prefix differs, so build the common piece once (previously duplicated).
    progress = '[%s] %s%s %s/%sMB downloaded' % (
        bar, percent_downloaded, "%",
        round(size_downloaded / (1024 * 1024), 1),
        round(float(total_size) / (1024 * 1024), 1)
    )
    if total_parts == 1:
        sys.stdout.write('\r' + progress)
    else:
        sys.stdout.write('\r Part %s/%s %s' % (part + 1, total_parts, progress))
    sys.stdout.flush()
def _create_base_dir():
    """Create the gensim-data directory in home directory, if it has not been already created.

    Raises
    ------
    Exception
        An exception is raised when read/write permissions are not available or a file named gensim-data
        already exists in the home directory.

    """
    if os.path.isdir(base_dir):
        # already set up, nothing to do (guard clause avoids the nested try)
        return
    try:
        logger.info("Creating %s", base_dir)
        os.makedirs(base_dir)
    except OSError as e:
        if e.errno == errno.EEXIST:
            # a plain *file* named gensim-data blocks the directory
            raise Exception(
                "Not able to create folder gensim-data in {}. File gensim-data "
                "exists in the directory already.".format(user_dir)  # fixed typo: "direcory"
            )
        else:
            raise Exception(
                "Can't create {}. Make sure you have the read/write permissions "
                "to the directory or you can try creating the folder manually"
                .format(base_dir)
            )
def _calculate_md5_checksum(fname):
    """Calculate the checksum of the file, exactly same as md5-sum linux util.

    Parameters
    ----------
    fname : str
        Path to the file.

    Returns
    -------
    str
        MD5-hash of file names as `fname`.

    """
    digest = hashlib.md5()
    with open(fname, "rb") as handle:
        # stream in 4 KiB blocks so arbitrarily large files fit in memory
        while True:
            block = handle.read(4096)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
def info(name=None, show_only_latest=True):
    """Provide the information related to model/dataset.

    Parameters
    ----------
    name : str, optional
        Name of model/dataset. If not set - shows all available data.
    show_only_latest : bool, optional
        If storage contains different versions for one data/model, this flag allow to hide outdated versions.
        Affects only if `name` is None.

    Returns
    -------
    dict
        Detailed information about one or all models/datasets.
        If name is specified, return full information about concrete dataset/model,
        otherwise, return information about all available datasets/models.

    Raises
    ------
    ValueError
        If `name` does not match any known corpus or model.

    Examples
    --------
    >>> import gensim.downloader as api
    >>> api.info("text8")  # retrieve information about text8 dataset
    >>> api.info()  # retrieve information about all available datasets and models

    """
    # the master list is always fetched fresh from the repository
    information = json.loads(urlopen(DATA_LIST_URL).read().decode("utf-8"))

    if name is not None:
        # look the name up in both sections, corpora first
        for section in ('corpora', 'models'):
            if name in information[section]:
                return information[section][name]
        raise ValueError("Incorrect model/corpus name")

    if not show_only_latest:
        return information

    # keep only entries flagged as latest (missing flag counts as latest)
    return {
        section: {
            data_name: data
            for (data_name, data) in information[section].items()
            if data.get("latest", True)
        }
        for section in ('corpora', 'models')
    }
def _get_checksum(name, part=None):
    """Retrieve the checksum of the model/dataset from gensim-data repository.

    Parameters
    ----------
    name : str
        Dataset/model name.
    part : int, optional
        Number of part (for multipart data only).

    Returns
    -------
    str
        Retrieved checksum of dataset/model (None if `name` is unknown).

    """
    information = info()
    # multipart archives store one checksum per part under "checksum-<part>"
    checksum_key = "checksum" if part is None else "checksum-{}".format(part)
    for section in ('corpora', 'models'):
        if name in information[section]:
            return information[section][name][checksum_key]
def _get_parts(name):
    """Retrieve the number of parts in which dataset/model has been split.

    Parameters
    ----------
    name: str
        Dataset/model name.

    Returns
    -------
    int
        Number of parts in which dataset/model has been split (None if `name` is unknown).

    """
    information = info()
    for section in ('corpora', 'models'):
        if name in information[section]:
            return information[section][name]["parts"]
def _download(name):
    """Download and extract the dataset/model.

    Parameters
    ----------
    name: str
        Dataset/model name which has to be downloaded.

    Raises
    ------
    Exception
        If md5sum on client and in repo are different.

    """
    url_load_file = "{base}/{fname}/__init__.py".format(base=DOWNLOAD_BASE_URL, fname=name)
    data_folder_dir = os.path.join(base_dir, name)
    data_folder_dir_tmp = data_folder_dir + '_tmp'
    tmp_dir = tempfile.mkdtemp()
    init_path = os.path.join(tmp_dir, "__init__.py")
    # every dataset/model ships a loader module (__init__.py) next to the data
    urllib.urlretrieve(url_load_file, init_path)
    total_parts = _get_parts(name)
    if total_parts > 1:
        # multipart archive: fetch and verify each part, then concatenate them
        concatenated_folder_name = "{fname}.gz".format(fname=name)
        concatenated_folder_dir = os.path.join(tmp_dir, concatenated_folder_name)
        for part in range(0, total_parts):
            url_data = "{base}/{fname}/{fname}.gz_0{part}".format(base=DOWNLOAD_BASE_URL, fname=name, part=part)

            fname = "{f}.gz_0{p}".format(f=name, p=part)
            dst_path = os.path.join(tmp_dir, fname)
            urllib.urlretrieve(
                url_data, dst_path,
                reporthook=partial(_progress, part=part, total_parts=total_parts)
            )
            # verify the part before moving on; a mismatch aborts the whole download
            if _calculate_md5_checksum(dst_path) == _get_checksum(name, part):
                sys.stdout.write("\n")
                sys.stdout.flush()
                logger.info("Part %s/%s downloaded", part + 1, total_parts)
            else:
                shutil.rmtree(tmp_dir)
                raise Exception("Checksum comparison failed, try again")
        with open(concatenated_folder_dir, 'wb') as wfp:
            for part in range(0, total_parts):
                part_path = os.path.join(tmp_dir, "{fname}.gz_0{part}".format(fname=name, part=part))
                with open(part_path, "rb") as rfp:
                    shutil.copyfileobj(rfp, wfp)
                # parts are no longer needed once appended to the combined archive
                os.remove(part_path)
    else:
        url_data = "{base}/{fname}/{fname}.gz".format(base=DOWNLOAD_BASE_URL, fname=name)
        fname = "{fname}.gz".format(fname=name)
        dst_path = os.path.join(tmp_dir, fname)
        urllib.urlretrieve(url_data, dst_path, reporthook=_progress)
        if _calculate_md5_checksum(dst_path) == _get_checksum(name):
            sys.stdout.write("\n")
            sys.stdout.flush()
            logger.info("%s downloaded", name)
        else:
            shutil.rmtree(tmp_dir)
            raise Exception("Checksum comparison failed, try again")

    # move into place via a *_tmp sibling so an interrupted download never
    # leaves a half-written directory at the final location
    if os.path.exists(data_folder_dir_tmp):
        os.remove(data_folder_dir_tmp)

    shutil.move(tmp_dir, data_folder_dir_tmp)
    os.rename(data_folder_dir_tmp, data_folder_dir)
def _get_filename(name):
    """Retrieve the filename of the dataset/model.

    Parameters
    ----------
    name: str
        Name of dataset/model.

    Returns
    -------
    str:
        Filename of the dataset/model (None if `name` is unknown).

    """
    information = info()
    for section in ('corpora', 'models'):
        if name in information[section]:
            return information[section][name]["file_name"]
def load(name, return_path=False):
    """Download (if needed) dataset/model and load it to memory (unless `return_path` is set).

    Parameters
    ----------
    name: str
        Name of the model/dataset.
    return_path: bool, optional
        If True, return full path to file, otherwise, return loaded model / iterable dataset.

    Returns
    -------
    Model
        Requested model, if `name` is model and `return_path` == False.
    Dataset (iterable)
        Requested dataset, if `name` is dataset and `return_path` == False.
    str
        Path to file with dataset / model, only when `return_path` == True.

    Raises
    ------
    ValueError
        Raised if `name` is incorrect.

    Examples
    --------
    Model example:

    >>> import gensim.downloader as api
    >>>
    >>> model = api.load("glove-twitter-25")  # load glove vectors
    >>> model.most_similar("cat")  # show words that similar to word 'cat'

    Dataset example:

    >>> import gensim.downloader as api
    >>> from gensim.models import Word2Vec
    >>>
    >>> wiki = api.load("wiki-en")  # load extracted Wikipedia dump, around 6 Gb
    >>> for article in wiki:  # iterate over all wiki script
    >>>     ...

    Download only example

    >>> import gensim.downloader as api
    >>>
    >>> print(api.load("wiki-en", return_path=True))  # output: /home/user/gensim-data/wiki-en/wiki-en.gz

    """
    _create_base_dir()
    # validate the name before touching the network/disk any further
    file_name = _get_filename(name)
    if file_name is None:
        raise ValueError("Incorrect model/corpus name")
    folder_dir = os.path.join(base_dir, name)
    path = os.path.join(folder_dir, file_name)
    # only download when the data is not already cached locally
    if not os.path.exists(folder_dir):
        _download(name)

    if return_path:
        return path
    else:
        # import the dataset's bundled __init__.py and delegate loading to it
        sys.path.insert(0, base_dir)
        module = __import__(name)
        return module.load_data()
if __name__ == '__main__':
    # CLI entry point: mirrors api.load(..., return_path=True) and api.info(...)
    logging.basicConfig(
        format='%(asctime)s :%(name)s :%(levelname)s :%(message)s', stream=sys.stdout, level=logging.INFO
    )
    parser = argparse.ArgumentParser(
        description="Gensim console API",
        usage="python -m gensim.api.downloader  [-h] [-d data_name | -i data_name | -c]"
    )

    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        "-d", "--download", metavar="data_name", nargs=1,
        help="To download a corpus/model : python -m gensim.downloader -d <dataname>"
    )

    # sentinel const for "-i" with no argument: show info about everything
    full_information = 1
    group.add_argument(
        "-i", "--info", metavar="data_name", nargs='?', const=full_information,
        help="To get information about a corpus/model : python -m gensim.downloader -i <dataname>"
    )

    args = parser.parse_args()
    if args.download is not None:
        data_path = load(args.download[0], return_path=True)
        logger.info("Data has been installed and data path is %s", data_path)
    elif args.info is not None:
        output = info() if (args.info == full_information) else info(name=args.info)
        print(json.dumps(output, indent=4))
| 14,123 | 30.45657 | 112 | py |
poincare_glove | poincare_glove-master/gensim/utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module contains various general utility functions."""
from __future__ import with_statement
from contextlib import contextmanager
import collections
import logging
import warnings
try:
from html.entities import name2codepoint as n2cp
except ImportError:
from htmlentitydefs import name2codepoint as n2cp
try:
import cPickle as _pickle
except ImportError:
import pickle as _pickle
import re
import unicodedata
import os
import random
import itertools
import tempfile
from functools import wraps
import multiprocessing
import shutil
import sys
import subprocess
import inspect
import numpy as np
import numbers
import scipy.sparse
from six import iterkeys, iteritems, u, string_types, unichr
from six.moves import xrange
from smart_open import smart_open
if sys.version_info[0] >= 3:
    unicode = str  # Python 3 has no `unicode` builtin; alias for 2/3 compatibility

logger = logging.getLogger(__name__)

# Runs of "word" characters that do not start with a digit (used by simple_tokenize).
PAT_ALPHABETIC = re.compile(r'(((?![\d])\w)+)', re.UNICODE)
# HTML entities such as &amp;, &#65; or &#x41;.
RE_HTML_ENTITY = re.compile(r'&(#?)([xX]?)(\w{1,8});', re.UNICODE)
def get_random_state(seed):
    """Generate :class:`numpy.random.RandomState` based on input seed.

    Parameters
    ----------
    seed : {None, int, array_like}
        Seed for random state.

    Returns
    -------
    :class:`numpy.random.RandomState`
        Random state.

    Raises
    ------
    ValueError
        If seed is not {None, int, RandomState}.

    Notes
    -----
    Method originally from [1]_ and written by @joshloyal.

    References
    ----------
    .. [1] https://github.com/maciejkula/glove-python

    """
    # None / the np.random module itself -> numpy's global RandomState singleton
    if seed is None or seed is np.random:
        return np.random.mtrand._rand
    # an existing RandomState is passed through untouched
    if isinstance(seed, np.random.RandomState):
        return seed
    # integers seed a fresh RandomState
    if isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.RandomState(seed)
    raise ValueError('%r cannot be used to seed a np.random.RandomState instance' % seed)
def synchronous(tlockname):
    """A decorator to place an instance-based lock around a method.

    The lock object is looked up on `self` under the attribute name `tlockname`
    each time the decorated method is called.

    Notes
    -----
    Adapted from [2]_

    References
    ----------
    .. [2] http://code.activestate.com/recipes/577105-synchronization-decorator-for-class-methods/

    """
    def _synched(func):
        @wraps(func)
        def _synchronizer(self, *args, **kwargs):
            instance_lock = getattr(self, tlockname)
            logger.debug("acquiring lock %r for %s", tlockname, func.__name__)

            # the lock doubles as a context manager, guaranteeing release
            with instance_lock:
                logger.debug("acquired lock %r for %s", tlockname, func.__name__)
                outcome = func(self, *args, **kwargs)
            logger.debug("releasing lock %r for %s", tlockname, func.__name__)
            return outcome
        return _synchronizer
    return _synched
def file_or_filename(input):
    """Open file with `smart_open`.

    Parameters
    ----------
    input : str or file-like
        Filename or file-like object.

    Returns
    -------
    input : file-like object
        Opened file OR seek out to 0 byte if `input` is already file-like object.

    """
    if not isinstance(input, string_types):
        # already file-like: just rewind to the beginning
        input.seek(0)
        return input
    # a path: open it as a file
    return smart_open(input)
@contextmanager
def open_file(input):
    """Provide "with-like" behaviour except closing the file object.

    Parameters
    ----------
    input : str or file-like
        Filename or file-like object.

    Yields
    -------
    file
        File-like object based on input (or input if this already file-like).

    """
    mgr = file_or_filename(input)
    exc = False
    try:
        yield mgr
    except Exception:
        # Handling any unhandled exceptions from the code nested in 'with' statement.
        exc = True
        # if we opened the file ourselves (path input), let its __exit__ decide
        # whether to suppress; otherwise (caller's file object) always re-raise
        if not isinstance(input, string_types) or not mgr.__exit__(*sys.exc_info()):
            raise
        # Try to introspect and silence errors.
    finally:
        if not exc and isinstance(input, string_types):
            # close only files we opened; caller-provided objects stay open
            mgr.__exit__(None, None, None)
def deaccent(text):
    """Remove accentuation from the given string.

    Parameters
    ----------
    text : str
        Input string.

    Returns
    -------
    str
        Unicode string without accentuation.

    Examples
    --------
    >>> from gensim.utils import deaccent
    >>> deaccent("Šéf chomutovských komunistů dostal poštou bílý prášek")
    u'Sef chomutovskych komunistu dostal postou bily prasek'

    """
    if not isinstance(text, unicode):
        # assume utf8 for byte strings, use default (strict) error handling
        text = text.decode('utf8')
    # decompose, drop combining marks ('Mn'), then recompose
    decomposed = unicodedata.normalize("NFD", text)
    stripped = u('').join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')
    return unicodedata.normalize("NFC", stripped)
def copytree_hardlink(source, dest):
"""Recursively copy a directory ala shutils.copytree, but hardlink files instead of copying.
Parameters
----------
source : str
Path to source directory
dest : str
Path to destination directory
Warnings
--------
Available on UNIX systems only.
"""
copy2 = shutil.copy2
try:
shutil.copy2 = os.link
shutil.copytree(source, dest)
finally:
shutil.copy2 = copy2
def tokenize(text, lowercase=False, deacc=False, encoding='utf8', errors="strict", to_lower=False, lower=False):
    """Iteratively yield tokens as unicode strings, removing accent marks and optionally lowercasing string
    if any from `lowercase`, `to_lower`, `lower` set to True.

    Parameters
    ----------
    text : str
        Input string.
    lowercase : bool, optional
        If True - lowercase input string.
    deacc : bool, optional
        If True - remove accentuation from string by :func:`~gensim.utils.deaccent`.
    encoding : str, optional
        Encoding of input string, used as parameter for :func:`~gensim.utils.to_unicode`.
    errors : str, optional
        Error handling behaviour, used as parameter for :func:`~gensim.utils.to_unicode`.
    to_lower : bool, optional
        Same as `lowercase`.
    lower : bool, optional
        Same as `lowercase`.

    Yields
    ------
    str
        Contiguous sequences of alphabetic characters (no digits!), using :func:`~gensim.utils.simple_tokenize`

    Examples
    --------
    >>> from gensim.utils import tokenize
    >>> list(tokenize('Nic nemůže letět rychlostí vyšší, než 300 tisíc kilometrů za sekundu!', deacc=True))
    [u'Nic', u'nemuze', u'letet', u'rychlosti', u'vyssi', u'nez', u'tisic', u'kilometru', u'za', u'sekundu']

    """
    # any one of the three aliases requests lowercasing
    want_lower = lowercase or to_lower or lower

    normalized = to_unicode(text, encoding, errors=errors)
    if want_lower:
        normalized = normalized.lower()
    if deacc:
        normalized = deaccent(normalized)
    return simple_tokenize(normalized)
def simple_tokenize(text):
    """Tokenize input test using :const:`gensim.utils.PAT_ALPHABETIC`.

    Parameters
    ----------
    text : str
        Input text.

    Yields
    ------
    str
        Tokens from `text`.

    """
    # lazily yield every maximal alphabetic run
    for token_match in PAT_ALPHABETIC.finditer(text):
        yield token_match.group()
def simple_preprocess(doc, deacc=False, min_len=2, max_len=15):
    """Convert a document into a list of tokens (also with lowercase and optional de-accents),
    used :func:`~gensim.utils.tokenize`.

    Parameters
    ----------
    doc : str
        Input document.
    deacc : bool, optional
        If True - remove accentuation from string by :func:`~gensim.utils.deaccent`.
    min_len : int, optional
        Minimal length of token in result (inclusive).
    max_len : int, optional
        Maximal length of token in result (inclusive).

    Returns
    -------
    list of str
        Tokens extracted from `doc`.

    """
    # keep lowercased tokens within the length bounds, skipping "private" ones
    return [
        token
        for token in tokenize(doc, lower=True, deacc=deacc, errors='ignore')
        if not token.startswith('_') and min_len <= len(token) <= max_len
    ]
def any2utf8(text, errors='strict', encoding='utf8'):
    """Convert `text` to bytestring in utf8.

    Parameters
    ----------
    text : str
        Input text.
    errors : str, optional
        Error handling behaviour, used as parameter for `unicode` function (python2 only).
    encoding : str, optional
        Encoding of `text` for `unicode` function (python2 only).

    Returns
    -------
    str
        Bytestring in utf8.

    """
    if not isinstance(text, unicode):
        # bytestring -> unicode -> utf8 round-trip validates the input encoding
        text = unicode(text, encoding, errors=errors)
    return text.encode('utf8')


to_utf8 = any2utf8
def any2unicode(text, encoding='utf8', errors='strict'):
    """Convert `text` to unicode.

    Parameters
    ----------
    text : str
        Input text.
    encoding : str, optional
        Encoding of `text` for `unicode` function (python2 only).
    errors : str, optional
        Error handling behaviour, used as parameter for `unicode` function (python2 only).

    Returns
    -------
    str
        Unicode version of `text`.

    """
    # decode only when given raw bytes; unicode input passes through untouched
    return text if isinstance(text, unicode) else unicode(text, encoding, errors=errors)


to_unicode = any2unicode
def call_on_class_only(*args, **kwargs):
    """Unconditionally raise AttributeError.

    Bound to instances as a stand-in for class-only methods (e.g. `load`),
    so calling them on an instance fails loudly instead of misbehaving.

    Parameters
    ----------
    *args
        Ignored.
    **kwargs
        Ignored.

    Raises
    ------
    AttributeError
        Always.

    """
    raise AttributeError('This method should be called on a class object.')
class SaveLoad(object):
    """Class which inherit from this class have save/load functions, which un/pickle them to disk.

    Warnings
    --------
    This uses pickle for de/serializing, so objects must not contain unpicklable attributes,
    such as lambda functions etc.

    """
    @classmethod
    def load(cls, fname, mmap=None):
        """Load a previously saved object (using :meth:`~gensim.utils.SaveLoad.save`) from file.

        Parameters
        ----------
        fname : str
            Path to file that contains needed object.
        mmap : str, optional
            Memory-map option. If the object was saved with large arrays stored separately, you can load these arrays
            via mmap (shared memory) using `mmap='r'`.
            If the file being loaded is compressed (either '.gz' or '.bz2'), then `mmap=None` **must be** set.

        See Also
        --------
        :meth:`~gensim.utils.SaveLoad.save`

        Returns
        -------
        object
            Object loaded from `fname`.

        Raises
        ------
        IOError
            When methods are called on instance (should be called from class).

        """
        logger.info("loading %s object from %s", cls.__name__, fname)

        compress, subname = SaveLoad._adapt_by_suffix(fname)

        # unpickle the array-less "skeleton" first, then restore any
        # specially-stored numpy/scipy arrays from their sibling files
        obj = unpickle(fname)
        obj._load_specials(fname, mmap, compress, subname)
        logger.info("loaded %s", fname)
        return obj

    def _load_specials(self, fname, mmap, compress, subname):
        """Loads any attributes that were stored specially, and gives the same opportunity
        to recursively included :class:`~gensim.utils.SaveLoad` instances.

        Parameters
        ----------
        fname : str
            Path to file that contains needed object.
        mmap : str
            Memory-map option.
        compress : bool
            Set to True if file is compressed.
        subname : str
            Function producing the per-attribute filename (from _adapt_by_suffix).

        """
        def mmap_error(obj, filename):
            # mmap requires plain .npy files; compressed archives can't be mapped
            return IOError(
                'Cannot mmap compressed object %s in file %s. ' % (obj, filename) +
                'Use `load(fname, mmap=None)` or uncompress files manually.'
            )

        # nested SaveLoad instances restore their own specials first
        for attrib in getattr(self, '__recursive_saveloads', []):
            cfname = '.'.join((fname, attrib))
            logger.info("loading %s recursively from %s.* with mmap=%s", attrib, cfname, mmap)
            getattr(self, attrib)._load_specials(cfname, mmap, compress, subname)

        for attrib in getattr(self, '__numpys', []):
            logger.info("loading %s from %s with mmap=%s", attrib, subname(fname, attrib), mmap)

            if compress:
                if mmap:
                    raise mmap_error(attrib, subname(fname, attrib))

                val = np.load(subname(fname, attrib))['val']
            else:
                val = np.load(subname(fname, attrib), mmap_mode=mmap)

            setattr(self, attrib, val)

        for attrib in getattr(self, '__scipys', []):
            logger.info("loading %s from %s with mmap=%s", attrib, subname(fname, attrib), mmap)
            # the sparse matrix object was pickled without its data arrays;
            # re-attach data/indptr/indices from their separate files
            sparse = unpickle(subname(fname, attrib))
            if compress:
                if mmap:
                    raise mmap_error(attrib, subname(fname, attrib))

                with np.load(subname(fname, attrib, 'sparse')) as f:
                    sparse.data = f['data']
                    sparse.indptr = f['indptr']
                    sparse.indices = f['indices']
            else:
                sparse.data = np.load(subname(fname, attrib, 'data'), mmap_mode=mmap)
                sparse.indptr = np.load(subname(fname, attrib, 'indptr'), mmap_mode=mmap)
                sparse.indices = np.load(subname(fname, attrib, 'indices'), mmap_mode=mmap)

            setattr(self, attrib, sparse)

        for attrib in getattr(self, '__ignoreds', []):
            logger.info("setting ignored attribute %s to None", attrib)
            setattr(self, attrib, None)

    @staticmethod
    def _adapt_by_suffix(fname):
        """Give appropriate compress setting and filename formula.

        Parameters
        ----------
        fname : str
            Input filename.

        Returns
        -------
        (bool, function)
            First argument will be True if `fname` compressed.

        """
        compress, suffix = (True, 'npz') if fname.endswith('.gz') or fname.endswith('.bz2') else (False, 'npy')
        return compress, lambda *args: '.'.join(args + (suffix,))

    def _smart_save(self, fname, separately=None, sep_limit=10 * 1024**2, ignore=frozenset(), pickle_protocol=2):
        """Save the object to file.

        Parameters
        ----------
        fname : str
            Path to file.
        separately : list, optional
            Iterable of attributes than need to store distinctly.
        sep_limit : int, optional
            Limit for separation.
        ignore : frozenset, optional
            Attributes that shouldn't be store.
        pickle_protocol : int, optional
            Protocol number for pickle.

        Notes
        -----
        If `separately` is None, automatically detect large
        numpy/scipy.sparse arrays in the object being stored, and store
        them into separate files. This avoids pickle memory errors and
        allows mmap'ing large arrays back on load efficiently.

        You can also set `separately` manually, in which case it must be
        a list of attribute names to be stored in separate files. The
        automatic check is not performed in this case.

        See Also
        --------
        :meth:`~gensim.utils.SaveLoad.load`

        """
        logger.info("saving %s object under %s, separately %s", self.__class__.__name__, fname, separately)

        compress, subname = SaveLoad._adapt_by_suffix(fname)

        restores = self._save_specials(fname, separately, sep_limit, ignore, pickle_protocol,
                                       compress, subname)
        try:
            pickle(self, fname, protocol=pickle_protocol)
        finally:
            # restore attribs handled specially
            for obj, asides in restores:
                for attrib, val in iteritems(asides):
                    setattr(obj, attrib, val)
        logger.info("saved %s", fname)

    def _save_specials(self, fname, separately, sep_limit, ignore, pickle_protocol, compress, subname):
        """Save aside any attributes that need to be handled separately, including
        by recursion any attributes that are themselves :class:`~gensim.utils.SaveLoad` instances.

        Parameters
        ----------
        fname : str
            Output filename.
        separately : list or None
            Iterable of attributes than need to store distinctly
        sep_limit : int
            Limit for separation.
        ignore : iterable of str
            Attributes that shouldn't be store.
        pickle_protocol : int
            Protocol number for pickle.
        compress : bool
            If True - compress output with :func:`numpy.savez_compressed`.
        subname : function
            Produced by :meth:`~gensim.utils.SaveLoad._adapt_by_suffix`

        Returns
        -------
        list of (obj, {attrib: value, ...})
            Settings that the caller should use to restore each object's attributes that were set aside
            during the default :func:`~gensim.utils.pickle`.

        """
        asides = {}
        sparse_matrices = (scipy.sparse.csr_matrix, scipy.sparse.csc_matrix)
        if separately is None:
            # auto-detect: any numpy/scipy array past sep_limit goes to its own file
            separately = []
            for attrib, val in iteritems(self.__dict__):
                if isinstance(val, np.ndarray) and val.size >= sep_limit:
                    separately.append(attrib)
                elif isinstance(val, sparse_matrices) and val.nnz >= sep_limit:
                    separately.append(attrib)

        # whatever's in `separately` or `ignore` at this point won't get pickled
        for attrib in separately + list(ignore):
            if hasattr(self, attrib):
                asides[attrib] = getattr(self, attrib)
                delattr(self, attrib)

        recursive_saveloads = []
        restores = []
        for attrib, val in iteritems(self.__dict__):
            if hasattr(val, '_save_specials'):  # better than 'isinstance(val, SaveLoad)' if IPython reloading
                recursive_saveloads.append(attrib)
                cfname = '.'.join((fname, attrib))
                restores.extend(val._save_specials(cfname, None, sep_limit, ignore, pickle_protocol, compress, subname))

        try:
            numpys, scipys, ignoreds = [], [], []
            for attrib, val in iteritems(asides):
                if isinstance(val, np.ndarray) and attrib not in ignore:
                    numpys.append(attrib)
                    logger.info("storing np array '%s' to %s", attrib, subname(fname, attrib))

                    if compress:
                        np.savez_compressed(subname(fname, attrib), val=np.ascontiguousarray(val))
                    else:
                        np.save(subname(fname, attrib), np.ascontiguousarray(val))

                elif isinstance(val, (scipy.sparse.csr_matrix, scipy.sparse.csc_matrix)) and attrib not in ignore:
                    scipys.append(attrib)
                    logger.info("storing scipy.sparse array '%s' under %s", attrib, subname(fname, attrib))

                    if compress:
                        np.savez_compressed(
                            subname(fname, attrib, 'sparse'),
                            data=val.data,
                            indptr=val.indptr,
                            indices=val.indices
                        )
                    else:
                        np.save(subname(fname, attrib, 'data'), val.data)
                        np.save(subname(fname, attrib, 'indptr'), val.indptr)
                        np.save(subname(fname, attrib, 'indices'), val.indices)

                    # temporarily strip the data arrays so the pickle stays small,
                    # then reattach them no matter what happens
                    data, indptr, indices = val.data, val.indptr, val.indices
                    val.data, val.indptr, val.indices = None, None, None

                    try:
                        # store array-less object
                        pickle(val, subname(fname, attrib), protocol=pickle_protocol)
                    finally:
                        val.data, val.indptr, val.indices = data, indptr, indices
                else:
                    logger.info("not storing attribute %s", attrib)
                    ignoreds.append(attrib)

            # bookkeeping read back by _load_specials on load
            self.__dict__['__numpys'] = numpys
            self.__dict__['__scipys'] = scipys
            self.__dict__['__ignoreds'] = ignoreds
            self.__dict__['__recursive_saveloads'] = recursive_saveloads
        except Exception:
            # restore the attributes if exception-interrupted
            for attrib, val in iteritems(asides):
                setattr(self, attrib, val)
            raise
        return restores + [(self, asides)]

    def save(self, fname_or_handle, separately=None, sep_limit=10 * 1024**2, ignore=frozenset(), pickle_protocol=2):
        """Save the object to file.

        Parameters
        ----------
        fname_or_handle : str or file-like
            Path to output file or already opened file-like object. If the object is a file handle,
            no special array handling will be performed, all attributes will be saved to the same file.
        separately : list of str or None, optional
            If None -  automatically detect large numpy/scipy.sparse arrays in the object being stored, and store
            them into separate files. This avoids pickle memory errors and allows mmap'ing large arrays
            back on load efficiently.
            If list of str - this attributes will be stored in separate files, the automatic check
            is not performed in this case.
        sep_limit : int
            Limit for automatic separation.
        ignore : frozenset of str
            Attributes that shouldn't be serialize/store.
        pickle_protocol : int
            Protocol number for pickle.

        See Also
        --------
        :meth:`~gensim.utils.SaveLoad.load`

        """
        try:
            # fast path: an open file handle -> plain pickle, no array splitting
            _pickle.dump(self, fname_or_handle, protocol=pickle_protocol)
            logger.info("saved %s object", self.__class__.__name__)
        except TypeError:  # `fname_or_handle` does not have write attribute
            self._smart_save(fname_or_handle, separately, sep_limit, ignore, pickle_protocol=pickle_protocol)
def identity(p):
    """Return `p` unchanged.

    A picklable stand-in for ``lambda x: x`` in flows that cannot
    accept lambdas (pickling etc).

    Parameters
    ----------
    p : object
        Input parameter.

    Returns
    -------
    object
        Same as `p`.

    """
    return p
def get_max_id(corpus):
    """Get the highest feature id that appears in the corpus.

    Parameters
    ----------
    corpus : iterable of iterable of (int, int)
        Collection of texts in BoW format.

    Returns
    ------
    int
        Highest feature id.

    Notes
    -----
    For empty `corpus` return -1.

    """
    # start at -1 so both an empty corpus and empty documents are handled
    highest = -1
    for document in corpus:
        for fieldid, _ in document:
            if fieldid > highest:
                highest = fieldid
    return highest
class FakeDict(object):
    """Read-only dict-like object mapping each integer in <0, num_terms) to its string form.

    Avoids allocating a real dictionary when `num_terms` is huge, which would
    be a waste of memory.
    """

    def __init__(self, num_terms):
        """
        Parameters
        ----------
        num_terms : int
            Number of terms.

        """
        self.num_terms = num_terms

    def __str__(self):
        return "FakeDict(num_terms=%s)" % self.num_terms

    def __getitem__(self, val):
        if not (0 <= val < self.num_terms):
            raise ValueError("internal id out of bounds (%s, expected <0..%s))" % (val, self.num_terms))
        return str(val)

    def iteritems(self):
        """Iterate over all keys and values.

        Yields
        ------
        (int, str)
            Pair of (id, token).

        """
        for term_id in range(self.num_terms):
            yield term_id, str(term_id)

    def keys(self):
        """Override the `dict.keys()`, which is used to determine the maximum internal id of a corpus,
        i.e. the vocabulary dimensionality.

        Returns
        -------
        list of int
            Highest id, packed in list.

        Warnings
        --------
        To avoid materializing the whole `range(0, self.num_terms)`,
        this returns the highest id = `[self.num_terms - 1]` only.

        """
        return [self.num_terms - 1]

    def __len__(self):
        return self.num_terms

    def get(self, val, default=None):
        return str(val) if 0 <= val < self.num_terms else default
def dict_from_corpus(corpus):
    """Scan corpus for all word ids that appear in it, then construct a mapping
    which maps each `word_id` -> `str(word_id)`.

    Parameters
    ----------
    corpus : iterable of iterable of (int, int)
        Collection of texts in BoW format.

    Returns
    ------
    id2word : :class:`~gensim.utils.FakeDict`
        "Fake" mapping which maps each `word_id` -> `str(word_id)`.

    Warnings
    --------
    This function is used whenever *words* need to be displayed (as opposed to just their ids)
    but no `word_id` -> `word` mapping was provided. The resulting mapping only covers words actually
    used in the corpus, up to the highest `word_id` found.

    """
    # ids are 0-based, so the dict must span one past the highest id seen
    return FakeDict(1 + get_max_id(corpus))
def is_corpus(obj):
    """Check whether `obj` is a corpus.

    Parameters
    ----------
    obj : object
        Something `iterable of iterable` that contains (int, int).

    Return
    ------
    (bool, object)
        Pair of (is_corpus, `obj`), is_corpus True if `obj` is corpus.
        Callers must keep using the *returned* object: if `obj` was an iterator,
        it is replaced here by an equivalent chained iterator with the peeked
        document pushed back on front.

    Warnings
    --------
    An "empty" corpus (empty input sequence) is ambiguous, so in this case
    the result is forcefully defined as (False, `obj`).
    """
    try:
        if 'Corpus' in obj.__class__.__name__:  # the most common case, quick hack
            return True, obj
    except Exception:
        pass
    try:
        if hasattr(obj, 'next') or hasattr(obj, '__next__'):
            # the input is an iterator object, meaning once we call next()
            # that element could be gone forever. we must be careful to put
            # whatever we retrieve back again
            doc1 = next(obj)
            obj = itertools.chain([doc1], obj)
        else:
            doc1 = next(iter(obj))  # empty corpus is resolved to False here
        if len(doc1) == 0:  # sparse documents must have a __len__ function (list, tuple...)
            return True, obj  # the first document is empty=>assume this is a corpus
        # if obj is a 1D numpy array(scalars) instead of 2-tuples, it resolves to False here
        id1, val1 = next(iter(doc1))
        id1, val1 = int(id1), float(val1)  # must be a 2-tuple (integer, float)
    except Exception:
        # any failure above (not iterable, elements not (int, float) pairs, ...) => not a corpus
        return False, obj
    return True, obj
def get_my_ip():
    """Try to obtain our external ip (from the Pyro4 nameserver's point of view)

    Returns
    -------
    str
        IP address.

    Warnings
    --------
    This tries to sidestep the issue of bogus `/etc/hosts` entries and other local misconfiguration,
    which often mess up hostname resolution.
    If all else fails, fall back to simple `socket.gethostbyname()` lookup.
    """
    import socket
    try:
        from Pyro4.naming import locateNS
        # we know the nameserver must exist, so use it as our anchor point
        ns = locateNS()
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect((ns._pyroUri.host, ns._pyroUri.port))
        # our socket's own address, as seen when talking to the nameserver
        result, port = s.getsockname()
    except Exception:
        try:
            # see what ifconfig says about our default interface
            # NOTE(review): `commands` is Python 2 only; on Python 3 this import fails
            # and we fall through to the gethostbyname() branch below
            import commands
            result = commands.getoutput("ifconfig").split("\n")[1].split()[1][5:]
            if len(result.split('.')) != 4:
                # not a dotted-quad IPv4 address => treat as failure
                raise Exception()
        except Exception:
            # give up, leave the resolution to gethostbyname
            result = socket.gethostbyname(socket.gethostname())
    return result
class RepeatCorpus(SaveLoad):
    """Wrap a `corpus` as another corpus of length `reps`. This is achieved by repeating
    documents from `corpus` over and over again, until the requested length
    `len(result) == reps` is reached. Repetition is done on-the-fly=efficiently, via `itertools`.

    Examples
    --------
    >>> from gensim.utils import RepeatCorpus
    >>>
    >>> corpus = [[(1, 2)], []]  # 2 documents
    >>> list(RepeatCorpus(corpus, 5))  # repeat 2.5 times to get 5 documents
    [[(1, 2)], [], [(1, 2)], [], [(1, 2)]]
    """

    def __init__(self, corpus, reps):
        """
        Parameters
        ----------
        corpus : iterable of iterable of (int, int)
            Input corpus.
        reps : int
            Total number of documents the wrapped corpus should yield.
        """
        self.corpus = corpus
        self.reps = reps

    def __iter__(self):
        # cycle through the corpus forever, but cut the stream off after `reps` documents
        endless_stream = itertools.cycle(self.corpus)
        return itertools.islice(endless_stream, self.reps)
class RepeatCorpusNTimes(SaveLoad):
    """Wrap a `corpus` and repeat it `n` times.

    Examples
    --------
    >>> from gensim.utils import RepeatCorpusNTimes
    >>>
    >>> corpus = [[(1, 0.5)], []]
    >>> list(RepeatCorpusNTimes(corpus, 3))  # repeat 3 times
    [[(1, 0.5)], [], [(1, 0.5)], [], [(1, 0.5)], []]
    """

    def __init__(self, corpus, n):
        """
        Parameters
        ----------
        corpus : iterable of iterable of (int, int)
            Input corpus.
        n : int
            Number of passes over the corpus.
        """
        self.corpus = corpus
        self.n = n

    def __iter__(self):
        # make `n` full passes over the corpus, yielding every document of each pass
        passes_left = self.n
        while passes_left > 0:
            for document in self.corpus:
                yield document
            passes_left -= 1
class ClippedCorpus(SaveLoad):
    """Wrap a `corpus` and return at most `max_docs` documents from it."""

    def __init__(self, corpus, max_docs=None):
        """
        Parameters
        ----------
        corpus : iterable of iterable of (int, int)
            Input corpus.
        max_docs : int, optional
            Maximal number of documents in the resulting corpus, or None for "no limit".

        Warnings
        --------
        Any documents after `max_docs` are ignored. This effectively limits the length of the
        returned corpus to <= `max_docs`. Set `max_docs=None` for "no limit", effectively
        wrapping the entire input corpus.
        """
        self.corpus = corpus
        self.max_docs = max_docs

    def __iter__(self):
        # islice treats a None stop as "no limit", matching the documented semantics
        return itertools.islice(self.corpus, self.max_docs)

    def __len__(self):
        if self.max_docs is None:
            # BUGFIX: min(None, x) raised TypeError on Python 3 (and returned None
            # on Python 2); "no limit" simply means the full corpus length.
            return len(self.corpus)
        return min(self.max_docs, len(self.corpus))
class SlicedCorpus(SaveLoad):
    """Wrap `corpus` and return the slice of it."""

    def __init__(self, corpus, slice_):
        """
        Parameters
        ----------
        corpus : iterable of iterable of (int, int)
            Input corpus.
        slice_ : slice or iterable
            Slice for `corpus`.

        Notes
        -----
        Negative slicing can only be used if the corpus is indexable, otherwise the corpus
        will be iterated over. Slice can also be a np.ndarray to support fancy indexing.

        Calculating the size of a SlicedCorpus is expensive when using a slice, as the corpus
        has to be iterated over once. Using a list or np.ndarray does not have this drawback,
        but consumes more memory.
        """
        self.corpus = corpus
        self.slice_ = slice_
        self.length = None  # lazily computed, cached size of the sliced corpus

    def __iter__(self):
        if hasattr(self.corpus, 'index') and len(self.corpus.index) > 0:
            # indexable corpus: fetch documents directly by their stored offsets
            return (self.corpus.docbyoffset(offset) for offset in self.corpus.index[self.slice_])
        # otherwise assume `slice_` is a real slice object and iterate lazily
        return itertools.islice(self.corpus, self.slice_.start, self.slice_.stop, self.slice_.step)

    def __len__(self):
        # compute the length once, then serve the cached value
        if self.length is None:
            if isinstance(self.slice_, (list, np.ndarray)):
                self.length = len(self.slice_)
            elif isinstance(self.slice_, slice):
                start, stop, step = self.slice_.indices(len(self.corpus.index))
                span = stop - start
                self.length = span // step + (span % step > 0)
            else:
                # unknown slice type: fall back to counting by iteration
                self.length = sum(1 for x in self)
        return self.length
def safe_unichr(intval):
    """Build a one-character unicode string from an integer code point, tolerating
    narrow Python builds.

    Parameters
    ----------
    intval : int
        Integer code of character.

    Returns
    -------
    string
        Unicode string of character.
    """
    try:
        return unichr(intval)
    except ValueError:
        # narrow Python build: unichr() only accepts code points in range(0x10000);
        # emit the character as a UTF16 surrogate pair instead
        escaped = "\\U%08x" % intval
        return escaped.decode('unicode-escape')
def decode_htmlentities(text):
    """Decode HTML entities in `text`, coded as hex, decimal or named.

    Based on the recipe from [3]_.

    Parameters
    ----------
    text : str
        Input HTML text.

    Examples
    --------
    >>> from gensim.utils import decode_htmlentities
    >>> print(decode_htmlentities("l&#39;eau"))
    l'eau
    >>> print(decode_htmlentities("foo &lt; bar"))
    foo < bar

    References
    ----------
    .. [3] http://github.com/sku/python-twitter-ircbot/blob/321d94e0e40d0acc92f5bf57d126b57369da70de/html_decode.py
    """
    def substitute_entity(match):
        # groups: (1) '#' marker for numeric entities, (2) 'x'/'X' hex marker, (3) entity body
        try:
            entity_body = match.group(3)
            if match.group(1) == "#":
                # numeric entity
                if match.group(2) == '':
                    # decimal code point
                    return safe_unichr(int(entity_body))
                elif match.group(2) in ['x', 'X']:
                    # hexadecimal code point
                    return safe_unichr(int(entity_body, 16))
            else:
                # named entity: look the name up in the name -> codepoint table
                codepoint = n2cp.get(entity_body)
                if codepoint:
                    return safe_unichr(codepoint)
                return match.group()
        except Exception:
            # in case of errors, return original input
            return match.group()

    return RE_HTML_ENTITY.sub(substitute_entity, text)
def chunkize_serial(iterable, chunksize, as_numpy=False):
    """Yield elements from `iterable` in `chunksize`-ed lists.

    The last returned element may be smaller (if the length of the collection is not
    divisible by `chunksize`).

    Parameters
    ----------
    iterable : iterable of object
        Any iterable.
    chunksize : int
        Size of each chunk.
    as_numpy : bool, optional
        If True - yield lists of `np.ndarray`, otherwise - plain lists.

    Yields
    ------
    list of object OR list of np.ndarray
        Groups based on `iterable`.

    Examples
    --------
    >>> print(list(grouper(range(10), 3)))
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    """
    stream = iter(iterable)
    while True:
        if as_numpy:
            # convert each document to a 2d numpy array (~6x faster when transmitting
            # chunk data over the wire, in Pyro)
            wrapped = [[np.array(doc) for doc in itertools.islice(stream, int(chunksize))]]
        else:
            wrapped = [list(itertools.islice(stream, int(chunksize)))]
        if not wrapped[0]:
            break
        # memory opt: hand the chunk over via pop(), so no dangling reference stays behind
        yield wrapped.pop()


grouper = chunkize_serial
class InputQueue(multiprocessing.Process):
    """Background producer process: read `corpus` in chunks of `chunksize` documents
    and put the chunks on queue `q`; a final ``None`` sentinel marks exhaustion.
    Used by :func:`~gensim.utils.chunkize` to prefetch chunks ahead of the consumer.
    """

    def __init__(self, q, corpus, chunksize, maxsize, as_numpy):
        # q: multiprocessing queue to fill; corpus: input iterable;
        # chunksize: number of documents per chunk; maxsize: queue capacity (stored only);
        # as_numpy: convert each document to a numpy array before queueing
        super(InputQueue, self).__init__()
        self.q = q
        self.maxsize = maxsize
        self.corpus = corpus
        self.chunksize = chunksize
        self.as_numpy = as_numpy

    def run(self):
        it = iter(self.corpus)
        while True:
            chunk = itertools.islice(it, self.chunksize)
            if self.as_numpy:
                # HACK XXX convert documents to numpy arrays, to save memory.
                # This also gives a scipy warning at runtime:
                # "UserWarning: indices array has non-integer dtype (float64)"
                wrapped_chunk = [[np.asarray(doc) for doc in chunk]]
            else:
                wrapped_chunk = [list(chunk)]
            if not wrapped_chunk[0]:
                # corpus exhausted: tell the consumer with a None sentinel, then stop
                self.q.put(None, block=True)
                break
            try:
                qsize = self.q.qsize()
            except NotImplementedError:
                # qsize is not implemented on some platforms (e.g. OS X)
                qsize = '?'
            logger.debug("prepared another chunk of %i documents (qsize=%s)", len(wrapped_chunk[0]), qsize)
            # pop() so this process keeps no dangling reference to the queued chunk
            self.q.put(wrapped_chunk.pop(), block=True)
# `chunkize` has two implementations: on Windows the multiprocessing-based prefetch
# is unavailable, so chunking always happens serially and `maxsize` is ignored.
if os.name == 'nt':
    warnings.warn("detected Windows; aliasing chunkize to chunkize_serial")

    def chunkize(corpus, chunksize, maxsize=0, as_numpy=False):
        """Split `corpus` into smaller chunks, used :func:`~gensim.utils.chunkize_serial`.

        Parameters
        ----------
        corpus : iterable of object
            Any iterable object.
        chunksize : int
            Size of chunk from result.
        maxsize : int, optional
            THIS PARAMETER IGNORED.
        as_numpy : bool, optional
            If True - yield `np.ndarray`, otherwise - list

        Yields
        ------
        list of object OR np.ndarray
            Groups based on `iterable`
        """
        # delegate straight to the serial implementation (see warning above)
        for chunk in chunkize_serial(corpus, chunksize, as_numpy=as_numpy):
            yield chunk
else:
    def chunkize(corpus, chunksize, maxsize=0, as_numpy=False):
        """Split `corpus` into smaller chunks, used :func:`~gensim.utils.chunkize_serial`.

        Parameters
        ----------
        corpus : iterable of object
            Any iterable object.
        chunksize : int
            Size of chunk from result.
        maxsize : int, optional
            THIS PARAMETER IGNORED.
        as_numpy : bool, optional
            If True - yield `np.ndarray`, otherwise - list

        Notes
        -----
        Each chunk is of length `chunksize`, except the last one which may be smaller.
        A once-only input stream (`corpus` from a generator) is ok, chunking is done efficiently via itertools.

        If `maxsize > 1`, don't wait idly in between successive chunk `yields`, but rather keep filling a short queue
        (of size at most `maxsize`) with forthcoming chunks in advance. This is realized by starting a separate process,
        and is meant to reduce I/O delays, which can be significant when `corpus` comes from a slow medium (like HDD).

        If `maxsize == 0`, don't fool around with parallelism and simply yield the chunksize
        via :func:`~gensim.utils.chunkize_serial` (no I/O optimizations).

        Yields
        ------
        list of object OR np.ndarray
            Groups based on `iterable`
        """
        assert chunksize > 0
        if maxsize > 0:
            # prefetch chunks in a background InputQueue process to hide I/O latency
            q = multiprocessing.Queue(maxsize=maxsize)
            worker = InputQueue(q, corpus, chunksize, maxsize=maxsize, as_numpy=as_numpy)
            worker.daemon = True
            worker.start()
            while True:
                chunk = [q.get(block=True)]
                if chunk[0] is None:
                    # None sentinel: the producer has exhausted the corpus
                    break
                yield chunk.pop()
        else:
            for chunk in chunkize_serial(corpus, chunksize, as_numpy=as_numpy):
                yield chunk
def smart_extension(fname, ext):
    """Insert extension `ext` into `fname`, keeping any trailing `.gz` / `.bz2`
    compression suffix last.

    Parameters
    ----------
    fname : str
        Path to file.
    ext : str
        Extension to append.

    Returns
    -------
    str
        New path to file with `ext`.
    """
    root, old_ext = os.path.splitext(fname)
    for compress_suffix in ('.bz2', '.gz'):
        if old_ext.endswith(compress_suffix):
            # place `ext` before the compression suffix
            stripped = old_ext[:-len(compress_suffix)]
            return root + stripped + ext + compress_suffix
    return root + old_ext + ext
def pickle(obj, fname, protocol=2):
    """Pickle object `obj` to file `fname`, using pickle protocol `protocol`.

    Parameters
    ----------
    obj : object
        Any python object.
    fname : str
        Path to pickle file.
    protocol : int, optional
        Pickle protocol number; defaults to 2 so the output is loadable by both
        Python 2.x and 3.x.
    """
    # open in binary mode ('b'), required on Windows
    with smart_open(fname, 'wb') as fout:
        _pickle.dump(obj, fout, protocol=protocol)
def unpickle(fname):
    """Load a pickled object from file `fname`.

    Parameters
    ----------
    fname : str
        Path to pickle file.

    Returns
    -------
    object
        Python object loaded from `fname`.
    """
    with smart_open(fname, 'rb') as fin:
        # smart_open's S3 streams lack readline, hence load/loads instead of plain pickle.load
        if sys.version_info > (3, 0):
            # encoding='latin1' lets Python 3 read pickles produced by Python 2
            return _pickle.load(fin, encoding='latin1')
        return _pickle.loads(fin.read())
def revdict(d):
    """Reverse a dictionary mapping, i.e. `{1: 2, 3: 4}` -> `{2: 1, 4: 3}`.

    Parameters
    ----------
    d : dict
        Input dictionary.

    Returns
    -------
    dict
        Reversed dictionary mapping.

    Notes
    -----
    When two keys map to the same value, only one of them will be kept in the result
    (which one is kept is arbitrary).

    Examples
    --------
    >>> from gensim.utils import revdict
    >>> d = {1: 2, 3: 4}
    >>> revdict(d)
    {2: 1, 4: 3}
    """
    reversed_mapping = {}
    for key, value in iteritems(dict(d)):
        reversed_mapping[value] = key
    return reversed_mapping
def deprecated(reason):
    """Decorator which can be used to mark functions as deprecated.

    Parameters
    ----------
    reason : str or callable
        Reason of deprecation (when used as ``@deprecated("why")``), or directly the
        decorated function/class (when used as bare ``@deprecated``).

    Returns
    -------
    function
        Decorated function which emits a :class:`DeprecationWarning` when called.

    Notes
    -----
    Base code from [4]_.

    References
    ----------
    .. [4] https://stackoverflow.com/a/40301488/8001386
    """
    if isinstance(reason, string_types):
        # invoked with an argument: @deprecated("some reason")
        def decorator(func):
            fmt = "Call to deprecated `{name}` ({reason})."

            @wraps(func)
            def wrapper(*args, **kwargs):
                warnings.warn(
                    fmt.format(name=func.__name__, reason=reason),
                    category=DeprecationWarning,
                    stacklevel=2
                )
                return func(*args, **kwargs)

            return wrapper

        return decorator
    elif inspect.isclass(reason) or inspect.isfunction(reason):
        # invoked without arguments: @deprecated
        func = reason
        fmt = "Call to deprecated `{name}`."

        @wraps(func)
        def wrapper(*args, **kwargs):
            warnings.warn(
                fmt.format(name=func.__name__),
                category=DeprecationWarning,
                stacklevel=2
            )
            return func(*args, **kwargs)

        return wrapper
    else:
        raise TypeError(repr(type(reason)))
@deprecated("Function will be removed in 4.0.0")
def toptexts(query, texts, index, n=10):
    """
    Debug helper: inspect the top `n` documents most similar to `query` (according
    to the similarity index `index`), to see if they are actually related to the query.

    Parameters
    ----------
    query : list
        vector OR BoW (list of tuples)
    texts : str
        object that can return something insightful for each document via `texts[docid]`,
        such as its fulltext or snippet.
    index : any
        a class from gensim.similarity.docsim

    Return
    ------
    list
        a list of 3-tuples (docid, doc's similarity to the query, texts[docid])
    """
    similarities = index[query]  # perform a similarity query against the corpus
    ranked = sorted(enumerate(similarities), key=lambda item: -item[1])
    # only consider the top-n most similar docs
    return [(docid, score, texts[docid]) for docid, score in ranked[:n]]
def randfname(prefix='gensim'):
    """Generate a path with a random filename inside the system temporary folder.

    Parameters
    ----------
    prefix : str
        Prefix of the filename.

    Returns
    -------
    str
        Full path with random filename (in temporary folder).
    """
    # up to six random hex digits, without the leading '0x'
    random_suffix = hex(random.randint(0, 0xffffff))[2:]
    return os.path.join(tempfile.gettempdir(), prefix + random_suffix)
@deprecated("Function will be removed in 4.0.0")
def upload_chunked(server, docs, chunksize=1000, preprocess=None):
    """Memory-friendly upload of documents to a SimServer (or Pyro SimServer proxy).

    Notes
    -----
    Use this function to train or index large collections -- avoid sending the
    entire corpus over the wire as a single Pyro in-memory object. The documents
    will be sent in smaller chunks, of `chunksize` documents each.
    """
    start = 0
    for chunk in grouper(docs, chunksize):
        end = start + len(chunk)
        logger.info("uploading documents %i-%i", start, end - 1)
        if preprocess is not None:
            # tokenize client-side and drop the raw text, to keep the payload small
            processed = []
            for doc in chunk:
                doc['tokens'] = preprocess(doc['text'])
                del doc['text']
                processed.append(doc)
            chunk = processed
        server.buffer(chunk)
        start = end
def getNS(host=None, port=None, broadcast=True, hmac_key=None):
    """Get a Pyro4 name server proxy.

    Parameters
    ----------
    host : str, optional
        Name server hostname.
    port : int, optional
        Name server port.
    broadcast : bool, optional
        If True - use the broadcast mechanism (i.e. reach all Pyro nodes in the local network).
    hmac_key : str, optional
        Private key.

    Returns
    -------
    :class:`Pyro4.core.Proxy`
        Proxy from Pyro4.

    Raises
    ------
    RuntimeError
        When the Pyro name server is not found.
    """
    import Pyro4
    try:
        return Pyro4.locateNS(host, port, broadcast, hmac_key)
    except Pyro4.errors.NamingError:
        # translate the Pyro-specific error into a plain RuntimeError for callers
        raise RuntimeError("Pyro name server not found")
def pyro_daemon(name, obj, random_suffix=False, ip=None, port=None, ns_conf=None):
    """Register `obj` with the Pyro4 name server (starting the name server if not running
    yet) and block until the daemon is terminated. The object is registered under
    `name`, or `name` + some random suffix if `random_suffix` is set.
    """
    if ns_conf is None:
        ns_conf = {}
    if random_suffix:
        # append up to six random hex digits to avoid name collisions
        name = name + '.' + hex(random.randint(0, 0xffffff))[2:]

    import Pyro4
    with getNS(**ns_conf) as ns:
        with Pyro4.Daemon(ip or get_my_ip(), port or 0) as daemon:
            # register server for remote access
            uri = daemon.register(obj, name)
            ns.remove(name)
            ns.register(name, uri)
            logger.info("%s registered with nameserver (URI '%s')", name, uri)
            daemon.requestLoop()
def has_pattern():
    """Check whether the optional `pattern` [5]_ package is installed.

    Returns
    -------
    bool
        True if `pattern` is installed, False otherwise.

    References
    ----------
    .. [5] https://github.com/clips/pattern
    """
    try:
        from pattern.en import parse  # noqa:F401
    except ImportError:
        return False
    return True
def lemmatize(content, allowed_tags=re.compile(r'(NN|VB|JJ|RB)'), light=False,
              stopwords=frozenset(), min_length=2, max_length=15):
    """Use the English lemmatizer from `pattern` [5]_ to extract UTF8-encoded tokens in
    their base form=lemma, e.g. "are, is, being" -> "be" etc.
    This is a smarter version of stemming, taking word context into account.

    Parameters
    ----------
    content : str
        Input string
    allowed_tags : :class:`_sre.SRE_Pattern`, optional
        Compiled regexp to select POS that will be used.
        Only considers nouns, verbs, adjectives and adverbs by default (=all other lemmas are discarded).
    light : bool, optional
        DEPRECATED FLAG, no longer supported by `pattern`.
    stopwords : frozenset
        Set of words that will be removed from output.
    min_length : int
        Minimal token length in output (inclusive).
    max_length : int
        Maximal token length in output (inclusive).

    Returns
    -------
    list of str
        List with tokens with POS tags.

    Warnings
    --------
    This function is only available when the optional 'pattern' package is installed.

    Examples
    --------
    >>> from gensim.utils import lemmatize
    >>> lemmatize('Hello World! How is it going?! Nonexistentword, 21')
    ['world/NN', 'be/VB', 'go/VB', 'nonexistentword/NN']
    """
    if not has_pattern():
        raise ImportError(
            "Pattern library is not installed. Pattern library is needed in order to use lemmatize function"
        )
    from pattern.en import parse

    if light:
        import warnings
        warnings.warn("The light flag is no longer supported by pattern.")

    # tokenization in `pattern` is weird; it gets thrown off by non-letters,
    # producing '==relate/VBN' or '**/NN'... try to preprocess the text a little
    # FIXME this throws away all fancy parsing cues, including sentence structure,
    # abbreviations etc.
    content = u(' ').join(tokenize(content, lower=True, errors='ignore'))

    lemmas = []
    for sentence in parse(content, lemmata=True, collapse=False):
        for token, tag, _, _, lemma in sentence:
            if not (min_length <= len(lemma) <= max_length):
                continue
            if lemma.startswith('_') or lemma in stopwords:
                continue
            if allowed_tags.match(tag):
                # emit "lemma/POS", keeping only the 2-letter POS prefix
                lemmas.append(('%s/%s' % (lemma, tag[:2])).encode('utf8'))
    return lemmas
def mock_data_row(dim=1000, prob_nnz=0.5, lam=1.0):
    """Create a random gensim BoW vector.

    Parameters
    ----------
    dim : int, optional
        Dimension of the vector.
    prob_nnz : float, optional
        Probability that a given coordinate will be nonzero.
    lam : float, optional
        Parameter of the Poisson distribution the nonzero values are drawn from.

    Returns
    -------
    list of (int, float)
        Vector in BoW format.
    """
    # one uniform draw per coordinate decides whether that coordinate is nonzero
    nnz_draws = np.random.uniform(size=(dim,))
    return [
        (feature_id, float(np.random.poisson(lam=lam) + 1.0))
        for feature_id in xrange(dim)
        if nnz_draws[feature_id] < prob_nnz
    ]
def mock_data(n_items=1000, dim=1000, prob_nnz=0.5, lam=1.0):
    """Create a random gensim-style corpus (BoW), using :func:`~gensim.utils.mock_data_row`.

    Parameters
    ----------
    n_items : int
        Number of documents in the corpus.
    dim : int
        Dimension of each vector, passed through to :func:`~gensim.utils.mock_data_row`.
    prob_nnz : float, optional
        Probability that a given coordinate will be nonzero, passed through to
        :func:`~gensim.utils.mock_data_row`.
    lam : float, optional
        Parameter of the Poisson distribution, passed through to :func:`~gensim.utils.mock_data_row`.

    Returns
    -------
    list of list of (int, float)
        Gensim-style corpus.
    """
    corpus = []
    for _ in xrange(n_items):
        corpus.append(mock_data_row(dim=dim, prob_nnz=prob_nnz, lam=lam))
    return corpus
def prune_vocab(vocab, min_reduce, trim_rule=None):
    """Remove all entries from the `vocab` dictionary with count smaller than `min_reduce`.

    Modifies `vocab` in place, returns the sum of all counts that were pruned.

    Parameters
    ----------
    vocab : dict
        Input dictionary, modified in place.
    min_reduce : int
        Frequency threshold for tokens in `vocab`.
    trim_rule : function, optional
        Custom rule for trimming entities from vocab; the default behaviour is
        `vocab[w] <= min_reduce`.

    Returns
    -------
    result : int
        Sum of all counts that were pruned.
    """
    pruned_total = 0
    initial_size = len(vocab)
    # iterate over a snapshot of the keys, since entries are deleted while iterating
    for word in list(vocab):
        if not keep_vocab_item(word, vocab[word], min_reduce, trim_rule):  # vocab[w] <= min_reduce:
            pruned_total += vocab[word]
            del vocab[word]
    logger.info(
        "pruned out %i tokens with count <=%i (before %i, after %i)",
        initial_size - len(vocab), min_reduce, initial_size, len(vocab)
    )
    return pruned_total
def qsize(queue):
    """Get the (approximate) queue size where available.

    Parameters
    ----------
    queue : :class:`queue.Queue`
        Input queue.

    Returns
    -------
    int
        Queue size, -1 if the `qsize` method isn't implemented (OS X).
    """
    try:
        size = queue.qsize()
    except NotImplementedError:
        # qsize is not implemented on OS X for multiprocessing queues
        size = -1
    return size
# Possible return values of a custom `trim_rule` (see `keep_vocab_item`):
RULE_DEFAULT = 0  # fall back to the default `count >= min_count` behaviour
RULE_DISCARD = 1  # always remove the word from the vocabulary
RULE_KEEP = 2  # always keep the word in the vocabulary
def keep_vocab_item(word, count, min_count, trim_rule=None):
    """Decide whether `word` should stay in the vocabulary or be removed.

    Parameters
    ----------
    word : str
        Input word.
    count : int
        Number of times the word occurs in the corpus.
    min_count : int
        Frequency threshold for `word`.
    trim_rule : function, optional
        Custom function returning RULE_KEEP / RULE_DISCARD / RULE_DEFAULT;
        the default behaviour is `count >= min_count`.

    Returns
    -------
    bool
        True if `word` should stay, False otherwise.
    """
    default_res = count >= min_count

    if trim_rule is None:
        return default_res
    # a custom rule may force-keep, force-discard, or defer to the default
    rule_res = trim_rule(word, count, min_count)
    if rule_res == RULE_KEEP:
        return True
    if rule_res == RULE_DISCARD:
        return False
    return default_res
def check_output(stdout=subprocess.PIPE, *popenargs, **kwargs):
    r"""Run command with arguments and return its output as a byte string.

    Backported from Python 2.7 as it's implemented as pure python on stdlib + small modification.
    Widely used for :mod:`gensim.models.wrappers`.

    Very similar with [6]_.

    Examples
    --------
    >>> from gensim.utils import check_output
    >>> check_output(args=['echo', '1'])
    '1\n'

    Raises
    ------
    KeyboardInterrupt
        If Ctrl+C pressed.

    References
    ----------
    .. [6] https://docs.python.org/2/library/subprocess.html#subprocess.check_output
    """
    process = None  # so the interrupt handler below can tell whether Popen succeeded
    try:
        logger.debug("COMMAND: %s %s", popenargs, kwargs)
        process = subprocess.Popen(stdout=stdout, *popenargs, **kwargs)
        output, unused_err = process.communicate()
        retcode = process.poll()
        if retcode:
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            error = subprocess.CalledProcessError(retcode, cmd)
            error.output = output
            raise error
        return output
    except KeyboardInterrupt:
        # BUGFIX: guard against the interrupt arriving before/inside Popen, in which
        # case `process` was never bound and terminating it raised a NameError
        if process is not None:
            process.terminate()
        raise
def sample_dict(d, n=10, use_random=True):
    """Pick `n` items from dictionary `d`.

    Parameters
    ----------
    d : dict
        Input dictionary.
    n : int, optional
        Number of items to pick.
    use_random : bool, optional
        If True - pick items randomly, otherwise - according to natural dict iteration order.

    Returns
    -------
    list of (object, object)
        Picked items from the dictionary, represented as a list.
    """
    if use_random:
        # never sample more items than the dict actually has
        chosen_keys = random.sample(list(d), min(len(d), n))
    else:
        chosen_keys = itertools.islice(iterkeys(d), n)
    return [(key, d[key]) for key in chosen_keys]
def strided_windows(ndarray, window_size):
    """Produce a numpy.ndarray of windows, as from a sliding window.

    Parameters
    ----------
    ndarray : numpy.ndarray
        Input array.
    window_size : int
        Sliding window size.

    Returns
    -------
    numpy.ndarray
        Subsequences produced by sliding a window of the given size over `ndarray`.
        Since this uses striding, the individual arrays are views rather than copies of `ndarray`.
        Changes to one view modifies the others and the original.

    Examples
    --------
    >>> from gensim.utils import strided_windows
    >>> strided_windows(np.arange(5), 2)
    array([[0, 1],
           [1, 2],
           [2, 3],
           [3, 4]])
    >>> strided_windows(np.arange(10), 5)
    array([[0, 1, 2, 3, 4],
           [1, 2, 3, 4, 5],
           [2, 3, 4, 5, 6],
           [3, 4, 5, 6, 7],
           [4, 5, 6, 7, 8],
           [5, 6, 7, 8, 9]])
    """
    ndarray = np.asarray(ndarray)
    length = ndarray.shape[0]
    if window_size == length:
        # a single window covering the whole array
        return np.array([ndarray])
    if window_size > length:
        # window can't fit: signal with an empty (0, 0) array
        return np.ndarray((0, 0))
    stride = ndarray.strides[0]
    return np.lib.stride_tricks.as_strided(
        ndarray,
        shape=(length - window_size + 1, window_size),
        strides=(stride, stride),
    )
def iter_windows(texts, window_size, copy=False, ignore_below_size=True, include_doc_num=False):
    """Produce a generator over `texts` using a sliding window of `window_size`.

    The windows produced are views of some subsequence of a text.
    To use deep copies instead, pass `copy=True`.

    Parameters
    ----------
    texts : list of str
        List of string sentences.
    window_size : int
        Size of the sliding window.
    copy : bool, optional
        If True - produce deep copies.
    ignore_below_size : bool, optional
        If True - skip documents that are not at least `window_size` in length.
    include_doc_num : bool, optional
        If True - yield `(doc_num, window)` pairs instead of bare windows.
    """
    for doc_num, document in enumerate(texts):
        for window in _iter_windows(document, window_size, copy, ignore_below_size):
            yield (doc_num, window) if include_doc_num else window


def _iter_windows(document, window_size, copy=False, ignore_below_size=True):
    # Helper: yield the sliding windows of a single document.
    windows = strided_windows(document, window_size)
    if windows.shape[0] == 0:
        # document is shorter than the window: emit it whole, unless told to skip it
        if not ignore_below_size:
            yield document.copy() if copy else document
    else:
        for window in windows:
            yield window.copy() if copy else window
def flatten(nested_list):
    """Recursively flatten out a nested list.

    Parameters
    ----------
    nested_list : list
        Possibly nested list.

    Returns
    -------
    list
        Flattened version of the input, where any list elements have been unpacked into
        the top-level list in a recursive fashion.
    """
    return list(lazy_flatten(nested_list))


def lazy_flatten(nested_list):
    """Lazy version of :func:`~gensim.utils.flatten`.

    Parameters
    ----------
    nested_list : list
        Possibly nested list.

    Yields
    ------
    object
        Element of list
    """
    # BUGFIX: `Iterable` lives in `collections.abc` on Python 3 and was removed from
    # the `collections` top level in Python 3.10; fall back to `collections` on Python 2.
    iterable_type = getattr(collections, 'abc', collections).Iterable
    for el in nested_list:
        if isinstance(el, iterable_type) and not isinstance(el, string_types):
            # strings are iterable too, but must be yielded whole, not char by char
            for sub in flatten(el):
                yield sub
        else:
            yield el
| 59,164 | 29.217058 | 120 | py |
poincare_glove | poincare_glove-master/gensim/interfaces.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module contains implementations of basic interfaces used across the whole gensim package.
These interfaces usable for building corpus, transformation and similarity classes.
All interfaces are realized as abstract base classes (i.e. some optional functionality is provided in the interface
itself, so that the interfaces should be inherited).
"""
from __future__ import with_statement
import logging
from gensim import utils, matutils
from six.moves import xrange
logger = logging.getLogger(__name__)
class CorpusABC(utils.SaveLoad):
    """Interface for corpus classes from :mod:`gensim.corpora`.

    A corpus is simply an iterable object, where each iteration step yields one document
    represented in bag-of-words (BoW) format, i.e. a list of ``(attr_id, attr_value)``
    2-tuples like ``[(1, 0.2), (4, 0.6), ...]``:

    >>> from gensim.corpora import MmCorpus  # inheritor of CorpusABC
    >>> from gensim.test.utils import datapath
    >>>
    >>> corpus = MmCorpus(datapath("testcorpus.mm"))
    >>> for doc in corpus:
    ...     pass  # do something with the doc...
    >>> doc = next(iter(corpus))
    >>> print(doc)
    [(0, 1.0), (1, 1.0), (2, 1.0)]

    Remember that save/load methods persist only the corpus *class* (not the corpus data
    itself); to persist the data, use the :func:`serialize` pattern:

    >>> from gensim.test.utils import datapath, get_tmpfile
    >>>
    >>> corpus = MmCorpus(datapath("testcorpus.mm"))
    >>> tmp_path = get_tmpfile("temp_corpus.mm")
    >>>
    >>> MmCorpus.serialize(tmp_path, corpus)  # serialize corpus to disk in MmCorpus format
    >>> # MmCorpus.save_corpus(tmp_path, corpus)  # also possible, but prefer serialize when available
    >>> loaded_corpus = MmCorpus(tmp_path)  # load corpus through the constructor
    >>> for (doc_1, doc_2) in zip(corpus, loaded_corpus):
    ...     assert doc_1 == doc_2  # check that the corpora are exactly the same

    See Also
    --------
    :mod:`gensim.corpora`
        Corpora in different formats.

    """
    def __iter__(self):
        """Iterate over the corpus, **must be overridden in the inheritor class**.

        Raises
        ------
        NotImplementedError
            This is an abstract base class; subclasses must implement the iterator protocol.
        """
        raise NotImplementedError('cannot instantiate abstract base class')

    def save(self, *args, **kwargs):
        """Save the corpus object's in-memory state.

        Warnings
        --------
        This saves only the "state" of the corpus class, not the corpus data itself;
        to save the data, use :meth:`~gensim.interfaces.CorpusABC.save_corpus` instead.

        Parameters
        ----------
        *args
            Variable length argument list.
        **kwargs
            Arbitrary keyword arguments.
        """
        import warnings
        warnings.warn(
            "corpus.save() stores only the (tiny) iteration object; "
            "to serialize the actual corpus content, use e.g. MmCorpus.serialize(corpus)"
        )
        super(CorpusABC, self).save(*args, **kwargs)

    def __len__(self):
        """Get the number of documents in the corpus, **must be overridden in the inheritor class**.

        Raises
        ------
        NotImplementedError
            This is an abstract base class; subclasses must implement this method.
        """
        raise NotImplementedError("must override __len__() before calling len(corpus)")

    @staticmethod
    def save_corpus(fname, corpus, id2word=None, metadata=False):
        """Save the given `corpus` to disk, **must be overridden in the inheritor class**.

        Some formats support saving the dictionary (`feature_id -> word` mapping),
        which can be provided by the optional `id2word` parameter.

        Notes
        -----
        Some corpora also keep an index of where each document begins, so that documents
        on disk can be accessed in O(1) time (see the
        :class:`gensim.corpora.indexedcorpus.IndexedCorpus` base class). In that case,
        :meth:`~gensim.interfaces.CorpusABC.save_corpus` is automatically called internally
        by :func:`serialize`, which does :meth:`~gensim.interfaces.CorpusABC.save_corpus`
        plus saves the index at the same time. Calling :func:`serialize` is preferred to
        calling :meth:`~gensim.interfaces.CorpusABC.save_corpus` directly.

        Parameters
        ----------
        fname : str
            Path to the output file.
        corpus : iterable of list of (int, number)
            Corpus in BoW format.
        id2word : :class:`~gensim.corpora.Dictionary`, optional
            Dictionary of corpus.
        metadata : bool, optional
            If True, will write some meta-information to `fname` too.
        """
        raise NotImplementedError('cannot instantiate abstract base class')
class TransformedCorpus(CorpusABC):
    """Interface for corpora that support transformations."""

    def __init__(self, obj, corpus, chunksize=None, **kwargs):
        """
        Parameters
        ----------
        obj : object
            A transformation object (some corpus/model class from :mod:`gensim.corpora`).
        corpus : iterable of list of (int, number)
            Corpus in BoW format.
        chunksize : int, optional
            If provided, documents are processed in groups of this size (more efficient).
        kwargs
            Arbitrary keyword arguments, forwarded as attributes onto `obj`.
        """
        self.obj, self.corpus, self.chunksize = obj, corpus, chunksize
        # forward new parameters (e.g. per_word_topics) onto the wrapped base object
        for key, value in kwargs.items():
            setattr(self.obj, key, value)
        self.metadata = False

    def __len__(self):
        """Get the size of the corpus."""
        return len(self.corpus)

    def __iter__(self):
        """Iterate over the corpus, applying the transformation to each document.

        If `chunksize` is set, works in "batch-manner" (more efficient).

        Yields
        ------
        list of (int, number)
            Transformed document in BoW format.
        """
        if self.chunksize:
            for chunk in utils.grouper(self.corpus, self.chunksize):
                for transformed in self.obj.__getitem__(chunk, chunksize=None):
                    yield transformed
        else:
            for doc in self.corpus:
                yield self.obj[doc]

    def __getitem__(self, docno):
        """Transform the document at position `docno` within the corpus.

        Parameters
        ----------
        docno : int
            Index of the document in the corpus.

        Returns
        -------
        list of (int, number)
            Transformed document in BoW format.

        Raises
        ------
        RuntimeError
            If the underlying corpus doesn't support slicing (no `__getitem__`).
        """
        if not hasattr(self.corpus, '__getitem__'):
            raise RuntimeError('Type {} does not support slicing.'.format(type(self.corpus)))
        return self.obj[self.corpus[docno]]
class TransformationABC(utils.SaveLoad):
    """Transformation interface.

    A 'transformation' is any object that accepts a document in BoW format via
    `__getitem__` (the `[]` notation) and returns another sparse document in its stead:

    >>> from gensim.models import LsiModel
    >>> from gensim.test.utils import common_dictionary, common_corpus
    >>>
    >>> model = LsiModel(common_corpus, id2word=common_dictionary)
    >>> bow_vector = model[common_corpus[0]]  # model applied through __getitem__ on document from corpus.
    >>> bow_corpus = model[common_corpus]  # also, we can apply model on full corpus

    """

    def __getitem__(self, vec):
        """Transform a single vector (or a whole corpus), **should be overridden in inheritor class**.

        Parameters
        ----------
        vec : object
            Vector, or corpus of vectors, to transform.

        Raises
        ------
        NotImplementedError
            Always; concrete transformations must implement this.

        """
        raise NotImplementedError('cannot instantiate abstract base class')

    def _apply(self, corpus, chunksize=None, **kwargs):
        """Apply the transformation to a whole corpus and get the result as another corpus.

        Parameters
        ----------
        corpus : iterable of list of (int, number)
            Corpus in BoW format.
        chunksize : int, optional
            If provided, documents are transformed in batches of this size (more efficient).
        kwargs
            Arbitrary keyword arguments forwarded to :class:`~gensim.interfaces.TransformedCorpus`.

        Returns
        -------
        :class:`~gensim.interfaces.TransformedCorpus`
            Lazily transformed corpus.

        """
        return TransformedCorpus(self, corpus, chunksize, **kwargs)
class SimilarityABC(utils.SaveLoad):
    """Interface for similarity search over a corpus.

    In all instances, there is a corpus against which we want to perform the similarity search.
    For each similarity search, the input is a document and the output are its similarities
    to individual corpus documents.

    Examples
    --------
    >>> from gensim.similarities import MatrixSimilarity
    >>> from gensim.test.utils import common_dictionary, common_corpus
    >>>
    >>> index = MatrixSimilarity(common_corpus)
    >>> similarities = index.get_similarities(common_corpus[1])  # get similarities between query and corpus

    Notes
    -----
    There is also a convenience wrapper, where iterating over `self` yields similarities of each document in the corpus
    against the whole corpus (i.e. the query is each corpus document in turn).

    NOTE(review): `__getitem__` and `__iter__` below read `self.normalize`, `self.num_best`
    and (optionally) `self.chunksize` / `self.index` — presumably set by concrete
    subclasses, since `__init__` here is abstract; confirm against the subclasses.

    See Also
    --------
    :mod:`gensim.similarities`
        Provided different type of indexes for search.

    """

    def __init__(self, corpus):
        """Initialization of object, **should be overridden in inheritor class**.

        Parameters
        ----------
        corpus : iterable of list of (int, number)
            Corpus in BoW format.

        Raises
        ------
        NotImplementedError
            Since it's abstract class this method should be reimplemented later.

        """
        raise NotImplementedError("cannot instantiate Abstract Base Class")

    def get_similarities(self, doc):
        """Get similarity measures of documents of corpus to given `doc`, **should be overridden in inheritor class**.

        Parameters
        ----------
        doc : list of (int, number)
            Document in BoW format.

        Raises
        ------
        NotImplementedError
            Since it's abstract class this method should be reimplemented later.

        """
        raise NotImplementedError("cannot instantiate Abstract Base Class")

    def __getitem__(self, query):
        """Get access to similarities of document/corpus `query` to all documents in the corpus.

        Uses :meth:`~gensim.interfaces.SimilarityABC.get_similarities` internally.

        Notes
        -----
        Passing a corpus to `query` (instead of a single document) can be more efficient,
        because it will be processed in batches.

        Parameters
        ----------
        query : {list of (int, int), iterable of list of (int, int)}
            Document or corpus in BoW format.

        Returns
        -------
        {`scipy.sparse.csr.csr_matrix`, list of (int, float)}
            Similarities given document or corpus and objects corpus, depends on `query`.

        """
        is_corpus, query = utils.is_corpus(query)
        if self.normalize:
            # self.normalize only works if the input is a plain gensim vector/corpus (as
            # advertised in the doc). in fact, input can be a numpy or scipy.sparse matrix
            # as well, but in that case assume tricks are happening and don't normalize
            # anything (self.normalize has no effect).
            if not matutils.ismatrix(query):
                if is_corpus:
                    query = [matutils.unitvec(v) for v in query]
                else:
                    query = matutils.unitvec(query)
        result = self.get_similarities(query)

        # num_best is None => return the full similarity vector/matrix as-is
        if self.num_best is None:
            return result

        # if maintain_sparsity is True, result is scipy sparse. Sort, clip the
        # topn and return as a scipy sparse matrix.
        if getattr(self, 'maintain_sparsity', False):
            return matutils.scipy2scipy_clipped(result, self.num_best)

        # if the input query was a corpus (=more documents), compute the top-n
        # most similar for each document in turn
        if matutils.ismatrix(result):
            return [matutils.full2sparse_clipped(v, self.num_best) for v in result]
        else:
            # otherwise, return top-n of the single input document
            return matutils.full2sparse_clipped(result, self.num_best)

    def __iter__(self):
        """Iterate over all documents, computing similarity against all other documents in the index.

        Yields
        ------
        {`scipy.sparse.csr.csr_matrix`, list of (int, float)}
            Similarity of the current document and all documents of the corpus.

        """
        # turn off query normalization (vectors in the index are assumed to be already normalized)
        norm = self.normalize
        self.normalize = False

        # Try to compute similarities in bigger chunks of documents (not
        # one query = a single document after another). The point is, a
        # bigger query of N documents is faster than N small queries of one
        # document.
        #
        # After computing similarities of the bigger query in `self[chunk]`,
        # yield the resulting similarities one after another, so that it looks
        # exactly the same as if they had been computed with many small queries.
        try:
            chunking = self.chunksize > 1
        except AttributeError:
            # chunking not supported; fall back to the (slower) mode of 1 query=1 document
            chunking = False
        if chunking:
            # assumes `self.corpus` holds the index as a 2-d numpy array.
            # this is true for MatrixSimilarity and SparseMatrixSimilarity, but
            # may not be true for other (future) classes..?
            for chunk_start in xrange(0, self.index.shape[0], self.chunksize):
                # scipy.sparse doesn't allow slicing beyond real size of the matrix
                # (unlike numpy). so, clip the end of the chunk explicitly to make
                # scipy.sparse happy
                chunk_end = min(self.index.shape[0], chunk_start + self.chunksize)
                chunk = self.index[chunk_start: chunk_end]
                for sim in self[chunk]:
                    yield sim
        else:
            for doc in self.index:
                yield self[doc]
        # restore old normalization value, since __iter__ temporarily disabled it
        self.normalize = norm
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module contains math helper functions."""
from __future__ import with_statement
from itertools import chain
import logging
import math
from gensim import utils
import numpy as np
import scipy.sparse
from scipy.stats import entropy
import scipy.linalg
from scipy.linalg.lapack import get_lapack_funcs
from scipy.linalg.special_matrices import triu
from scipy.special import psi # gamma function utils
from six import iteritems, itervalues, string_types
from six.moves import xrange, zip as izip
logger = logging.getLogger(__name__)
def blas(name, ndarray):
    """Look up a BLAS routine by name, typed to match `ndarray`.

    Thin wrapper around :func:`scipy.linalg.get_blas_funcs`.

    Parameters
    ----------
    name : str
        Name of the BLAS function, without the type prefix (e.g. 'nrm2').
    ndarray : numpy.ndarray
        Sample array, used to select the optimal type-prefixed BLAS routine.

    Returns
    -------
    fortran object
        Fortran function implementing the requested operation.

    """
    funcs = scipy.linalg.get_blas_funcs((name,), (ndarray,))
    return funcs[0]
def argsort(x, topn=None, reverse=False):
    """Get indices of the `topn` smallest elements in array `x`.

    Parameters
    ----------
    x : array_like
        Array to sort.
    topn : int, optional
        How many indices of the smallest (greatest) elements to return; when None,
        indices of all elements are returned in ascending (descending) order.
    reverse : bool, optional
        If True, return indices of the `topn` greatest elements, in descending order.

    Returns
    -------
    numpy.ndarray
        Array of `topn` indices that sort the array in the required order.

    """
    arr = np.asarray(x)  # unify code path for when `x` is not a np array (list, tuple...)
    count = arr.size if topn is None else topn
    if count <= 0:
        return []
    if reverse:
        arr = -arr
    # full sort requested, or no fast partial argsort available (numpy < 1.8)
    if count >= arr.size or not hasattr(np, 'argpartition'):
        return np.argsort(arr)[:count]
    # np >= 1.8 has a fast partial argsort: grab the extremes, then order just them
    extremes = np.argpartition(arr, count)[:count]
    return extremes.take(np.argsort(arr.take(extremes)))  # resort topn into order
def corpus2csc(corpus, num_terms=None, dtype=np.float64, num_docs=None, num_nnz=None, printprogress=0):
    """Convert a streamed corpus in BoW format into a sparse matrix `scipy.sparse.csc_matrix`,
    with documents as columns.

    Notes
    -----
    If the number of terms, documents and non-zero elements is known (either passed
    in, or available as attributes on `corpus`), a more memory-efficient code path
    is taken.

    Parameters
    ----------
    corpus : iterable of iterable of (int, number)
        Input corpus in BoW format.
    num_terms : int, optional
        Number of terms; falls back to `corpus.num_terms` when available.
    dtype : data-type, optional
        Data type of the output matrix.
    num_docs : int, optional
        Number of documents; falls back to `corpus.num_docs` when available.
    num_nnz : int, optional
        Number of non-zero elements; falls back to `corpus.num_nnz` when available.
    printprogress : int, optional
        Log progress for every `printprogress` documents; 0 disables logging.

    Returns
    -------
    scipy.sparse.csc_matrix
        Sparse matrix inferred based on `corpus`.

    See Also
    --------
    :class:`~gensim.matutils.Sparse2Corpus`

    """
    # prefer corpus-supplied statistics (as in MmCorpus) when not given explicitly
    try:
        num_terms = corpus.num_terms if num_terms is None else num_terms
        num_docs = corpus.num_docs if num_docs is None else num_docs
        num_nnz = corpus.num_nnz if num_nnz is None else num_nnz
    except AttributeError:
        pass  # not a MmCorpus...
    if printprogress:
        logger.info("creating sparse matrix from corpus")

    if num_terms is not None and num_docs is not None and num_nnz is not None:
        # fast path: preallocate the CSC arrays up front
        pos, indptr = 0, [0]
        indices = np.empty((num_nnz,), dtype=np.int32)  # HACK assume feature ids fit in 32bit integer
        data = np.empty((num_nnz,), dtype=dtype)
        for docno, doc in enumerate(corpus):
            if printprogress and docno % printprogress == 0:
                logger.info("PROGRESS: at document #%i/%i", docno, num_docs)
            nextpos = pos + len(doc)
            indices[pos: nextpos] = [term_id for term_id, _ in doc]
            data[pos: nextpos] = [term_weight for _, term_weight in doc]
            indptr.append(nextpos)
            pos = nextpos
        assert pos == num_nnz, "mismatch between supplied and computed number of non-zeros"
        return scipy.sparse.csc_matrix((data, indices, indptr), shape=(num_terms, num_docs), dtype=dtype)

    # slow path: determine the sparse matrix parameters while streaming the corpus
    num_nnz, data, indices, indptr = 0, [], [], [0]
    for docno, doc in enumerate(corpus):
        if printprogress and docno % printprogress == 0:
            logger.info("PROGRESS: at document #%i", docno)
        indices.extend(term_id for term_id, _ in doc)
        data.extend(term_weight for _, term_weight in doc)
        num_nnz += len(doc)
        indptr.append(num_nnz)
    if num_terms is None:
        num_terms = max(indices) + 1 if indices else 0
    num_docs = len(indptr) - 1
    # now num_docs, num_terms and num_nnz contain the correct values
    data = np.asarray(data, dtype=dtype)
    indices = np.asarray(indices)
    return scipy.sparse.csc_matrix((data, indices, indptr), shape=(num_terms, num_docs), dtype=dtype)
def pad(mat, padrow, padcol):
    """Add zero-filled rows/columns at the bottom/right of `mat`.

    Parameters
    ----------
    mat : numpy.ndarray
        Input 2D matrix.
    padrow : int
        Number of rows to add (negative values are treated as 0).
    padcol : int
        Number of columns to add (negative values are treated as 0).

    Returns
    -------
    numpy.matrixlib.defmatrix.matrix
        The padded matrix.

    """
    padrow = max(padrow, 0)
    padcol = max(padcol, 0)
    rows, cols = mat.shape
    right_block = np.matrix(np.zeros((rows, padcol)))
    bottom_block = np.matrix(np.zeros((padrow, cols + padcol)))
    return np.bmat([
        [mat, right_block],
        [bottom_block],
    ])
def zeros_aligned(shape, dtype, order='C', align=128):
    """Get a zero-filled array whose data buffer starts at an `align`-byte boundary.

    Parameters
    ----------
    shape : int or (int, int)
        Shape of array.
    dtype : data-type
        Data type of array.
    order : {'C', 'F'}, optional
        Row-major ('C') or column-major ('F') memory layout.
    align : int, optional
        Boundary for alignment in bytes.

    Returns
    -------
    numpy.ndarray
        Aligned, zero-initialized array.

    """
    nbytes = np.prod(shape, dtype=np.int64) * np.dtype(dtype).itemsize
    # over-allocate, then slice so the returned view starts on the alignment boundary
    # (problematic on win64: "maximum allowed dimension exceeded")
    raw = np.zeros(nbytes + align, dtype=np.uint8)
    offset = -raw.ctypes.data % align
    return raw[offset: offset + nbytes].view(dtype).reshape(shape, order=order)
def ismatrix(m):
    """Check whether `m` is a 2D `numpy.ndarray` or a `scipy.sparse` matrix.

    Parameters
    ----------
    m : object
        Candidate matrix.

    Returns
    -------
    bool
        True if `m` is a matrix, False otherwise.

    """
    if isinstance(m, np.ndarray) and m.ndim == 2:
        return True
    return scipy.sparse.issparse(m)
def any2sparse(vec, eps=1e-9):
    """Convert a numpy.ndarray or `scipy.sparse` vector into the gensim BoW format.

    Parameters
    ----------
    vec : {`numpy.ndarray`, `scipy.sparse`}
        Input vector.
    eps : float, optional
        Threshold: coordinates whose absolute value is not greater than `eps` are dropped.

    Returns
    -------
    list of (int, float)
        Vector in BoW format.

    """
    if isinstance(vec, np.ndarray):
        return dense2vec(vec, eps)
    if scipy.sparse.issparse(vec):
        return scipy2sparse(vec, eps)
    # otherwise, assume a gensim-style sequence of (id, weight) pairs
    return [(int(term_id), float(weight)) for term_id, weight in vec if np.abs(weight) > eps]
def scipy2scipy_clipped(matrix, topn, eps=1e-9):
    """Get a `scipy.sparse` vector / matrix consisting of 'topn' elements of the greatest magnitude (absolute value).

    Parameters
    ----------
    matrix : `scipy.sparse`
        Input vector / matrix.
    topn : int
        Number of greatest (by module) elements, that will be in result.
    eps : float
        PARAMETER IGNORED.

    Returns
    -------
    `scipy.sparse.csr.csr_matrix`
        Clipped matrix.

    """
    if not scipy.sparse.issparse(matrix):
        raise ValueError("'%s' is not a scipy sparse vector." % matrix)
    if topn <= 0:
        return scipy.sparse.csr_matrix([])
    # Return clipped sparse vector if input is a sparse vector.
    if matrix.shape[0] == 1:
        # use np.argpartition/argsort and only form tuples that are actually returned.
        biggest = argsort(abs(matrix.data), topn, reverse=True)
        indices, data = matrix.indices.take(biggest), matrix.data.take(biggest)
        return scipy.sparse.csr_matrix((data, indices, [0, len(indices)]))
    # Return clipped sparse matrix if input is a matrix, processing row by row.
    else:
        matrix_indices = []
        matrix_data = []
        matrix_indptr = [0]
        # calling abs() on entire matrix once is faster than calling abs() iteratively for each row
        matrix_abs = abs(matrix)
        for i in range(matrix.shape[0]):
            v = matrix.getrow(i)
            v_abs = matrix_abs.getrow(i)
            # Sort and clip each row vector first.
            biggest = argsort(v_abs.data, topn, reverse=True)
            indices, data = v.indices.take(biggest), v.data.take(biggest)
            # Store the topn indices and values of each row vector.
            matrix_data.append(data)
            matrix_indices.append(indices)
            matrix_indptr.append(matrix_indptr[-1] + min(len(indices), topn))
        matrix_indices = np.concatenate(matrix_indices).ravel()
        matrix_data = np.concatenate(matrix_data).ravel()
        # Instantiate and return a sparse csr_matrix which preserves the order of indices/data.
        # NOTE(review): the output width is np.max(matrix_indices) + 1, which may be narrower
        # than the input matrix — confirm callers don't rely on the input's column count.
        return scipy.sparse.csr.csr_matrix(
            (matrix_data, matrix_indices, matrix_indptr),
            shape=(matrix.shape[0], np.max(matrix_indices) + 1)
        )
def scipy2sparse(vec, eps=1e-9):
    """Convert a scipy.sparse vector (a single-row matrix) into BoW format.

    Parameters
    ----------
    vec : `scipy.sparse`
        Sparse vector with exactly one row.
    eps : float, optional
        Threshold: coordinates whose absolute value is not greater than `eps` are dropped.

    Returns
    -------
    list of (int, float)
        Vector in BoW format.

    """
    vec = vec.tocsr()
    assert vec.shape[0] == 1
    return [
        (int(col), float(val))
        for col, val in zip(vec.indices, vec.data)
        if np.abs(val) > eps
    ]
class Scipy2Corpus(object):
    """Convert a sequence of dense/sparse vectors into a streamed gensim corpus object.

    See Also
    --------
    :func:`~gensim.matutils.corpus2csc`

    """

    def __init__(self, vecs):
        """
        Parameters
        ----------
        vecs : iterable of {`numpy.ndarray`, `scipy.sparse`}
            Input vectors.

        """
        self.vecs = vecs

    def __iter__(self):
        # dense numpy vectors go through full2sparse, everything else through scipy2sparse
        for vector in self.vecs:
            if isinstance(vector, np.ndarray):
                yield full2sparse(vector)
            else:
                yield scipy2sparse(vector)

    def __len__(self):
        return len(self.vecs)
def sparse2full(doc, length):
    """Convert a document in BoW format into a dense numpy array.

    Parameters
    ----------
    doc : list of (int, number)
        Document in BoW format.
    length : int
        Length (dimensionality) of the result vector.

    Returns
    -------
    numpy.ndarray
        Dense variant of the `doc` vector.

    See Also
    --------
    :func:`~gensim.matutils.full2sparse`

    """
    dense = np.zeros(length, dtype=np.float32)  # unseen ids default to zero
    # cast ids to int (numpy 1.12 no longer indexes by floats) and weights to float
    bow = {int(term_id): float(weight) for term_id, weight in doc}
    # overwrite the explicitly-given entries
    dense[list(bow)] = list(bow.values())
    return dense
def full2sparse(vec, eps=1e-9):
    """Convert a dense numpy array into the BoW format.

    Parameters
    ----------
    vec : numpy.ndarray
        Input dense vector.
    eps : float
        Threshold: coordinates whose absolute value is not greater than `eps` are dropped.

    Returns
    -------
    list of (int, float)
        BoW format of `vec`.

    See Also
    --------
    :func:`~gensim.matutils.sparse2full`

    """
    dense = np.asarray(vec, dtype=float)
    nonzero_ids = np.nonzero(abs(dense) > eps)[0]
    return list(zip(nonzero_ids, dense.take(nonzero_ids)))
dense2vec = full2sparse
def full2sparse_clipped(vec, topn, eps=1e-9):
    """Like :func:`~gensim.matutils.full2sparse`, but keep only the `topn` elements of greatest magnitude (abs).

    Parameters
    ----------
    vec : numpy.ndarray
        Input dense vector.
    topn : int
        Number of greatest-magnitude elements to keep.
    eps : float
        Threshold: coordinates whose absolute value is not greater than `eps` are dropped.

    Returns
    -------
    list of (int, float)
        Clipped vector in BoW format.

    See Also
    --------
    :func:`~gensim.matutils.full2sparse`

    """
    if topn <= 0:
        return []
    # use np.argpartition/argsort and only form tuples that are actually returned.
    # this is about 40x faster than explicitly forming all 2-tuples to run sort() or heapq.nlargest() on.
    dense = np.asarray(vec, dtype=float)
    nonzero_ids = np.nonzero(abs(dense) > eps)[0]
    magnitudes = abs(dense).take(nonzero_ids)
    top_ids = nonzero_ids.take(argsort(magnitudes, topn, reverse=True))
    return list(zip(top_ids, dense.take(top_ids)))
def corpus2dense(corpus, num_terms, num_docs=None, dtype=np.float32):
    """Convert corpus into a dense numpy 2D array, with documents as columns.

    Parameters
    ----------
    corpus : iterable of iterable of (int, number)
        Input corpus in BoW format.
    num_terms : int
        Number of terms in the dictionary; used as the length of each document vector.
    num_docs : int, optional
        Number of documents in corpus. If provided, the output array is preallocated
        up front (faster, more memory-friendly).
    dtype : data-type, optional
        Data type of the output matrix.

    Returns
    -------
    numpy.ndarray
        Dense 2D array of shape (`num_terms`, number of documents).

    See Also
    --------
    :class:`~gensim.matutils.Dense2Corpus`

    """
    if num_docs is not None:
        # we know the number of documents => don't bother column_stacking
        docno, result = -1, np.empty((num_terms, num_docs), dtype=dtype)
        for docno, doc in enumerate(corpus):
            result[:, docno] = sparse2full(doc, num_terms)
        assert docno + 1 == num_docs
    else:
        # BUGFIX: np.column_stack requires a sequence; passing a bare generator is
        # deprecated since NumPy 1.16 and raises an error on current NumPy versions.
        result = np.column_stack([sparse2full(doc, num_terms) for doc in corpus])
    return result.astype(dtype)
class Dense2Corpus(object):
    """Treat a dense numpy array as a streamed gensim corpus in BoW format.

    Notes
    -----
    No data copy is made (changes to the underlying matrix imply changes in the corpus).

    See Also
    --------
    :func:`~gensim.matutils.corpus2dense`
    :class:`~gensim.matutils.Sparse2Corpus`

    """

    def __init__(self, dense, documents_columns=True):
        """
        Parameters
        ----------
        dense : numpy.ndarray
            Corpus in dense format.
        documents_columns : bool, optional
            Whether documents are stored as columns (True) or as rows (False).

        """
        # store documents as rows internally; transpose when they arrive as columns
        self.dense = dense.T if documents_columns else dense

    def __iter__(self):
        """Iterate over the corpus.

        Yields
        ------
        list of (int, float)
            Document in BoW format.

        """
        for row in self.dense:
            yield full2sparse(row.flat)

    def __len__(self):
        return len(self.dense)
class Sparse2Corpus(object):
    """Convert a matrix in scipy.sparse format into a streaming gensim corpus.

    See Also
    --------
    :func:`~gensim.matutils.corpus2csc`
    :class:`~gensim.matutils.Dense2Corpus`

    """

    def __init__(self, sparse, documents_columns=True):
        """
        Parameters
        ----------
        sparse : `scipy.sparse`
            Corpus in scipy sparse format.
        documents_columns : bool, optional
            Whether documents are stored as columns (True) or as rows (False).

        """
        if documents_columns:
            self.sparse = sparse.tocsc()
        else:
            # make sure shape[1] = number of docs (needed in len())
            self.sparse = sparse.tocsr().T

    def __iter__(self):
        """
        Yields
        ------
        list of (int, float)
            Document in BoW format.

        """
        # consecutive indptr pairs delimit one column (= one document) each
        for start, end in zip(self.sparse.indptr, self.sparse.indptr[1:]):
            yield list(zip(self.sparse.indices[start:end], self.sparse.data[start:end]))

    def __len__(self):
        return self.sparse.shape[1]

    def __getitem__(self, document_index):
        """Retrieve a single document from the corpus by its index.

        Parameters
        ----------
        document_index : int
            Index of the requested document.

        Returns
        -------
        list of (int, number)
            Document in BoW format.

        """
        start = self.sparse.indptr[document_index]
        end = self.sparse.indptr[document_index + 1]
        return list(zip(self.sparse.indices[start:end], self.sparse.data[start:end]))
def veclen(vec):
    """Calculate the Euclidean length of a sparse vector.

    Parameters
    ----------
    vec : list of (int, number)
        Input vector in BoW format.

    Returns
    -------
    float
        Length of `vec` (0.0 for an empty vector).

    """
    if not vec:
        return 0.0
    length = 1.0 * math.sqrt(sum(weight ** 2 for _, weight in vec))
    assert length > 0.0, "sparse documents must not contain any explicit zero entries"
    return length
def ret_normalized_vec(vec, length):
    """Divide all weights of a BoW vector by `length`.

    Parameters
    ----------
    vec : list of (int, number)
        Input vector in BoW format.
    length : float
        The norm to divide by; if exactly 1.0, the vector is returned as a plain copy.

    Returns
    -------
    list of (int, number)
        Normalized vector in BoW format.

    """
    if length == 1.0:
        return list(vec)
    return [(term_id, weight / length) for term_id, weight in vec]
def ret_log_normalize_vec(vec, axis=1):
    """Log-normalize `vec` so that, along `axis`, exp(vec) sums to one.

    Subtracts the log of the sum of exponentials (computed with a shift for
    numerical stability) from `vec`.

    Parameters
    ----------
    vec : numpy.ndarray
        1D or 2D array of log-weights.
        NOTE(review): the 1D branch updates `vec` in place (`vec -= log_norm`) while
        the axis=1 branch allocates a new array — confirm callers tolerate this asymmetry.
    axis : int, optional
        1 to normalize each row (sample) independently, 0 to normalize each column (feature).

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        The log-normalized array and the log normalization constant(s).

    Raises
    ------
    ValueError
        If `axis` is neither 0 nor 1 (only reachable for 2D input).
    """
    log_max = 100.0  # cap used when shifting exponents, to avoid overflow in exp()
    if len(vec.shape) == 1:
        max_val = np.max(vec)
        log_shift = log_max - np.log(len(vec) + 1.0) - max_val
        tot = np.sum(np.exp(vec + log_shift))
        log_norm = np.log(tot) - log_shift
        vec -= log_norm
    else:
        if axis == 1:  # independently normalize each sample
            max_val = np.max(vec, 1)
            log_shift = log_max - np.log(vec.shape[1] + 1.0) - max_val
            tot = np.sum(np.exp(vec + log_shift[:, np.newaxis]), 1)
            log_norm = np.log(tot) - log_shift
            vec = vec - log_norm[:, np.newaxis]
        elif axis == 0:  # normalize each feature
            # delegate to the axis=1 case by transposing in and out
            k = ret_log_normalize_vec(vec.T)
            return k[0].T, k[1]
        else:
            raise ValueError("'%s' is not a supported axis" % axis)
    return vec, log_norm
# Pre-bound double-precision BLAS routines used by unitvec() below:
# nrm2 computes the Euclidean norm, scal scales a vector by a constant.
blas_nrm2 = blas('nrm2', np.array([], dtype=float))
blas_scal = blas('scal', np.array([], dtype=float))
def unitvec(vec, norm='l2'):
    """Scale a vector to unit length.

    Parameters
    ----------
    vec : {numpy.ndarray, scipy.sparse, list of (int, float)}
        Input vector in any of the supported formats.
    norm : {'l1', 'l2'}, optional
        Which norm to scale by.

    Returns
    -------
    {numpy.ndarray, scipy.sparse, list of (int, float)}
        Normalized vector, in the same format as `vec`.

    Notes
    -----
    A zero-vector is returned unchanged.

    """
    if norm not in ('l1', 'l2'):
        raise ValueError("'%s' is not a supported norm. Currently supported norms are 'l1' and 'l2'." % norm)

    if scipy.sparse.issparse(vec):
        vec = vec.tocsr()
        if norm == 'l1':
            length = np.sum(np.abs(vec.data))
        if norm == 'l2':
            length = np.sqrt(np.sum(vec.data ** 2))
        return vec / length if length > 0.0 else vec

    if isinstance(vec, np.ndarray):
        vec = np.asarray(vec, dtype=float)
        if norm == 'l1':
            length = np.sum(np.abs(vec))
        if norm == 'l2':
            length = blas_nrm2(vec)
        return blas_scal(1.0 / length, vec) if length > 0.0 else vec

    try:
        first = next(iter(vec))  # is there at least one element?
    except StopIteration:
        # empty gensim-style vector: nothing to scale
        return vec

    if isinstance(first, (tuple, list)) and len(first) == 2:  # gensim sparse format
        if norm == 'l1':
            length = float(sum(abs(weight) for _, weight in vec))
        if norm == 'l2':
            length = 1.0 * math.sqrt(sum(weight ** 2 for _, weight in vec))
        assert length > 0.0, "sparse documents must not contain any explicit zero entries"
        return ret_normalized_vec(vec, length)
    raise ValueError("unknown input type")
def cossim(vec1, vec2):
    """Get cosine similarity between two sparse vectors.

    The similarity is a number within <-1.0, 1.0>; higher means more similar.

    Parameters
    ----------
    vec1 : list of (int, float)
        Vector in BoW format.
    vec2 : list of (int, float)
        Vector in BoW format.

    Returns
    -------
    float
        Cosine similarity between `vec1` and `vec2` (0.0 if either is empty).

    """
    vec1, vec2 = dict(vec1), dict(vec2)
    if not vec1 or not vec2:
        return 0.0
    norm1 = 1.0 * math.sqrt(sum(w * w for w in vec1.values()))
    norm2 = 1.0 * math.sqrt(sum(w * w for w in vec2.values()))
    assert norm1 > 0.0 and norm2 > 0.0, "sparse documents must not contain any explicit zero entries"
    # iterate over the shorter vector when accumulating the dot product
    if len(vec2) < len(vec1):
        vec1, vec2 = vec2, vec1
    dot = sum(weight * vec2.get(term_id, 0.0) for term_id, weight in vec1.items())
    return dot / (norm1 * norm2)  # rescale by vector lengths
def softcossim(vec1, vec2, similarity_matrix):
    """Get the Soft Cosine Measure between two vectors, given a term similarity matrix.

    Returns the Soft Cosine Measure between two sparse vectors, given a sparse term
    similarity matrix in the :class:`scipy.sparse.csc_matrix` format. The similarity
    is a number within <-1.0, 1.0>; higher means more similar.

    Parameters
    ----------
    vec1 : list of (int, float)
        A query vector in the BoW format.
    vec2 : list of (int, float)
        A document vector in the BoW format.
    similarity_matrix : {:class:`scipy.sparse.csc_matrix`, :class:`scipy.sparse.csr_matrix`}
        A term similarity matrix, typically produced by
        :meth:`~gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.similarity_matrix`.

    Returns
    -------
    `similarity_matrix.dtype`
        The Soft Cosine Measure between `vec1` and `vec2`.

    Raises
    ------
    ValueError
        When the term similarity matrix is in an unknown format.

    See Also
    --------
    :meth:`gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.similarity_matrix`
        A term similarity matrix produced from term embeddings.
    :class:`gensim.similarities.docsim.SoftCosineSimilarity`
        A class for performing corpus-based similarity queries with Soft Cosine Measure.

    References
    ----------
    Soft Cosine Measure was perhaps first defined by [sidorovetal14]_.

    .. [sidorovetal14] Grigori Sidorov et al., "Soft Similarity and Soft Cosine Measure: Similarity
       of Features in Vector Space Model", 2014, http://www.cys.cic.ipn.mx/ojs/index.php/CyS/article/view/2043/1921.

    """
    # accept CSR input by transposing it into CSC; reject anything else
    if not isinstance(similarity_matrix, scipy.sparse.csc_matrix):
        if isinstance(similarity_matrix, scipy.sparse.csr_matrix):
            similarity_matrix = similarity_matrix.T
        else:
            raise ValueError('unknown similarity matrix format')

    if not vec1 or not vec2:
        return 0.0

    vec1, vec2 = dict(vec1), dict(vec2)
    word_indices = sorted(set(chain(vec1, vec2)))
    dtype = similarity_matrix.dtype
    dense1 = np.array([vec1.get(i, 0) for i in word_indices], dtype=dtype)
    dense2 = np.array([vec2.get(i, 0) for i in word_indices], dtype=dtype)
    # pull out only the sub-matrix for the terms that actually occur in either vector
    dense_matrix = similarity_matrix[[[i] for i in word_indices], word_indices].todense()
    vec1len = dense1.T.dot(dense_matrix).dot(dense1)[0, 0]
    vec2len = dense2.T.dot(dense_matrix).dot(dense2)[0, 0]
    assert \
        vec1len > 0.0 and vec2len > 0.0, \
        u"sparse documents must not contain any explicit zero entries and the similarity matrix S " \
        u"must satisfy x^T * S * x > 0 for any nonzero bag-of-words vector x."
    result = dense1.T.dot(dense_matrix).dot(dense2)[0, 0]
    result /= math.sqrt(vec1len) * math.sqrt(vec2len)  # rescale by vector lengths
    return np.clip(result, -1.0, 1.0)
def isbow(vec):
    """Check whether `vec` is in the gensim BoW format.

    Parameters
    ----------
    vec : object
        Input vector in any format.

    Returns
    -------
    bool
        True if `vec` is in BoW format (empty input also counts), False otherwise.

    """
    if scipy.sparse.issparse(vec):
        vec = vec.todense().tolist()
    try:
        # probe the first entry: BoW entries must unpack into an (id, weight) pair
        first_id, first_weight = vec[0]
        int(first_id), float(first_weight)
    except IndexError:
        return True  # this is to handle the empty input case
    except (ValueError, TypeError):
        return False
    return True
def convert_vec(vec1, vec2, num_features=None):
    """Convert a pair of vectors to dense format.

    Parameters
    ----------
    vec1 : {scipy.sparse, list of (int, float)}
        Input vector.
    vec2 : {scipy.sparse, list of (int, float)}
        Input vector.
    num_features : int, optional
        Dimensionality of the dense result vectors; inferred from the inputs when not given.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        (`vec1`, `vec2`) in dense format.

    """
    # densify scipy sparse inputs first
    if scipy.sparse.issparse(vec1):
        vec1 = vec1.toarray()
    if scipy.sparse.issparse(vec2):
        vec2 = vec2.toarray()
    if isbow(vec1) and isbow(vec2):
        # both in BoW format: expand to dense vectors of a common length
        length = num_features if num_features is not None else max(len(vec1), len(vec2))
        return sparse2full(vec1, length), sparse2full(vec2, length)
    # not BoW: `toarray()` may have produced a nested [[...]] shape; the scipy
    # implementation of Kullback-Leibler fails on that, so unwrap single-row inputs
    if len(vec1) == 1:
        vec1 = vec1[0]
    if len(vec2) == 1:
        vec2 = vec2[0]
    return vec1, vec2
def kullback_leibler(vec1, vec2, num_features=None):
    """Calculate the Kullback-Leibler distance between two probability distributions.

    Uses `scipy.stats.entropy` under the hood.

    Parameters
    ----------
    vec1 : {scipy.sparse, numpy.ndarray, list of (int, float)}
        Distribution vector.
    vec2 : {scipy.sparse, numpy.ndarray, list of (int, float)}
        Distribution vector.
    num_features : int, optional
        Number of features in the vectors.

    Returns
    -------
    float
        KL distance between `vec1` and `vec2`; a value in [0, +∞) where values
        closer to 0 mean less distance (higher similarity).

    """
    dense1, dense2 = convert_vec(vec1, vec2, num_features=num_features)
    return entropy(dense1, dense2)
def jensen_shannon(vec1, vec2, num_features=None):
    """Calculate Jensen-Shannon distance between two probability distributions using `scipy.stats.entropy`.

    Parameters
    ----------
    vec1 : {scipy.sparse, numpy.ndarray, list of (int, float)}
        Distribution vector.
    vec2 : {scipy.sparse, numpy.ndarray, list of (int, float)}
        Distribution vector.
    num_features : int, optional
        Number of features in vector.

    Returns
    -------
    float
        Jensen-Shannon distance between `vec1` and `vec2`.

    Notes
    -----
    This is a symmetric and finite "version" of :func:`gensim.matutils.kullback_leibler`.

    """
    dense1, dense2 = convert_vec(vec1, vec2, num_features=num_features)
    # Mixture distribution: the midpoint of the two inputs.
    midpoint = (dense1 + dense2) / 2.0
    return 0.5 * (entropy(dense1, midpoint) + entropy(dense2, midpoint))
def hellinger(vec1, vec2):
    """Calculate Hellinger distance between two probability distributions.

    Parameters
    ----------
    vec1 : {scipy.sparse, numpy.ndarray, list of (int, float)}
        Distribution vector.
    vec2 : {scipy.sparse, numpy.ndarray, list of (int, float)}
        Distribution vector.

    Returns
    -------
    float
        Hellinger distance between `vec1` and `vec2`.
        Value in range [0, 1], where 0 is min distance (max similarity) and 1 is max distance (min similarity).

    """
    vec1 = vec1.toarray() if scipy.sparse.issparse(vec1) else vec1
    vec2 = vec2.toarray() if scipy.sparse.issparse(vec2) else vec2
    if isbow(vec1) and isbow(vec2):
        # BoW inputs: work on the sparse id -> weight mappings directly,
        # summing over the union of feature ids (missing ids count as 0).
        weights1, weights2 = dict(vec1), dict(vec2)
        indices = set(list(weights1.keys()) + list(weights2.keys()))
        total = sum(
            (np.sqrt(weights1.get(index, 0.0)) - np.sqrt(weights2.get(index, 0.0))) ** 2
            for index in indices
        )
        return np.sqrt(0.5 * total)
    # Dense inputs: vectorized elementwise formula.
    return np.sqrt(0.5 * ((np.sqrt(vec1) - np.sqrt(vec2)) ** 2).sum())
def jaccard(vec1, vec2):
    """Calculate Jaccard distance between vectors.

    Parameters
    ----------
    vec1 : {scipy.sparse, numpy.ndarray, list of (int, float)}
        Distribution vector.
    vec2 : {scipy.sparse, numpy.ndarray, list of (int, float)}
        Distribution vector.

    Returns
    -------
    float
        Jaccard distance between `vec1` and `vec2`.
        Value in range [0, 1], where 0 is min distance (max similarity) and 1 is max distance (min similarity).
        Two empty inputs yield the maximum distance 1, consistent with :func:`jaccard_distance`.

    """
    # converting from sparse for easier manipulation
    if scipy.sparse.issparse(vec1):
        vec1 = vec1.toarray()
    if scipy.sparse.issparse(vec2):
        vec2 = vec2.toarray()
    if isbow(vec1) and isbow(vec2):
        # if it's in bow format, we use the following definitions:
        # union = sum of the 'weights' of both the bags
        # intersection = lowest weight for a particular id; basically the number of common words or items
        union = sum(weight for id_, weight in vec1) + sum(weight for id_, weight in vec2)
        if union == 0:
            # both documents are empty: return maximum distance instead of
            # raising ZeroDivisionError (matches jaccard_distance() below)
            return 1.
        vec1, vec2 = dict(vec1), dict(vec2)
        intersection = 0.0
        for feature_id, feature_weight in iteritems(vec1):
            intersection += min(feature_weight, vec2.get(feature_id, 0.0))
        return 1 - float(intersection) / float(union)
    else:
        # if it isn't in bag of words format, we can use sets to calculate intersection and union
        if isinstance(vec1, np.ndarray):
            vec1 = vec1.tolist()
        if isinstance(vec2, np.ndarray):
            vec2 = vec2.tolist()
        vec1 = set(vec1)
        vec2 = set(vec2)
        intersection = vec1 & vec2
        union = vec1 | vec2
        if not union:
            # both inputs empty: maximum distance, consistent with jaccard_distance()
            return 1.
        return 1 - float(len(intersection)) / float(len(union))
def jaccard_distance(set1, set2):
    """Calculate Jaccard distance between two sets.

    Parameters
    ----------
    set1 : set
        Input set.
    set2 : set
        Input set.

    Returns
    -------
    float
        Jaccard distance between `set1` and `set2`.
        Value in range [0, 1], where 0 is min distance (max similarity) and 1 is max distance (min similarity).

    """
    union_size = len(set1.union(set2))
    if not union_size:
        # both sets are empty: by convention, maximum distance
        return 1.
    intersection_size = len(set1.intersection(set2))
    return 1. - float(intersection_size) / float(union_size)
try:
    # try to load fast, cythonized code if possible
    from gensim._matutils import logsumexp, mean_absolute_difference, dirichlet_expectation

except ImportError:
    def logsumexp(x):
        """Log of sum of exponentials.

        Parameters
        ----------
        x : numpy.ndarray
            Input 2d matrix.

        Returns
        -------
        float
            log of sum of exponentials of elements in `x`.

        Warnings
        --------
        By performance reasons, doesn't support NaNs or 1d, 3d, etc arrays like :func:`scipy.special.logsumexp`.

        """
        # Shift by the maximum for numerical stability, then add it back.
        x_max = np.max(x)
        return np.log(np.sum(np.exp(x - x_max))) + x_max

    def mean_absolute_difference(a, b):
        """Mean absolute difference between two arrays.

        Parameters
        ----------
        a : numpy.ndarray
            Input 1d array.
        b : numpy.ndarray
            Input 1d array.

        Returns
        -------
        float
            mean(abs(a - b)).

        """
        return np.mean(np.abs(a - b))

    def dirichlet_expectation(alpha):
        """Expected value of log(theta) where theta is drawn from a Dirichlet distribution.

        Parameters
        ----------
        alpha : numpy.ndarray
            Dirichlet parameter 2d matrix or 1d vector, if 2d - each row is treated as a separate parameter vector.

        Returns
        -------
        numpy.ndarray
            Log of expected values, dimension same as `alpha.ndim`.

        """
        if alpha.ndim == 1:
            result = psi(alpha) - psi(np.sum(alpha))
        else:
            # row-wise expectation: broadcast each row's digamma of the row sum
            result = psi(alpha) - psi(np.sum(alpha, 1))[:, np.newaxis]
        return result.astype(alpha.dtype, copy=False)  # keep the same precision as input
def qr_destroy(la):
    """Get QR decomposition of `la[0]`.

    Parameters
    ----------
    la : list of numpy.ndarray
        Single-element list holding the matrix to decompose; the list is emptied in place.

    Notes
    -----
    Using this function should be less memory intense than calling `scipy.linalg.qr(la[0])`,
    because the memory used in `la[0]` is reclaimed earlier.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        Matrices :math:`Q` and :math:`R`.

    Warnings
    --------
    Content of `la` gets destroyed in the process.

    """
    a = np.asfortranarray(la[0])
    del la[0], la  # now `a` is the only reference to the input matrix
    m, n = a.shape
    # perform q, r = QR(a); code hacked out of scipy.linalg.qr
    logger.debug("computing QR of %s dense matrix", str(a.shape))
    geqrf, = get_lapack_funcs(('geqrf',), (a,))
    # lwork=-1 is a LAPACK workspace-size query; the second call runs the actual factorization
    qr, tau, work, info = geqrf(a, lwork=-1, overwrite_a=True)
    qr, tau, work, info = geqrf(a, lwork=work[0], overwrite_a=True)
    del a  # free up mem
    assert info >= 0
    # R is the upper triangle of the packed geqrf output
    r = triu(qr[:n, :n])
    if m < n:  # rare case, #features < #topics
        qr = qr[:, :m]  # retains fortran order
    gorgqr, = get_lapack_funcs(('orgqr',), (qr,))
    # again: first call queries the optimal workspace size, second call materializes Q
    q, work, info = gorgqr(qr, tau, lwork=-1, overwrite_a=True)
    q, work, info = gorgqr(qr, tau, lwork=work[0], overwrite_a=True)
    assert info >= 0, "qr failed"
    assert q.flags.f_contiguous
    return q, r
class MmWriter(object):
    """Store a corpus in Matrix Market format, used for :class:`~gensim.corpora.mmcorpus.MmCorpus`.

    Notes
    -----
    Output is written one document at a time, not the whole matrix at once (unlike `scipy.io.mmread`).
    This allows us to process corpora which are larger than the available RAM.

    The output file is created in a single pass through the input corpus, so that the input can be
    a once-only stream (iterator). To achieve this, a fake MM header is written first, statistics are collected
    during the pass (shape of the matrix, number of non-zeroes), followed by a seek back to the beginning of the file,
    rewriting the fake header with proper values.

    """
    # First line of every output file; the only MM variant this writer produces.
    HEADER_LINE = b'%%MatrixMarket matrix coordinate real general\n'  # the only supported MM format
    def __init__(self, fname):
        """
        Parameters
        ----------
        fname : str
            Path to output file.

        Raises
        ------
        NotImplementedError
            If `fname` ends with ".gz" or ".bz2": compressed streams cannot be
            seeked back to rewrite the header.

        """
        self.fname = fname
        if fname.endswith(".gz") or fname.endswith('.bz2'):
            raise NotImplementedError("compressed output not supported with MmWriter")
        # 'wb+' because we must seek back and overwrite the placeholder header later
        self.fout = utils.smart_open(self.fname, 'wb+')  # open for both reading and writing
        self.headers_written = False
    def write_headers(self, num_docs, num_terms, num_nnz):
        """Write headers to file.

        Parameters
        ----------
        num_docs : int
            Number of documents in corpus.
        num_terms : int
            Number of term in corpus.
        num_nnz : int
            Number of non-zero elements in corpus. Pass a negative value when the
            statistics are not known yet; a fixed-width placeholder line is written instead.

        """
        self.fout.write(MmWriter.HEADER_LINE)
        if num_nnz < 0:
            # we don't know the matrix shape/density yet, so only log a general line
            logger.info("saving sparse matrix to %s", self.fname)
            # reserve a fixed-width stats line; fake_headers() overwrites it in place later
            self.fout.write(utils.to_utf8(' ' * 50 + '\n'))  # 48 digits must be enough for everybody
        else:
            logger.info(
                "saving sparse %sx%s matrix with %i non-zero entries to %s",
                num_docs, num_terms, num_nnz, self.fname
            )
            self.fout.write(utils.to_utf8('%s %s %s\n' % (num_docs, num_terms, num_nnz)))
        # track the last document id written, to enforce sequential order
        self.last_docno = -1
        self.headers_written = True
    def fake_headers(self, num_docs, num_terms, num_nnz):
        """Overwrite the placeholder stats line with the real matrix statistics.

        Parameters
        ----------
        num_docs : int
            Number of documents in corpus.
        num_terms : int
            Number of term in corpus.
        num_nnz : int
            Number of non-zero elements in corpus.

        Raises
        ------
        ValueError
            If the formatted stats exceed the 50-character placeholder reserved
            by :meth:`write_headers`.

        """
        stats = '%i %i %i' % (num_docs, num_terms, num_nnz)
        if len(stats) > 50:
            raise ValueError('Invalid stats: matrix too large!')
        # jump just past HEADER_LINE, where the placeholder line starts
        self.fout.seek(len(MmWriter.HEADER_LINE))
        self.fout.write(utils.to_utf8(stats))
    def write_vector(self, docno, vector):
        """Write a single sparse vector to the file.

        Parameters
        ----------
        docno : int
            Number of document.
        vector : list of (int, number)
            Document in BoW format.

        Returns
        -------
        (int, int)
            Max word index in vector and len of vector. If vector is empty, return (-1, 0).

        """
        assert self.headers_written, "must write Matrix Market file headers before writing data!"
        assert self.last_docno < docno, "documents %i and %i not in sequential order!" % (self.last_docno, docno)
        vector = sorted((i, w) for i, w in vector if abs(w) > 1e-12)  # ignore near-zero entries
        for termid, weight in vector:  # write term ids in sorted order
            # +1 because MM format starts counting from 1
            self.fout.write(utils.to_utf8("%i %i %s\n" % (docno + 1, termid + 1, weight)))
        self.last_docno = docno
        return (vector[-1][0], len(vector)) if vector else (-1, 0)
    @staticmethod
    def write_corpus(fname, corpus, progress_cnt=1000, index=False, num_terms=None, metadata=False):
        """Save the corpus to disk in Matrix Market format.

        Parameters
        ----------
        fname : str
            Filename of the resulting file.
        corpus : iterable of list of (int, number)
            Corpus in Bow format.
        progress_cnt : int, optional
            Print progress for every `progress_cnt` number of documents.
        index : bool, optional
            If True, the offsets will be return, otherwise return None.
        num_terms : int, optional
            If provided, the `num_terms` attributes in the corpus will be ignored.
        metadata : bool, optional
            If True, a metadata file will be generated.

        Returns
        -------
        offsets : {list of int, None}
            List of offsets (if index=True) or nothing.

        Notes
        -----
        Documents are processed one at a time, so the whole corpus is allowed to be larger than the available RAM.

        See Also
        --------
        :func:`~gensim.corpora.mmcorpus.MmCorpus.save_corpus`

        """
        mw = MmWriter(fname)
        # write empty headers to the file (with enough space to be overwritten later)
        mw.write_headers(-1, -1, -1)  # will print 50 spaces followed by newline on the stats line
        # calculate necessary header info (nnz elements, num terms, num docs) while writing out vectors
        _num_terms, num_nnz = 0, 0
        docno, poslast = -1, -1
        offsets = []
        if hasattr(corpus, 'metadata'):
            # temporarily switch the corpus into the requested metadata mode
            orig_metadata = corpus.metadata
            corpus.metadata = metadata
            if metadata:
                docno2metadata = {}
        else:
            metadata = False
        for docno, doc in enumerate(corpus):
            if metadata:
                bow, data = doc
                docno2metadata[docno] = data
            else:
                bow = doc
            if docno % progress_cnt == 0:
                logger.info("PROGRESS: saving document #%i", docno)
            if index:
                posnow = mw.fout.tell()
                # unchanged file position means the previous document was empty:
                # mark its offset with the special value -1
                if posnow == poslast:
                    offsets[-1] = -1
                offsets.append(posnow)
                poslast = posnow
            max_id, veclen = mw.write_vector(docno, bow)
            _num_terms = max(_num_terms, 1 + max_id)
            num_nnz += veclen
        if metadata:
            utils.pickle(docno2metadata, fname + '.metadata.cpickle')
            corpus.metadata = orig_metadata
        num_docs = docno + 1
        # explicit num_terms overrides the value inferred from the data
        num_terms = num_terms or _num_terms
        if num_docs * num_terms != 0:
            logger.info(
                "saved %ix%i matrix, density=%.3f%% (%i/%i)",
                num_docs, num_terms, 100.0 * num_nnz / (num_docs * num_terms), num_nnz, num_docs * num_terms
            )
        # now write proper headers, by seeking and overwriting the spaces written earlier
        mw.fake_headers(num_docs, num_terms, num_nnz)
        mw.close()
        if index:
            return offsets
    def __del__(self):
        """Close `self.fout` file, alias for :meth:`~gensim.matutils.MmWriter.close`.

        Warnings
        --------
        Closing the file explicitly via the close() method is preferred and safer.

        """
        self.close()  # does nothing if called twice (on an already closed file), so no worries
    def close(self):
        """Close `self.fout` file."""
        logger.debug("closing %s", self.fname)
        if hasattr(self, 'fout'):
            self.fout.close()
try:
    # try to load fast, cythonized code if possible
    from gensim.corpora._mmreader import MmReader
except ImportError:
    # cython extension unavailable: fall back to the pure-Python reader below
    FAST_VERSION = -1

    class MmReader(object):
        """Matrix market file reader, used for :class:`~gensim.corpora.mmcorpus.MmCorpus`.

        Wrap a term-document matrix on disk (in matrix-market format), and present it
        as an object which supports iteration over the rows (~documents).

        Attributes
        ----------
        num_docs : int
            number of documents in market matrix file
        num_terms : int
            number of terms
        num_nnz : int
            number of non-zero terms

        Notes
        ----------
        Note that the file is read into memory one document at a time, not the whole matrix at once
        (unlike :meth:`~scipy.io.mmread`). This allows us to process corpora which are larger than the available RAM.

        """
        def __init__(self, input, transposed=True):
            """
            Parameters
            ----------
            input : {str, file-like object}
                Path to input file in MM format or a file-like object that supports `seek()`
                (e.g. :class:`~gzip.GzipFile`, :class:`~bz2.BZ2File`).
            transposed : bool, optional
                if True, expects lines to represent doc_id, term_id, value. Else, expects term_id, doc_id, value.

            Raises
            ------
            ValueError
                If the file header is not "matrix coordinate real general".

            """
            logger.info("initializing corpus reader from %s", input)
            self.input, self.transposed = input, transposed
            with utils.open_file(self.input) as lines:
                try:
                    header = utils.to_unicode(next(lines)).strip()
                    if not header.lower().startswith('%%matrixmarket matrix coordinate real general'):
                        raise ValueError(
                            "File %s not in Matrix Market format with coordinate real general; instead found: \n%s" %
                            (self.input, header)
                        )
                except StopIteration:
                    # empty file: keep the zero defaults set below
                    pass
                self.num_docs = self.num_terms = self.num_nnz = 0
                for lineno, line in enumerate(lines):
                    line = utils.to_unicode(line)
                    if not line.startswith('%'):
                        # first non-comment line holds the matrix shape: rows, columns, non-zeros
                        self.num_docs, self.num_terms, self.num_nnz = (int(x) for x in line.split())
                        if not self.transposed:
                            self.num_docs, self.num_terms = self.num_terms, self.num_docs
                        break
            logger.info(
                "accepted corpus with %i documents, %i features, %i non-zero entries",
                self.num_docs, self.num_terms, self.num_nnz
            )
        def __len__(self):
            """Get size of corpus (number of documents)."""
            return self.num_docs
        def __str__(self):
            return ("MmCorpus(%i documents, %i features, %i non-zero entries)" %
                    (self.num_docs, self.num_terms, self.num_nnz))
        def skip_headers(self, input_file):
            """Skip file headers that appear before the first document.

            Parameters
            ----------
            input_file : iterable of str
                Iterable taken from file in MM format.

            """
            # skip the leading '%'-comment lines plus the one shape line after them
            for line in input_file:
                if line.startswith(b'%'):
                    continue
                break
        def __iter__(self):
            """Iterate through corpus.

            Notes
            ------
            Note that the total number of vectors returned is always equal to the number of rows specified
            in the header, empty documents are inserted and yielded where appropriate, even if they are not explicitly
            stored in the Matrix Market file.

            Yields
            ------
            (int, list of (int, number))
                Document id and Document in BoW format

            """
            with utils.file_or_filename(self.input) as lines:
                self.skip_headers(lines)
                previd = -1
                for line in lines:
                    docid, termid, val = utils.to_unicode(line).split()  # needed for python3
                    if not self.transposed:
                        termid, docid = docid, termid
                    # -1 because matrix market indexes are 1-based => convert to 0-based
                    docid, termid, val = int(docid) - 1, int(termid) - 1, float(val)
                    assert previd <= docid, "matrix columns must come in ascending order"
                    if docid != previd:
                        # change of document: return the document read so far (its id is prevId)
                        if previd >= 0:
                            yield previd, document  # noqa:F821
                        # return implicit (empty) documents between previous id and new id
                        # too, to keep consistent document numbering and corpus length
                        for previd in xrange(previd + 1, docid):
                            yield previd, []
                        # from now on start adding fields to a new document, with a new id
                        previd = docid
                        document = []
                    document.append((termid, val,))  # add another field to the current document
            # handle the last document, as a special case
            if previd >= 0:
                yield previd, document
            # return empty documents between the last explicit document and the number
            # of documents as specified in the header
            for previd in xrange(previd + 1, self.num_docs):
                yield previd, []
        def docbyoffset(self, offset):
            """Get document at file offset `offset` (in bytes).

            Parameters
            ----------
            offset : int
                Offset, in bytes, of desired document.

            Returns
            ------
            list of (int, str)
                Document in BoW format.

            """
            # empty documents are not stored explicitly in MM format, so the index marks
            # them with a special offset, -1.
            if offset == -1:
                return []
            if isinstance(self.input, string_types):
                fin, close_fin = utils.smart_open(self.input), True
            else:
                fin, close_fin = self.input, False
            fin.seek(offset)  # works for gzip/bz2 input, too
            previd, document = -1, []
            for line in fin:
                docid, termid, val = line.split()
                if not self.transposed:
                    termid, docid = docid, termid
                # -1 because matrix market indexes are 1-based => convert to 0-based
                docid, termid, val = int(docid) - 1, int(termid) - 1, float(val)
                assert previd <= docid, "matrix columns must come in ascending order"
                if docid != previd:
                    # reached the next document: the requested one is complete
                    if previd >= 0:
                        break
                    previd = docid
                document.append((termid, val,))  # add another field to the current document
            if close_fin:
                fin.close()
            return document
| 50,908 | 31.82334 | 119 | py |
poincare_glove | poincare_glove-master/gensim/__init__.py | """This package contains interfaces and functionality to compute pair-wise document similarities within a corpus
of documents.
"""
from gensim import parsing, corpora, matutils, interfaces, models, similarities, summarization, utils # noqa:F401
import logging
__version__ = '3.4.0'
class NullHandler(logging.Handler):
    """No-op logging handler; backport of `logging.NullHandler` (added in Python 2.7) for Python <= 2.6."""

    def emit(self, record):
        """Silently discard `record`, producing no output."""
# Attach a do-nothing handler to the package logger, so that importing gensim
# without configuring logging does not emit "no handlers could be found" warnings.
logger = logging.getLogger('gensim')
if len(logger.handlers) == 0:  # To ensure reload() doesn't add another one
    logger.addHandler(NullHandler())
| 593 | 27.285714 | 114 | py |
poincare_glove | poincare_glove-master/gensim/examples/dmlcz/gensim_genmodel.py | #!/usr/bin/env python
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s LANGUAGE METHOD
Generate topic models for the specified subcorpus. METHOD is currently one \
of 'tfidf', 'lsi', 'lda', 'rp'.
Example: ./gensim_genmodel.py any lsi
"""
import logging
import sys
import os.path
from gensim.corpora import dmlcorpus, MmCorpus
from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel
import gensim_build
# internal method parameters
DIM_RP = 300  # dimensionality for random projections
DIM_LSI = 200  # for latent semantic indexing
DIM_LDA = 100  # for latent dirichlet allocation
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logging.info("running %s", ' '.join(sys.argv))
    program = os.path.basename(sys.argv[0])
    # check and process input arguments
    if len(sys.argv) < 3:
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    language = sys.argv[1]
    method = sys.argv[2].strip().lower()
    logging.info("loading corpus mappings")
    # config determines the input/output file locations for this language subcorpus
    config = dmlcorpus.DmlConfig('%s_%s' % (gensim_build.PREFIX, language),
                                 resultDir=gensim_build.RESULT_DIR, acceptLangs=[language])
    logging.info("loading word id mapping from %s", config.resultFile('wordids.txt'))
    id2word = dmlcorpus.DmlCorpus.loadDictionary(config.resultFile('wordids.txt'))
    logging.info("loaded %i word ids", len(id2word))
    # bag-of-words corpus previously created by gensim_build.py
    corpus = MmCorpus(config.resultFile('bow.mm'))
    if method == 'tfidf':
        model = tfidfmodel.TfidfModel(corpus, id2word=id2word, normalize=True)
        model.save(config.resultFile('model_tfidf.pkl'))
    elif method == 'lda':
        model = ldamodel.LdaModel(corpus, id2word=id2word, num_topics=DIM_LDA)
        model.save(config.resultFile('model_lda.pkl'))
    elif method == 'lsi':
        # first, transform word counts to tf-idf weights
        tfidf = tfidfmodel.TfidfModel(corpus, id2word=id2word, normalize=True)
        # then find the transformation from tf-idf to latent space
        model = lsimodel.LsiModel(tfidf[corpus], id2word=id2word, num_topics=DIM_LSI)
        model.save(config.resultFile('model_lsi.pkl'))
    elif method == 'rp':
        # first, transform word counts to tf-idf weights
        tfidf = tfidfmodel.TfidfModel(corpus, id2word=id2word, normalize=True)
        # then find the transformation from tf-idf to latent space
        model = rpmodel.RpModel(tfidf[corpus], id2word=id2word, num_topics=DIM_RP)
        model.save(config.resultFile('model_rp.pkl'))
    else:
        raise ValueError('unknown topic extraction method: %s' % repr(method))
    # store the whole corpus transformed by the chosen model, for gensim_xml.py to consume
    MmCorpus.saveCorpus(config.resultFile('%s.mm' % method), model[corpus])
    logging.info("finished running %s", program)
| 2,925 | 36.037975 | 91 | py |
poincare_glove | poincare_glove-master/gensim/examples/dmlcz/gensim_xml.py | #!/usr/bin/env python
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s LANGUAGE METHOD
Generate similar.xml files, using a previously built model for METHOD.
Example: ./gensim_xml.py eng lsi
"""
import logging
import sys
import os.path
from gensim.corpora import dmlcorpus, MmCorpus
from gensim.similarities import MatrixSimilarity, SparseMatrixSimilarity
import gensim_build
# set to True to do everything EXCEPT actually writing out similar.xml files to disk.
# similar.xml files are NOT written if DRY_RUN is true.
DRY_RUN = False
# how many 'most similar' documents to store in each similar.xml?
MIN_SCORE = 0.0 # prune based on similarity score (all below MIN_SCORE are ignored)
MAX_SIMILAR = 10 # prune based on rank (at most MAX_SIMILAR are stored). set to 0 to store all of them (no limit).
# if there are no similar articles (after the pruning), do we still want to generate similar.xml?
SAVE_EMPTY = True
# xml template for similar articles
ARTICLE = """
<article weight="%(score)f">
<authors>
<author>%(author)s</author>
</authors>
<title>%(title)s</title>
<suffix>%(suffix)s</suffix>
<links>
<link source="%(source)s" id="%(intId)s" path="%(pathId)s"/>
</links>
</article>"""
# template for the whole similar.xml file (will be filled with multiple ARTICLE instances)
SIMILAR = """\
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<related>%s
</related>
"""
def generateSimilar(corpus, index, method):
    """Write a `similar_<method>.xml` file into each article's directory.

    Parameters
    ----------
    corpus : DmlCorpus
        Corpus whose `articleDir`, `documents` and `getMeta` locate articles and their metadata.
    index : iterable
        Similarity index; iterating over it yields, per document, a list of (docNo, score) pairs.
    method : str
        Name of the topic model used; becomes part of the output filename.
    """
    for docNo, topSims in enumerate(index):  # for each document
        # store similarities to the following file
        outfile = os.path.join(corpus.articleDir(docNo), 'similar_%s.xml' % method)
        articles = []  # collect similars in this list
        for docNo2, score in topSims:  # for each most similar article
            # skip low-scoring hits and the document's match with itself
            if score > MIN_SCORE and docNo != docNo2:
                source, (intId, pathId) = corpus.documents[docNo2]
                meta = corpus.getMeta(docNo2)
                suffix, author, title = '', meta.get('author', ''), meta.get('title', '')
                articles.append(ARTICLE % locals())  # add the similar article to output
                if len(articles) >= MAX_SIMILAR:
                    break
        # now `articles` holds multiple strings in similar_*.xml format
        if SAVE_EMPTY or articles:
            output = ''.join(articles)  # concat all similars to one string
            if not DRY_RUN:  # only open output files for writing if DRY_RUN is false
                logging.info("generating %s (%i similars)", outfile, len(articles))
                outfile = open(outfile, 'w')
                outfile.write(SIMILAR % output)  # add xml headers and print to file
                outfile.close()
            else:
                logging.info("would be generating %s (%i similars):%s\n", outfile, len(articles), output)
        else:
            logging.debug("skipping %s (no similar found)", outfile)
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logging.info("running %s", ' '.join(sys.argv))
    program = os.path.basename(sys.argv[0])
    # check and process input arguments
    if len(sys.argv) < 3:
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    language = sys.argv[1]
    method = sys.argv[2].strip().lower()
    logging.info("loading corpus mappings")
    config = dmlcorpus.DmlConfig('%s_%s' % (gensim_build.PREFIX, language),
                                 resultDir=gensim_build.RESULT_DIR, acceptLangs=[language])
    logging.info("loading word id mapping from %s", config.resultFile('wordids.txt'))
    id2word = dmlcorpus.DmlCorpus.loadDictionary(config.resultFile('wordids.txt'))
    logging.info("loaded %i word ids", len(id2word))
    # the corpus object built by gensim_build.py, and its model-transformed vectors from gensim_genmodel.py
    corpus = dmlcorpus.DmlCorpus.load(config.resultFile('.pkl'))
    input = MmCorpus(config.resultFile('_%s.mm' % method))
    assert len(input) == len(corpus), \
        "corpus size mismatch (%i vs %i): run ./gensim_genmodel.py again" % (len(input), len(corpus))
    # initialize structure for similarity queries
    if method == 'lsi' or method == 'rp':  # for these methods, use dense vectors
        index = MatrixSimilarity(input, num_best=MAX_SIMILAR + 1, num_features=input.numTerms)
    else:
        index = SparseMatrixSimilarity(input, num_best=MAX_SIMILAR + 1)
    # +1 above because each document's best match is itself, which generateSimilar() skips
    index.normalize = False
    generateSimilar(corpus, index, method)
    logging.info("finished running %s", program)
logging.info("finished running %s", program)
| 4,688 | 37.434426 | 115 | py |
poincare_glove | poincare_glove-master/gensim/examples/dmlcz/gensim_build.py | #!/usr/bin/env python
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s LANGUAGE
Process the repository, accepting articles in LANGUAGE (or 'any').
Store the word co-occurence matrix and id mappings, which are needed for subsequent processing.
Example: ./gensim_build.py eng
"""
import logging
import sys
import os.path
from gensim.corpora import sources, dmlcorpus
# filename prefix shared by all result files of this project
PREFIX = 'dmlcz'
# toggle between the author's local development paths and the server paths
AT_HOME = False
if AT_HOME:
    SOURCE_LIST = [
        sources.DmlCzSource('dmlcz', '/Users/kofola/workspace/dml/data/dmlcz/'),
        sources.DmlSource('numdam', '/Users/kofola/workspace/dml/data/numdam/'),
        sources.ArxmlivSource('arxmliv', '/Users/kofola/workspace/dml/data/arxmliv/'),
    ]
    RESULT_DIR = '/Users/kofola/workspace/dml/data/results'
else:
    SOURCE_LIST = [
        sources.DmlCzSource('dmlcz', '/data/dmlcz/data/share'),
        sources.DmlSource('numdam', '/data/dmlcz/data/numdam'),
        sources.ArxmlivSource('arxmliv', '/data/dmlcz/data/arxmliv'),
    ]
    RESULT_DIR = '/data/dmlcz/xrehurek/results'
def buildDmlCorpus(config):
    """Build and persist a DmlCorpus from `config`: scan sources, build the
    dictionary, prune extreme words, and save all results to the config's
    result directory. Returns the built corpus.
    """
    dml = dmlcorpus.DmlCorpus()
    dml.processConfig(config, shuffle=True)
    dml.buildDictionary()
    dml.dictionary.filterExtremes(noBelow=5, noAbove=0.3)  # ignore too (in)frequent words
    dml.save(config.resultFile('.pkl'))
    dml.saveAsText()  # save id mappings and documents as text data (matrix market format)
    return dml
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logging.info("running %s", ' '.join(sys.argv))
    program = os.path.basename(sys.argv[0])
    # check and process input arguments
    if len(sys.argv) < 2:
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    language = sys.argv[1]
    # construct the config, which holds information about sources, data file filenames etc.
    config = dmlcorpus.DmlConfig('%s_%s' % (PREFIX, language), resultDir=RESULT_DIR, acceptLangs=[language])
    for source in SOURCE_LIST:
        config.addSource(source)
    buildDmlCorpus(config)
    logging.info("finished running %s", program)
logging.info("finished running %s", program)
| 2,276 | 28.960526 | 108 | py |
poincare_glove | poincare_glove-master/gensim/examples/dmlcz/dmlcorpus.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Corpus for the DML-CZ project.
"""
import logging
import os.path
from gensim import interfaces, matutils
import dictionary # for constructing word->id mappings
logger = logging.getLogger('gensim.corpora.dmlcorpus')
class DmlConfig(object):
    """
    DmlConfig contains parameters necessary for the abstraction of a 'corpus of
    articles' (see the `DmlCorpus` class).

    Articles may come from different sources (=different locations on disk/network,
    different file formats etc.), so the main purpose of DmlConfig is to keep all
    sources in one place.

    Apart from glueing sources together, DmlConfig also decides where to store
    output files and which articles to accept for the corpus (= an additional filter
    over the sources).
    """
    def __init__(self, configId, resultDir, acceptLangs=None):
        self.resultDir = resultDir  # output files will be stored in this directory
        self.configId = configId  # id doubles as the filename prefix of all result files
        self.sources = {}  # all article sources; see sources.DmlSource class for an example of source
        if acceptLangs is None:  # which languages to accept
            acceptLangs = {'any'}  # if not specified, accept all languages (including unknown/unspecified)
        self.acceptLangs = set(acceptLangs)
        logger.info('initialized %s', self)

    def resultFile(self, fname):
        """Return the full path for result file `fname`, prefixed with this config's id."""
        return os.path.join(self.resultDir, self.configId + '_' + fname)

    def acceptArticle(self, metadata):
        """Return True iff an article with the given `metadata` dict passes the language filter."""
        lang = metadata.get('language', 'unk')  # 'unk' = unknown/unspecified language
        if 'any' not in self.acceptLangs and lang not in self.acceptLangs:
            return False
        return True

    def addSource(self, source):
        """Register another article `source`; its string form must be unique within this config."""
        sourceId = str(source)
        assert sourceId not in self.sources, "source %s already present in the config!" % sourceId
        self.sources[sourceId] = source

    def __str__(self):
        # iterate the dict directly instead of the Python 2-only dict.iterkeys(),
        # so this also works on Python 3
        return ("DmlConfig(id=%s, sources=[%s], acceptLangs=[%s])" %
                (self.configId, ', '.join(self.sources), ', '.join(self.acceptLangs)))
# endclass DmlConfig
# endclass DmlConfig
class DmlCorpus(interfaces.CorpusABC):
"""
DmlCorpus implements a collection of articles. It is initialized via a DmlConfig
object, which holds information about where to look for the articles and how
to process them.
Apart from being a regular corpus (bag-of-words iterable with a `len()` method),
DmlCorpus has methods for building a dictionary (mapping between words and
their ids).
"""
def __init__(self):
self.documents = []
self.config = None
self.dictionary = dictionary.Dictionary()
def __len__(self):
return len(self.documents)
def __iter__(self):
"""
The function that defines a corpus -- iterating over the corpus yields
bag-of-words vectors, one for each document.
A bag-of-words vector is simply a list of ``(tokenId, tokenCount)`` 2-tuples.
"""
for docNo, (sourceId, docUri) in enumerate(self.documents):
source = self.config.sources[sourceId]
contents = source.getContent(docUri)
words = [source.normalizeWord(word) for word in source.tokenize(contents)]
yield self.dictionary.doc2bow(words, allowUpdate=False)
def buildDictionary(self):
"""
Populate dictionary mapping and statistics.
This is done by sequentially retrieving the article fulltexts, splitting
them into tokens and converting tokens to their ids (creating new ids as
necessary).
"""
logger.info("creating dictionary from %i articles", len(self.documents))
self.dictionary = dictionary.Dictionary()
numPositions = 0
for docNo, (sourceId, docUri) in enumerate(self.documents):
if docNo % 1000 == 0:
logger.info("PROGRESS: at document #%i/%i (%s, %s)", docNo, len(self.documents), sourceId, docUri)
source = self.config.sources[sourceId]
contents = source.getContent(docUri)
words = [source.normalizeWord(word) for word in source.tokenize(contents)]
numPositions += len(words)
# convert to bag-of-words, but ignore the result -- here we only care about updating token ids
_ = self.dictionary.doc2bow(words, allowUpdate=True) # noqa:F841
logger.info(
"built %s from %i documents (total %i corpus positions)",
self.dictionary, len(self.documents), numPositions
)
def processConfig(self, config, shuffle=False):
"""
Parse the directories specified in the config, looking for suitable articles.
This updates the self.documents var, which keeps a list of (source id,
article uri) 2-tuples. Each tuple is a unique identifier of one article.
Note that some articles are ignored based on config settings (for example
if the article's language doesn't match any language specified in the
config etc.).
"""
self.config = config
self.documents = []
logger.info("processing config %s", config)
for sourceId, source in config.sources.iteritems():
logger.info("processing source '%s'", sourceId)
accepted = []
for articleUri in source.findArticles():
meta = source.getMeta(articleUri) # retrieve metadata (= dictionary of key->value)
if config.acceptArticle(meta): # do additional filtering on articles, based on the article's metadata
accepted.append((sourceId, articleUri))
logger.info("accepted %i articles for source '%s'", len(accepted), sourceId)
self.documents.extend(accepted)
if not self.documents:
logger.warning('no articles at all found from the config; something went wrong!')
if shuffle:
logger.info("shuffling %i documents for random order", len(self.documents))
import random
random.shuffle(self.documents)
logger.info("accepted total of %i articles for %s", len(self.documents), str(config))
def saveDictionary(self, fname):
logger.info("saving dictionary mapping to %s", fname)
fout = open(fname, 'w')
for tokenId, token in self.dictionary.id2token.iteritems():
fout.write("%i\t%s\n" % (tokenId, token))
fout.close()
@staticmethod
def loadDictionary(fname):
result = {}
for lineNo, line in enumerate(open(fname)):
pair = line[:-1].split('\t')
if len(pair) != 2:
continue
wordId, word = pair
result[int(wordId)] = word
return result
def saveDocuments(self, fname):
logger.info("saving documents mapping to %s", fname)
fout = open(fname, 'w')
for docNo, docId in enumerate(self.documents):
sourceId, docUri = docId
intId, pathId = docUri
fout.write("%i\t%s\n" % (docNo, repr(docId)))
fout.close()
    def saveAsText(self):
        """
        Store the corpus to disk, in a human-readable text format.

        This actually saves three files:

        1. Pure document-term co-occurrence frequency counts, as a Matrix Market file.
        2. Token to integer mapping, as a text file.
        3. Document to document URI mapping, as a text file.

        The exact filesystem paths and filenames are determined from the config.
        """
        self.saveDictionary(self.config.resultFile('wordids.txt'))
        self.saveDocuments(self.config.resultFile('docids.txt'))
        # serialize self (iterable of bag-of-words vectors) in Matrix Market format
        matutils.MmWriter.writeCorpus(self.config.resultFile('bow.mm'), self)
def articleDir(self, docNo):
"""
Return absolute normalized path on filesystem to article no. `docNo`.
"""
sourceId, (_, outPath) = self.documents[docNo]
source = self.config.sources[sourceId]
return os.path.join(source.baseDir, outPath)
def getMeta(self, docNo):
"""
Return metadata for article no. `docNo`.
"""
sourceId, uri = self.documents[docNo]
source = self.config.sources[sourceId]
return source.getMeta(uri)
# endclass DmlCorpus
| 8,424 | 37.47032 | 118 | py |
poincare_glove | poincare_glove-master/gensim/examples/dmlcz/__init__.py | 0 | 0 | 0 | py | |
poincare_glove | poincare_glove-master/gensim/examples/dmlcz/sources.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
This module contains implementations (= different classes) which encapsulate the
idea of a Digital Library document source.
A document source is basically a collection of articles sharing the same format,
same location (type of access), same way of parsing them etc.
Different sources can be aggregated into a single corpus, which is what the
`DmlCorpus` class does (see the `dmlcorpus` module).
"""
import logging
import os
import os.path
import re
import xml.sax # for parsing arxmliv articles
from gensim import utils
import sys
if sys.version_info[0] >= 3:
unicode = str
PAT_TAG = re.compile(r'<(.*?)>(.*)</.*?>')
logger = logging.getLogger('gensim.corpora.sources')
class ArticleSource(object):
    """
    Objects of this class describe a single source of articles.
    A source is an abstraction over where the documents reside (the findArticles()
    method), how to retrieve their fulltexts, their metadata, how to tokenize the
    articles and how to normalize the tokens.
    What is NOT abstracted away (ie. must hold for all sources) is the idea of
    article identifiers (URIs), which uniquely identify each article within
    one source.
    This class is just an ABC interface; see eg. DmlSource or ArxmlivSource classes
    for concrete instances.
    """
    def __init__(self, sourceId):
        # sourceId is a short string identifying this source within a corpus config
        self.sourceId = sourceId
    def __str__(self):
        return self.sourceId
    def findArticles(self):
        """Yield URIs of all articles available from this source."""
        raise NotImplementedError('Abstract Base Class')
    def getContent(self, uri):
        """Return the full text of the article identified by `uri` as one string."""
        raise NotImplementedError('Abstract Base Class')
    def getMeta(self, uri):
        """Return metadata of the article identified by `uri` as a dict."""
        raise NotImplementedError('Abstract Base Class')
    def tokenize(self, content):
        """Split raw article `content` into a list of tokens."""
        raise NotImplementedError('Abstract Base Class')
    def normalizeWord(self, word):
        """Return the canonical (e.g. lowercased) form of `word`."""
        raise NotImplementedError('Abstract Base Class')
# endclass ArticleSource
class DmlSource(ArticleSource):
    """
    Article source for articles in DML format (DML-CZ, Numdam):
    1) articles = directories starting with '#'
    2) content is stored in fulltext.txt
    3) metadata are stored in meta.xml
    Article URI is currently (a part of) the article's path on filesystem.
    See the ArticleSource class for general info on sources.
    """
    def __init__(self, sourceId, baseDir):
        self.sourceId = sourceId
        self.baseDir = os.path.normpath(baseDir)  # normalize once; idFromDir relies on this exact prefix
    def __str__(self):
        return self.sourceId
    @classmethod
    def parseDmlMeta(cls, xmlfile):
        """
        Parse out all fields from meta.xml, return them as a dictionary.

        Only text between the <article> and </article> tags is scanned; one
        `<tag>value</tag>` pair per line is collected. The 'msc' tag may repeat
        and is accumulated into a list; 'idMR' values lose their "MR" prefix.
        """
        result = {}
        xml = open(xmlfile)
        for line in xml:
            if line.find('<article>') >= 0:  # skip until the beginning of <article> tag
                break
        for line in xml:
            if line.find('</article>') >= 0:  # end of <article>, we're done
                break
            p = re.search(PAT_TAG, line)  # PAT_TAG matches a single <tag>content</tag> on one line
            if p:
                name, cont = p.groups()
                name = name.split()[0]  # drop any attributes from the opening tag
                name, cont = name.strip(), cont.strip()
                if name == 'msc':
                    if len(cont) != 5:  # MSC codes are expected to be exactly 5 characters
                        logger.warning('invalid MSC=%s in %s', cont, xmlfile)
                    result.setdefault('msc', []).append(cont)
                    continue
                if name == 'idMR':
                    cont = cont[2:]  # omit MR from MR123456
                if name and cont:
                    result[name] = cont
        xml.close()
        return result
    def idFromDir(self, path):
        # URI = (directory name after the last '#', path relative to baseDir)
        assert len(path) > len(self.baseDir)
        intId = path[1 + path.rfind('#'):]
        pathId = path[1 + len(self.baseDir):]
        return (intId, pathId)
    def isArticle(self, path):
        # in order to be valid, the article directory must start with '#'
        if not os.path.basename(path).startswith('#'):
            return False
        # and contain the fulltext.txt file
        if not os.path.exists(os.path.join(path, 'fulltext.txt')):
            logger.info('missing fulltext in %s', path)
            return False
        # and also the meta.xml file
        if not os.path.exists(os.path.join(path, 'meta.xml')):
            logger.info('missing meta.xml in %s', path)
            return False
        return True
    def findArticles(self):
        """Walk self.baseDir and yield the URI of every valid article directory."""
        dirTotal = artAccepted = 0
        logger.info("looking for '%s' articles inside %s", self.sourceId, self.baseDir)
        for root, dirs, files in os.walk(self.baseDir):
            dirTotal += 1
            root = os.path.normpath(root)
            if self.isArticle(root):
                artAccepted += 1
                yield self.idFromDir(root)
        logger.info('%i directories processed, found %i articles', dirTotal, artAccepted)
    def getContent(self, uri):
        """
        Return article content as a single large string.
        """
        intId, pathId = uri
        filename = os.path.join(self.baseDir, pathId, 'fulltext.txt')
        return open(filename).read()
    def getMeta(self, uri):
        """
        Return article metadata as an attribute->value dictionary.
        """
        intId, pathId = uri
        filename = os.path.join(self.baseDir, pathId, 'meta.xml')
        return DmlSource.parseDmlMeta(filename)
    def tokenize(self, content):
        # tokens are returned as utf8-encoded bytestrings; pure digits are dropped
        return [token.encode('utf8') for token in utils.tokenize(content, errors='ignore') if not token.isdigit()]
    def normalizeWord(self, word):
        # on Python 3, `unicode` is aliased to `str` at module top, so this decodes the bytestring
        wordU = unicode(word, 'utf8')
        return wordU.lower().encode('utf8')  # lowercase and then convert back to bytestring
# endclass DmlSource
class DmlCzSource(DmlSource):
    """
    Article source for articles in DML-CZ format:
    1) articles = directories starting with '#'
    2) content is stored in fulltext.txt or fulltext-dspace.txt
    3) there exists a dspace_id file, containing internal dmlcz id
    4) metadata are stored in meta.xml
    See the ArticleSource class for general info on sources.
    """
    def idFromDir(self, path):
        # URI = (internal DML-CZ id read from the dspace_id file, path relative to baseDir)
        assert len(path) > len(self.baseDir)
        dmlczId = open(os.path.join(path, 'dspace_id')).read().strip()
        pathId = path[1 + len(self.baseDir):]
        return (dmlczId, pathId)
    def isArticle(self, path):
        # in order to be valid, the article directory must start with '#'
        if not os.path.basename(path).startswith('#'):
            return False
        # and contain a dspace_id file
        if not (os.path.exists(os.path.join(path, 'dspace_id'))):
            logger.info('missing dspace_id in %s', path)
            return False
        # and contain either fulltext.txt or fulltext-dspace.txt file
        if not (os.path.exists(os.path.join(path, 'fulltext.txt'))
                or os.path.exists(os.path.join(path, 'fulltext-dspace.txt'))):
            logger.info('missing fulltext in %s', path)
            return False
        # and contain the meta.xml file
        if not os.path.exists(os.path.join(path, 'meta.xml')):
            logger.info('missing meta.xml in %s', path)
            return False
        return True
    def getContent(self, uri):
        """
        Return article content as a single large string.
        """
        intId, pathId = uri
        filename1 = os.path.join(self.baseDir, pathId, 'fulltext.txt')
        filename2 = os.path.join(self.baseDir, pathId, 'fulltext-dspace.txt')
        if os.path.exists(filename1) and os.path.exists(filename2):
            # if both fulltext and dspace files exist, pick the larger one
            if os.path.getsize(filename1) < os.path.getsize(filename2):
                filename = filename2
            else:
                filename = filename1
        elif os.path.exists(filename1):
            filename = filename1
        else:
            assert os.path.exists(filename2)
            filename = filename2
        return open(filename).read()
# endclass DmlCzSource
class ArxmlivSource(ArticleSource):
    """
    Article source for articles in arxmliv format:
    1) articles = directories starting with '#'
    2) content is stored in tex.xml
    3) metadata in special tags within tex.xml
    Article URI is currently (a part of) the article's path on filesystem.
    See the ArticleSource class for general info on sources.
    """
    class ArxmlivContentHandler(xml.sax.handler.ContentHandler):
        # SAX handler collecting plain-text tokens and inline-math tokens from tex.xml.
        def __init__(self):
            self.path = ['']  # help structure for sax event parsing
            self.tokens = []  # will contain tokens once parsing is finished
        def startElement(self, name, attr):
            # for math tokens, we only care about Math elements directly below <p>
            if name == 'Math' and self.path[-1] == 'p' and attr.get('mode', '') == 'inline':
                tex = attr.get('tex', '')
                if tex and not tex.isdigit():
                    # NOTE(review): on Python 3, tex.encode('utf8') is a bytes object, so this
                    # interpolates as "$b'...'$" -- confirm which interpreter version is targeted.
                    self.tokens.append('$%s$' % tex.encode('utf8'))
            self.path.append(name)
        def endElement(self, name):
            self.path.pop()
        def characters(self, text):
            # for text, we only care about tokens directly within the <p> tag
            if self.path[-1] == 'p':
                tokens = [
                    token.encode('utf8') for token in utils.tokenize(text, errors='ignore') if not token.isdigit()
                ]
                self.tokens.extend(tokens)
    # endclass ArxmlivHandler
    class ArxmlivErrorHandler(xml.sax.handler.ErrorHandler):
        # Python2.5 implementation of xml.sax is broken -- character streams and
        # byte encodings of InputSource are ignored, bad things sometimes happen
        # in buffering of multi-byte files (such as utf8), characters get cut in
        # the middle, resulting in invalid tokens...
        # This is not really a problem with arxmliv xml files themselves, so ignore
        # these errors silently.
        def error(self, exception):
            pass
        warning = fatalError = error
    # endclass ArxmlivErrorHandler
    def __init__(self, sourceId, baseDir):
        self.sourceId = sourceId
        self.baseDir = os.path.normpath(baseDir)  # normalize once; idFromDir relies on this exact prefix
    def __str__(self):
        return self.sourceId
    def idFromDir(self, path):
        # URI = (directory name after the last '#', path relative to baseDir)
        assert len(path) > len(self.baseDir)
        intId = path[1 + path.rfind('#'):]
        pathId = path[1 + len(self.baseDir):]
        return (intId, pathId)
    def isArticle(self, path):
        # in order to be valid, the article directory must start with '#'
        if not os.path.basename(path).startswith('#'):
            return False
        # and contain the tex.xml file
        if not os.path.exists(os.path.join(path, 'tex.xml')):
            logger.warning('missing tex.xml in %s', path)
            return False
        return True
    def findArticles(self):
        """Walk self.baseDir and yield the URI of every valid article directory."""
        dirTotal = artAccepted = 0
        logger.info("looking for '%s' articles inside %s", self.sourceId, self.baseDir)
        for root, dirs, files in os.walk(self.baseDir):
            dirTotal += 1
            root = os.path.normpath(root)
            if self.isArticle(root):
                artAccepted += 1
                yield self.idFromDir(root)
        logger.info('%i directories processed, found %i articles', dirTotal, artAccepted)
    def getContent(self, uri):
        """
        Return article content as a single large string.
        """
        intId, pathId = uri
        filename = os.path.join(self.baseDir, pathId, 'tex.xml')
        return open(filename).read()
    def getMeta(self, uri):
        """
        Return article metadata as an attribute->value dictionary.
        """
        # intId, pathId = uri
        # filename = os.path.join(self.baseDir, pathId, 'tex.xml')
        return {'language': 'eng'}  # TODO maybe parse out some meta; but currently not needed for anything...
    def tokenize(self, content):
        """
        Parse tokens out of xml. There are two types of token: normal text and
        mathematics. Both are returned interspersed in a single list, in the same
        order as they appeared in the content.

        The math tokens will be returned in the form $tex_expression$, ie. with
        a dollar sign prefix and suffix.
        """
        handler = ArxmlivSource.ArxmlivContentHandler()
        xml.sax.parseString(content, handler, ArxmlivSource.ArxmlivErrorHandler())
        return handler.tokens
    def normalizeWord(self, word):
        if word[0] == '$':  # ignore math tokens
            return word
        wordU = unicode(word, 'utf8')
        return wordU.lower().encode('utf8')  # lowercase and then convert back to bytestring
# endclass ArxmlivSource
| 12,851 | 35.101124 | 114 | py |
poincare_glove | poincare_glove-master/gensim/summarization/summarizer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module provides functions for summarizing texts. Summarizing is based on
ranks of text sentences using a variation of the TextRank algorithm [1]_.
.. [1] Federico Barrios, Federico López, Luis Argerich, Rosita Wachenchauzer (2016).
Variations of the Similarity Function of TextRank for Automated Summarization,
https://arxiv.org/abs/1602.03606
Data
----
.. data:: INPUT_MIN_LENGTH - Minimal number of sentences in text
.. data:: WEIGHT_THRESHOLD - Minimal weight of edge between graph nodes. Smaller weights set to zero.
Example
-------
>>> from gensim.summarization.summarizer import summarize
>>> text = '''Rice Pudding - Poem by Alan Alexander Milne
... What is the matter with Mary Jane?
... She's crying with all her might and main,
... And she won't eat her dinner - rice pudding again -
... What is the matter with Mary Jane?
... What is the matter with Mary Jane?
... I've promised her dolls and a daisy-chain,
... And a book about animals - all in vain -
... What is the matter with Mary Jane?
... What is the matter with Mary Jane?
... She's perfectly well, and she hasn't a pain;
... But, look at her, now she's beginning again! -
... What is the matter with Mary Jane?
... What is the matter with Mary Jane?
... I've promised her sweets and a ride in the train,
... And I've begged her to stop for a bit and explain -
... What is the matter with Mary Jane?
... What is the matter with Mary Jane?
... She's perfectly well and she hasn't a pain,
... And it's lovely rice pudding for dinner again!
... What is the matter with Mary Jane?'''
>>> print(summarize(text))
And she won't eat her dinner - rice pudding again -
I've promised her dolls and a daisy-chain,
I've promised her sweets and a ride in the train,
And it's lovely rice pudding for dinner again!
"""
import logging
from gensim.utils import deprecated
from gensim.summarization.pagerank_weighted import pagerank_weighted as _pagerank
from gensim.summarization.textcleaner import clean_text_by_sentences as _clean_text_by_sentences
from gensim.summarization.commons import build_graph as _build_graph
from gensim.summarization.commons import remove_unreachable_nodes as _remove_unreachable_nodes
from gensim.summarization.bm25 import get_bm25_weights as _bm25_weights
from gensim.corpora import Dictionary
from math import log10 as _log10
from six.moves import xrange
INPUT_MIN_LENGTH = 10
WEIGHT_THRESHOLD = 1.e-3
logger = logging.getLogger(__name__)
def _set_graph_edge_weights(graph):
    """Sets weights using BM25 algorithm. Leaves small weights as zeroes. If all weights are fairly small,
    forces all weights to 1, inplace.

    Parameters
    ----------
    graph : :class:`~gensim.summarization.graph.Graph`
        Given graph.
    """
    documents = graph.nodes()
    weights = _bm25_weights(documents)
    # add both directions explicitly; BM25 is asymmetric, so weights[i][j] may differ from weights[j][i]
    for i in xrange(len(documents)):
        for j in xrange(len(documents)):
            # skip self-loops and edges below the significance threshold
            if i == j or weights[i][j] < WEIGHT_THRESHOLD:
                continue
            sentence_1 = documents[i]
            sentence_2 = documents[j]
            edge_1 = (sentence_1, sentence_2)
            edge_2 = (sentence_2, sentence_1)
            if not graph.has_edge(edge_1):
                graph.add_edge(edge_1, weights[i][j])
            if not graph.has_edge(edge_2):
                graph.add_edge(edge_2, weights[j][i])
    # Handles the case in which all similarities are zero.
    # The resultant summary will consist of random sentences.
    if all(graph.edge_weight(edge) == 0 for edge in graph.edges()):
        _create_valid_graph(graph)
def _create_valid_graph(graph):
"""Sets all weights of edges for different edges as 1, inplace.
Parameters
----------
graph : :class:`~gensim.summarization.graph.Graph`
Given graph.
"""
nodes = graph.nodes()
for i in xrange(len(nodes)):
for j in xrange(len(nodes)):
if i == j:
continue
edge = (nodes[i], nodes[j])
if graph.has_edge(edge):
graph.del_edge(edge)
graph.add_edge(edge, 1)
@deprecated("Function will be removed in 4.0.0")
def _get_doc_length(doc):
    """Get length of (tokenized) document.

    Parameters
    ----------
    doc : list of (list of (tuple of int))
        Given document.

    Returns
    -------
    int
        Length of document, i.e. the sum of the count fields of its entries.
    """
    return sum(entry[1] for entry in doc)
@deprecated("Function will be removed in 4.0.0")
def _get_similarity(doc1, doc2, vec1, vec2):
    """Returns similarity of two documents.

    Parameters
    ----------
    doc1 : list of (list of (tuple of int))
        First document.
    doc2 : list of (list of (tuple of int))
        Second document.
    vec1 : array
        Vector of the first document; must support `.dot` and `.toarray`
        (scipy-sparse-like).
    vec2 : array
        Vector of the second document.

    Returns
    -------
    float
        Similarity of two documents.
    """
    numerator = vec1.dot(vec2.transpose()).toarray()[0][0]
    length_1 = _get_doc_length(doc1)
    length_2 = _get_doc_length(doc2)
    # normalize by log document lengths; guard against log of zero-length docs
    denominator = _log10(length_1) + _log10(length_2) if length_1 > 0 and length_2 > 0 else 0
    return numerator / denominator if denominator != 0 else 0
def _build_corpus(sentences):
    """Construct a bag-of-words corpus from the provided sentences.

    Parameters
    ----------
    sentences : list of :class:`~gensim.summarization.syntactic_unit.SyntacticUnit`
        Given sentences.

    Returns
    -------
    list of list of (int, int)
        Corpus built from sentences.
    """
    tokenized = [unit.token.split() for unit in sentences]
    vocabulary = Dictionary(tokenized)
    return [vocabulary.doc2bow(tokens) for tokens in tokenized]
def _get_important_sentences(sentences, corpus, important_docs):
    """Map the selected documents back to their sentence objects.

    Parameters
    ----------
    sentences : list of :class:`~gensim.summarization.syntactic_unit.SyntacticUnit`
        Given sentences.
    corpus : list of list of (int, int)
        Provided corpus (parallel to `sentences`).
    important_docs : list of list of (int, int)
        Most important documents of the corpus.

    Returns
    -------
    list of :class:`~gensim.summarization.syntactic_unit.SyntacticUnit`
        Most important sentences, in the order of `important_docs`.
    """
    doc2sentence = dict(zip(_build_hasheable_corpus(corpus), sentences))
    return [doc2sentence[tuple(doc)] for doc in important_docs]
def _get_sentences_with_word_count(sentences, word_count):
"""Get list of sentences. Total number of returned words close to specified `word_count`.
Parameters
----------
sentences : list of :class:`~gensim.summarization.syntactic_unit.SyntacticUnit`
Given sentences.
word_count : int or None
Number of returned words. If None full most important sentences will be returned.
Returns
-------
list of :class:`~gensim.summarization.syntactic_unit.SyntacticUnit`
Most important sentences.
"""
length = 0
selected_sentences = []
# Loops until the word count is reached.
for sentence in sentences:
words_in_sentence = len(sentence.text.split())
# Checks if the inclusion of the sentence gives a better approximation
# to the word parameter.
if abs(word_count - length - words_in_sentence) > abs(word_count - length):
return selected_sentences
selected_sentences.append(sentence)
length += words_in_sentence
return selected_sentences
def _extract_important_sentences(sentences, corpus, important_docs, word_count):
    """Get most important sentences of the `corpus`.

    Parameters
    ----------
    sentences : list of :class:`~gensim.summarization.syntactic_unit.SyntacticUnit`
        Given sentences.
    corpus : list of list of (int, int)
        Provided corpus.
    important_docs : list of list of (int, int)
        Most important docs of the corpus.
    word_count : int or None
        Number of returned words. If None, all most important sentences are returned.

    Returns
    -------
    list of :class:`~gensim.summarization.syntactic_unit.SyntacticUnit`
        Most important sentences.
    """
    important_sentences = _get_important_sentences(sentences, corpus, important_docs)
    if word_count is None:
        # No word budget requested: the ratio-based selection already happened upstream.
        return important_sentences
    # A word budget overrides the ratio; trim the selection to ~word_count words.
    return _get_sentences_with_word_count(important_sentences, word_count)
def _format_results(extracted_sentences, split):
"""Returns `extracted_sentences` in desired format.
Parameters
----------
extracted_sentences : list of :class:~gensim.summarization.syntactic_unit.SyntacticUnit
Given sentences.
split : bool
If True sentences will be returned as list. Otherwise sentences will be merged and returned as string.
Returns
-------
list of str
If `split` **OR**
str
Formatted result.
"""
if split:
return [sentence.text for sentence in extracted_sentences]
return "\n".join([sentence.text for sentence in extracted_sentences])
def _build_hasheable_corpus(corpus):
"""Hashes and get `corpus`.
Parameters
----------
corpus : list of list of (int, int)
Given corpus.
Returns
-------
list of list of (int, int)
Hashable corpus.
"""
return [tuple(doc) for doc in corpus]
def summarize_corpus(corpus, ratio=0.2):
    """Get a list of the most important documents of a corpus using a variation of the TextRank algorithm [1]_.
    Used as helper for summarize :func:`~gensim.summarization.summarizer.summarizer`

    Note
    ----
    The input must have at least :const:`~gensim.summarization.summarizer.INPUT_MIN_LENGTH` documents for the summary
    to make sense.

    Parameters
    ----------
    corpus : list of list of (int, int)
        Given corpus.
    ratio : float, optional
        Number between 0 and 1 that determines the proportion of the number of
        sentences of the original text to be chosen for the summary.

    Returns
    -------
    list of str
        Most important documents of given `corpus` sorted by the document score, highest first.

    """
    hashable_corpus = _build_hasheable_corpus(corpus)
    # If the corpus is empty, the function ends.
    if len(corpus) == 0:
        logger.warning("Input corpus is empty.")
        return []
    # Warns the user if there are too few documents.
    if len(corpus) < INPUT_MIN_LENGTH:
        logger.warning("Input corpus is expected to have at least %d documents.", INPUT_MIN_LENGTH)
    # Build a document graph with BM25 similarities as edge weights, then
    # drop documents with no sufficiently-similar neighbor.
    graph = _build_graph(hashable_corpus)
    _set_graph_edge_weights(graph)
    _remove_unreachable_nodes(graph)
    # Cannot calculate eigenvectors if number of unique documents in corpus < 3.
    # Warns user to add more text. The function ends.
    if len(graph.nodes()) < 3:
        logger.warning("Please add more sentences to the text. The number of reachable nodes is below 3")
        return []
    # Rank documents by PageRank score and keep the top `ratio` fraction.
    pagerank_scores = _pagerank(graph)
    hashable_corpus.sort(key=lambda doc: pagerank_scores.get(doc, 0), reverse=True)
    return [list(doc) for doc in hashable_corpus[:int(len(corpus) * ratio)]]
def summarize(text, ratio=0.2, word_count=None, split=False):
    """Get a summarized version of the given text.

    The output summary will consist of the most representative sentences
    and will be returned as a string, divided by newlines.

    Note
    ----
    The input should be a string, and must be longer than :const:`~gensim.summarization.summarizer.INPUT_MIN_LENGTH`
    sentences for the summary to make sense.
    The text will be split into sentences using the split_sentences method in the :mod:`gensim.summarization.texcleaner`
    module. Note that newlines divide sentences.

    Parameters
    ----------
    text : str
        Given text.
    ratio : float, optional
        Number between 0 and 1 that determines the proportion of the number of
        sentences of the original text to be chosen for the summary.
    word_count : int or None, optional
        Determines how many words will the output contain.
        If both parameters are provided, the ratio will be ignored.
    split : bool, optional
        If True, list of sentences will be returned. Otherwise joined
        strings will be returned.

    Returns
    -------
    list of str
        If `split` **OR**
    str
        Most representative sentences of given the text.

    Raises
    ------
    ValueError
        If `text` contains exactly one sentence.

    """
    # Gets a list of processed sentences.
    sentences = _clean_text_by_sentences(text)
    # If no sentence could be identified, the function ends.
    if len(sentences) == 0:
        logger.warning("Input text is empty.")
        return [] if split else u""
    # If only one sentence is present, the function raises an error (Avoids ZeroDivisionError).
    if len(sentences) == 1:
        raise ValueError("input must have more than one sentence")
    # Warns if the text is too short.
    if len(sentences) < INPUT_MIN_LENGTH:
        logger.warning("Input text is expected to have at least %d sentences.", INPUT_MIN_LENGTH)
    corpus = _build_corpus(sentences)
    # A word budget overrides the ratio: rank everything (ratio=1), trim by words later.
    most_important_docs = summarize_corpus(corpus, ratio=ratio if word_count is None else 1)
    # If couldn't get important docs, the algorithm ends.
    if not most_important_docs:
        logger.warning("Couldn't get relevant sentences.")
        return [] if split else u""
    # Extracts the most important sentences with the selected criterion.
    extracted_sentences = _extract_important_sentences(sentences, corpus, most_important_docs, word_count)
    # Sorts the extracted sentences by apparition order in the original text.
    extracted_sentences.sort(key=lambda s: s.index)
    return _format_results(extracted_sentences, split)
| 14,067 | 30.684685 | 120 | py |
poincare_glove | poincare_glove-master/gensim/summarization/bm25.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module contains function of computing rank scores for documents in
corpus and helper class `BM25` used in calculations. Original algorithm
descibed in [1]_, also you may check Wikipedia page [2]_.
.. [1] Robertson, Stephen; Zaragoza, Hugo (2009). The Probabilistic Relevance Framework: BM25 and Beyond,
http://www.staff.city.ac.uk/~sb317/papers/foundations_bm25_review.pdf
.. [2] Okapi BM25 on Wikipedia, https://en.wikipedia.org/wiki/Okapi_BM25
Examples
--------
>>> from gensim.summarization.bm25 import get_bm25_weights
>>> corpus = [
... ["black", "cat", "white", "cat"],
... ["cat", "outer", "space"],
... ["wag", "dog"]
... ]
>>> result = get_bm25_weights(corpus)
Data:
-----
.. data:: PARAM_K1 - Free smoothing parameter for BM25.
.. data:: PARAM_B - Free smoothing parameter for BM25.
.. data:: EPSILON - Constant used for negative idf of document in corpus.
"""
import math
from six import iteritems
from six.moves import xrange
PARAM_K1 = 1.5
PARAM_B = 0.75
EPSILON = 0.25
class BM25(object):
    """Implementation of Best Matching 25 ranking function.

    Attributes
    ----------
    corpus_size : int
        Size of corpus (number of documents).
    avgdl : float
        Average length of document in `corpus`.
    corpus : list of list of str
        Corpus of documents.
    f : list of dicts of int
        Per-document term frequencies. Words used as keys and frequencies as values.
    df : dict
        Document frequencies: for each word, the number of documents of `corpus` containing it.
    idf : dict
        Inverse document frequencies for the whole `corpus`. Words used as keys, idf values as values.
    doc_len : list of int
        List of document lengths.
    """
    def __init__(self, corpus):
        """
        Parameters
        ----------
        corpus : list of list of str
            Given corpus.
        """
        self.corpus_size = len(corpus)
        self.avgdl = sum(float(len(x)) for x in corpus) / self.corpus_size
        self.corpus = corpus
        self.f = []
        self.df = {}
        self.idf = {}
        self.doc_len = []
        self.initialize()
    def initialize(self):
        """Calculates frequencies of terms in documents and in corpus. Also computes inverse document frequencies."""
        for document in self.corpus:
            frequencies = {}
            self.doc_len.append(len(document))
            for word in document:
                if word not in frequencies:
                    frequencies[word] = 0
                frequencies[word] += 1
            self.f.append(frequencies)
            # df counts each word once per document that contains it
            for word, freq in iteritems(frequencies):
                if word not in self.df:
                    self.df[word] = 0
                self.df[word] += 1
        for word, freq in iteritems(self.df):
            # standard BM25 idf; becomes negative for words present in more than half the documents
            self.idf[word] = math.log(self.corpus_size - freq + 0.5) - math.log(freq + 0.5)
    def get_score(self, document, index, average_idf):
        """Computes BM25 score of given `document` in relation to item of corpus selected by `index`.

        Parameters
        ----------
        document : list of str
            Document to be scored.
        index : int
            Index of document in corpus selected to score with `document`.
        average_idf : float
            Average idf in corpus.

        Returns
        -------
        float
            BM25 score.
        """
        score = 0
        for word in document:
            if word not in self.f[index]:
                continue
            # clamp negative idf to a fraction (EPSILON) of the average idf
            idf = self.idf[word] if self.idf[word] >= 0 else EPSILON * average_idf
            score += (idf * self.f[index][word] * (PARAM_K1 + 1)
                      / (self.f[index][word] + PARAM_K1 * (1 - PARAM_B + PARAM_B * self.doc_len[index] / self.avgdl)))
        return score
    def get_scores(self, document, average_idf):
        """Computes and returns BM25 scores of given `document` in relation to
        every item in corpus.

        Parameters
        ----------
        document : list of str
            Document to be scored.
        average_idf : float
            Average idf in corpus.

        Returns
        -------
        list of float
            BM25 scores.
        """
        scores = []
        for index in xrange(self.corpus_size):
            score = self.get_score(document, index, average_idf)
            scores.append(score)
        return scores
def get_bm25_weights(corpus):
    """Returns BM25 scores (weights) of documents in corpus.
    Each document is weighted against every document in the given corpus.

    Parameters
    ----------
    corpus : list of list of str
        Corpus of documents.

    Returns
    -------
    list of list of float
        BM25 scores.

    Examples
    --------
    >>> from gensim.summarization.bm25 import get_bm25_weights
    >>> corpus = [
    ...     ["black", "cat", "white", "cat"],
    ...     ["cat", "outer", "space"],
    ...     ["wag", "dog"]
    ... ]
    >>> result = get_bm25_weights(corpus)
    """
    bm25 = BM25(corpus)
    average_idf = sum(map(float, bm25.idf.values())) / len(bm25.idf)
    return [bm25.get_scores(document, average_idf) for document in corpus]
| 5,481 | 28.005291 | 118 | py |
poincare_glove | poincare_glove-master/gensim/summarization/mz_entropy.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
from gensim.summarization.textcleaner import tokenize_by_word as _tokenize_by_word
from gensim.utils import to_unicode
import numpy
import scipy
def mz_keywords(text, blocksize=1024, scores=False, split=False, weighted=True, threshold=0.0):
    """Extract keywords from text using the Montemurro and Zanette entropy algorithm. [1]_

    Parameters
    ----------
    text: str
        Document for summarization.
    blocksize: int, optional
        Size of blocks to use in analysis.
    scores: bool, optional
        Whether to return score with keywords.
    split: bool, optional
        Whether to return results as list.
    weighted: bool, optional
        Whether to weight scores by word frequency.
        False can be useful for shorter texts, and allows automatic thresholding.
    threshold: float or 'auto', optional
        Minimum score for returned keywords, 'auto' calculates the threshold as n_blocks / (n_blocks + 1.0) + 1e-8,
        use 'auto' with `weighted=False`.

    Returns
    -------
    results: str
        newline separated keywords if `split` == False **OR**
    results: list(str)
        list of keywords if `scores` == False **OR**
    results: list(tuple(str, float))
        list of (keyword, score) tuples if `scores` == True

    Results are returned in descending order of score regardless of the format.

    Note
    ----
    This algorithm looks for keywords that contribute to the structure of the
    text on scales of `blocksize` words or larger. It is suitable for extracting
    keywords representing the major themes of long texts.

    References
    ----------
    .. [1] Marcello A Montemurro, Damian Zanette, "Towards the quantification of the semantic information encoded in
           written language". Advances in Complex Systems, Volume 13, Issue 2 (2010), pp. 135-153,
           DOI: 10.1142/S0219525910002530, https://arxiv.org/abs/0907.1558

    """
    text = to_unicode(text)
    words = [word for word in _tokenize_by_word(text)]
    vocab = sorted(set(words))
    # word_counts[b][w] = number of occurrences of vocab word w inside block b
    word_counts = numpy.array(
        [
            [words[i:i + blocksize].count(word) for word in vocab]
            for i in range(0, len(words), blocksize)
        ]
    ).astype('d')
    n_blocks = word_counts.shape[0]
    totals = word_counts.sum(axis=0)
    n_words = totals.sum()
    # p[b][w] = probability that an occurrence of word w falls in block b
    p = word_counts / totals
    log_p = numpy.log2(p)
    # empirical (negative) entropy of each word's distribution over blocks;
    # nan_to_num maps the 0 * log2(0) terms to 0
    h = numpy.nan_to_num(p * log_p).sum(axis=0)
    # subtract the entropy expected of a randomly-placed word with the same frequency
    analytic = __analytic_entropy(blocksize, n_blocks, n_words)
    h += analytic(totals).astype('d')
    if weighted:
        h *= totals / n_words
    if threshold == 'auto':
        threshold = n_blocks / (n_blocks + 1.0) + 1.0e-8
    # keep only words scoring above the threshold, highest scores first
    weights = [(word, score) for (word, score) in zip(vocab, h) if score > threshold]
    weights.sort(key=lambda x: -x[1])
    result = weights if scores else [word for (word, score) in weights]
    if not (scores or split):
        result = '\n'.join(result)
    return result
def __log_combinations_inner(n, m):
    """Calculates the logarithm of n!/m!(n-m)!"""
    # Uses the Beta-function identity C(n, m) = 1 / ((n + 1) * B(n - m + 1, m + 1)),
    # so the log-binomial is computed without ever forming (overflowing) factorials.
    return -(numpy.log(n + 1) + scipy.special.betaln(n - m + 1, m + 1))
# Broadcasting (element-wise) version; numpy.frompyfunc produces object-dtype arrays.
__log_combinations = numpy.frompyfunc(__log_combinations_inner, 2, 1)
def __marginal_prob(blocksize, n_words):
    def marginal_prob(n, m):
        """Marginal probability of a word that occurs n times in the document
        occurring m times in a given block"""
        # Hypergeometric probability computed in log space for stability:
        # C(n, m) * C(n_words - n, blocksize - m) / C(n_words, blocksize),
        # exponentiated only at the very end.
        return numpy.exp(
            __log_combinations(n, m) +
            __log_combinations(n_words - n, blocksize - m) -
            __log_combinations(n_words, blocksize)
        )
    # Return a broadcasting (element-wise) version of the closure.
    return numpy.frompyfunc(marginal_prob, 2, 1)
def __analytic_entropy(blocksize, n_blocks, n_words):
    # Block-occupancy probabilities for the document as a whole.
    marginal = __marginal_prob(blocksize, n_words)
    def analytic_entropy(n):
        """Predicted entropy for a word that occurs n times in the document"""
        # All occupancy counts m the word could take inside a single block.
        m = numpy.arange(1, min(blocksize, n) + 1).astype('d')
        p = m / n
        # nan_to_num looks like a defensive guard for 0*log(0)-style terms
        # (p > 0 here since m starts at 1) — NOTE(review): confirm it is needed.
        elements = numpy.nan_to_num(p * numpy.log2(p)) * marginal(n, m)
        return -n_blocks * elements.sum()
    return numpy.frompyfunc(analytic_entropy, 1, 1)
| 4,193 | 33.95 | 116 | py |
poincare_glove | poincare_glove-master/gensim/summarization/syntactic_unit.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module contains implementation of SyntacticUnit class. It generally used while text cleaning.
:class:`~gensim.summarization.syntactic_unit.SyntacticUnit` represents printable version of provided text.
"""
class SyntacticUnit(object):
    """A single processable unit of text (a word or a sentence).
    Keeps the original surface form together with its processed (tokenized)
    form, an optional part-of-speech tag, and bookkeeping fields used by the
    summarization pipeline.
    Attributes
    ----------
    text : str
        Original text of the unit.
    token : str
        Processed (tokenized) form of the unit.
    tag : str
        Shortened part-of-speech tag, or None when no tag was given.
    index : int
        Position of the unit in the corpus (-1 until assigned).
    score : float
        Ranking score of the unit (-1 until assigned).
    """
    def __init__(self, text, token=None, tag=None):
        """
        Parameters
        ----------
        text : str
            Original text of the unit.
        token : str, optional
            Processed form of the unit.
        tag : str, optional
            Part-of-speech tag; only its first two letters are kept.
        """
        self.text = text
        self.token = token
        # Only the two-letter tag prefix matters downstream (e.g. 'NN', 'JJ').
        if tag:
            self.tag = tag[:2]
        else:
            self.tag = None
        self.index = -1
        self.score = -1
    def __str__(self):
        return "Original unit: '%s' *-*-*-* Processed unit: '%s'" % (self.text, self.token)
    def __repr__(self):
        return self.__str__()
| 1,310 | 23.277778 | 106 | py |
poincare_glove | poincare_glove-master/gensim/summarization/keywords.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module contains functions to find keywords of the text and building graph on tokens from text.
Examples
--------
Extract keywords from text
>>> from gensim.summarization import keywords
>>> text='''Challenges in natural language processing frequently involve
... speech recognition, natural language understanding, natural language
... generation (frequently from formal, machine-readable logical forms),
... connecting language and machine perception, dialog systems, or some
... combination thereof.'''
>>> keywords(text).split('\\n')
[u'natural language', u'machine', u'frequently']
Notes
-----
Check tags in http://www.clips.ua.ac.be/pages/mbsp-tags and use only first two letters
for `INCLUDING_FILTER` and `EXCLUDING_FILTER`
Data:
-----
.. data:: WINDOW_SIZE - Size of window, number of consecutive tokens in processing.
.. data:: INCLUDING_FILTER - Including part of speech filters.
.. data:: EXCLUDING_FILTER - Excluding part of speech filters.
"""
from gensim.summarization.pagerank_weighted import pagerank_weighted as _pagerank
from gensim.summarization.textcleaner import clean_text_by_word as _clean_text_by_word
from gensim.summarization.textcleaner import tokenize_by_word as _tokenize_by_word
from gensim.summarization.commons import build_graph as _build_graph
from gensim.summarization.commons import remove_unreachable_nodes as _remove_unreachable_nodes
from gensim.utils import to_unicode
from itertools import combinations as _combinations
from six.moves.queue import Queue as _Queue
from six.moves import xrange
from six import iteritems
# Number of consecutive tokens treated as co-occurring when building the graph.
WINDOW_SIZE = 2
# Part-of-speech tag prefixes to keep (see http://www.clips.ua.ac.be/pages/mbsp-tags):
# nouns and adjectives by default.
INCLUDING_FILTER = ['NN', 'JJ']
# No tags are excluded by default.
EXCLUDING_FILTER = []
def _get_pos_filters():
    """Get default including and excluding part-of-speech filters as frozen sets.
    Returns
    -------
    (frozenset of str, frozenset of str)
        Including and excluding filters.
    """
    include = frozenset(INCLUDING_FILTER)
    exclude = frozenset(EXCLUDING_FILTER)
    return include, exclude
def _get_words_for_graph(tokens, pos_filter=None):
    """Filter `tokens` by part-of-speech tag and return the surviving processed tokens.
    Parameters
    ----------
    tokens : dict
        Original units (words) as keys and processed units (tokens) as values.
    pos_filter : iterable, optional
        Part of speech filters. If None, the defaults from :func:`_get_pos_filters` are used.
    Returns
    -------
    list of str
        Processed tokens whose units passed the filters.
    Raises
    ------
    ValueError
        If both the include and exclude filters are non-empty.
    """
    if pos_filter is None:
        include_filters, exclude_filters = _get_pos_filters()
    else:
        include_filters = set(pos_filter)
        exclude_filters = frozenset([])
    if include_filters and exclude_filters:
        raise ValueError("Can't use both include and exclude filters, should use only one")
    result = []
    for word, unit in iteritems(tokens):
        tag = unit.tag
        if exclude_filters and tag in exclude_filters:
            continue
        # Untagged units and empty include filters always pass.
        if not include_filters or not tag or tag in include_filters:
            result.append(unit.token)
    return result
def _get_first_window(split_text):
    """Get first :const:`~gensim.parsing.keywords.WINDOW_SIZE` tokens from given `split_text`.
    Parameters
    ----------
    split_text : list of str
        Splitted text.
    Returns
    -------
    list of str
        First :const:`~gensim.parsing.keywords.WINDOW_SIZE` tokens.
    """
    # Slicing never raises, so texts shorter than WINDOW_SIZE simply yield a shorter window.
    return split_text[:WINDOW_SIZE]
def _set_graph_edge(graph, tokens, word_a, word_b):
    """Add an edge between the lemmas of `word_a` and `word_b`, in place.
    The edge is created only when both words are present in `tokens`, both
    lemmas are nodes of `graph`, and the edge does not already exist.
    Parameters
    ----------
    graph : :class:~gensim.summarization.graph.Graph
        Given graph.
    tokens : dict
        Original units (words) as keys and processed units (tokens) as values.
    word_a : str
        First word, name of the first node.
    word_b : str
        Second word, name of the second node.
    """
    if word_a not in tokens or word_b not in tokens:
        return
    edge = (tokens[word_a].token, tokens[word_b].token)
    if graph.has_node(edge[0]) and graph.has_node(edge[1]) and not graph.has_edge(edge):
        graph.add_edge(edge)
def _process_first_window(graph, tokens, split_text):
    """Connect every pair of words inside the first window of `split_text`, in place.
    Parameters
    ----------
    graph : :class:~gensim.summarization.graph.Graph
        Given graph.
    tokens : dict
        Original units (words) as keys and processed units (tokens) as values.
    split_text : list of str
        Splitted text.
    """
    window = _get_first_window(split_text)
    for pair in _combinations(window, 2):
        _set_graph_edge(graph, tokens, pair[0], pair[1])
def _init_queue(split_text):
    """Build the initial sliding-window queue from the first words of `split_text`.
    Parameters
    ----------
    split_text : list of str
        Splitted text.
    Returns
    -------
    Queue
        Queue holding the first-window words, minus the very first one.
    """
    queue = _Queue()
    for token in _get_first_window(split_text)[1:]:
        queue.put(token)
    return queue
def _process_word(graph, tokens, queue, word):
    """Connect `word` with every word currently held in `queue`, in place.
    Parameters
    ----------
    graph : :class:`~gensim.summarization.graph.Graph`
        Given graph.
    tokens : dict
        Original units (words) as keys and processed units (tokens) as values.
    queue : Queue
        Given queue.
    word : str
        Word, possible `node` in graph and item in `tokens`.
    """
    for queued_word in _queue_iterator(queue):
        _set_graph_edge(graph, tokens, word, queued_word)
def _update_queue(queue, word):
    """Updates given `queue` (removes last item and puts `word`).
    Parameters
    ----------
    queue : Queue
        Given queue.
    word : str
        Word to be added to queue.
    """
    # Slide the window: drop the oldest word, append the newest.
    queue.get()
    queue.put(word)
    # Invariant: the queue holds the window minus the word currently being processed.
    assert queue.qsize() == (WINDOW_SIZE - 1)
def _process_text(graph, tokens, split_text):
    """Slide a window of :const:`~gensim.parsing.keywords.WINDOW_SIZE` words
    over `split_text`, adding edges to `graph` in place.
    Parameters
    ----------
    graph : :class:`~gensim.summarization.graph.Graph`
        Given graph.
    tokens : dict
        Original units (words) as keys and processed units (tokens) as values.
    split_text : list of str
        Splitted text.
    """
    queue = _init_queue(split_text)
    for position in xrange(WINDOW_SIZE, len(split_text)):
        current = split_text[position]
        _process_word(graph, tokens, queue, current)
        _update_queue(queue, current)
def _queue_iterator(queue):
    """Represents iterator of the given queue.
    Parameters
    ----------
    queue : Queue
        Given queue.
    Yields
    ------
    str
        Current item of queue.
    """
    # Rotate through the queue: each item is popped, yielded, then re-appended,
    # so after a full pass the queue contents and their order are unchanged.
    iterations = queue.qsize()
    for _ in xrange(iterations):
        var = queue.get()
        yield var
        queue.put(var)
def _set_graph_edges(graph, tokens, split_text):
    """Updates given `graph` by setting edges between nodes if they exist in `tokens` and `graph`.
    Words are taken from `split_text` with window size :const:`~gensim.parsing.keywords.WINDOW_SIZE`.
    Parameters
    ----------
    graph : :class:~gensim.summarization.graph.Graph
        Given graph.
    tokens : dict
        Original units (words) as keys and processed units (tokens) as values.
    split_text : list of str
        Splitted text.
    """
    # The first window needs pairwise treatment; the rest is handled by sliding the window.
    _process_first_window(graph, tokens, split_text)
    _process_text(graph, tokens, split_text)
def _extract_tokens(lemmas, scores, ratio, words):
    """Pick the most highly scored lemmas.
    Sorts `lemmas` in place by descending score and returns the top slice:
    exactly `words` items when given, otherwise `ratio` * len(lemmas) items.
    Parameters
    ----------
    lemmas : list of str
        Given lemmas; sorted in place as a side effect.
    scores : dict
        Mapping from lemma to score.
    ratio : float
        Proportion of lemmas kept when `words` is None.
    words : int
        Exact number of lemmas to keep; overrides `ratio` when provided.
    Returns
    -------
    list of (float, str)
        (score, lemma) pairs for the selected lemmas.
    """
    lemmas.sort(key=lambda lemma: scores[lemma], reverse=True)
    count = words if words is not None else len(lemmas) * ratio
    return [(scores[lemmas[i]], lemmas[i]) for i in range(int(count))]
def _lemmas_to_words(tokens):
    """Build the reverse mapping of `tokens`: lemma -> list of original words.
    Parameters
    ----------
    tokens : dict
        Original units (words) as keys and processed units (tokens) as values.
    Returns
    -------
    dict
        Lemmas as keys and lists of corresponding original words as values.
    """
    lemma_to_word = {}
    for word, unit in iteritems(tokens):
        lemma_to_word.setdefault(unit.token, []).append(word)
    return lemma_to_word
def _get_keywords_with_score(extracted_lemmas, lemma_to_word):
    """Map every surface word behind the extracted lemmas to its lemma's score.
    Parameters
    ----------
    extracted_lemmas : list of (float, str)
        Given lemmas with scores.
    lemma_to_word : dict
        Lemmas and corresponding words.
    Returns
    -------
    dict
        Keywords as keys and their scores as values.
    """
    return {
        keyword: score
        for score, lemma in extracted_lemmas
        for keyword in lemma_to_word[lemma]
    }
def _strip_word(word):
    """Return the first token of `word` after tokenization, or '' if nothing survives.
    Parameters
    ----------
    word : str
        Given word.
    Returns
    -------
    str
        Cleaned word.
    """
    stripped = list(_tokenize_by_word(word))
    if not stripped:
        return ""
    return stripped[0]
def _get_combined_keywords(_keywords, split_text):
    """Get most scored words (`_keywords`) contained in `split_text` and it's combinations.
    Parameters
    ----------
    _keywords : dict
        Keywords as keys and its scores as values.
    split_text : list of str
        Splitted text.
    Returns
    -------
    list of str
        Keywords and/or its combinations.
    """
    result = []
    _keywords = _keywords.copy()  # consumed destructively below; don't mutate the caller's dict
    len_text = len(split_text)
    for i in xrange(len_text):
        word = _strip_word(split_text[i])
        if word in _keywords:
            combined_word = [word]
            if i + 1 == len_text:
                result.append(word)   # appends last word if keyword and doesn't iterate
            for j in xrange(i + 1, len_text):
                other_word = _strip_word(split_text[j])
                # Extend the run only while consecutive words are keywords in their raw form.
                if other_word in _keywords and other_word == split_text[j] and other_word not in combined_word:
                    combined_word.append(other_word)
                else:
                    # Run ended: retire its keywords so they are not reused, then emit the phrase.
                    for keyword in combined_word:
                        _keywords.pop(keyword)
                    result.append(" ".join(combined_word))
                    break
    return result
def _get_average_score(concept, _keywords):
    """Average keyword score over the words of `concept`.
    Parameters
    ----------
    concept : str
        Whitespace-separated keyword phrase.
    _keywords : dict
        Keywords as keys and their scores as values.
    Returns
    -------
    float
        Mean score of the phrase's words.
    """
    word_list = concept.split()
    total = 0
    for word in word_list:
        total += _keywords[word]
    return total / len(word_list)
def _format_results(_keywords, combined_keywords, split, scores):
    """Sort `combined_keywords` by average score and render them in the requested format.
    Parameters
    ----------
    _keywords : dict
        Keywords as keys and their scores as values.
    combined_keywords : list of str
        Most ranked words and/or their combinations.
    split : bool
        Return a list of keywords instead of a single string, optional.
    scores : bool
        Return (keyword, score) tuples, optional; takes precedence over `split`.
    Returns
    -------
    result: list of (str, float)
        If `scores`, keywords with scores **OR**
    result: list of str
        If `split`, keywords only **OR**
    result: str
        Keywords, joined by endl.
    """
    def average(phrase):
        return _get_average_score(phrase, _keywords)
    combined_keywords.sort(key=average, reverse=True)
    if scores:
        return [(phrase, average(phrase)) for phrase in combined_keywords]
    if split:
        return combined_keywords
    return "\n".join(combined_keywords)
def keywords(text, ratio=0.2, words=None, split=False, scores=False, pos_filter=('NN', 'JJ'),
             lemmatize=False, deacc=True):
    """Get the most ranked words of `text` and/or their combinations.
    Parameters
    ----------
    text : str
        Input text.
    ratio : float, optional
        Fraction of words kept when `words` is not given; ignored otherwise.
    words : int, optional
        Number of returned words.
    split : bool, optional
        Return keywords as a list instead of a single string.
    scores : bool, optional
        Return keywords together with their scores.
    pos_filter : tuple, optional
        Part of speech filters.
    lemmatize : bool, optional
        If True - lemmatize words.
    deacc : bool, optional
        If True - remove accentuation.
    Returns
    -------
    result: list of (str, float)
        If `scores`, keywords with scores **OR**
    result: list of str
        If `split`, keywords only **OR**
    result: str
        Keywords, joined by endl.
    """
    text = to_unicode(text)
    # Map each surface word to its processed unit (lemma + POS tag).
    tokens = _clean_text_by_word(text, deacc=deacc)
    split_text = list(_tokenize_by_word(text))
    # Build the co-occurrence graph over the POS-filtered tokens.
    graph = _build_graph(_get_words_for_graph(tokens, pos_filter))
    _set_graph_edges(graph, tokens, split_text)
    del split_text  # no longer used
    _remove_unreachable_nodes(graph)
    # Rank lemmas with PageRank and keep the top slice.
    lemma_scores = _pagerank(graph)
    top_lemmas = _extract_tokens(graph.nodes(), lemma_scores, ratio, words)
    # Many surface variants may share one lemma; choose how to map lemmas back to words.
    if lemmatize:
        lemma_to_words = {unit.token: [word] for word, unit in iteritems(tokens)}
    else:
        lemma_to_words = _lemmas_to_words(tokens)
    keyword_scores = _get_keywords_with_score(top_lemmas, lemma_to_words)
    # text.split() keeps numbers and punctuation marks, so separated concepts are not combined.
    combined_keywords = _get_combined_keywords(keyword_scores, text.split())
    return _format_results(keyword_scores, combined_keywords, split, scores)
def get_graph(text):
    """Clean and tokenize `text`, then build the token co-occurrence graph for it.
    Parameters
    ----------
    text : str
        Sequence of values.
    Returns
    -------
    :class:`~gensim.summarization.graph.Graph`
        Created graph.
    """
    tokens = _clean_text_by_word(text)
    graph = _build_graph(_get_words_for_graph(tokens))
    _set_graph_edges(graph, tokens, list(_tokenize_by_word(text)))
    return graph
| 15,888 | 27.526032 | 111 | py |
poincare_glove | poincare_glove-master/gensim/summarization/commons.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module provides functions of creating graph from sequence of values and removing of unreachable nodes.
Examples
--------
Create simple graph and add edges. Let's take a look at nodes.
>>> gg = build_graph(['Felidae', 'Lion', 'Tiger', 'Wolf'])
>>> gg.add_edge(("Felidae", "Lion"))
>>> gg.add_edge(("Felidae", "Tiger"))
>>> sorted(gg.nodes())
['Felidae', 'Lion', 'Tiger', 'Wolf']
Remove nodes with no edges.
>>> remove_unreachable_nodes(gg)
>>> sorted(gg.nodes())
['Felidae', 'Lion', 'Tiger']
"""
from gensim.summarization.graph import Graph
def build_graph(sequence):
    """Create an undirected graph with one node per distinct value of `sequence`.
    Parameters
    ----------
    sequence : list of hashable
        Sequence of values.
    Returns
    -------
    :class:`~gensim.summarization.graph.Graph`
        Created graph.
    """
    graph = Graph()
    for value in sequence:
        if graph.has_node(value):
            continue
        graph.add_node(value)
    return graph
def remove_unreachable_nodes(graph):
    """Remove nodes with no (positively weighted) incident edges, in place.
    Parameters
    ----------
    graph : :class:`~gensim.summarization.graph.Graph`
        Given graph.
    """
    # graph.nodes() returns a snapshot list, so deleting while looping is safe.
    for node in graph.nodes():
        incident_total = sum(
            graph.edge_weight((node, neighbor))
            for neighbor in graph.neighbors(node)
        )
        if incident_total == 0:
            graph.del_node(node)
| 1,487 | 21.892308 | 110 | py |
poincare_glove | poincare_glove-master/gensim/summarization/graph.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module contains abstract class IGraph represents graphs interface and
class Graph (based on IGraph) which implements undirected graph.
Examples
--------
Create simple graph with 4 nodes.
>>> g = Graph()
>>> g.add_node('Felidae')
>>> g.add_node('Lion')
>>> g.add_node('Tiger')
>>> g.add_node('Wolf')
>>> sorted(g.nodes())
['Felidae', 'Lion', 'Tiger', 'Wolf']
Add some edges and check neighbours.
>>> g.add_edge(("Felidae", "Lion"))
>>> g.add_edge(("Felidae", "Tiger"))
>>> g.neighbors("Felidae")
['Lion', 'Tiger']
One node has no neighbours.
>>> g.neighbors("Wolf")
[]
"""
from abc import ABCMeta, abstractmethod
class IGraph(object):
    """Represents the interface or contract that the graph for TextRank
    should implement.
    """
    # NOTE(review): Python 2-style metaclass declaration; under Python 3 this
    # attribute is ignored, so the @abstractmethod markers are not enforced there.
    __metaclass__ = ABCMeta

    @abstractmethod
    def nodes(self):
        """Returns all nodes of graph.
        Returns
        -------
        list of hashable
            Nodes of graph.
        """
        pass

    @abstractmethod
    def edges(self):
        """Returns all edges of graph.
        Returns
        -------
        list of (hashable, hashable)
            Edges of graph.
        """
        pass

    @abstractmethod
    def neighbors(self, node):
        """Return all nodes that are directly accessible from given node.
        Parameters
        ----------
        node : hashable
            Given node identifier.
        Returns
        -------
        list of hashable
            Nodes directly accessible from given `node`.
        """
        pass

    @abstractmethod
    def has_node(self, node):
        """Returns whether the requested node exists.
        Parameters
        ----------
        node : hashable
            Given node identifier.
        Returns
        -------
        bool
            True if `node` exists, False otherwise.
        """
        pass

    @abstractmethod
    def add_node(self, node, attrs=None):
        """Adds given node to the graph.
        Note
        ----
        While nodes can be of any type, it's strongly recommended to use only numbers and single-line strings
        as node identifiers if you intend to use write().
        Parameters
        ----------
        node : hashable
            Given node
        attrs : list, optional
            Node attributes specified as (attribute, value)
        """
        pass

    @abstractmethod
    def add_edge(self, edge, wt=1, label='', attrs=None):
        """Adds an edge to the graph connecting two nodes. An edge, here,
        is a tuple of two nodes.
        Parameters
        ----------
        edge : (hashable, hashable)
            Given edge.
        wt : float, optional
            Weight of new edge.
        label : str, optional
            Edge label.
        attrs : list, optional
            Node attributes specified as (attribute, value)
        """
        pass

    @abstractmethod
    def has_edge(self, edge):
        """Returns whether an edge exists.
        Parameters
        ----------
        edge : (hashable, hashable)
            Given edge.
        Returns
        -------
        bool
            True if `edge` exists, False otherwise.
        """
        pass

    @abstractmethod
    def edge_weight(self, edge):
        """Returns weight of given edge.
        Parameters
        ----------
        edge : (hashable, hashable)
            Given edge.
        Returns
        -------
        float
            Edge weight.
        """
        pass

    @abstractmethod
    def del_node(self, node):
        """Removes node and its edges from the graph.
        Parameters
        ----------
        node : hashable
            Node to delete.
        """
        pass


class Graph(IGraph):
    """
    Implementation of an undirected graph, based on IGraph.
    Attributes
    ----------
    Graph.WEIGHT_ATTRIBUTE_NAME : str
        Name of weight attribute in graph.
    Graph.DEFAULT_WEIGHT : float
        Weight set by default.
    Graph.LABEL_ATTRIBUTE_NAME : str
        Default name of attribute. Not used.
    Graph.DEFAULT_LABEL : str
        Label set by default. Not used.
    """
    WEIGHT_ATTRIBUTE_NAME = "weight"
    # Weight reported for edges that have no explicit weight property; note that
    # add_edge() itself defaults new edges to wt=1.
    DEFAULT_WEIGHT = 0
    LABEL_ATTRIBUTE_NAME = "label"
    DEFAULT_LABEL = ""

    def __init__(self):
        """Initializes object."""
        # Metadata about edges
        # Mapping: Edge -> Dict mapping, lablel-> str, wt->num
        self.edge_properties = {}
        # Key value pairs: (Edge -> Attributes)
        self.edge_attr = {}
        # Metadata about nodes
        # Pairing: Node -> Attributes
        self.node_attr = {}
        # Pairing: Node -> Neighbors
        self.node_neighbors = {}

    def has_edge(self, edge):
        """Returns whether an edge exists.
        Parameters
        ----------
        edge : (hashable, hashable)
            Given edge.
        Returns
        -------
        bool
            True if `edge` exists, False otherwise.
        """
        # Undirected graph: both orientations must be present in the bookkeeping.
        u, v = edge
        return (u, v) in self.edge_properties and (v, u) in self.edge_properties

    def edge_weight(self, edge):
        """Returns weight of given edge.
        Parameters
        ----------
        edge : (hashable, hashable)
            Given edge.
        Returns
        -------
        float
            Edge weight, DEFAULT_WEIGHT when the edge carries none.
        """
        return self.get_edge_properties(edge).setdefault(self.WEIGHT_ATTRIBUTE_NAME, self.DEFAULT_WEIGHT)

    def neighbors(self, node):
        """Returns all nodes that are directly accessible from given node.
        Parameters
        ----------
        node : hashable
            Given node identifier.
        Returns
        -------
        list of hashable
            Nodes directly accessible from given `node`.
        """
        return self.node_neighbors[node]

    def has_node(self, node):
        """Returns whether the requested node exists.
        Parameters
        ----------
        node : hashable
            Given node.
        Returns
        -------
        bool
            True if `node` exists, False otherwise.
        """
        return node in self.node_neighbors

    def add_edge(self, edge, wt=1, label='', attrs=None):
        """Adds an edge to the graph connecting two nodes.
        Parameters
        ----------
        edge : (hashable, hashable)
            Given edge.
        wt : float, optional
            Weight of new edge.
        label : str, optional
            Edge label.
        attrs : list, optional
            Node attributes specified as (attribute, value).
        Raises
        ------
        ValueError
            If `edge` already exists in graph.
        """
        if attrs is None:
            attrs = []
        u, v = edge
        if v not in self.node_neighbors[u] and u not in self.node_neighbors[v]:
            self.node_neighbors[u].append(v)
            if u != v:  # avoid listing a self-loop's endpoint twice
                self.node_neighbors[v].append(u)
            self.add_edge_attributes((u, v), attrs)
            self.set_edge_properties((u, v), label=label, weight=wt)
        else:
            raise ValueError("Edge (%s, %s) already in graph" % (u, v))

    def add_node(self, node, attrs=None):
        """Adds given node to the graph.
        Note
        ----
        While nodes can be of any type, it's strongly recommended
        to use only numbers and single-line strings as node identifiers if you
        intend to use write().
        Parameters
        ----------
        node : hashable
            Given node.
        attrs : list of (hashable, hashable), optional
            Node attributes specified as (attribute, value)
        Raises
        ------
        ValueError
            If `node` already exists in graph.
        """
        if attrs is None:
            attrs = []
        if node not in self.node_neighbors:
            self.node_neighbors[node] = []
            self.node_attr[node] = attrs
        else:
            raise ValueError("Node %s already in graph" % node)

    def nodes(self):
        """Returns all nodes of the graph.
        Returns
        -------
        list of hashable
            Nodes of graph.
        """
        return list(self.node_neighbors.keys())

    def edges(self):
        """Returns all edges of the graph (both orientations of each edge).
        Returns
        -------
        list of (hashable, hashable)
            Edges of graph.
        """
        return list(self.edge_properties.keys())

    def del_node(self, node):
        """Removes given node and its edges from the graph.
        Parameters
        ----------
        node : hashable
            Given node.
        """
        # Copy the neighbor list: del_edge mutates it while we iterate.
        for each in list(self.neighbors(node)):
            if each != node:
                self.del_edge((each, node))
        del self.node_neighbors[node]
        del self.node_attr[node]

    def get_edge_properties(self, edge):
        """Returns properties of given edge. If edge doesn't exist
        empty dictionary will be returned.
        Parameters
        ----------
        edge : (hashable, hashable)
            Given edge.
        Returns
        -------
        dict
            Properties of graph.
        """
        return self.edge_properties.setdefault(edge, {})

    def add_edge_attributes(self, edge, attrs):
        """Adds attributes `attrs` to given edge, order of nodes in edge doesn't matter.
        Parameters
        ----------
        edge : (hashable, hashable)
            Given edge.
        attrs : list
            Provided attributes to add.
        """
        for attr in attrs:
            self.add_edge_attribute(edge, attr)

    def add_edge_attribute(self, edge, attr):
        """Adds attribute `attr` to given edge, order of nodes in edge doesn't matter.
        Parameters
        ----------
        edge : (hashable, hashable)
            Given edge.
        attr : object
            Provided attribute to add.
        """
        self.edge_attr[edge] = self.edge_attributes(edge) + [attr]
        if edge[0] != edge[1]:
            self.edge_attr[(edge[1], edge[0])] = self.edge_attributes((edge[1], edge[0])) + [attr]

    def edge_attributes(self, edge):
        """Returns attributes of given edge.
        Note
        ----
        In case of non existing edge returns empty list.
        Parameters
        ----------
        edge : (hashable, hashable)
            Given edge.
        Returns
        -------
        list
            Attributes of given edge.
        """
        try:
            return self.edge_attr[edge]
        except KeyError:
            return []

    def set_edge_properties(self, edge, **properties):
        """Adds `properties` to given edge, order of nodes in edge doesn't matter.
        Parameters
        ----------
        edge : (hashable, hashable)
            Given edge.
        properties : dict
            Properties to add.
        """
        self.edge_properties.setdefault(edge, {}).update(properties)
        if edge[0] != edge[1]:
            self.edge_properties.setdefault((edge[1], edge[0]), {}).update(properties)

    def del_edge(self, edge):
        """Removes given edges from the graph.
        Parameters
        ----------
        edge : (hashable, hashable)
            Given edge.
        """
        u, v = edge
        self.node_neighbors[u].remove(v)
        self.del_edge_labeling((u, v))
        if u != v:
            self.node_neighbors[v].remove(u)
            self.del_edge_labeling((v, u))

    def del_edge_labeling(self, edge):
        """Removes attributes and properties of given edge.
        Parameters
        ----------
        edge : (hashable, hashable)
            Given edge.
        """
        keys = [edge, edge[::-1]]
        for key in keys:
            for mapping in [self.edge_properties, self.edge_attr]:
                try:
                    del mapping[key]
                except KeyError:
                    pass
| 12,075 | 22.632094 | 109 | py |
poincare_glove | poincare_glove-master/gensim/summarization/__init__.py |
# bring model classes directly into package namespace, to save some typing
from .summarizer import summarize, summarize_corpus # noqa:F401
from .keywords import keywords # noqa:F401
from .mz_entropy import mz_keywords # noqa:F401
| 234 | 38.166667 | 74 | py |
poincare_glove | poincare_glove-master/gensim/summarization/textcleaner.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module contains functions and processors used for processing text,
extracting sentences from text, working with acronyms and abbreviations.
Data
----
.. data:: SEPARATOR - Special separator used in abbreviations.
.. data:: RE_SENTENCE - Pattern to split text to sentences.
.. data:: AB_SENIOR - Pattern for detecting abbreviations (example: Sgt. Pepper).
.. data:: AB_ACRONYM - Pattern for detecting acronyms.
.. data:: AB_ACRONYM_LETTERS - Pattern for detecting acronyms (example: P.S. I love you).
.. data:: UNDO_AB_SENIOR - Pattern like AB_SENIOR but with SEPARATOR between abbreviation and next word.
.. data:: UNDO_AB_ACRONYM - Pattern like AB_ACRONYM but with SEPARATOR between abbreviation and next word.
"""
from gensim.summarization.syntactic_unit import SyntacticUnit
from gensim.parsing.preprocessing import preprocess_documents
from gensim.utils import tokenize
from six.moves import xrange
import re
import logging
logger = logging.getLogger('summarizer.preprocessing.cleaner')
# 'pattern' is an optional third-party dependency: when importable, POS tags can
# be attached to English tokens; otherwise tagging is silently disabled.
try:
    from pattern.en import tag
    logger.info("'pattern' package found; tag filters are available for English")
    HAS_PATTERN = True
except ImportError:
    logger.info("'pattern' package not found; tag filters are not available for English")
    HAS_PATTERN = False
SEPARATOR = r'@'  # placeholder inserted after abbreviations so sentence splitting ignores them
# A sentence: text up to .!? followed by whitespace/EOL, or a line without terminal punctuation.
RE_SENTENCE = re.compile(r'(\S.+?[.!?])(?=\s+|$)|(\S.+?)(?=[\n]|$)', re.UNICODE)
AB_SENIOR = re.compile(r'([A-Z][a-z]{1,2}\.)\s(\w)', re.UNICODE)  # e.g. "Mrs. Robinson"
AB_ACRONYM = re.compile(r'(\.[a-zA-Z]\.)\s(\w)', re.UNICODE)  # e.g. "U.S. economy"
AB_ACRONYM_LETTERS = re.compile(r'([a-zA-Z])\.([a-zA-Z])\.', re.UNICODE)  # e.g. "P.S."
UNDO_AB_SENIOR = re.compile(r'([A-Z][a-z]{1,2}\.)' + SEPARATOR + r'(\w)', re.UNICODE)
UNDO_AB_ACRONYM = re.compile(r'(\.[a-zA-Z]\.)' + SEPARATOR + r'(\w)', re.UNICODE)
def split_sentences(text):
    """Split `text` into sentences, preserving the abbreviations matched by
    :const:`~gensim.summarization.textcleaner.AB_SENIOR` and
    :const:`~gensim.summarization.textcleaner.AB_ACRONYM`.
    Parameters
    ----------
    text : str
        Input text.
    Returns
    -------
    list of str
        Sentences of the given text.
    Example
    -------
    >>> from gensim.summarization.textcleaner import split_sentences
    >>> text = '''Beautiful is better than ugly.
    ... Explicit is better than implicit. Simple is better than complex.'''
    >>> split_sentences(text)
    ['Beautiful is better than ugly.',
    'Explicit is better than implicit.',
    'Simple is better than complex.']
    """
    # Protect abbreviations, split into sentences, then restore the original spacing.
    protected = replace_abbreviations(text)
    return [undo_replacement(sentence) for sentence in get_sentences(protected)]
def replace_abbreviations(text):
    """Insert the `@` separator between each abbreviation and the following word.
    Parameters
    ----------
    text : str
        Input sentence.
    Returns
    -------
    str
        Sentence with the separator inserted.
    Example
    -------
    >>> replace_abbreviations("God bless you, please, Mrs. Robinson")
    God bless you, please, Mrs.@Robinson
    """
    abbreviation_patterns = [AB_SENIOR, AB_ACRONYM]
    return replace_with_separator(text, SEPARATOR, abbreviation_patterns)
def undo_replacement(sentence):
    """Replace the `@` separator back with a blank space after each abbreviation.
    Parameters
    ----------
    sentence : str
        Input sentence.
    Returns
    -------
    str
        Sentence with the separator restored to a space.
    Example
    -------
    >>> undo_replacement("God bless you, please, Mrs.@Robinson")
    God bless you, please, Mrs. Robinson
    """
    undo_patterns = [UNDO_AB_SENIOR, UNDO_AB_ACRONYM]
    return replace_with_separator(sentence, r" ", undo_patterns)
def replace_with_separator(text, separator, regexs):
    """Rejoin the two capture groups of every pattern in `regexs` with `separator`.
    Parameters
    ----------
    text : str
        Input text.
    separator : str
        The separator placed between the two captured groups.
    regexs : list of `_sre.SRE_Pattern`
        Compiled patterns, each with exactly two capture groups.
    Returns
    -------
    str
        Text with replaced separators.
    """
    template = r"\1" + separator + r"\2"
    processed = text
    for pattern in regexs:
        processed = pattern.sub(template, processed)
    return processed
def get_sentences(text):
    """Yield sentences from `text` as matched by
    :const:`~gensim.summarization.textcleaner.RE_SENTENCE`.
    Parameters
    ----------
    text : str
        Input text.
    Yields
    ------
    str
        Single sentence extracted from text.
    Example
    -------
    >>> text = "Does this text contains two sentences? Yes, it does."
    >>> for sentence in get_sentences(text):
    >>>     print(sentence)
    Does this text contains two sentences?
    Yes, it does.
    """
    matches = RE_SENTENCE.finditer(text)
    for match in matches:
        yield match.group(0)
def merge_syntactic_units(original_units, filtered_units, tags=None):
    """Pair original and filtered units into
    :class:`~gensim.summarization.syntactic_unit.SyntacticUnit` objects, skipping
    units whose filtered form is empty. Tags are attached when provided.
    Parameters
    ----------
    original_units : list
        List of original sentences (or words).
    filtered_units : list
        List of tokenized sentences (or words), parallel to `original_units`.
    tags : list of str, optional
        List of strings used as tags for each unit. None as default.
    Returns
    -------
    list of :class:~gensim.summarization.syntactic_unit.SyntacticUnit
        List of syntactic units carrying text, token, tag and position index.
    """
    units = []
    for idx in xrange(len(original_units)):
        token = filtered_units[idx]
        # Units emptied out by the filters carry no information — drop them.
        if token == '':
            continue
        pos_tag = tags[idx][1] if tags else None
        unit = SyntacticUnit(original_units[idx], token, pos_tag)
        unit.index = idx
        units.append(unit)
    return units
def join_words(words, separator=" "):
    """Concatenate `words` into a single string, placing `separator` between consecutive elements.

    Parameters
    ----------
    words : list of str
        Given words.
    separator : str, optional
        Separator placed between elements.

    Returns
    -------
    str
        The joined string.

    """
    return separator.join(words)
def clean_text_by_sentences(text):
    """Tokenize `text` into sentences, apply the preprocessing filters and lemmatize them.

    Parameters
    ----------
    text : str
        Given text.

    Returns
    -------
    list of :class:`~gensim.summarization.syntactic_unit.SyntacticUnit`
        Sentences of the given text.

    """
    originals = split_sentences(text)
    processed = [join_words(sentence_tokens) for sentence_tokens in preprocess_documents(originals)]
    return merge_syntactic_units(originals, processed)
def clean_text_by_word(text, deacc=True):
    """Tokenize `text` into words, apply the preprocessing filters and lemmatize them.

    Parameters
    ----------
    text : str
        Given text.
    deacc : bool, optional
        Remove accentuation if True.

    Returns
    -------
    dict
        Maps each original word (str) to its
        :class:`~gensim.summarization.syntactic_unit.SyntacticUnit`.

    Example
    -------
    >>> from gensim.summarization.textcleaner import clean_text_by_word
    >>> clean_text_by_word("God helps those who help themselves")
    {'god': Original unit: 'god' *-*-*-* Processed unit: 'god',
    'help': Original unit: 'help' *-*-*-* Processed unit: 'help',
    'helps': Original unit: 'helps' *-*-*-* Processed unit: 'help'}

    """
    without_acronyms = replace_with_separator(text, "", [AB_ACRONYM_LETTERS])
    original_words = list(tokenize(without_acronyms, to_lower=True, deacc=deacc))
    processed_words = [join_words(parts, "") for parts in preprocess_documents(original_words)]
    if HAS_PATTERN:
        # the tagger needs the words in their sentence context, so re-join them first
        tags = tag(join_words(original_words))
    else:
        tags = None
    units = merge_syntactic_units(original_words, processed_words, tags)
    return {unit.text: unit for unit in units}
def tokenize_by_word(text):
    """Tokenize `text` into lowercase, de-accented words.

    Acronym periods are collapsed first using
    :const:`~gensim.summarization.textcleaner.AB_ACRONYM_LETTERS`.

    Parameters
    ----------
    text : str
        Given text.

    Returns
    -------
    generator
        Yields the successive words of the text.

    Example
    -------
    >>> from gensim.summarization.textcleaner import tokenize_by_word
    >>> g = tokenize_by_word('Veni. Vedi. Vici.')
    >>> print(next(g))
    veni
    >>> print(next(g))
    vedi
    >>> print(next(g))
    vici

    """
    without_acronyms = replace_with_separator(text, "", [AB_ACRONYM_LETTERS])
    return tokenize(without_acronyms, to_lower=True, deacc=True)
| 8,966 | 27.740385 | 119 | py |
poincare_glove | poincare_glove-master/gensim/summarization/pagerank_weighted.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module calculate PageRank [1]_ based on wordgraph.
.. [1] https://en.wikipedia.org/wiki/PageRank
Examples
--------
Calculate Pagerank for words
>>> from gensim.summarization.keywords import get_graph
>>> from gensim.summarization.pagerank_weighted import pagerank_weighted
>>> graph = get_graph("The road to hell is paved with good intentions.")
>>> # result will looks like {'good': 0.70432858653171504, 'hell': 0.051128871128006126, ...}
>>> result = pagerank_weighted(graph)
Build matrix from graph
>>> from gensim.summarization.pagerank_weighted import build_adjacency_matrix
>>> build_adjacency_matrix(graph).todense()
matrix([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 1., 0., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.]])
"""
import numpy
from numpy import empty as empty_matrix
from scipy.linalg import eig
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import eigs
from six.moves import xrange
def pagerank_weighted(graph, damping=0.85):
    """Rank the nodes of `graph` using weighted PageRank.

    Parameters
    ----------
    graph : :class:`~gensim.summarization.graph.Graph`
        Given graph.
    damping : float, optional
        Damping parameter of the PageRank random walk.

    Returns
    -------
    dict
        Nodes of `graph` as keys, their PageRank scores as values.

    """
    adjacency = build_adjacency_matrix(graph)
    teleport = build_probability_matrix(graph)
    pagerank_matrix = damping * adjacency.todense() + (1 - damping) * teleport
    principal = pagerank_matrix.T
    principal = principal_eigenvector(principal)
    # pagerank_matrix is positive, so its principal eigenvector is always real
    # (i.e. not complex) -- keep only the real part
    return process_results(graph, principal.real)
def build_adjacency_matrix(graph):
    """Get matrix representation of given `graph`.

    Entry (i, j) holds the weight of edge i -> j, normalized by the total
    outgoing edge weight of node i; diagonal entries are always zero.

    Parameters
    ----------
    graph : :class:`~gensim.summarization.graph.Graph`
        Given graph.

    Returns
    -------
    :class:`scipy.sparse.csr_matrix`, shape = [n, n]
        Adjacency matrix of given `graph`, n is number of nodes.

    """
    row = []
    col = []
    data = []
    nodes = graph.nodes()
    # Map node -> position once, so we can visit only the actual neighbors of
    # each node instead of probing edge_weight for every (i, j) pair, which
    # made this loop O(n^2) edge_weight calls regardless of graph sparsity.
    node2index = {node: i for i, node in enumerate(nodes)}
    for i, current_node in enumerate(nodes):
        neighbors = graph.neighbors(current_node)
        neighbors_sum = sum(graph.edge_weight((current_node, neighbor)) for neighbor in neighbors)
        for neighbor in neighbors:
            edge_weight = float(graph.edge_weight((current_node, neighbor)))
            j = node2index[neighbor]
            # keep the original filtering: skip self-loops and zero-weight edges
            if i != j and edge_weight != 0.0:
                row.append(i)
                col.append(j)
                data.append(edge_weight / neighbors_sum)
    return csr_matrix((data, (row, col)), shape=(len(nodes), len(nodes)))
def build_probability_matrix(graph):
    """Get the uniform "teleport" matrix for `graph`: every entry equals 1/n.

    Parameters
    ----------
    graph : :class:`~gensim.summarization.graph.Graph`
        Given graph.

    Returns
    -------
    numpy.ndarray, shape = [n, n]
        Matrix with every entry equal to 1/n, where n is the number of nodes
        of `graph`.  (The old docstring wrongly described this as an
        eigenvector.)

    """
    dimension = len(graph.nodes())
    # allocate-and-fill in one step; identical result to empty() + fill()
    return numpy.full((dimension, dimension), 1.0 / float(dimension))
def principal_eigenvector(a):
    """Get the principal (dominant) eigenvector of square matrix `a`.

    Parameters
    ----------
    a : numpy.ndarray, shape = [n, n]
        Given matrix.

    Returns
    -------
    numpy.ndarray, shape = [n, ]
        Eigenvector of matrix `a` associated with the eigenvalue of largest
        magnitude.

    """
    # `eigs` is preferred even for dense input because only one eigenvector is
    # needed (see #441, #438), but it cannot handle matrices smaller than 3x3,
    # hence the dense fallback below.
    if len(a) < 3:
        vals, vecs = eig(a)
        dominant = numpy.abs(vals).argmax()
        return vecs[:, dominant]
    vals, vecs = eigs(a, k=1)
    return vecs[:, 0]
def process_results(graph, vec):
    """Map each node of `graph` to the absolute value of its eigenvector entry.

    Helper for :func:`~gensim.summarization.pagerank_weighted.pagerank_weighted`.

    Parameters
    ----------
    graph : :class:`~gensim.summarization.graph.Graph`
        Given graph.
    vec : numpy.ndarray, shape = [n, ]
        Given eigenvector, n is number of nodes of `graph`.

    Returns
    -------
    dict
        Graph nodes as keys, corresponding absolute eigenvector entries as values.

    """
    return {node: abs(vec[i]) for i, node in enumerate(graph.nodes())}
| 4,714 | 25.789773 | 118 | py |
poincare_glove | poincare_glove-master/gensim/corpora/svmlightcorpus.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Corpus in SVMlight format."""
from __future__ import with_statement
import logging
from gensim import utils
from gensim.corpora import IndexedCorpus
logger = logging.getLogger(__name__)
class SvmLightCorpus(IndexedCorpus):
    """Corpus in SVMlight format.

    Quoting http://svmlight.joachims.org/:
    The input file contains the training examples. The first lines may contain comments and are ignored
    if they start with #. Each of the following lines represents one training example
    and is of the following format::

        <line> .=. <target> <feature>:<value> <feature>:<value> ... <feature>:<value> # <info>
        <target> .=. +1 | -1 | 0 | <float>
        <feature> .=. <integer> | "qid"
        <value> .=. <float>
        <info> .=. <string>

    The "qid" feature (used for SVMlight ranking), if present, is ignored.

    Notes
    -----
    Although not mentioned in the specification above, SVMlight also expect its feature ids to be 1-based
    (counting starts at 1). We convert features to 0-base internally by decrementing all ids when loading a SVMlight
    input file, and increment them again when saving as SVMlight.

    """

    def __init__(self, fname, store_labels=True):
        """
        Parameters
        ----------
        fname: str
            Path to corpus.
        store_labels : bool, optional
            Whether to store labels (~SVM target class). They currently have no application but stored
            in `self.labels` for convenience by default.

        """
        IndexedCorpus.__init__(self, fname)
        logger.info("loading corpus from %s", fname)

        self.fname = fname  # input file, see class doc for format
        self.length = None  # number of lines; computed lazily by a full iteration
        self.store_labels = store_labels
        self.labels = []

    def __iter__(self):
        """Iterate over the corpus, returning one sparse (BoW) vector at a time.

        Yields
        ------
        list of (int, float)
            Document in BoW format.

        """
        lineno = -1
        self.labels = []
        with utils.smart_open(self.fname) as fin:
            for lineno, line in enumerate(fin):
                doc = self.line2doc(line)
                if doc is not None:  # skip comment/empty lines
                    if self.store_labels:
                        self.labels.append(doc[1])
                    yield doc[0]
        self.length = lineno + 1

    @staticmethod
    def save_corpus(fname, corpus, id2word=None, labels=False, metadata=False):
        """Save a corpus in the SVMlight format.

        The SVMlight `<target>` class tag is taken from the `labels` array, or set to 0 for all documents
        if `labels` is not supplied.

        Parameters
        ----------
        fname : str
            Path to output file.
        corpus : iterable of iterable of (int, float)
            Corpus in BoW format.
        id2word : dict of (str, str), optional
            Mapping id -> word (not used by this format; accepted for API uniformity).
        labels : list or False
            An SVMlight `<target>` class tags or False if not present.
        metadata : bool
            ARGUMENT WILL BE IGNORED.

        Returns
        -------
        list of int
            Offsets for each line in file (in bytes).

        """
        logger.info("converting corpus to SVMlight format: %s", fname)

        offsets = []
        with utils.smart_open(fname, 'wb') as fout:
            for docno, doc in enumerate(corpus):
                label = labels[docno] if labels else 0  # target class is 0 by default
                offsets.append(fout.tell())
                fout.write(utils.to_utf8(SvmLightCorpus.doc2line(doc, label)))
        return offsets

    def docbyoffset(self, offset):
        """Get the document stored at file position `offset`.

        Parameters
        ----------
        offset : int
            Document's position (in bytes).

        Returns
        -------
        list of (int, float) or None
            Document in BoW format, or None if the line at `offset` is a
            comment or empty.

        """
        with utils.smart_open(self.fname) as f:
            f.seek(offset)
            doc = self.line2doc(f.readline())
        # line2doc returns None for comment/blank lines; indexing that None
        # used to raise TypeError here (old TODO), so propagate None instead.
        return None if doc is None else doc[0]

    def line2doc(self, line):
        """Get a document from a single line in SVMlight format.

        This method is the inverse of :meth:`~gensim.corpora.svmlightcorpus.SvmLightCorpus.doc2line`.

        Parameters
        ----------
        line : str
            Line in SVMLight format.

        Returns
        -------
        (list of (int, float), str) or None
            Document in BoW format and target class label, or None for
            comment/empty lines.

        """
        line = utils.to_unicode(line)
        # Cut off an optional trailing comment. Note `split` (unlike the old
        # `line[:line.find('#')]`) does not chop the last character when the
        # line contains no '#' and has no trailing newline.
        line = line.split('#', 1)[0].strip()
        if not line:
            return None  # ignore comments and empty lines
        parts = line.split()
        if not parts:
            raise ValueError('invalid line format in %s' % self.fname)
        target, fields = parts[0], [part.rsplit(':', 1) for part in parts[1:]]
        # ignore 'qid' features, convert 1-based feature ids to 0-based
        doc = [(int(p1) - 1, float(p2)) for p1, p2 in fields if p1 != 'qid']
        return doc, target

    @staticmethod
    def doc2line(doc, label=0):
        """Convert BoW representation of document in SVMlight format.

        This method is the inverse of :meth:`~gensim.corpora.svmlightcorpus.SvmLightCorpus.line2doc`.

        Parameters
        ----------
        doc : list of (int, float)
            Document in BoW format.
        label : int, optional
            Document label (if provided).

        Returns
        -------
        str
            `doc` in SVMlight format.

        """
        pairs = ' '.join("%i:%s" % (termid + 1, termval) for termid, termval in doc)  # +1 to convert 0-base to 1-base
        return "%s %s\n" % (label, pairs)
| 5,903 | 30.572193 | 118 | py |
poincare_glove | poincare_glove-master/gensim/corpora/bleicorpus.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Сorpus in Blei's LDA-C format."""
from __future__ import with_statement
from os import path
import logging
from gensim import utils
from gensim.corpora import IndexedCorpus
from six.moves import xrange
logger = logging.getLogger(__name__)
class BleiCorpus(IndexedCorpus):
    """Corpus in Blei's LDA-C format.

    The corpus is represented as two files: one describing the documents, and another
    describing the mapping between words and their ids.

    Each document is one line::

        N fieldId1:fieldValue1 fieldId2:fieldValue2 ... fieldIdN:fieldValueN

    The vocabulary is a file with words, one word per line; word at line K has an implicit `id=K`.

    """

    def __init__(self, fname, fname_vocab=None):
        """
        Parameters
        ----------
        fname : str
            Path to corpus.
        fname_vocab : str, optional
            Vocabulary file. If `fname_vocab` is None, searching one of variants:

            * `fname`.vocab
            * `fname`/vocab.txt
            * `fname_without_ext`.vocab
            * `fname_folder`/vocab.txt

        Raises
        ------
        IOError
            If vocabulary file doesn't exist.

        """
        IndexedCorpus.__init__(self, fname)
        logger.info("loading corpus from %s", fname)

        if fname_vocab is None:
            fname_base, _ = path.splitext(fname)
            fname_dir = path.dirname(fname)
            for fname_vocab in [
                utils.smart_extension(fname, '.vocab'),
                utils.smart_extension(fname, '/vocab.txt'),
                utils.smart_extension(fname_base, '.vocab'),
                utils.smart_extension(fname_dir, '/vocab.txt'),
            ]:
                if path.exists(fname_vocab):
                    break
            else:
                # for/else: none of the candidate paths existed
                raise IOError('BleiCorpus: could not find vocabulary file')

        self.fname = fname
        with utils.smart_open(fname_vocab) as fin:
            words = [utils.to_unicode(word).rstrip() for word in fin]
        self.id2word = dict(enumerate(words))

    def __iter__(self):
        """Iterate over the corpus, returning one sparse (BoW) vector at a time.

        Yields
        ------
        list of (int, float)
            Document's BoW representation.

        """
        lineno = -1
        with utils.smart_open(self.fname) as fin:
            for lineno, line in enumerate(fin):
                yield self.line2doc(line)
        self.length = lineno + 1

    def line2doc(self, line):
        """Convert line in Blei's LDA-C format to document (BoW representation).

        Parameters
        ----------
        line : str
            Line in Blei's LDA-C format.

        Returns
        -------
        list of (int, float)
            Document's BoW representation.

        """
        parts = utils.to_unicode(line).split()
        # the first token declares how many id:value pairs follow
        if int(parts[0]) != len(parts) - 1:
            raise ValueError("invalid format in %s: %s" % (self.fname, repr(line)))
        doc = [part.rsplit(':', 1) for part in parts[1:]]
        doc = [(int(p1), float(p2)) for p1, p2 in doc]
        return doc

    @staticmethod
    def save_corpus(fname, corpus, id2word=None, metadata=False):
        """Save a corpus in the LDA-C format.

        Notes
        -----
        There are actually two files saved: `fname` and `fname.vocab`, where `fname.vocab` is the vocabulary file.

        Parameters
        ----------
        fname : str
            Path to output file.
        corpus : iterable of iterable of (int, float)
            Input corpus in BoW format.
        id2word : dict of (str, str), optional
            Mapping id -> word for `corpus`.
        metadata : bool, optional
            THIS PARAMETER WILL BE IGNORED.

        Returns
        -------
        list of int
            Offsets for each line in file (in bytes).

        """
        if id2word is None:
            logger.info("no word id mapping provided; initializing from corpus")
            id2word = utils.dict_from_corpus(corpus)
            num_terms = len(id2word)
        else:
            # materialize the keys first: on Python 3, `dict.keys()` is a view
            # and concatenating it to a list raised TypeError here
            num_terms = 1 + max([-1] + list(id2word.keys()))

        logger.info("storing corpus in Blei's LDA-C format into %s", fname)
        with utils.smart_open(fname, 'wb') as fout:
            offsets = []
            for doc in corpus:
                doc = list(doc)
                offsets.append(fout.tell())
                parts = ["%i:%g" % p for p in doc if abs(p[1]) > 1e-7]
                fout.write(utils.to_utf8("%i %s\n" % (len(doc), ' '.join(parts))))

        # write out vocabulary, in a format compatible with Blei's topics.py script
        fname_vocab = utils.smart_extension(fname, '.vocab')
        logger.info("saving vocabulary of %i words to %s", num_terms, fname_vocab)
        with utils.smart_open(fname_vocab, 'wb') as fout:
            for featureid in xrange(num_terms):
                fout.write(utils.to_utf8("%s\n" % id2word.get(featureid, '---')))

        return offsets

    def docbyoffset(self, offset):
        """Get document corresponding to `offset`.

        Offset can be given from :meth:`~gensim.corpora.bleicorpus.BleiCorpus.save_corpus`.

        Parameters
        ----------
        offset : int
            Position of the document in the file (in bytes).

        Returns
        -------
        list of (int, float)
            Document in BoW format.

        """
        with utils.smart_open(self.fname) as f:
            f.seek(offset)
            return self.line2doc(f.readline())
| 5,755 | 30.113514 | 114 | py |
poincare_glove | poincare_glove-master/gensim/corpora/dictionary.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module implements the concept of Dictionary -- a mapping between words and their integer ids."""
from __future__ import with_statement
from collections import Mapping, defaultdict
import sys
import logging
import itertools
from gensim import utils
from six import PY3, iteritems, iterkeys, itervalues, string_types
from six.moves import xrange
from six.moves import zip as izip
# The `unicode` builtin does not exist on Python 3; alias it to `str` so the
# 2/3-compatible code below can call `unicode(...)` on either version.
if sys.version_info[0] >= 3:
    unicode = str
logger = logging.getLogger(__name__)
class Dictionary(utils.SaveLoad, Mapping):
"""Dictionary encapsulates the mapping between normalized words and their integer ids.
Attributes
---------
token2id : dict of (str, int)
token -> tokenId.
id2token : dict of (int, str)
Reverse mapping for token2id, initialized in lazy manner to save memory.
dfs : dict of (int, int)
Document frequencies: token_id -> in how many documents contain this token.
num_docs : int
Number of documents processed.
num_pos : int
Total number of corpus positions (number of processed words).
num_nnz : int
Total number of non-zeroes in the BOW matrix.
"""
def __init__(self, documents=None, prune_at=2000000):
"""
Parameters
----------
documents : iterable of iterable of str, optional
Documents that used for initialization.
prune_at : int, optional
Total number of unique words. Dictionary will keep not more than `prune_at` words.
Examples
--------
>>> from gensim.corpora import Dictionary
>>>
>>> texts = [['human', 'interface', 'computer']]
>>> dct = Dictionary(texts) # fit dictionary
>>> dct.add_documents([["cat", "say", "meow"], ["dog"]]) # update dictionary with new documents
>>> dct.doc2bow(["dog", "computer", "non_existent_word"])
[(0, 1), (6, 1)]
"""
self.token2id = {}
self.id2token = {}
self.dfs = {}
self.num_docs = 0
self.num_pos = 0
self.num_nnz = 0
if documents is not None:
self.add_documents(documents, prune_at=prune_at)
    def __getitem__(self, tokenid):
        """Get token by provided `tokenid`.

        Parameters
        ----------
        tokenid : int
            Id of token

        Returns
        -------
        str
            Token corresponding to `tokenid`.

        Raises
        ------
        KeyError
            If `tokenid` isn't contained in :class:`~gensim.corpora.dictionary.Dictionary`.

        """
        if len(self.id2token) != len(self.token2id):
            # the word->id mapping has changed (presumably via add_documents);
            # recompute id->word accordingly
            self.id2token = utils.revdict(self.token2id)
        return self.id2token[tokenid]  # will throw for non-existent ids
    def __iter__(self):
        """Iterate over all stored token ids (delegates to :meth:`keys`)."""
        return iter(self.keys())
    # Python 3 dropped dict's iterkeys/iteritems/itervalues; recreate them so
    # callers can use the Py2-style API on either Python version.
    if PY3:
        # restore Py2-style dict API
        iterkeys = __iter__  # iterating a Dictionary already yields its keys
        def iteritems(self):
            # equivalent of the dict.items() view on Python 3
            return self.items()
        def itervalues(self):
            # equivalent of the dict.values() view on Python 3
            return self.values()
    def keys(self):
        """Get all stored ids.

        Returns
        -------
        list of int
            List of all token ids.

        """
        return list(self.token2id.values())
    def __len__(self):
        """Get number of stored tokens.

        Returns
        -------
        int
            Number of stored tokens.

        """
        return len(self.token2id)
def __str__(self):
some_keys = list(itertools.islice(iterkeys(self.token2id), 5))
return "Dictionary(%i unique tokens: %s%s)" % (len(self), some_keys, '...' if len(self) > 5 else '')
    @staticmethod
    def from_documents(documents):
        """Create :class:`~gensim.corpora.dictionary.Dictionary` based on `documents`

        Parameters
        ----------
        documents : iterable of iterable of str
            Input corpus.

        Returns
        -------
        :class:`~gensim.corpora.dictionary.Dictionary`
            Dictionary filled by `documents`.

        """
        return Dictionary(documents=documents)
    def add_documents(self, documents, prune_at=2000000):
        """Update dictionary from a collection of `documents`.

        Parameters
        ----------
        documents : iterable of iterable of str
            Input corpus. All tokens should be already **tokenized and normalized**.
        prune_at : int, optional
            Total number of unique words. Dictionary will keep not more than `prune_at` words.

        Examples
        --------
        >>> from gensim.corpora import Dictionary
        >>>
        >>> corpus = ["máma mele maso".split(), "ema má máma".split()]
        >>> dct = Dictionary(corpus)
        >>> len(dct)
        5
        >>> dct.add_documents([["this","is","sparta"],["just","joking"]])
        >>> len(dct)
        10

        """
        for docno, document in enumerate(documents):
            # log progress & run a regular check for pruning, once every 10k docs
            if docno % 10000 == 0:
                if prune_at is not None and len(self) > prune_at:
                    self.filter_extremes(no_below=0, no_above=1.0, keep_n=prune_at)
                logger.info("adding document #%i to %s", docno, self)
            # update Dictionary with the document
            self.doc2bow(document, allow_update=True)  # ignore the result, here we only care about updating token ids
        logger.info(
            "built %s from %i documents (total %i corpus positions)",
            self, self.num_docs, self.num_pos
        )
    def doc2bow(self, document, allow_update=False, return_missing=False):
        """Convert `document` into the bag-of-words (BoW) format = list of (token_id, token_count).

        Parameters
        ----------
        document : list of str
            Input document.
        allow_update : bool, optional
            If True - update dictionary in the process (i.e. add new tokens and update frequencies).
        return_missing : bool, optional
            Also return tokens that are missing from the current dictionary, with their frequencies.

        Return
        ------
        list of (int, int)
            BoW representation of `document`
        list of (int, int), dict of (str, int)
            If `return_missing` is True, return BoW representation of `document` + dictionary with missing
            tokens and their frequencies.

        Examples
        --------
        >>> from gensim.corpora import Dictionary
        >>> dct = Dictionary(["máma mele maso".split(), "ema má máma".split()])
        >>> dct.doc2bow(["this","is","máma"])
        [(2, 1)]
        >>> dct.doc2bow(["this","is","máma"], return_missing=True)
        ([(2, 1)], {u'this': 1, u'is': 1})

        """
        if isinstance(document, string_types):
            raise TypeError("doc2bow expects an array of unicode tokens on input, not a single string")
        # Construct (word, frequency) mapping; words are normalized to unicode first.
        counter = defaultdict(int)
        for w in document:
            counter[w if isinstance(w, unicode) else unicode(w, 'utf-8')] += 1
        token2id = self.token2id
        if allow_update or return_missing:
            # sorted, so new ids are assigned in deterministic (lexicographic) order
            missing = sorted(x for x in iteritems(counter) if x[0] not in token2id)
            if allow_update:
                for w, _ in missing:
                    # new id = number of ids made so far;
                    # NOTE this assumes there are no gaps in the id sequence!
                    token2id[w] = len(token2id)
        result = {token2id[w]: freq for w, freq in iteritems(counter) if w in token2id}
        if allow_update:
            self.num_docs += 1
            self.num_pos += sum(itervalues(counter))
            self.num_nnz += len(result)
            # increase document count for each unique token that appeared in the document
            dfs = self.dfs
            for tokenid in iterkeys(result):
                dfs[tokenid] = dfs.get(tokenid, 0) + 1
        # return tokenids, in ascending id order
        result = sorted(iteritems(result))
        if return_missing:
            return result, dict(missing)
        else:
            return result
def doc2idx(self, document, unknown_word_index=-1):
"""Convert `document` (a list of words) into a list of indexes = list of `token_id`.
Notes
-----
Replace all unknown words i.e, words not in the dictionary with the index as set via `unknown_word_index`.
Parameters
----------
document : list of str
Input document
unknown_word_index : int, optional
Index to use for words not in the dictionary.
Returns
-------
list of int
Indexes in the dictionary for words in the `document` (preserving the order of words).
Examples
--------
>>> from gensim.corpora import Dictionary
>>>
>>> corpus = [["a", "a", "b"], ["a", "c"]]
>>> dct = Dictionary(corpus)
>>> dct.doc2idx(["a", "a", "c", "not_in_dictionary", "c"])
[0, 0, 2, -1, 2]
"""
if isinstance(document, string_types):
raise TypeError("doc2idx expects an array of unicode tokens on input, not a single string")
document = [word if isinstance(word, unicode) else unicode(word, 'utf-8') for word in document]
return [self.token2id.get(word, unknown_word_index) for word in document]
def filter_extremes(self, no_below=5, no_above=0.5, keep_n=100000, keep_tokens=None):
"""Filter tokens in dictionary by frequency.
Parameters
----------
no_below : int, optional
Keep tokens which are contained in at least `no_below` documents.
no_above : float, optional
Keep tokens which are contained in no more than `no_above` documents
(fraction of total corpus size, not an absolute number).
keep_n : int, optional
Keep only the first `keep_n` most frequent tokens.
keep_tokens : iterable of str
Iterable of tokens that **must** stay in dictionary after filtering.
Notes
-----
For tokens that appear in:
#. Less than `no_below` documents (absolute number) or \n
#. More than `no_above` documents (fraction of total corpus size, **not absolute number**).
#. After (1) and (2), keep only the first `keep_n` most frequent tokens (or keep all if `None`).
After the pruning, shrink resulting gaps in word ids.
Due to the gap shrinking, the same word may have a different word id before and after the call to this function!
Examples
--------
>>> from gensim.corpora import Dictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = Dictionary(corpus)
>>> len(dct)
5
>>> dct.filter_extremes(no_below=1, no_above=0.5, keep_n=1)
>>> len(dct)
1
"""
no_above_abs = int(no_above * self.num_docs) # convert fractional threshold to absolute threshold
# determine which tokens to keep
if keep_tokens:
keep_ids = [self.token2id[v] for v in keep_tokens if v in self.token2id]
good_ids = (
v for v in itervalues(self.token2id)
if no_below <= self.dfs.get(v, 0) <= no_above_abs or v in keep_ids
)
else:
good_ids = (
v for v in itervalues(self.token2id)
if no_below <= self.dfs.get(v, 0) <= no_above_abs
)
good_ids = sorted(good_ids, key=self.dfs.get, reverse=True)
if keep_n is not None:
good_ids = good_ids[:keep_n]
bad_words = [(self[idx], self.dfs.get(idx, 0)) for idx in set(self).difference(good_ids)]
logger.info("discarding %i tokens: %s...", len(self) - len(good_ids), bad_words[:10])
logger.info(
"keeping %i tokens which were in no less than %i and no more than %i (=%.1f%%) documents",
len(good_ids), no_below, no_above_abs, 100.0 * no_above
)
# do the actual filtering, then rebuild dictionary to remove gaps in ids
self.filter_tokens(good_ids=good_ids)
logger.info("resulting dictionary: %s", self)
def filter_n_most_frequent(self, remove_n):
"""Filter out the 'remove_n' most frequent tokens that appear in the documents.
Parameters
----------
remove_n : int
Number of the most frequent tokens that will be removed.
Examples
--------
>>> from gensim.corpora import Dictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = Dictionary(corpus)
>>> len(dct)
5
>>> dct.filter_n_most_frequent(2)
>>> len(dct)
3
"""
# determine which tokens to keep
most_frequent_ids = (v for v in itervalues(self.token2id))
most_frequent_ids = sorted(most_frequent_ids, key=self.dfs.get, reverse=True)
most_frequent_ids = most_frequent_ids[:remove_n]
# do the actual filtering, then rebuild dictionary to remove gaps in ids
most_frequent_words = [(self[idx], self.dfs.get(idx, 0)) for idx in most_frequent_ids]
logger.info("discarding %i tokens: %s...", len(most_frequent_ids), most_frequent_words[:10])
self.filter_tokens(bad_ids=most_frequent_ids)
logger.info("resulting dictionary: %s", self)
def filter_tokens(self, bad_ids=None, good_ids=None):
"""Remove the selected `bad_ids` tokens from :class:`~gensim.corpora.dictionary.Dictionary`.
Alternative - keep selected `good_ids` in :class:`~gensim.corpora.dictionary.Dictionary` and remove the rest.
Parameters
----------
bad_ids : iterable of int, optional
Collection of word ids to be removed.
good_ids : collection of int, optional
Keep selected collection of word ids and remove the rest.
Examples
--------
>>> from gensim.corpora import Dictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = Dictionary(corpus)
>>> 'ema' in dct.token2id
True
>>> dct.filter_tokens(bad_ids=[dct.token2id['ema']])
>>> 'ema' in dct.token2id
False
>>> len(dct)
4
>>> dct.filter_tokens(good_ids=[dct.token2id['maso']])
>>> len(dct)
1
"""
if bad_ids is not None:
bad_ids = set(bad_ids)
self.token2id = {token: tokenid for token, tokenid in iteritems(self.token2id) if tokenid not in bad_ids}
self.dfs = {tokenid: freq for tokenid, freq in iteritems(self.dfs) if tokenid not in bad_ids}
if good_ids is not None:
good_ids = set(good_ids)
self.token2id = {token: tokenid for token, tokenid in iteritems(self.token2id) if tokenid in good_ids}
self.dfs = {tokenid: freq for tokenid, freq in iteritems(self.dfs) if tokenid in good_ids}
self.compactify()
def compactify(self):
"""Assign new word ids to all words, shrinking gaps."""
logger.debug("rebuilding dictionary, shrinking gaps")
# build mapping from old id -> new id
idmap = dict(izip(sorted(itervalues(self.token2id)), xrange(len(self.token2id))))
# reassign mappings to new ids
self.token2id = {token: idmap[tokenid] for token, tokenid in iteritems(self.token2id)}
self.id2token = {}
self.dfs = {idmap[tokenid]: freq for tokenid, freq in iteritems(self.dfs)}
def save_as_text(self, fname, sort_by_word=True):
"""Save :class:`~gensim.corpora.dictionary.Dictionary` to a text file.
Parameters
----------
fname : str
Path to output file.
sort_by_word : bool, optional
if True - sort by word in lexicographical order.
Notes
-----
Format::
num_docs
id_1[TAB]word_1[TAB]document_frequency_1[NEWLINE]
id_2[TAB]word_2[TAB]document_frequency_2[NEWLINE]
....
id_k[TAB]word_k[TAB]document_frequency_k[NEWLINE]
Warnings
--------
Text format should be use for corpus inspection. Use :meth:`~gensim.corpora.dictionary.Dictionary.save` and
:meth:`~gensim.corpora.dictionary.Dictionary.load` to store in binary format (pickle) for better performance.
See Also
--------
:meth:`~gensim.corpora.dictionary.Dictionary.load_from_text`
Examples
--------
>>> from gensim.corpora import Dictionary
>>> from gensim.test.utils import get_tmpfile
>>>
>>> tmp_fname = get_tmpfile("dictionary")
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>>
>>> dct = Dictionary(corpus)
>>> dct.save_as_text(tmp_fname)
>>>
>>> loaded_dct = Dictionary.load_from_text("testdata")
>>> assert dct.token2id == loaded_dct.token2id
"""
logger.info("saving dictionary mapping to %s", fname)
with utils.smart_open(fname, 'wb') as fout:
numdocs_line = "%d\n" % self.num_docs
fout.write(utils.to_utf8(numdocs_line))
if sort_by_word:
for token, tokenid in sorted(iteritems(self.token2id)):
line = "%i\t%s\t%i\n" % (tokenid, token, self.dfs.get(tokenid, 0))
fout.write(utils.to_utf8(line))
else:
for tokenid, freq in sorted(iteritems(self.dfs), key=lambda item: -item[1]):
line = "%i\t%s\t%i\n" % (tokenid, self[tokenid], freq)
fout.write(utils.to_utf8(line))
    def merge_with(self, other):
        """Merge another dictionary into this dictionary, mapping same tokens to the same ids and new tokens to new ids.

        Notes
        -----
        The purpose is to merge two corpora created using two different dictionaries: `self` and `other`.
        `other` can be any id=>word mapping (a dict, a Dictionary object, ...).

        Get a transformation object which, when accessed as `result[doc_from_other_corpus]`, will convert documents
        from a corpus built using the `other` dictionary into a document using the new, merged dictionary.

        Warnings
        --------
        This method will change `self` dictionary.

        Parameters
        ----------
        other : :class:`~gensim.corpora.dictionary.Dictionary`
            Other dictionary.

        Return
        ------
        :class:`gensim.models.VocabTransform`
            Transformation object.

        Examples
        --------
        >>> from gensim.corpora import Dictionary
        >>>
        >>> corpus_1, corpus_2 = [["a", "b", "c"]], [["a", "f", "f"]]
        >>> dct_1, dct_2 = Dictionary(corpus_1), Dictionary(corpus_2)
        >>> dct_1.doc2bow(corpus_2[0])
        [(0, 1)]
        >>> transformer = dct_1.merge_with(dct_2)
        >>> dct_1.doc2bow(corpus_2[0])
        [(0, 1), (3, 2)]

        """
        old2new = {}  # maps each id from `other` to its id in the merged dictionary
        for other_id, other_token in iteritems(other):
            if other_token in self.token2id:
                new_id = self.token2id[other_token]
            else:
                new_id = len(self.token2id)
                self.token2id[other_token] = new_id
                self.dfs[new_id] = 0
            old2new[other_id] = new_id
            try:
                self.dfs[new_id] += other.dfs[other_id]
            except Exception:
                # `other` isn't a Dictionary (probably just a dict) => ignore dfs, keep going
                pass
        try:
            self.num_docs += other.num_docs
            self.num_nnz += other.num_nnz
            self.num_pos += other.num_pos
        except Exception:
            # plain dicts carry no corpus statistics; leave ours unchanged
            pass
        # imported locally, presumably to avoid a circular import between
        # gensim.corpora and gensim.models at module load time
        import gensim.models
        return gensim.models.VocabTransform(old2new)
@staticmethod
def load_from_text(fname):
"""Load a previously stored :class:`~gensim.corpora.dictionary.Dictionary` from a text file.
Mirror function to :meth:`~gensim.corpora.dictionary.Dictionary.save_as_text`.
Parameters
----------
fname: str
Path to file produced by :meth:`~gensim.corpora.dictionary.Dictionary.save_as_text`.
See Also
--------
:meth:`~gensim.corpora.dictionary.Dictionary.save_as_text`
Examples
--------
>>> from gensim.corpora import Dictionary
>>> from gensim.test.utils import get_tmpfile
>>>
>>> tmp_fname = get_tmpfile("dictionary")
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>>
>>> dct = Dictionary(corpus)
>>> dct.save_as_text(tmp_fname)
>>>
>>> loaded_dct = Dictionary.load_from_text("testdata")
>>> assert dct.token2id == loaded_dct.token2id
"""
result = Dictionary()
with utils.smart_open(fname) as f:
for lineno, line in enumerate(f):
line = utils.to_unicode(line)
if lineno == 0:
if line.strip().isdigit():
# Older versions of save_as_text may not write num_docs on first line.
result.num_docs = int(line.strip())
continue
else:
logging.warning("Text does not contain num_docs on the first line.")
try:
wordid, word, docfreq = line[:-1].split('\t')
except Exception:
raise ValueError("invalid line in dictionary file %s: %s"
% (fname, line.strip()))
wordid = int(wordid)
if word in result.token2id:
raise KeyError('token %s is defined as ID %d and as ID %d' % (word, wordid, result.token2id[word]))
result.token2id[word] = wordid
result.dfs[wordid] = int(docfreq)
return result
@staticmethod
def from_corpus(corpus, id2word=None):
"""Create :class:`~gensim.corpora.dictionary.Dictionary` from an existing corpus.
Parameters
----------
corpus : iterable of iterable of (int, number)
Corpus in BoW format.
id2word : dict of (int, object)
Mapping id -> word. If None, the mapping `id2word[word_id] = str(word_id)` will be used.
Notes
-----
This can be useful if you only have a term-document BOW matrix (represented by `corpus`), but not the original
text corpus. This method will scan the term-document count matrix for all word ids that appear in it,
then construct :class:`~gensim.corpora.dictionary.Dictionary` which maps each `word_id -> id2word[word_id]`.
`id2word` is an optional dictionary that maps the `word_id` to a token.
In case `id2word` isn't specified the mapping `id2word[word_id] = str(word_id)` will be used.
Returns
-------
:class:`~gensim.corpora.dictionary.Dictionary`
Inferred dictionary from corpus.
Examples
--------
>>> from gensim.corpora import Dictionary
>>>
>>> corpus = [[(1, 1.0)], [], [(0, 5.0), (2, 1.0)], []]
>>> dct = Dictionary.from_corpus(corpus)
>>> len(dct)
3
"""
result = Dictionary()
max_id = -1
for docno, document in enumerate(corpus):
if docno % 10000 == 0:
logger.info("adding document #%i to %s", docno, result)
result.num_docs += 1
result.num_nnz += len(document)
for wordid, word_freq in document:
max_id = max(wordid, max_id)
result.num_pos += word_freq
result.dfs[wordid] = result.dfs.get(wordid, 0) + 1
if id2word is None:
# make sure length(result) == get_max_id(corpus) + 1
result.token2id = {unicode(i): i for i in xrange(max_id + 1)}
else:
# id=>word mapping given: simply copy it
result.token2id = {utils.to_unicode(token): idx for idx, token in iteritems(id2word)}
for idx in itervalues(result.token2id):
# make sure all token ids have a valid `dfs` entry
result.dfs[idx] = result.dfs.get(idx, 0)
logger.info(
"built %s from %i documents (total %i corpus positions)",
result, result.num_docs, result.num_pos
)
return result
| 25,070 | 35.6 | 120 | py |
poincare_glove | poincare_glove-master/gensim/corpora/sharded_corpus.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Original author: Jan Hajic jr.
# Copyright (C) 2015 Radim Rehurek and gensim team.
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
This module implements a corpus class that stores its data in separate files called
"shards". This is a compromise between speed (keeping the whole dataset
in memory) and memory footprint (keeping the data on disk and reading from it
on demand).
The corpus is intended for situations where you need to use your data
as numpy arrays for some iterative processing (like training something
using SGD, which usually involves heavy matrix multiplication).
"""
from __future__ import print_function
import logging
import os
import math
import numpy
import scipy.sparse as sparse
import time
from six.moves import xrange
import gensim
from gensim.corpora import IndexedCorpus
from gensim.interfaces import TransformedCorpus
logger = logging.getLogger(__name__)
#: Specifies which dtype should be used for serializing the shards.
_default_dtype = float
try:
import theano
_default_dtype = theano.config.floatX
except ImportError:
logger.info('Could not import Theano, will use standard float for default ShardedCorpus dtype.')
class ShardedCorpus(IndexedCorpus):
"""
This corpus is designed for situations where you need to train a model
on matrices, with a large number of iterations. (It should be faster than
gensim's other IndexedCorpus implementations for this use case; check the
`benchmark_datasets.py` script. It should also serialize faster.)
The corpus stores its data in separate files called
"shards". This is a compromise between speed (keeping the whole dataset
in memory) and memory footprint (keeping the data on disk and reading from
it on demand). Persistence is done using the standard gensim load/save methods.
.. note::
The dataset is **read-only**, there is - as opposed to gensim's Similarity
class, which works similarly - no way of adding documents to the dataset
(for now).
You can use ShardedCorpus to serialize your data just like any other gensim
corpus that implements serialization. However, because the data is saved
as numpy 2-dimensional ndarrays (or scipy sparse matrices), you need to
supply the dimension of your data to the corpus. (The dimension of word
frequency vectors will typically be the size of the vocabulary, etc.)
>>> corpus = gensim.utils.mock_data()
>>> output_prefix = 'mydata.shdat'
>>> ShardedCorpus.serialize(output_prefix, corpus, dim=1000)
The `output_prefix` tells the ShardedCorpus where to put the data.
Shards are saved as `output_prefix.0`, `output_prefix.1`, etc.
All shards must be of the same size. The shards can be re-sized (which
is essentially a re-serialization into new-size shards), but note that
this operation will temporarily take twice as much disk space, because
the old shards are not deleted until the new shards are safely in place.
After serializing the data, the corpus will then save itself to the file
`output_prefix`.
On further initialization with the same `output_prefix`, the corpus
will load the already built dataset unless the `overwrite` option is
given. (A new object is "cloned" from the one saved to `output_prefix`
previously.)
To retrieve data, you can load the corpus and use it like a list:
>>> sh_corpus = ShardedCorpus.load(output_prefix)
>>> batch = sh_corpus[100:150]
This will retrieve a numpy 2-dimensional array of 50 rows and 1000
columns (1000 was the dimension of the data we supplied to the corpus).
To retrieve gensim-style sparse vectors, set the `gensim` property:
>>> sh_corpus.gensim = True
>>> batch = sh_corpus[100:150]
The batch now will be a generator of gensim vectors.
Since the corpus needs the data serialized in order to be able to operate,
it will serialize data right away on initialization. Instead of calling
`ShardedCorpus.serialize()`, you can just initialize and use the corpus
right away:
>>> corpus = ShardedCorpus(output_prefix, corpus, dim=1000)
>>> batch = corpus[100:150]
ShardedCorpus also supports working with scipy sparse matrices, both
during retrieval and during serialization. If you want to serialize your
data as sparse matrices, set the `sparse_serialization` flag. For
retrieving your data as sparse matrices, use the `sparse_retrieval`
flag. (You can also retrieve densely serialized data as sparse matrices,
for the sake of completeness, and vice versa.) By default, the corpus
will retrieve numpy ndarrays even if it was serialized into sparse
matrices.
>>> sparse_prefix = 'mydata.sparse.shdat'
>>> ShardedCorpus.serialize(sparse_prefix, corpus, dim=1000, sparse_serialization=True)
>>> sparse_corpus = ShardedCorpus.load(sparse_prefix)
>>> batch = sparse_corpus[100:150]
>>> type(batch)
<type 'numpy.ndarray'>
>>> sparse_corpus.sparse_retrieval = True
>>> batch = sparse_corpus[100:150]
<class 'scipy.sparse.csr.csr_matrix'>
While you *can* touch the `sparse_retrieval` attribute during the life
of a ShardedCorpus object, you should definitely not touch `
`sharded_serialization`! Changing the attribute will not miraculously
re-serialize the data in the requested format.
The CSR format is used for sparse data throughout.
Internally, to retrieve data, the dataset keeps track of which shard is
currently open and on a `__getitem__` request, either returns an item from
the current shard, or opens a new one. The shard size is constant, except
for the last shard.
"""
def __init__(self, output_prefix, corpus, dim=None,
shardsize=4096, overwrite=False, sparse_serialization=False,
sparse_retrieval=False, gensim=False):
"""Initializes the dataset. If `output_prefix` is not found,
builds the shards.
:type output_prefix: str
:param output_prefix: The absolute path to the file from which shard
filenames should be derived. The individual shards will be saved
as `output_prefix.0`, `output_prefix.1`, etc.
The `output_prefix` path then works as the filename to which
the ShardedCorpus object itself will be automatically saved.
Normally, gensim corpora do not do this, but ShardedCorpus needs
to remember several serialization settings: namely the shard
size and whether it was serialized in dense or sparse format. By
saving automatically, any new ShardedCorpus with the same
`output_prefix` will be able to find the information about the
data serialized with the given prefix.
If you want to *overwrite* your data serialized with some output
prefix, set the `overwrite` flag to True.
Of course, you can save your corpus separately as well using
the `save()` method.
:type corpus: gensim.interfaces.CorpusABC
:param corpus: The source corpus from which to build the dataset.
:type dim: int
:param dim: Specify beforehand what the dimension of a dataset item
should be. This is useful when initializing from a corpus that
doesn't advertise its dimension, or when it does and you want to
check that the corpus matches the expected dimension. **If `dim`
is left unused and `corpus` does not provide its dimension in
an expected manner, initialization will fail.**
:type shardsize: int
:param shardsize: How many data points should be in one shard. More
data per shard means less shard reloading but higher memory usage
and vice versa.
:type overwrite: bool
:param overwrite: If set, will build dataset from given corpus even
if `output_prefix` already exists.
:type sparse_serialization: bool
:param sparse_serialization: If set, will save the data in a sparse
form (as csr matrices). This is to speed up retrieval when you
know you will be using sparse matrices.
..note::
This property **should not change** during the lifetime of
the dataset. (If you find out you need to change from a sparse
to a dense representation, the best practice is to create
another ShardedCorpus object.)
:type sparse_retrieval: bool
:param sparse_retrieval: If set, will retrieve data as sparse vectors
(numpy csr matrices). If unset, will return ndarrays.
Note that retrieval speed for this option depends on how the dataset
was serialized. If `sparse_serialization` was set, then setting
`sparse_retrieval` will be faster. However, if the two settings
do not correspond, the conversion on the fly will slow the dataset
down.
:type gensim: bool
:param gensim: If set, will convert the output to gensim
sparse vectors (list of tuples (id, value)) to make it behave like
any other gensim corpus. This **will** slow the dataset down.
"""
self.output_prefix = output_prefix
self.shardsize = shardsize
self.n_docs = 0
self.offsets = []
self.n_shards = 0
self.dim = dim # This number may change during initialization/loading.
# Sparse vs. dense serialization and retrieval.
self.sparse_serialization = sparse_serialization
self.sparse_retrieval = sparse_retrieval
self.gensim = gensim
# The "state" of the dataset.
self.current_shard = None # The current shard itself (numpy ndarray)
self.current_shard_n = None # Current shard is the current_shard_n-th
self.current_offset = None # The index into the dataset which
# corresponds to index 0 of current shard
logger.info('Initializing sharded corpus with prefix %s', output_prefix)
if (not os.path.isfile(output_prefix)) or overwrite:
logger.info('Building from corpus...')
self.init_shards(output_prefix, corpus, shardsize)
# Save automatically, to facilitate re-loading
# and retain information about how the corpus
# was serialized.
logger.info('Saving ShardedCorpus object to %s', self.output_prefix)
self.save()
else:
logger.info('Cloning existing...')
self.init_by_clone()
def init_shards(self, output_prefix, corpus, shardsize=4096, dtype=_default_dtype):
"""Initialize shards from the corpus."""
is_corpus, corpus = gensim.utils.is_corpus(corpus)
if not is_corpus:
raise ValueError(
"Cannot initialize shards without a corpus to read from! (Got corpus type: {0})".format(type(corpus))
)
proposed_dim = self._guess_n_features(corpus)
if proposed_dim != self.dim:
if self.dim is None:
logger.info('Deriving dataset dimension from corpus: %d', proposed_dim)
else:
logger.warning(
"Dataset dimension derived from input corpus differs from initialization argument, "
"using corpus. (corpus %d, init arg %d)", proposed_dim, self.dim
)
self.dim = proposed_dim
self.offsets = [0]
start_time = time.clock()
logger.info('Running init from corpus.')
for n, doc_chunk in enumerate(gensim.utils.grouper(corpus, chunksize=shardsize)):
logger.info('Chunk no. %d at %f s', n, time.clock() - start_time)
current_shard = numpy.zeros((len(doc_chunk), self.dim), dtype=dtype)
logger.debug('Current chunk dimension: %d x %d', len(doc_chunk), self.dim)
for i, doc in enumerate(doc_chunk):
doc = dict(doc)
current_shard[i][list(doc)] = list(gensim.matutils.itervalues(doc))
# Handles the updating as well.
if self.sparse_serialization:
current_shard = sparse.csr_matrix(current_shard)
self.save_shard(current_shard)
end_time = time.clock()
logger.info('Built %d shards in %f s.', self.n_shards, end_time - start_time)
def init_by_clone(self):
"""
Initialize by copying over attributes of another ShardedCorpus
instance saved to the output_prefix given at __init__().
"""
temp = self.__class__.load(self.output_prefix)
self.n_shards = temp.n_shards
self.n_docs = temp.n_docs
self.offsets = temp.offsets
if temp.dim != self.dim:
if self.dim is None:
logger.info('Loaded dataset dimension: %d', temp.dim)
else:
logger.warning(
"Loaded dataset dimension differs from init arg dimension, "
"using loaded dim. (loaded %d, init %d)",
temp.dim, self.dim
)
self.dim = temp.dim # To be consistent with the loaded data!
def save_shard(self, shard, n=None, filename=None):
"""
Pickle the given shard. If `n` is not given, will consider the shard
a new one.
If `filename` is given, will use that file name instead of generating
one.
"""
new_shard = False
if n is None:
n = self.n_shards # Saving the *next* one by default.
new_shard = True
if not filename:
filename = self._shard_name(n)
gensim.utils.pickle(shard, filename)
if new_shard:
self.offsets.append(self.offsets[-1] + shard.shape[0])
self.n_docs += shard.shape[0]
self.n_shards += 1
def load_shard(self, n):
"""
Load (unpickle) the n-th shard as the "live" part of the dataset
into the Dataset object."""
# No-op if the shard is already open.
if self.current_shard_n == n:
return
filename = self._shard_name(n)
if not os.path.isfile(filename):
raise ValueError('Attempting to load nonexistent shard no. {0}'.format(n))
shard = gensim.utils.unpickle(filename)
self.current_shard = shard
self.current_shard_n = n
self.current_offset = self.offsets[n]
def reset(self):
"""
Reset to no shard at all. Used for saving.
"""
self.current_shard = None
self.current_shard_n = None
self.current_offset = None
def shard_by_offset(self, offset):
"""
Determine which shard the given offset belongs to. If the offset
is greater than the number of available documents, raises a
`ValueError`.
Assumes that all shards have the same size.
"""
k = int(offset / self.shardsize)
if offset >= self.n_docs:
raise ValueError('Too high offset specified ({0}), available '
'docs: {1}'.format(offset, self.n_docs))
if offset < 0:
raise ValueError('Negative offset {0} currently not'
' supported.'.format(offset))
return k
def in_current(self, offset):
"""
Determine whether the given offset falls within the current shard.
"""
return (self.current_offset <= offset) and (offset < self.offsets[self.current_shard_n + 1])
def in_next(self, offset):
"""
Determine whether the given offset falls within the next shard.
This is a very small speedup: typically, we will be iterating through
the data forward. Could save considerable time with a very large number
of smaller shards.
"""
if self.current_shard_n == self.n_shards:
return False # There's no next shard.
return (self.offsets[self.current_shard_n + 1] <= offset) and (offset < self.offsets[self.current_shard_n + 2])
def resize_shards(self, shardsize):
"""
Re-process the dataset to new shard size. This may take pretty long.
Also, note that you need some space on disk for this one (we're
assuming there is enough disk space for double the size of the dataset
and that there is enough memory for old + new shardsize).
:type shardsize: int
:param shardsize: The new shard size.
"""
# Determine how many new shards there will be
n_new_shards = int(math.floor(self.n_docs / float(shardsize)))
if self.n_docs % shardsize != 0:
n_new_shards += 1
new_shard_names = []
new_offsets = [0]
for new_shard_idx in xrange(n_new_shards):
new_start = shardsize * new_shard_idx
new_stop = new_start + shardsize
# Last shard?
if new_stop > self.n_docs:
# Sanity check
assert new_shard_idx == n_new_shards - 1, \
'Shard no. {0} that ends at {1} over last document' \
' ({2}) is not the last projected shard ({3})???' \
''.format(new_shard_idx, new_stop, self.n_docs, n_new_shards)
new_stop = self.n_docs
new_shard = self[new_start:new_stop]
new_shard_name = self._resized_shard_name(new_shard_idx)
new_shard_names.append(new_shard_name)
try:
self.save_shard(new_shard, new_shard_idx, new_shard_name)
except Exception:
# Clean up on unsuccessful resize.
for new_shard_name in new_shard_names:
os.remove(new_shard_name)
raise
new_offsets.append(new_stop)
# Move old shard files out, new ones in. Complicated due to possibility
# of exceptions.
old_shard_names = [self._shard_name(n) for n in xrange(self.n_shards)]
try:
for old_shard_n, old_shard_name in enumerate(old_shard_names):
os.remove(old_shard_name)
except Exception as e:
logger.error(
'Exception occurred during old shard no. %d removal: %s.\nAttempting to at least move new shards in.',
old_shard_n, str(e)
)
finally:
# If something happens with cleaning up - try to at least get the
# new guys in.
try:
for shard_n, new_shard_name in enumerate(new_shard_names):
os.rename(new_shard_name, self._shard_name(shard_n))
# If something happens when we're in this stage, we're screwed.
except Exception as e:
logger.exception(e)
raise RuntimeError('Resizing completely failed for some reason. Sorry, dataset is probably ruined...')
finally:
# Sets the new shard stats.
self.n_shards = n_new_shards
self.offsets = new_offsets
self.shardsize = shardsize
self.reset()
def _shard_name(self, n):
"""Generate the name for the n-th shard."""
return self.output_prefix + '.' + str(n)
def _resized_shard_name(self, n):
"""
Generate the name for the n-th new shard temporary file when
resizing dataset. The file will then be re-named to standard shard name.
"""
return self.output_prefix + '.resize-temp.' + str(n)
def _guess_n_features(self, corpus):
"""Attempt to guess number of features in `corpus`."""
n_features = None
if hasattr(corpus, 'dim'):
# print 'Guessing from \'dim\' attribute.'
n_features = corpus.dim
elif hasattr(corpus, 'dictionary'):
# print 'GUessing from dictionary.'
n_features = len(corpus.dictionary)
elif hasattr(corpus, 'n_out'):
# print 'Guessing from \'n_out\' attribute.'
n_features = corpus.n_out
elif hasattr(corpus, 'num_terms'):
# print 'Guessing from \'num_terms\' attribute.'
n_features = corpus.num_terms
elif isinstance(corpus, TransformedCorpus):
# TransformedCorpus: first check if the transformer object
# defines some output dimension; if it doesn't, relegate guessing
# to the corpus that is being transformed. This may easily fail!
try:
return self._guess_n_features(corpus.obj)
except TypeError:
return self._guess_n_features(corpus.corpus)
else:
if not self.dim:
raise TypeError(
"Couldn't find number of features, refusing to guess "
"(dimension set to {0}, type of corpus: {1})."
.format(self.dim, type(corpus))
)
else:
logger.warning("Couldn't find number of features, trusting supplied dimension (%d)", self.dim)
n_features = self.dim
if self.dim and n_features != self.dim:
logger.warning(
"Discovered inconsistent dataset dim (%d) and feature count from corpus (%d). "
"Coercing to dimension given by argument.",
self.dim, n_features
)
return n_features
    def __len__(self):
        """Return the total number of documents in the dataset."""
        return self.n_docs
def _ensure_shard(self, offset):
# No shard loaded
if self.current_shard is None:
shard_n = self.shard_by_offset(offset)
self.load_shard(shard_n)
# Find appropriate shard, if necessary
elif not self.in_current(offset):
if self.in_next(offset):
self.load_shard(self.current_shard_n + 1)
else:
shard_n = self.shard_by_offset(offset)
self.load_shard(shard_n)
def get_by_offset(self, offset):
"""As opposed to getitem, this one only accepts ints as offsets."""
self._ensure_shard(offset)
result = self.current_shard[offset - self.current_offset]
return result
def __getitem__(self, offset):
"""
Retrieve the given row of the dataset. Supports slice notation.
"""
if isinstance(offset, list):
# Handle all serialization & retrieval options.
if self.sparse_serialization:
l_result = sparse.vstack([self.get_by_offset(i)
for i in offset])
if self.gensim:
l_result = self._getitem_sparse2gensim(l_result)
elif not self.sparse_retrieval:
l_result = numpy.array(l_result.todense())
else:
l_result = numpy.array([self.get_by_offset(i) for i in offset])
if self.gensim:
l_result = self._getitem_dense2gensim(l_result)
elif self.sparse_retrieval:
l_result = sparse.csr_matrix(l_result)
return l_result
elif isinstance(offset, slice):
start = offset.start
stop = offset.stop
if stop > self.n_docs:
raise IndexError('Requested slice offset {0} out of range ({1} docs)'.format(stop, self.n_docs))
# - get range of shards over which to iterate
first_shard = self.shard_by_offset(start)
last_shard = self.n_shards - 1
if not stop == self.n_docs:
last_shard = self.shard_by_offset(stop)
# This fails on one-past
# slice indexing; that's why there's a code branch here.
self.load_shard(first_shard)
# The easy case: both in one shard.
if first_shard == last_shard:
s_result = self.current_shard[start - self.current_offset: stop - self.current_offset]
# Handle different sparsity settings:
s_result = self._getitem_format(s_result)
return s_result
# The hard case: the slice is distributed across multiple shards
# - initialize numpy.zeros()
s_result = numpy.zeros((stop - start, self.dim), dtype=self.current_shard.dtype)
if self.sparse_serialization:
s_result = sparse.csr_matrix((0, self.dim), dtype=self.current_shard.dtype)
# - gradually build it up. We will be using three set of start:stop
# indexes:
# - into the dataset (these are the indexes the caller works with)
# - into the current shard
# - into the result
# Indexes into current result rows. These are always smaller than
# the dataset indexes by `start` (as we move over the shards,
# we're moving by the same number of rows through the result).
result_start = 0
result_stop = self.offsets[self.current_shard_n + 1] - start
# Indexes into current shard. These are trickiest:
# - if in starting shard, these are from (start - current_offset)
# to self.shardsize
# - if in intermediate shard, these are from 0 to self.shardsize
# - if in ending shard, these are from 0
# to (stop - current_offset)
shard_start = start - self.current_offset
shard_stop = self.offsets[self.current_shard_n + 1] - self.current_offset
# s_result[result_start:result_stop] = self.current_shard[
# shard_start:shard_stop]
s_result = self.__add_to_slice(s_result, result_start, result_stop, shard_start, shard_stop)
# First and last get special treatment, these are in between
for shard_n in xrange(first_shard + 1, last_shard):
self.load_shard(shard_n)
result_start = result_stop
result_stop += self.shardsize
shard_start = 0
shard_stop = self.shardsize
s_result = self.__add_to_slice(s_result, result_start, result_stop, shard_start, shard_stop)
# Last shard
self.load_shard(last_shard)
result_start = result_stop
result_stop += stop - self.current_offset
shard_start = 0
shard_stop = stop - self.current_offset
s_result = self.__add_to_slice(s_result, result_start, result_stop, shard_start, shard_stop)
s_result = self._getitem_format(s_result)
return s_result
else:
s_result = self.get_by_offset(offset)
s_result = self._getitem_format(s_result)
return s_result
def __add_to_slice(self, s_result, result_start, result_stop, start, stop):
"""
Add the rows of the current shard from `start` to `stop`
into rows `result_start` to `result_stop` of `s_result`.
Operation is based on the self.sparse_serialize setting. If the shard
contents are dense, then s_result is assumed to be an ndarray that
already supports row indices `result_start:result_stop`. If the shard
contents are sparse, assumes that s_result has `result_start` rows
and we should add them up to `result_stop`.
Returns the resulting s_result.
"""
if (result_stop - result_start) != (stop - start):
raise ValueError(
'Result start/stop range different than stop/start range (%d - %d vs. %d - %d)'
.format(result_start, result_stop, start, stop)
)
# Dense data: just copy using numpy's slice notation
if not self.sparse_serialization:
s_result[result_start:result_stop] = self.current_shard[start:stop]
return s_result
# A bit more difficult, we're using a different structure to build the
# result.
else:
if s_result.shape != (result_start, self.dim):
raise ValueError(
'Assuption about sparse s_result shape invalid: {0} expected rows, {1} real rows.'
.format(result_start, s_result.shape[0])
)
tmp_matrix = self.current_shard[start:stop]
s_result = sparse.vstack([s_result, tmp_matrix])
return s_result
def _getitem_format(self, s_result):
if self.sparse_serialization:
if self.gensim:
s_result = self._getitem_sparse2gensim(s_result)
elif not self.sparse_retrieval:
s_result = numpy.array(s_result.todense())
else:
if self.gensim:
s_result = self._getitem_dense2gensim(s_result)
elif self.sparse_retrieval:
s_result = sparse.csr_matrix(s_result)
return s_result
def _getitem_sparse2gensim(self, result):
"""
Change given sparse result matrix to gensim sparse vectors.
Uses the internals of the sparse matrix to make this fast.
"""
def row_sparse2gensim(row_idx, csr_matrix):
indices = csr_matrix.indices[csr_matrix.indptr[row_idx]:csr_matrix.indptr[row_idx + 1]]
g_row = [(col_idx, csr_matrix[row_idx, col_idx]) for col_idx in indices]
return g_row
output = (row_sparse2gensim(i, result) for i in xrange(result.shape[0]))
return output
def _getitem_dense2gensim(self, result):
"""Change given dense result matrix to gensim sparse vectors."""
if len(result.shape) == 1:
output = gensim.matutils.full2sparse(result)
else:
output = (gensim.matutils.full2sparse(result[i])
for i in xrange(result.shape[0]))
return output
# Overriding the IndexedCorpus and other corpus superclass methods
def __iter__(self):
"""
Yield dataset items one by one (generator).
"""
for i in xrange(len(self)):
yield self[i]
def save(self, *args, **kwargs):
"""
Save itself (the wrapper) in clean state (after calling `reset()`)
to the output_prefix file. If you wish to save to a different file,
use the `fname` argument as the first positional arg.
"""
# Can we save to a different file than output_prefix? Well, why not?
if len(args) == 0:
args = tuple([self.output_prefix])
attrs_to_ignore = ['current_shard', 'current_shard_n', 'current_offset']
if 'ignore' not in kwargs:
kwargs['ignore'] = frozenset(attrs_to_ignore)
else:
kwargs['ignore'] = frozenset([v for v in kwargs['ignore']] + attrs_to_ignore)
super(ShardedCorpus, self).save(*args, **kwargs)
    @classmethod
    def load(cls, fname, mmap=None):
        """
        Load itself in clean state. `mmap` has no effect here.
        """
        # Live-shard attributes were excluded on save(), so the loaded
        # object starts with no shard loaded.
        return super(ShardedCorpus, cls).load(fname, mmap)
@staticmethod
def save_corpus(fname, corpus, id2word=None, progress_cnt=1000, metadata=False, **kwargs):
"""
Implement a serialization interface. Do not call directly;
use the `serialize` method instead.
Note that you might need some ShardedCorpus init parameters, most
likely the dimension (`dim`). Again, pass these as `kwargs` to the
`serialize` method.
All this thing does is initialize a ShardedCorpus from a corpus
with the `output_prefix` argument set to the `fname` parameter
of this method. The initialization of a ShardedCorpus takes care of
serializing the data (in dense form) to shards.
Ignore the parameters id2word, progress_cnt and metadata. They
currently do nothing and are here only to provide a compatible
method signature with superclass.
"""
ShardedCorpus(fname, corpus, **kwargs)
@classmethod
def serialize(serializer, fname, corpus, id2word=None, index_fname=None, progress_cnt=None,
labels=None, metadata=False, **kwargs):
"""
Iterate through the document stream `corpus`, saving the documents
as a ShardedCorpus to `fname`.
Use this method instead of calling `save_corpus` directly.
You may need to supply some kwargs that are used upon dataset creation
(namely: `dim`, unless the dataset can infer the dimension from the
given corpus).
Ignore the parameters id2word, index_fname, progress_cnt, labels
and metadata. They currently do nothing and are here only to
provide a compatible method signature with superclass."""
serializer.save_corpus(fname, corpus, id2word=id2word, progress_cnt=progress_cnt, metadata=metadata, **kwargs)
| 33,195 | 39.831488 | 119 | py |
poincare_glove | poincare_glove-master/gensim/corpora/csvcorpus.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Zygmunt Zając <zygmunt@fastml.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Corpus in CSV format."""
from __future__ import with_statement
import logging
import csv
import itertools
from gensim import interfaces, utils
logger = logging.getLogger(__name__)
class CsvCorpus(interfaces.CorpusABC):
    """Corpus backed by a CSV file; each row is one document in BoW format.
    Notes
    -----
    The CSV dialect (delimiter, headers, ...) is sniffed automatically from
    the file content. Every cell is expected to hold an int/float value.
    """
    def __init__(self, fname, labels):
        """
        Parameters
        ----------
        fname : str
            Path to corpus.
        labels : bool
            If True - ignore first column (class labels).
        """
        logger.info("loading corpus from %s", fname)
        self.fname = fname
        self.labels = labels
        self.length = None  # number of rows; known only after a full pass
        # Sniff the CSV dialect from the first few lines of the file.
        head = ''.join(itertools.islice(utils.smart_open(self.fname), 5))
        self.headers = csv.Sniffer().has_header(head)
        self.dialect = csv.Sniffer().sniff(head)
        logger.info("sniffed CSV delimiter=%r, headers=%s", self.dialect.delimiter, self.headers)
    def __iter__(self):
        """Iterate over the corpus, yielding one BoW vector
        (list of (int, float)) per CSV row."""
        reader = csv.reader(utils.smart_open(self.fname), self.dialect)
        if self.headers:
            next(reader)  # skip the headers
        row_no = -1
        for row_no, row in enumerate(reader):
            if self.labels:
                row.pop(0)  # ignore the first column = class label
            yield list(enumerate(float(cell) for cell in row))
        # Remember the total number of CSV rows = documents.
        self.length = row_no + 1
| 1,973 | 26.041096 | 97 | py |
poincare_glove | poincare_glove-master/gensim/corpora/mmcorpus.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Corpus in the Matrix Market format."""
import logging
from gensim import matutils
from gensim.corpora import IndexedCorpus
logger = logging.getLogger(__name__)
class MmCorpus(matutils.MmReader, IndexedCorpus):
    """Corpus serialized in the Matrix Market coordinate format.

    Wraps a term-document matrix stored on disk in Matrix Market format and
    exposes it as an iterable of sparse document vectors, with O(1) random
    access courtesy of :class:`~gensim.corpora.indexedcorpus.IndexedCorpus`.

    Attributes
    ----------
    num_docs : int
        Number of documents declared in the Matrix Market header.
    num_terms : int
        Number of terms (vocabulary size).
    num_nnz : int
        Number of non-zero matrix entries.

    Notes
    -----
    The file is parsed lazily, one document at a time — not loaded wholesale
    like :meth:`scipy.io.mmread` would. Corpora larger than available RAM can
    therefore be streamed.

    Example
    -------
    >>> from gensim.corpora.mmcorpus import MmCorpus
    >>> from gensim.test.utils import datapath
    >>>
    >>> corpus = MmCorpus(datapath('test_mmcorpus_with_index.mm'))
    >>> for document in corpus:
    ...     pass

    """
    def __init__(self, fname):
        """
        Parameters
        ----------
        fname : {str, file-like object}
            Path to a file in MM format, or a file-like object supporting `seek()`
            (e.g. :class:`gzip.GzipFile`, :class:`bz2.BZ2File`).

        """
        # Initialize both bases explicitly and in this order; cooperative
        # super() across these two unrelated hierarchies would be confusing.
        IndexedCorpus.__init__(self, fname)
        matutils.MmReader.__init__(self, fname)

    def __iter__(self):
        """Stream documents from the underlying file.

        Yields
        ------
        list of (int, str)
            One document in BoW format per row of the matrix. As many documents
            are yielded as the header declares; documents without any stored
            entries are yielded as empty lists.

        """
        for _docno, document in super(MmCorpus, self).__iter__():
            # drop the document id — callers only see the sparse vector
            yield document

    @staticmethod
    def save_corpus(fname, corpus, id2word=None, progress_cnt=1000, metadata=False):
        """Write `corpus` to disk in Matrix Market format.

        Parameters
        ----------
        fname : str
            Path to output file.
        corpus : iterable of list of (int, number)
            Corpus in BoW format.
        id2word : dict of (int, str), optional
            Mapping word_id -> word; only its length (vocabulary size) is used.
        progress_cnt : int, optional
            Log progress every `progress_cnt` documents.
        metadata : bool, optional
            If True, write out additional metadata alongside the matrix.

        Notes
        -----
        Called automatically by `MmCorpus.serialize`; prefer calling
        `serialize` over invoking this directly.

        """
        logger.info("storing corpus in Matrix Market format to %s", fname)
        if id2word is None:
            num_terms = None
        else:
            num_terms = len(id2word)
        return matutils.MmWriter.write_corpus(
            fname, corpus, num_terms=num_terms, index=True, progress_cnt=progress_cnt, metadata=metadata
        )
| 3,801 | 29.910569 | 113 | py |
poincare_glove | poincare_glove-master/gensim/corpora/malletcorpus.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Corpus in `Mallet format <http://mallet.cs.umass.edu/import.php>`_."""
from __future__ import with_statement
import logging
from gensim import utils
from gensim.corpora import LowCorpus
logger = logging.getLogger(__name__)
class MalletCorpus(LowCorpus):
    """Corpus handles input in `Mallet format <http://mallet.cs.umass.edu/import.php>`_.

    **Format description**

    One file, one instance per line, assume the data is in the following format ::

        [URL] [language] [text of the page...]

    Or, more generally, ::

        [document #1 id] [label] [text of the document...]
        [document #2 id] [label] [text of the document...]
        ...
        [document #N id] [label] [text of the document...]

    Note that language/label is *not* considered in Gensim, `__unknown__` is used as the default value.

    Examples
    --------
    >>> from gensim.test.utils import datapath, get_tmpfile, common_texts
    >>> from gensim.corpora import MalletCorpus
    >>> from gensim.corpora import Dictionary
    >>>
    >>> # Prepare needed data
    >>> dictionary = Dictionary(common_texts)
    >>> corpus = [dictionary.doc2bow(doc) for doc in common_texts]
    >>>
    >>> # Write corpus in Mallet format to disk
    >>> output_fname = get_tmpfile("corpus.mallet")
    >>> MalletCorpus.serialize(output_fname, corpus, dictionary)
    >>>
    >>> # Read corpus
    >>> loaded_corpus = MalletCorpus(output_fname)

    """
    def __init__(self, fname, id2word=None, metadata=False):
        """
        Parameters
        ----------
        fname : str
            Path to file in Mallet format.
        id2word : {dict of (int, str), :class:`~gensim.corpora.dictionary.Dictionary`}, optional
            Mapping between word_ids (integers) and words (strings).
            If not provided, the mapping is constructed directly from `fname`.
        metadata : bool, optional
            If True, return additional information ("document id" and "lang" when you call
            :meth:`~gensim.corpora.malletcorpus.MalletCorpus.line2doc`,
            :meth:`~gensim.corpora.malletcorpus.MalletCorpus.__iter__` or
            :meth:`~gensim.corpora.malletcorpus.MalletCorpus.docbyoffset`

        """
        # set metadata *before* LowCorpus.__init__, which may already iterate
        # the file (and thus call line2doc) to build the id2word mapping
        self.metadata = metadata
        LowCorpus.__init__(self, fname, id2word)
    def _calculate_num_docs(self):
        """Get number of documents.

        Returns
        -------
        int
            Number of documents in file (one document per line).

        """
        with utils.smart_open(self.fname) as fin:
            result = sum(1 for _ in fin)
        return result
    def __iter__(self):
        """Iterate over the corpus.

        Yields
        ------
        list of (int, int)
            Document in BoW format (+"document_id" and "lang" if metadata=True).

        """
        with utils.smart_open(self.fname) as f:
            for line in f:
                yield self.line2doc(line)
    def line2doc(self, line):
        """Convert a line in Mallet format into a document in BoW format.

        Parameters
        ----------
        line : str
            Line from input file, expected shape: "<id> <lang> <word> <word> ...".

        Returns
        -------
        list of (int, int)
            Document in BoW format (+"document_id" and "lang" if metadata=True).

        Examples
        --------
        >>> from gensim.test.utils import datapath
        >>> from gensim.corpora import MalletCorpus
        >>>
        >>> corpus = MalletCorpus(datapath("testcorpus.mallet"))
        >>> corpus.line2doc("en computer human interface")
        [(3, 1), (4, 1)]

        """
        # split on single spaces, dropping empty tokens from repeated spaces
        splited_line = [word for word in utils.to_unicode(line).strip().split(' ') if word]
        # first two fields are the document id and language/label; the rest is text
        docid, doclang, words = splited_line[0], splited_line[1], splited_line[2:]
        # delegate the actual text -> BoW conversion to LowCorpus
        doc = super(MalletCorpus, self).line2doc(' '.join(words))
        if self.metadata:
            return doc, (docid, doclang)
        else:
            return doc
    @staticmethod
    def save_corpus(fname, corpus, id2word=None, metadata=False):
        """Save a corpus in the Mallet format.

        Warnings
        --------
        This function is automatically called by :meth:`gensim.corpora.malletcorpus.MalletCorpus.serialize`,
        don't call it directly, call :meth:`gensim.corpora.lowcorpus.malletcorpus.MalletCorpus.serialize` instead.

        Parameters
        ----------
        fname : str
            Path to output file.
        corpus : iterable of iterable of (int, int)
            Corpus in BoW format.
        id2word : {dict of (int, str), :class:`~gensim.corpora.dictionary.Dictionary`}, optional
            Mapping between word_ids (integers) and words (strings).
            If not provided, the mapping is constructed directly from `corpus`.
        metadata : bool, optional
            If True, each corpus item is expected to be a `(doc, (doc_id, doc_lang))`
            tuple and the given id/lang are written out; otherwise ids are generated
            by enumeration and the language defaults to '__unknown__'.

        Return
        ------
        list of int
            List of offsets in resulting file for each document (in bytes),
            can be used for :meth:`~gensim.corpora.malletcorpus.Malletcorpus.docbyoffset`.

        Notes
        -----
        The document id will be generated by enumerating the corpus.
        That is, it will range between 0 and number of documents in the corpus.

        Since Mallet has a language field in the format, this defaults to the string '__unknown__'.
        If the language needs to be saved, post-processing will be required.

        """
        if id2word is None:
            logger.info("no word id mapping provided; initializing from corpus")
            id2word = utils.dict_from_corpus(corpus)
        logger.info("storing corpus in Mallet format into %s", fname)
        truncated = 0
        offsets = []
        with utils.smart_open(fname, 'wb') as fout:
            for doc_id, doc in enumerate(corpus):
                if metadata:
                    doc_id, doc_lang = doc[1]
                    doc = doc[0]
                else:
                    doc_lang = '__unknown__'
                words = []
                for wordid, value in doc:
                    # Mallet stores each word repeated `count` times; non-integer
                    # weights (beyond a 1e-6 tolerance) are truncated and counted
                    if abs(int(value) - value) > 1e-6:
                        truncated += 1
                    words.extend([utils.to_unicode(id2word[wordid])] * int(value))
                # record the byte offset of this document for later random access
                offsets.append(fout.tell())
                fout.write(utils.to_utf8('%s %s %s\n' % (doc_id, doc_lang, ' '.join(words))))
        if truncated:
            logger.warning(
                "Mallet format can only save vectors with integer elements; "
                "%i float entries were truncated to integer value", truncated
            )
        return offsets
    def docbyoffset(self, offset):
        """Get the document stored in file by `offset` position.

        Parameters
        ----------
        offset : int
            Offset (in bytes) to begin of document.

        Returns
        -------
        list of (int, int)
            Document in BoW format (+"document_id" and "lang" if metadata=True).

        Examples
        --------
        >>> from gensim.test.utils import datapath
        >>> from gensim.corpora import MalletCorpus
        >>>
        >>> data = MalletCorpus(datapath("testcorpus.mallet"))
        >>> data.docbyoffset(1)  # end of first line
        [(3, 1), (4, 1)]
        >>> data.docbyoffset(4)  # start of second line
        [(4, 1)]

        """
        with utils.smart_open(self.fname) as f:
            f.seek(offset)
            return self.line2doc(f.readline())
| 7,510 | 31.656522 | 114 | py |
poincare_glove | poincare_glove-master/gensim/corpora/textcorpus.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Module provides some code scaffolding to simplify use of built dictionary for constructing BoW vectors.
Notes
-----
Text corpora usually reside on disk, as text files in one format or another. In a common scenario,
we need to build a dictionary (a `word->integer id` mapping), which is then used to construct sparse bag-of-word vectors
(= iterable of `(word_id, word_weight)`).
This module provides some code scaffolding to simplify this pipeline. For example, given a corpus where each document
is a separate line in file on disk, you would override the :meth:`gensim.corpora.textcorpus.TextCorpus.get_texts`
to read one line=document at a time, process it (lowercase, tokenize, whatever) and yield it as a sequence of words.
Overriding :meth:`gensim.corpora.textcorpus.TextCorpus.get_texts` is enough, you can then initialize the corpus
with e.g. `MyTextCorpus("mycorpus.txt.bz2")` and it will behave correctly like a corpus of sparse vectors.
The :meth:`~gensim.corpora.textcorpus.TextCorpus.__iter__` method is automatically set up,
and dictionary is automatically populated with all `word->id` mappings.
The resulting object can be used as input to some of gensim models (:class:`~gensim.models.tfidfmodel.TfidfModel`,
:class:`~gensim.models.lsimodel.LsiModel`, :class:`~gensim.models.ldamodel.LdaModel`, ...), serialized with any format
(`Matrix Market <http://math.nist.gov/MatrixMarket/formats.html>`_,
`SvmLight <http://svmlight.joachims.org/>`_, `Blei's LDA-C format <https://github.com/blei-lab/lda-c>`_, etc).
See Also
--------
:class:`gensim.test.test_miislita.CorpusMiislita`
Good simple example.
"""
from __future__ import with_statement
import logging
import os
import random
import re
import sys
from gensim import interfaces, utils
from gensim.corpora.dictionary import Dictionary
from gensim.parsing.preprocessing import STOPWORDS, RE_WHITESPACE
from gensim.utils import deaccent, simple_tokenize
logger = logging.getLogger(__name__)
def remove_stopwords(tokens, stopwords=STOPWORDS):
    """Filter out every token that occurs in `stopwords`.

    Parameters
    ----------
    tokens : iterable of str
        Sequence of tokens.
    stopwords : iterable of str, optional
        Collection of words to drop; defaults to
        `gensim.parsing.preprocessing.STOPWORDS`.

    Returns
    -------
    list of str
        The tokens of `tokens` that are not stopwords, original order preserved.

    """
    return [tok for tok in tokens if tok not in stopwords]
def remove_short(tokens, minsize=3):
    """Filter out tokens whose length is below `minsize` characters.

    Parameters
    ----------
    tokens : iterable of str
        Sequence of tokens.
    minsize : int, optional
        Smallest token length (inclusive) that is kept.

    Returns
    -------
    list of str
        All tokens of `tokens` that are at least `minsize` characters long,
        original order preserved.

    """
    return [tok for tok in tokens if len(tok) >= minsize]
def lower_to_unicode(text, encoding='utf8', errors='strict'):
    """Lowercase `text` and decode it to unicode via :func:`gensim.utils.to_unicode`.

    Parameters
    ----------
    text : str
        Input text.
    encoding : str, optional
        Character encoding assumed when decoding.
    errors : str, optional
        Decoding error policy, forwarded to the decoder
        (`unicode` on python2).

    Returns
    -------
    str
        Lowercased unicode version of `text`.

    See Also
    --------
    :func:`gensim.utils.any2unicode`
        Convert any string to unicode-string.

    """
    lowered = text.lower()
    return utils.to_unicode(lowered, encoding, errors)
def strip_multiple_whitespaces(s):
    """Replace every run of whitespace characters in `s` with a single space.

    Parameters
    ----------
    s : str
        Input string.

    Returns
    -------
    str
        Copy of `s` with each whitespace run collapsed to one space
        (uses the module-level `RE_WHITESPACE` pattern).

    """
    return RE_WHITESPACE.sub(" ", s)
class TextCorpus(interfaces.CorpusABC):
    """Helper class to simplify the pipeline of getting BoW vectors from plain text.

    Notes
    -----
    This is an abstract base class: override the :meth:`~gensim.corpora.textcorpus.TextCorpus.get_texts` and
    :meth:`~gensim.corpora.textcorpus.TextCorpus.__len__` methods to match your particular input.

    Given a filename (or a file-like object) in constructor, the corpus object will be automatically initialized
    with a dictionary in `self.dictionary` and will support the :meth:`~gensim.corpora.textcorpus.TextCorpus.__iter__`
    corpus method. You have a few different ways of utilizing this class via subclassing or by construction with
    different preprocessing arguments.

    The :meth:`~gensim.corpora.textcorpus.TextCorpus.__iter__` method converts the lists of tokens produced by
    :meth:`~gensim.corpora.textcorpus.TextCorpus.get_texts` to BoW format using
    :meth:`gensim.corpora.dictionary.Dictionary.doc2bow`.

    :meth:`~gensim.corpora.textcorpus.TextCorpus.get_texts` does the following:

    #. Calls :meth:`~gensim.corpora.textcorpus.TextCorpus.getstream` to get a generator over the texts.
       It yields each document in turn from the underlying text file or files.
    #. For each document from the stream, calls :meth:`~gensim.corpora.textcorpus.TextCorpus.preprocess_text` to produce
       a list of tokens. If metadata=True, it yields a 2-`tuple` with the document number as the second element.

    Preprocessing consists of 0+ `character_filters`, a `tokenizer`, and 0+ `token_filters`.
    The preprocessing consists of calling each filter in `character_filters` with the document text.
    Unicode is not guaranteed, and if desired, the first filter should convert to unicode.
    The output of each character filter should be another string. The output from the final filter is fed
    to the `tokenizer`, which should split the string into a list of tokens (strings).
    Afterwards, the list of tokens is fed through each filter in `token_filters`. The final output returned from
    :meth:`~gensim.corpora.textcorpus.TextCorpus.preprocess_text` is the output from the final token filter.

    So to use this class, you can either pass in different preprocessing functions using the
    `character_filters`, `tokenizer`, and `token_filters` arguments, or you can subclass it.

    If subclassing: override :meth:`~gensim.corpora.textcorpus.TextCorpus.getstream` to take text from different input
    sources in different formats.
    Override :meth:`~gensim.corpora.textcorpus.TextCorpus.preprocess_text` if you must provide different initial
    preprocessing, then call the :meth:`~gensim.corpora.textcorpus.TextCorpus.preprocess_text` method to apply
    the normal preprocessing.
    You can also override :meth:`~gensim.corpora.textcorpus.TextCorpus.get_texts` in order to tag the documents
    (token lists) with different metadata.

    The default preprocessing consists of:

    #. :func:`~gensim.corpora.textcorpus.lower_to_unicode` - lowercase and convert to unicode (assumes utf8 encoding)
    #. :func:`~gensim.utils.deaccent`- deaccent (asciifolding)
    #. :func:`~gensim.corpora.textcorpus.strip_multiple_whitespaces` - collapse multiple whitespaces into a single one
    #. :func:`~gensim.utils.simple_tokenize` - tokenize by splitting on whitespace
    #. :func:`~gensim.corpora.textcorpus.remove_short` - remove words less than 3 characters long
    #. :func:`~gensim.corpora.textcorpus.remove_stopwords` - remove stopwords

    """
    def __init__(self, input=None, dictionary=None, metadata=False, character_filters=None,
                 tokenizer=None, token_filters=None):
        """
        Parameters
        ----------
        input : str, optional
            Path to top-level directory (file) to traverse for corpus documents.
        dictionary : :class:`~gensim.corpora.dictionary.Dictionary`, optional
            If a dictionary is provided, it will not be updated with the given corpus on initialization.
            If None - new dictionary will be built for the given corpus.
            If `input` is None, the dictionary will remain uninitialized.
        metadata : bool, optional
            If True - yield metadata with each document.
        character_filters : iterable of callable, optional
            Each will be applied to the text of each document in order, and should return a single string with
            the modified text. For Python 2, the original text will not be unicode, so it may be useful to
            convert to unicode as the first character filter.
            If None - using :func:`~gensim.corpora.textcorpus.lower_to_unicode`,
            :func:`~gensim.utils.deaccent` and :func:`~gensim.corpora.textcorpus.strip_multiple_whitespaces`.
        tokenizer : callable, optional
            Tokenizer for document, if None - using :func:`~gensim.utils.simple_tokenize`.
        token_filters : iterable of callable, optional
            Each will be applied to the iterable of tokens in order, and should return another iterable of tokens.
            These filters can add, remove, or replace tokens, or do nothing at all.
            If None - using :func:`~gensim.corpora.textcorpus.remove_short` and
            :func:`~gensim.corpora.textcorpus.remove_stopwords`.

        Examples
        --------
        >>> from gensim.corpora.textcorpus import TextCorpus
        >>> from gensim.test.utils import datapath
        >>> from gensim import utils
        >>>
        >>>
        >>> class CorpusMiislita(TextCorpus):
        ...     stopwords = set('for a of the and to in on'.split())
        ...
        ...     def get_texts(self):
        ...         for doc in self.getstream():
        ...             yield [word for word in utils.to_unicode(doc).lower().split() if word not in self.stopwords]
        ...
        ...     def __len__(self):
        ...         self.length = sum(1 for _ in self.get_texts())
        ...         return self.length
        >>>
        >>> corpus = CorpusMiislita(datapath('head500.noblanks.cor.bz2'))
        >>> len(corpus)
        250
        >>> document = next(iter(corpus.get_texts()))

        """
        self.input = input
        self.metadata = metadata
        # fall back to the documented default pipeline when no filters given
        self.character_filters = character_filters
        if self.character_filters is None:
            self.character_filters = [lower_to_unicode, deaccent, strip_multiple_whitespaces]
        self.tokenizer = tokenizer
        if self.tokenizer is None:
            self.tokenizer = simple_tokenize
        self.token_filters = token_filters
        if self.token_filters is None:
            self.token_filters = [remove_short, remove_stopwords]
        self.length = None
        self.dictionary = None
        # may trigger a full pass over the input (see init_dictionary)
        self.init_dictionary(dictionary)
    def init_dictionary(self, dictionary):
        """Initialize/update dictionary.

        Parameters
        ----------
        dictionary : :class:`~gensim.corpora.dictionary.Dictionary`, optional
            If a dictionary is provided, it will not be updated with the given corpus on initialization.
            If None - new dictionary will be built for the given corpus.

        Notes
        -----
        If self.input is None - do nothing (beyond storing an empty dictionary).

        """
        self.dictionary = dictionary if dictionary is not None else Dictionary()
        if self.input is not None:
            if dictionary is None:
                logger.info("Initializing dictionary")
                # temporarily disable metadata so get_texts() yields plain
                # token lists that add_documents() can consume, then restore it
                metadata_setting = self.metadata
                self.metadata = False
                self.dictionary.add_documents(self.get_texts())
                self.metadata = metadata_setting
            else:
                logger.info("Input stream provided but dictionary already initialized")
        else:
            logger.warning("No input document stream provided; assuming dictionary will be initialized some other way.")
    def __iter__(self):
        """Iterate over the corpus.

        Yields
        ------
        list of (int, int)
            Document in BoW format (+ metadata if self.metadata).

        """
        if self.metadata:
            for text, metadata in self.get_texts():
                yield self.dictionary.doc2bow(text, allow_update=False), metadata
        else:
            for text in self.get_texts():
                yield self.dictionary.doc2bow(text, allow_update=False)
    def getstream(self):
        """Generate documents from the underlying plain text collection (of one or more files).

        Yields
        ------
        str
            Document read from plain-text file.

        Notes
        -----
        After generator end - initialize self.length attribute (side effect:
        the corpus size is cached only once the stream has been exhausted).

        """
        num_texts = 0
        with utils.file_or_filename(self.input) as f:
            for line in f:
                yield line
                num_texts += 1
        self.length = num_texts
    def preprocess_text(self, text):
        """Apply `self.character_filters`, `self.tokenizer`, `self.token_filters` to a single text document.

        Parameters
        ---------
        text : str
            Document read from plain-text file.

        Return
        ------
        list of str
            List of tokens extracted from `text`.

        """
        # characters first, then tokenize, then token-level filters
        for character_filter in self.character_filters:
            text = character_filter(text)
        tokens = self.tokenizer(text)
        for token_filter in self.token_filters:
            tokens = token_filter(tokens)
        return tokens
    def step_through_preprocess(self, text):
        """Apply preprocessor one by one and generate result.

        Warnings
        --------
        This is useful for debugging issues with the corpus preprocessing pipeline.

        Parameters
        ----------
        text : str
            Document text read from plain-text file.

        Yields
        ------
        (callable, object)
            Pre-processor, output from pre-processor (based on `text`)

        """
        for character_filter in self.character_filters:
            text = character_filter(text)
            yield (character_filter, text)
        tokens = self.tokenizer(text)
        yield (self.tokenizer, tokens)
        for token_filter in self.token_filters:
            # note: token filters are applied to the *same* `tokens` here,
            # i.e. each filter's output is yielded but not chained onward
            yield (token_filter, token_filter(tokens))
    def get_texts(self):
        """Generate documents from corpus.

        Yields
        ------
        list of str
            Document as sequence of tokens (+ lineno if self.metadata)

        """
        lines = self.getstream()
        if self.metadata:
            for lineno, line in enumerate(lines):
                yield self.preprocess_text(line), (lineno,)
        else:
            for line in lines:
                yield self.preprocess_text(line)
    def sample_texts(self, n, seed=None, length=None):
        """Generate `n` random documents from the corpus without replacement.

        Parameters
        ----------
        n : int
            Number of documents we want to sample.
        seed : int, optional
            If specified, use it as a seed for local random generator.
        length : int, optional
            Value will be used as corpus length (because calculating the length of the corpus can be costly).
            If not specified - will call `__length__`.

        Raises
        ------
        ValueError
            If `n` less than zero or greater than corpus size.

        Notes
        -----
        Given the number of remaining documents in a corpus, we need to choose n elements.
        The probability for the current element to be chosen is `n` / remaining. If we choose it, we just decrease
        the `n` and move to the next element.

        Yields
        ------
        list of str
            Sampled document as sequence of tokens.

        """
        random_generator = random if seed is None else random.Random(seed)
        if length is None:
            length = len(self)
        if not n <= length:
            raise ValueError("n {0:d} is larger/equal than length of corpus {1:d}.".format(n, length))
        if not 0 <= n:
            raise ValueError("Negative sample size n {0:d}.".format(n))
        i = 0
        for i, sample in enumerate(self.getstream()):
            if i == length:
                break
            # selection probability = n / remaining (sequential sampling)
            remaining_in_corpus = length - i
            chance = random_generator.randint(1, remaining_in_corpus)
            if chance <= n:
                n -= 1
                if self.metadata:
                    # NOTE(review): getstream() yields raw lines (str), so `sample[0]` /
                    # `sample[1]` index *characters* here rather than (text, metadata)
                    # pairs — confirm whether subclasses are expected to override
                    # getstream() to yield such pairs when metadata=True.
                    yield self.preprocess_text(sample[0]), sample[1]
                else:
                    yield self.preprocess_text(sample)
        if n != 0:
            # This means that length was set to be greater than number of items in corpus
            # and we were not able to sample enough documents before the stream ended.
            raise ValueError("length {0:d} greater than number of documents in corpus {1:d}".format(length, i + 1))
    def __len__(self):
        """Get length of corpus

        Warnings
        --------
        If self.length is None - will read all corpus for calculate this attribute through
        :meth:`~gensim.corpora.textcorpus.TextCorpus.getstream`.

        Returns
        -------
        int
            Length of corpus.

        """
        if self.length is None:
            # cache the corpus length
            self.length = sum(1 for _ in self.getstream())
        return self.length
class TextDirectoryCorpus(TextCorpus):
    """Read documents recursively from a directory.

    Each file/line (depends on `lines_are_documents`) is interpreted as a plain text document.

    """
    def __init__(self, input, dictionary=None, metadata=False, min_depth=0, max_depth=None,
                 pattern=None, exclude_pattern=None, lines_are_documents=False, **kwargs):
        """
        Parameters
        ----------
        input : str
            Path to input file/folder.
        dictionary : :class:`~gensim.corpora.dictionary.Dictionary`, optional
            If a dictionary is provided, it will not be updated with the given corpus on initialization.
            If None - new dictionary will be built for the given corpus.
            If `input` is None, the dictionary will remain uninitialized.
        metadata : bool, optional
            If True - yield metadata with each document.
        min_depth : int, optional
            Minimum depth in directory tree at which to begin searching for files.
        max_depth : int, optional
            Max depth in directory tree at which files will no longer be considered.
            If None - not limited.
        pattern : str, optional
            Regex to use for file name inclusion, all those files *not* matching this pattern will be ignored.
        exclude_pattern : str, optional
            Regex to use for file name exclusion, all files matching this pattern will be ignored.
        lines_are_documents : bool, optional
            If True - each line is considered a document, otherwise - each file is one document.
        kwargs: keyword arguments passed through to the `TextCorpus` constructor.
            See :meth:`gemsim.corpora.textcorpus.TextCorpus.__init__` docstring for more details on these.

        """
        self._min_depth = min_depth
        self._max_depth = sys.maxsize if max_depth is None else max_depth
        # these assignments go through the property setters below, which also
        # compile the regexes and reset the cached corpus length
        self.pattern = pattern
        self.exclude_pattern = exclude_pattern
        self.lines_are_documents = lines_are_documents
        super(TextDirectoryCorpus, self).__init__(input, dictionary, metadata, **kwargs)
    @property
    def lines_are_documents(self):
        # True: one document per line; False: one document per file
        return self._lines_are_documents
    @lines_are_documents.setter
    def lines_are_documents(self, lines_are_documents):
        self._lines_are_documents = lines_are_documents
        # invalidate the cached corpus length; it depends on this setting
        self.length = None
    @property
    def pattern(self):
        # compiled inclusion regex for file names, or None
        return self._pattern
    @pattern.setter
    def pattern(self, pattern):
        self._pattern = None if pattern is None else re.compile(pattern)
        # invalidate the cached corpus length; the file set may have changed
        self.length = None
    @property
    def exclude_pattern(self):
        # compiled exclusion regex for file names, or None
        return self._exclude_pattern
    @exclude_pattern.setter
    def exclude_pattern(self, pattern):
        self._exclude_pattern = None if pattern is None else re.compile(pattern)
        # invalidate the cached corpus length; the file set may have changed
        self.length = None
    @property
    def min_depth(self):
        return self._min_depth
    @min_depth.setter
    def min_depth(self, min_depth):
        self._min_depth = min_depth
        # invalidate the cached corpus length; the file set may have changed
        self.length = None
    @property
    def max_depth(self):
        return self._max_depth
    @max_depth.setter
    def max_depth(self, max_depth):
        self._max_depth = max_depth
        # invalidate the cached corpus length; the file set may have changed
        self.length = None
    def iter_filepaths(self):
        """Generate (lazily) paths to each file in the directory structure within the specified range of depths.

        If a filename pattern to match was given, further filter to only those filenames that match.

        Yields
        ------
        str
            Path to file

        """
        for depth, dirpath, dirnames, filenames in walk(self.input):
            if self.min_depth <= depth <= self.max_depth:
                if self.pattern is not None:
                    filenames = (n for n in filenames if self.pattern.match(n) is not None)
                if self.exclude_pattern is not None:
                    filenames = (n for n in filenames if self.exclude_pattern.match(n) is None)
                for name in filenames:
                    yield os.path.join(dirpath, name)
    def getstream(self):
        """Generate documents from the underlying plain text collection (of one or more files).

        Yields
        ------
        str
            One document per line (if lines_are_documents is True), otherwise - each file is one document.

        """
        num_texts = 0
        for path in self.iter_filepaths():
            # files are opened in text mode; the platform default encoding applies
            with open(path, 'rt') as f:
                if self.lines_are_documents:
                    for line in f:
                        yield line.strip()
                        num_texts += 1
                else:
                    yield f.read().strip()
                    num_texts += 1
        # side effect: cache corpus size once the full stream has been consumed
        self.length = num_texts
    def __len__(self):
        """Get length of corpus.

        Returns
        -------
        int
            Length of corpus.

        """
        if self.length is None:
            self._cache_corpus_length()
        return self.length
    def _cache_corpus_length(self):
        """Calculate length of corpus and cache it to `self.length`."""
        if not self.lines_are_documents:
            # cheap path: one document per file, so counting paths suffices
            self.length = sum(1 for _ in self.iter_filepaths())
        else:
            # must actually read the files to count lines
            self.length = sum(1 for _ in self.getstream())
def walk(top, topdown=True, onerror=None, followlinks=False, depth=0):
    """Walk the directory tree rooted at `top`, like :func:`os.walk`, additionally
    yielding the depth of each directory visited.

    Parameters
    ----------
    top : str
        Root directory.
    topdown : bool, optional
        If True, each directory is yielded before its children, and the yielded
        `dirnames` list can be modified in-place to prune the traversal.
    onerror : function, optional
        Called with the :class:`OSError` instance if listing a directory fails;
        it may re-raise to abort the walk. The failing path is available as the
        exception's `filename` attribute.
    followlinks : bool, optional
        If True, descend into directories pointed to by symlinks.
    depth : int, optional
        Depth of `top` in the overall tree; used internally as the recursion
        accumulator — do not pass it manually.

    Notes
    -----
    Mirrors the Python 2 implementation of `os.walk`, with the current tree
    depth added as the first element of every yielded tuple.

    Yields
    ------
    (int, str, list of str, list of str)
        Depth, current path, visited directories, visited non-directories.

    See Also
    --------
    `os.walk documentation <https://docs.python.org/2/library/os.html#os.walk>`_

    """
    try:
        entries = os.listdir(top)
    except OSError as err:
        if onerror is not None:
            onerror(err)
        return
    # partition the directory contents into subdirectories and everything else
    subdirs, files = [], []
    for entry in entries:
        if os.path.isdir(os.path.join(top, entry)):
            subdirs.append(entry)
        else:
            files.append(entry)
    if topdown:
        yield depth, top, subdirs, files
    for subdir in subdirs:
        child = os.path.join(top, subdir)
        if followlinks or not os.path.islink(child):
            # recurse one level deeper; the generator keeps this lazy
            for entry in walk(child, topdown, onerror, followlinks, depth + 1):
                yield entry
    if not topdown:
        yield depth, top, subdirs, files
| 25,229 | 35.671512 | 120 | py |
poincare_glove | poincare_glove-master/gensim/corpora/indexedcorpus.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Base Indexed Corpus class."""
import logging
import six
import numpy
from gensim import interfaces, utils
logger = logging.getLogger(__name__)
class IndexedCorpus(interfaces.CorpusABC):
"""Indexed corpus is a mechanism for random-accessing corpora.
While the standard corpus interface in gensim allows iterating over corpus,
we'll show it with :class:`~gensim.corpora.mmcorpus.MmCorpus`.
>>> from gensim.corpora import MmCorpus
>>> from gensim.test.utils import datapath
>>>
>>> corpus = MmCorpus(datapath('testcorpus.mm'))
>>> for doc in corpus:
... pass
:class:`~gensim.corpora.indexedcorpus.IndexedCorpus` allows accessing the documents with index
in :math:`{O}(1)` look-up time.
>>> document_index = 3
>>> doc = corpus[document_index]
Notes
-----
This functionality is achieved by storing an extra file (by default named the same as the `fname.index`)
that stores the byte offset of the beginning of each document.
"""
    def __init__(self, fname, index_fname=None):
        """
        Parameters
        ----------
        fname : str
            Path to corpus.
        index_fname : str, optional
            Path to the saved index; if not provided, `fname` with a `.index`
            extension is used.

        """
        # Best effort: a missing or unreadable index simply means random access
        # is unavailable (self.index = None), so failures here are swallowed
        # deliberately rather than aborting construction.
        try:
            if index_fname is None:
                index_fname = utils.smart_extension(fname, '.index')
            self.index = utils.unpickle(index_fname)
            # change self.index into a numpy.ndarray to support fancy indexing
            self.index = numpy.asarray(self.index)
            logger.info("loaded corpus index from %s", index_fname)
        except Exception:
            self.index = None
        self.length = None
@classmethod
def serialize(serializer, fname, corpus, id2word=None, index_fname=None,
progress_cnt=None, labels=None, metadata=False):
"""Serialize corpus with offset metadata, allows to use direct indexes after loading.
Parameters
----------
fname : str
Path to output file.
corpus : iterable of iterable of (int, float)
Corpus in BoW format.
id2word : dict of (str, str), optional
Mapping id -> word.
index_fname : str, optional
Where to save resulting index, if None - store index to `fname`.index.
progress_cnt : int, optional
Number of documents after which progress info is printed.
labels : bool, optional
If True - ignore first column (class labels).
metadata : bool, optional
If True - ensure that serialize will write out article titles to a pickle file.
Examples
--------
>>> from gensim.corpora import MmCorpus
>>> from gensim.test.utils import get_tmpfile
>>>
>>> corpus = [[(1, 0.3), (2, 0.1)], [(1, 0.1)], [(2, 0.3)]]
>>> output_fname = get_tmpfile("test.mm")
>>>
>>> MmCorpus.serialize(output_fname, corpus)
>>> mm = MmCorpus(output_fname) # `mm` document stream now has random access
>>> print(mm[1]) # retrieve document no. 42, etc.
[(1, 0.1)]
"""
if getattr(corpus, 'fname', None) == fname:
raise ValueError("identical input vs. output corpus filename, refusing to serialize: %s" % fname)
if index_fname is None:
index_fname = utils.smart_extension(fname, '.index')
kwargs = {'metadata': metadata}
if progress_cnt is not None:
kwargs['progress_cnt'] = progress_cnt
if labels is not None:
kwargs['labels'] = labels
offsets = serializer.save_corpus(fname, corpus, id2word, **kwargs)
if offsets is None:
raise NotImplementedError(
"Called serialize on class %s which doesn't support indexing!" % serializer.__name__
)
# store offsets persistently, using pickle
# we shouldn't have to worry about self.index being a numpy.ndarray as the serializer will return
# the offsets that are actually stored on disk - we're not storing self.index in any case, the
# load just needs to turn whatever is loaded from disk back into a ndarray - this should also ensure
# backwards compatibility
logger.info("saving %s index to %s", serializer.__name__, index_fname)
utils.pickle(offsets, index_fname)
def __len__(self):
"""Get the index length.
Notes
-----
If the corpus is not indexed, also count corpus length and cache this value.
Returns
-------
int
Length of index.
"""
if self.index is not None:
return len(self.index)
if self.length is None:
logger.info("caching corpus length")
self.length = sum(1 for _ in self)
return self.length
def __getitem__(self, docno):
"""Get document by `docno` index.
Parameters
----------
docno : {int, iterable of int}
Document number or iterable of numbers (like a list of str).
Returns
-------
list of (int, float)
If `docno` is int - return document in BoW format.
:class:`~gensim.utils.SlicedCorpus`
If `docno` is iterable of int - return several documents in BoW format
wrapped to :class:`~gensim.utils.SlicedCorpus`.
Raises
------
RuntimeError
If index isn't exist.
"""
if self.index is None:
raise RuntimeError("Cannot call corpus[docid] without an index")
if isinstance(docno, (slice, list, numpy.ndarray)):
return utils.SlicedCorpus(self, docno)
elif isinstance(docno, six.integer_types + (numpy.integer,)):
return self.docbyoffset(self.index[docno])
# TODO: no `docbyoffset` method, should be defined in this class
else:
raise ValueError('Unrecognised value for docno, use either a single integer, a slice or a numpy.ndarray')
| 6,298 | 33.233696 | 117 | py |
poincare_glove | poincare_glove-master/gensim/corpora/lowcorpus.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Corpus in `GibbsLda++ format <http://gibbslda.sourceforge.net/>`_."""
from __future__ import with_statement
import logging
from gensim import utils
from gensim.corpora import IndexedCorpus
from six import iterkeys
from six.moves import xrange, zip as izip
logger = logging.getLogger(__name__)
def split_on_space(s):
    """Split line by spaces, used in :class:`gensim.corpora.lowcorpus.LowCorpus`.
    Parameters
    ----------
    s : str
        Some line.
    Returns
    -------
    list of str
        List of tokens from `s`.
    """
    # normalize to unicode, trim surrounding whitespace, then split on single
    # spaces; filter(None, ...) drops the empty strings produced by runs of spaces
    tokens = utils.to_unicode(s).strip().split(' ')
    return list(filter(None, tokens))
class LowCorpus(IndexedCorpus):
    """Corpus handles input in `GibbsLda++ format <http://gibbslda.sourceforge.net/>`_.
    **Format description**
    Both data for training/estimating the model and new data (i.e., previously unseen data) have the same format
    as follows ::
        [M]
        [document1]
        [document2]
        ...
        [documentM]
    in which the first line is the total number for documents [M]. Each line after that is one document.
    [documenti] is the ith document of the dataset that consists of a list of Ni words/terms ::
        [documenti] = [wordi1] [wordi2] ... [wordiNi]
    in which all [wordij] (i=1..M, j=1..Ni) are text strings and they are separated by the blank character.
    Examples
    --------
    >>> from gensim.test.utils import datapath, get_tmpfile, common_texts
    >>> from gensim.corpora import LowCorpus
    >>> from gensim.corpora import Dictionary
    >>>
    >>> # Prepare needed data
    >>> dictionary = Dictionary(common_texts)
    >>> corpus = [dictionary.doc2bow(doc) for doc in common_texts]
    >>>
    >>> # Write corpus in GibbsLda++ format to disk
    >>> output_fname = get_tmpfile("corpus.low")
    >>> LowCorpus.serialize(output_fname, corpus, dictionary)
    >>>
    >>> # Read corpus
    >>> loaded_corpus = LowCorpus(output_fname)
    """
    def __init__(self, fname, id2word=None, line2words=split_on_space):
        """
        Parameters
        ----------
        fname : str
            Path to file in GibbsLda++ format.
        id2word : {dict of (int, str), :class:`~gensim.corpora.dictionary.Dictionary`}, optional
            Mapping between word_ids (integers) and words (strings).
            If not provided, the mapping is constructed directly from `fname`.
        line2words : callable, optional
            Function which converts lines(str) into tokens(list of str),
            using :func:`~gensim.corpora.lowcorpus.split_on_space` as default.
        """
        IndexedCorpus.__init__(self, fname)
        logger.info("loading corpus from %s", fname)
        self.fname = fname  # input file, see class doc for format
        self.line2words = line2words  # how to translate lines into words (simply split on space by default)
        self.num_docs = self._calculate_num_docs()
        if not id2word:
            # build a list of all word types in the corpus (distinct words)
            logger.info("extracting vocabulary from the corpus")
            all_terms = set()
            self.use_wordids = False  # return documents as (word, wordCount) 2-tuples
            for doc in self:
                all_terms.update(word for word, wordCnt in doc)
            all_terms = sorted(all_terms)  # sort the list of all words; rank in that list = word's integer id
            # build a mapping of word id(int) -> word (string)
            # NOTE: assigning `self.id2word` goes through the property setter below,
            # which also rebuilds the reverse `self.word2id` mapping
            self.id2word = dict(izip(xrange(len(all_terms)), all_terms))
        else:
            logger.info("using provided word mapping (%i ids)", len(id2word))
            self.id2word = id2word
        self.num_terms = len(self.word2id)
        self.use_wordids = True  # return documents as (wordIndex, wordCount) 2-tuples
        logger.info(
            "loaded corpus with %i documents and %i terms from %s",
            self.num_docs, self.num_terms, fname
        )
    def _calculate_num_docs(self):
        """Get number of documents in file.
        Returns
        -------
        int
            Number of documents.
        """
        # the first line in input data is the number of documents (integer). throws exception on bad input.
        with utils.smart_open(self.fname) as fin:
            try:
                result = int(next(fin))
            except StopIteration:
                # empty file: treat as a corpus with zero documents
                result = 0
        return result
    def __len__(self):
        return self.num_docs
    def line2doc(self, line):
        """Covert line into document in BoW format.
        Parameters
        ----------
        line : str
            Line from input file.
        Returns
        -------
        list of (int, int)
            Document in BoW format
        """
        words = self.line2words(line)
        if self.use_wordids:
            # get all distinct terms in this document, ignore unknown words
            uniq_words = set(words).intersection(iterkeys(self.word2id))
            # the following creates a unique list of words *in the same order*
            # as they were in the input. when iterating over the documents,
            # the (word, count) pairs will appear in the same order as they
            # were in the input (bar duplicates), which looks better.
            # if this was not needed, we might as well have used useWords = set(words)
            use_words, marker = [], set()
            for word in words:
                if (word in uniq_words) and (word not in marker):
                    use_words.append(word)
                    marker.add(word)
            # construct a list of (wordIndex, wordFrequency) 2-tuples
            doc = [(self.word2id.get(w), words.count(w)) for w in use_words]
        else:
            uniq_words = set(words)
            # construct a list of (word, wordFrequency) 2-tuples
            doc = [(w, words.count(w)) for w in uniq_words]
        # return the document, then forget it and move on to the next one
        # note that this way, only one doc is stored in memory at a time, not the whole corpus
        return doc
    def __iter__(self):
        """Iterate over the corpus.
        Yields
        ------
        list of (int, int)
            Document in BoW format.
        """
        with utils.smart_open(self.fname) as fin:
            for lineno, line in enumerate(fin):
                if lineno > 0:  # ignore the first line = number of documents
                    yield self.line2doc(line)
    @staticmethod
    def save_corpus(fname, corpus, id2word=None, metadata=False):
        """Save a corpus in the GibbsLda++ format.
        Warnings
        --------
        This function is automatically called by :meth:`gensim.corpora.lowcorpus.LowCorpus.serialize`,
        don't call it directly, call :meth:`gensim.corpora.lowcorpus.LowCorpus.serialize` instead.
        Parameters
        ----------
        fname : str
            Path to output file.
        corpus : iterable of iterable of (int, int)
            Corpus in BoW format.
        id2word : {dict of (int, str), :class:`~gensim.corpora.dictionary.Dictionary`}, optional
            Mapping between word_ids (integers) and words (strings).
            If not provided, the mapping is constructed directly from `corpus`.
        metadata : bool, optional
            THIS PARAMETER WILL BE IGNORED.
        Return
        ------
        list of int
            List of offsets in resulting file for each document (in bytes),
            can be used for :meth:`~gensim.corpora.lowcorpus.LowCorpus.docbyoffset`
        """
        if id2word is None:
            logger.info("no word id mapping provided; initializing from corpus")
            id2word = utils.dict_from_corpus(corpus)
        logger.info("storing corpus in List-Of-Words format into %s" % fname)
        truncated = 0
        offsets = []
        with utils.smart_open(fname, 'wb') as fout:
            fout.write(utils.to_utf8('%i\n' % len(corpus)))
            for doc in corpus:
                words = []
                for wordid, value in doc:
                    # the format stores plain word repetitions, so non-integer
                    # weights cannot be represented faithfully; count them
                    if abs(int(value) - value) > 1e-6:
                        truncated += 1
                    words.extend([utils.to_unicode(id2word[wordid])] * int(value))
                offsets.append(fout.tell())
                fout.write(utils.to_utf8('%s\n' % ' '.join(words)))
        if truncated:
            logger.warning(
                "List-of-words format can only save vectors with integer elements; "
                "%i float entries were truncated to integer value", truncated
            )
        return offsets
    def docbyoffset(self, offset):
        """Get the document stored in file by `offset` position.
        Parameters
        ----------
        offset : int
            Offset (in bytes) to begin of document.
        Returns
        -------
        list of (int, int)
            Document in BoW format.
        Examples
        --------
        >>> from gensim.test.utils import datapath
        >>> from gensim.corpora import LowCorpus
        >>>
        >>> data = LowCorpus(datapath("testcorpus.low"))
        >>> data.docbyoffset(1)  # end of first line
        []
        >>> data.docbyoffset(2)  # start of second line
        [(0, 1), (3, 1), (4, 1)]
        """
        with utils.smart_open(self.fname) as f:
            f.seek(offset)
            return self.line2doc(f.readline())
    @property
    def id2word(self):
        """Get mapping between words and their ids."""
        return self._id2word
    @id2word.setter
    def id2word(self, val):
        # keep the forward (id -> word) and reverse (word -> id) mappings in sync
        self._id2word = val
        self.word2id = utils.revdict(val)
| 9,852 | 33.093426 | 112 | py |
poincare_glove | poincare_glove-master/gensim/corpora/__init__.py | """
This package contains implementations of various streaming corpus I/O format.
"""
# bring corpus classes directly into package namespace, to save some typing
from .indexedcorpus import IndexedCorpus # noqa:F401 must appear before the other classes
from .mmcorpus import MmCorpus # noqa:F401
from .bleicorpus import BleiCorpus # noqa:F401
from .svmlightcorpus import SvmLightCorpus # noqa:F401
from .lowcorpus import LowCorpus # noqa:F401
from .dictionary import Dictionary # noqa:F401
from .hashdictionary import HashDictionary # noqa:F401
from .wikicorpus import WikiCorpus # noqa:F401
from .textcorpus import TextCorpus, TextDirectoryCorpus # noqa:F401
from .ucicorpus import UciCorpus # noqa:F401
from .malletcorpus import MalletCorpus # noqa:F401
| 768 | 41.722222 | 90 | py |
poincare_glove | poincare_glove-master/gensim/corpora/ucicorpus.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Jonathan Esterhazy <jonathan.esterhazy at gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Corpus in `UCI format <http://archive.ics.uci.edu/ml/datasets/Bag+of+Words>`_."""
from __future__ import with_statement
import logging
from collections import defaultdict
from gensim import utils
from gensim.corpora import Dictionary
from gensim.corpora import IndexedCorpus
from gensim.matutils import MmReader
from gensim.matutils import MmWriter
from six.moves import xrange
logger = logging.getLogger(__name__)
class UciReader(MmReader):
    """Reader of UCI format for :class:`gensim.corpora.ucicorpus.UciCorpus`."""
    def __init__(self, input):
        """
        Parameters
        ----------
        input : str
            Path to file in UCI format.
        """
        logger.info('Initializing corpus reader from %s', input)
        self.input = input
        # the UCI header is three integers, one per line:
        # number of documents, number of terms, number of non-zero entries
        with utils.smart_open(self.input) as fin:
            self.num_docs = self.num_terms = self.num_nnz = 0
            try:
                self.num_docs = int(next(fin).strip())
                self.num_terms = int(next(fin).strip())
                self.num_nnz = int(next(fin).strip())
            except StopIteration:
                # truncated/empty header: keep whatever values were parsed (zeros otherwise)
                pass
        logger.info(
            "accepted corpus with %i documents, %i features, %i non-zero entries",
            self.num_docs, self.num_terms, self.num_nnz
        )
    def skip_headers(self, input_file):
        """Skip headers in `input_file`.
        Parameters
        ----------
        input_file : file
            File object.
        """
        # consume exactly the three header lines (linenos 0, 1, 2),
        # leaving the file positioned at the first data line
        for lineno, _ in enumerate(input_file):
            if lineno == 2:
                break
class UciWriter(MmWriter):
    """Writer of UCI format for :class:`gensim.corpora.ucicorpus.UciCorpus`.
    Notes
    ---------
    This corpus format is identical to `Matrix Market format<http://math.nist.gov/MatrixMarket/formats.html>,
    except for different file headers. There is no format line, and the first three lines of the file
    contain `number_docs`, `num_terms`, and `num_nnz`, one value per line.
    """
    MAX_HEADER_LENGTH = 20  # reserve 20 bytes per header value
    FAKE_HEADER = utils.to_utf8(' ' * MAX_HEADER_LENGTH + '\n')
    def write_headers(self):
        """Write blank header lines. Will be updated later, once corpus stats are known."""
        for _ in range(3):
            self.fout.write(self.FAKE_HEADER)
        self.last_docno = -1
        self.headers_written = True
    def update_headers(self, num_docs, num_terms, num_nnz):
        """Update headers with actual values."""
        offset = 0
        values = [utils.to_utf8(str(n)) for n in [num_docs, num_terms, num_nnz]]
        for value in values:
            if len(value) > len(self.FAKE_HEADER):
                raise ValueError('Invalid header: value too large!')
            # overwrite the placeholder written by `write_headers` in place;
            # each header slot is exactly len(FAKE_HEADER) bytes wide
            self.fout.seek(offset)
            self.fout.write(value)
            offset += len(self.FAKE_HEADER)
    @staticmethod
    def write_corpus(fname, corpus, progress_cnt=1000, index=False):
        """Write corpus in file.
        Parameters
        ----------
        fname : str
            Path to output file.
        corpus: iterable of list of (int, int)
            Corpus in BoW format.
        progress_cnt : int, optional
            Progress counter, write log message each `progress_cnt` documents.
        index : bool, optional
            If True - return offsets, otherwise - nothing.
        Return
        ------
        list of int
            Sequence of offsets to documents (in bytes), only if index=True.
        """
        writer = UciWriter(fname)
        writer.write_headers()
        num_terms, num_nnz = 0, 0
        docno, poslast = -1, -1
        offsets = []
        for docno, bow in enumerate(corpus):
            if docno % progress_cnt == 0:
                logger.info("PROGRESS: saving document #%i", docno)
            if index:
                posnow = writer.fout.tell()
                if posnow == poslast:
                    # previous document wrote no bytes (it was empty);
                    # presumably -1 marks such offsets for the reader -- TODO confirm against MmReader.docbyoffset
                    offsets[-1] = -1
                offsets.append(posnow)
                poslast = posnow
            vector = [(x, int(y)) for (x, y) in bow if int(y) != 0]  # integer count, not floating weights
            max_id, veclen = writer.write_vector(docno, vector)
            num_terms = max(num_terms, 1 + max_id)
            num_nnz += veclen
        num_docs = docno + 1
        if num_docs * num_terms != 0:
            logger.info(
                "saved %ix%i matrix, density=%.3f%% (%i/%i)",
                num_docs, num_terms, 100.0 * num_nnz / (num_docs * num_terms),
                num_nnz, num_docs * num_terms
            )
        # now write proper headers, by seeking and overwriting the spaces written earlier
        writer.update_headers(num_docs, num_terms, num_nnz)
        writer.close()
        if index:
            return offsets
class UciCorpus(UciReader, IndexedCorpus):
    """Corpus in the UCI bag-of-words format."""
    def __init__(self, fname, fname_vocab=None):
        """
        Parameters
        ----------
        fname : str
            Path to corpus in UCI format.
        fname_vocab : bool, optional
            Path to vocab.
        Examples
        --------
        >>> from gensim.corpora import UciCorpus
        >>> from gensim.test.utils import datapath
        >>>
        >>> corpus = UciCorpus(datapath('testcorpus.uci'))
        >>> for document in corpus:
        ...     pass
        """
        IndexedCorpus.__init__(self, fname)
        UciReader.__init__(self, fname)
        if fname_vocab is None:
            fname_vocab = utils.smart_extension(fname, '.vocab')
        self.fname = fname
        # vocabulary file: one word per line, line number = word id
        with utils.smart_open(fname_vocab) as fin:
            words = [word.strip() for word in fin]
        self.id2word = dict(enumerate(words))
        self.transposed = True
    def __iter__(self):
        """Iterate over the corpus.
        Yields
        ------
        list of (int, int)
            Document in BoW format.
        """
        for docId, doc in super(UciCorpus, self).__iter__():
            yield doc  # get rid of docId, return the sparse vector only
    def create_dictionary(self):
        """Generate :class:`gensim.corpora.dictionary.Dictionary` directly from the corpus and vocabulary data.
        Return
        ------
        :class:`gensim.corpora.dictionary.Dictionary`
            Dictionary, based on corpus.
        Examples
        --------
        >>> from gensim.corpora.ucicorpus import UciCorpus
        >>> from gensim.test.utils import datapath
        >>> ucc = UciCorpus(datapath('testcorpus.uci'))
        >>> dictionary = ucc.create_dictionary()
        """
        dictionary = Dictionary()
        # replace dfs with defaultdict to avoid downstream KeyErrors
        # uci vocabularies may contain terms that are not used in the document data
        dictionary.dfs = defaultdict(int)
        dictionary.id2token = self.id2word
        dictionary.token2id = utils.revdict(self.id2word)
        dictionary.num_docs = self.num_docs
        dictionary.num_nnz = self.num_nnz
        # document frequencies and corpus positions require one full pass over the data
        for docno, doc in enumerate(self):
            if docno % 10000 == 0:
                logger.info('PROGRESS: processing document %i of %i', docno, self.num_docs)
            for word, count in doc:
                dictionary.dfs[word] += 1
                dictionary.num_pos += count
        return dictionary
    @staticmethod
    def save_corpus(fname, corpus, id2word=None, progress_cnt=10000, metadata=False):
        """Save a corpus in the UCI Bag-of-Words format.
        Warnings
        --------
        This function is automatically called by :meth`gensim.corpora.ucicorpus.UciCorpus.serialize`,
        don't call it directly, call :meth`gensim.corpora.ucicorpus.UciCorpus.serialize` instead.
        Parameters
        ----------
        fname : str
            Path to output file.
        corpus: iterable of iterable of (int, int)
            Corpus in BoW format.
        id2word : {dict of (int, str), :class:`gensim.corpora.dictionary.Dictionary`}, optional
            Mapping between words and their ids. If None - will be inferred from `corpus`.
        progress_cnt : int, optional
            Progress counter, write log message each `progress_cnt` documents.
        metadata : bool, optional
            THIS PARAMETER WILL BE IGNORED.
        Notes
        -----
        There are actually two files saved: `fname` and `fname.vocab`, where `fname.vocab` is the vocabulary file.
        """
        if id2word is None:
            logger.info("no word id mapping provided; initializing from corpus")
            id2word = utils.dict_from_corpus(corpus)
            num_terms = len(id2word)
        else:
            num_terms = 1 + max([-1] + list(id2word))
        # write out vocabulary
        fname_vocab = utils.smart_extension(fname, '.vocab')
        logger.info("saving vocabulary of %i words to %s", num_terms, fname_vocab)
        with utils.smart_open(fname_vocab, 'wb') as fout:
            for featureid in xrange(num_terms):
                # '---' is a placeholder for ids missing from the mapping
                fout.write(utils.to_utf8("%s\n" % id2word.get(featureid, '---')))
        logger.info("storing corpus in UCI Bag-of-Words format: %s", fname)
        return UciWriter.write_corpus(fname, corpus, index=True, progress_cnt=progress_cnt)
| 9,478 | 31.799308 | 114 | py |
poincare_glove | poincare_glove-master/gensim/corpora/hashdictionary.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Homer Strong, Radim Rehurek
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module implements the "hashing trick" [1]_ -- a mapping between words and their integer ids
using a fixed and static mapping.
Notes
-----
The static mapping has a constant memory footprint, regardless of the number of word-types (features) in your corpus,
so it's suitable for processing extremely large corpora. The ids are computed as `hash(word) % id_range`,
where `hash` is a user-configurable function (`zlib.adler32` by default).
Advantages:
* New words can be represented immediately, without an extra pass through the corpus
to collect all the ids first.
* Can be used with non-repeatable (once-only) streams of documents.
* All tokens will be used (not only that you see in documents), typical problem
for :class:`~gensim.corpora.dictionary.Dictionary`.
Disadvantages:
* Words may map to the same id, causing hash collisions. The word <-> id mapping is no longer a bijection.
References
----------
.. [1] http://en.wikipedia.org/wiki/Hashing-Trick
"""
from __future__ import with_statement
import logging
import itertools
import zlib
from gensim import utils
from six import iteritems, iterkeys
logger = logging.getLogger(__name__)
class HashDictionary(utils.SaveLoad, dict):
    """Encapsulates the mapping between normalized words and their integer ids.
    Notes
    -----
    Unlike :class:`~gensim.corpora.dictionary.Dictionary`,
    building a :class:`~gensim.corpora.hashdictionary.HashDictionary` before using it **isn't a necessary step**.
    The documents can be computed immediately, from an uninitialized
    :class:`~gensim.corpora.hashdictionary.HashDictionary` without seeing the rest of the corpus first.
    Examples
    --------
    >>> from gensim.corpora import HashDictionary
    >>>
    >>> texts = [['human', 'interface', 'computer']]
    >>> dct = HashDictionary(texts)
    >>> dct.doc2bow(texts[0])
    [(10608, 1), (12466, 1), (31002, 1)]
    """
    def __init__(self, documents=None, id_range=32000, myhash=zlib.adler32, debug=True):
        """
        Parameters
        ----------
        documents : iterable of iterable of str
            Iterable of documents, if given - use them to initialization.
        id_range : int, optional
            Number of hash-values in table, used as `id = myhash(key) % id_range`.
        myhash : function
            Hash function, should support interface myhash(str) -> int, used `zlib.adler32` by default.
        debug : bool
            If True - store raw tokens mapping (as str <-> id).
            If you find yourself running out of memory (or not sure that you really need raw tokens), set `debug=False`.
        """
        self.myhash = myhash  # hash fnc: string->integer
        self.id_range = id_range  # hash range: id = myhash(key) % id_range
        self.debug = debug
        # the following (potentially massive!) dictionaries are only formed if `debug` is True
        self.token2id = {}
        self.id2token = {}  # reverse mapping int->set(words)
        self.dfs = {}  # token_id -> how many documents this token_id appeared in
        self.dfs_debug = {}  # token_string->how many documents this word appeared in
        self.num_docs = 0  # number of documents processed
        self.num_pos = 0  # total number of corpus positions
        self.num_nnz = 0  # total number of non-zeroes in the BOW matrix
        self.allow_update = True
        if documents is not None:
            self.add_documents(documents)
    def __getitem__(self, tokenid):
        """Get all words that have mapped to the given id so far, as a set.
        Warnings
        --------
        Works only if `debug=True`.
        Parameters
        ----------
        tokenid : int
            Token identifier (result of hashing).
        Return
        ------
        set of str
            Set of all corresponding words.
        """
        return self.id2token.get(tokenid, set())
    def restricted_hash(self, token):
        """Calculate id of the given token.
        Also keep track of what words were mapped to what ids, for debugging reasons.
        Parameters
        ----------
        token : str
            Input token.
        Return
        ------
        int
            Hash value of `token`.
        """
        h = self.myhash(utils.to_utf8(token)) % self.id_range
        if self.debug:
            # remember the raw token <-> id association (several tokens may share one id)
            self.token2id[token] = h
            self.id2token.setdefault(h, set()).add(token)
        return h
    def __len__(self):
        """Get the number of distinct ids = the entire dictionary size."""
        # the size is the fixed hash range, not the number of tokens seen so far
        return self.id_range
    def keys(self):
        """Get a list of all token ids."""
        return range(len(self))
    def __str__(self):
        return "HashDictionary(%i id range)" % len(self)
    @staticmethod
    def from_documents(*args, **kwargs):
        # convenience alias for the constructor
        return HashDictionary(*args, **kwargs)
    def add_documents(self, documents):
        """Build dictionary from a collection of documents.
        Notes
        -----
        This is only a convenience wrapper for calling `doc2bow` on each document with `allow_update=True`.
        Parameters
        ----------
        documents : iterable of list of str
            Collection of documents.
        Examples
        --------
        >>> from gensim.corpora import HashDictionary
        >>>
        >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
        >>> dct = HashDictionary(corpus)
        >>> "sparta" in dct.token2id
        False
        >>> dct.add_documents([["this","is","sparta"],["just","joking"]])  # add more documents in dictionary
        >>> "sparta" in dct.token2id
        True
        """
        for docno, document in enumerate(documents):
            if docno % 10000 == 0:
                logger.info("adding document #%i to %s", docno, self)
            self.doc2bow(document, allow_update=True)  # ignore the result, here we only care about updating token ids
        logger.info(
            "built %s from %i documents (total %i corpus positions)",
            self, self.num_docs, self.num_pos
        )
    def doc2bow(self, document, allow_update=False, return_missing=False):
        """Convert `document` into the bag-of-words format, like [(1, 4), (150, 1), (2005, 2)].
        Notes
        -----
        Each word is assumed to be a **tokenized and normalized** utf-8 encoded string. No further preprocessing
        is done on the words in `document` (apply tokenization, stemming etc) before calling this method.
        If `allow_update` or `self.allow_update` is set, then also update dictionary in the process: update overall
        corpus statistics and document frequencies. For each id appearing in this document, increase its document
        frequency (`self.dfs`) by one.
        Parameters
        ----------
        document : list of str
            Is a list of tokens = **tokenized and normalized** strings (either utf8 or unicode).
        allow_update : bool, optional
            If True - update dictionary in the process.
        return_missing : bool, optional
            Show token_count for missing words. HAVE NO SENSE FOR THIS CLASS, BECAUSE WE USING HASHING-TRICK.
        Return
        ------
        list of (int, int)
            Document in Bag-of-words (BoW) format.
        list of (int, int), dict
            If `return_missing=True`, return document in Bag-of-words (BoW) format + empty dictionary.
        Examples
        --------
        >>> from gensim.corpora import HashDictionary
        >>>
        >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
        >>> dct = HashDictionary(corpus)
        >>> dct.doc2bow(["this","is","máma"])
        [(1721, 1), (5280, 1), (22493, 1)]
        >>> dct.doc2bow(["this","is","máma"], return_missing=True)
        ([(1721, 1), (5280, 1), (22493, 1)], {})
        """
        result = {}
        missing = {}
        document = sorted(document)  # convert the input to plain list (needed below)
        # sorting groups equal tokens together, so groupby yields one group per distinct token
        for word_norm, group in itertools.groupby(document):
            frequency = len(list(group))  # how many times does this word appear in the input document
            tokenid = self.restricted_hash(word_norm)
            result[tokenid] = result.get(tokenid, 0) + frequency
            if self.debug:
                # increment document count for each unique token that appeared in the document
                self.dfs_debug[word_norm] = self.dfs_debug.get(word_norm, 0) + 1
        if allow_update or self.allow_update:
            self.num_docs += 1
            self.num_pos += len(document)
            self.num_nnz += len(result)
            if self.debug:
                # increment document count for each unique tokenid that appeared in the document
                # done here, because several words may map to the same tokenid
                for tokenid in iterkeys(result):
                    self.dfs[tokenid] = self.dfs.get(tokenid, 0) + 1
        # return tokenids, in ascending id order
        result = sorted(iteritems(result))
        if return_missing:
            return result, missing
        else:
            return result
    def filter_extremes(self, no_below=5, no_above=0.5, keep_n=100000):
        """Filter tokens in dictionary by frequency.
        Parameters
        ----------
        no_below : int, optional
            Keep tokens which are contained in at least `no_below` documents.
        no_above : float, optional
            Keep tokens which are contained in no more than `no_above` documents
            (fraction of total corpus size, not an absolute number).
        keep_n : int, optional
            Keep only the first `keep_n` most frequent tokens.
        Notes
        -----
        For tokens that appear in:
        #. Less than `no_below` documents (absolute number) or \n
        #. More than `no_above` documents (fraction of total corpus size, **not absolute number**).
        #. After (1) and (2), keep only the first `keep_n` most frequent tokens (or keep all if `None`).
        Since :class:`~gensim.corpora.hashdictionary.HashDictionary` id range is fixed and doesn't depend on the number
        of tokens seen, this doesn't really "remove" anything.
        It only clears some supplementary statistics, for easier debugging and a smaller RAM footprint.
        Examples
        --------
        >>> from gensim.corpora import HashDictionary
        >>>
        >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
        >>> dct = HashDictionary(corpus)
        >>> dct.filter_extremes(no_below=1, no_above=0.5, keep_n=1)
        >>> print(dct.token2id)
        {'maso': 15025}
        """
        no_above_abs = int(no_above * self.num_docs)  # convert fractional threshold to absolute threshold
        ok = [item for item in iteritems(self.dfs_debug) if no_below <= item[1] <= no_above_abs]
        ok = frozenset(word for word, freq in sorted(ok, key=lambda x: -x[1])[:keep_n])
        # prune the debug statistics down to the surviving words, then propagate
        # that pruning to the token2id / id2token / dfs bookkeeping
        self.dfs_debug = {word: freq for word, freq in iteritems(self.dfs_debug) if word in ok}
        self.token2id = {token: tokenid for token, tokenid in iteritems(self.token2id) if token in self.dfs_debug}
        self.id2token = {
            tokenid: {token for token in tokens if token in self.dfs_debug}
            for tokenid, tokens in iteritems(self.id2token)
        }
        self.dfs = {tokenid: freq for tokenid, freq in iteritems(self.dfs) if self.id2token.get(tokenid, set())}
        # for word->document frequency
        logger.info(
            "kept statistics for which were in no less than %i and no more than %i (=%.1f%%) documents",
            no_below, no_above_abs, 100.0 * no_above
        )
    def save_as_text(self, fname):
        """Save this HashDictionary to a text file.
        Parameters
        ----------
        fname : str
            Path to output file.
        Notes
        -----
        The format is:
        `id[TAB]document frequency of this id[TAB]tab-separated set of words in UTF8 that map to this id[NEWLINE]`.
        Examples
        --------
        >>> from gensim.corpora import HashDictionary
        >>> from gensim.test.utils import get_tmpfile
        >>>
        >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
        >>> data = HashDictionary(corpus)
        >>> data.save_as_text(get_tmpfile("dictionary_in_text_format"))
        """
        logger.info("saving HashDictionary mapping to %s" % fname)
        with utils.smart_open(fname, 'wb') as fout:
            for tokenid in self.keys():
                words = sorted(self[tokenid])
                if words:
                    # each word is written as "word(document frequency)", most frequent first
                    words_df = [(word, self.dfs_debug.get(word, 0)) for word in words]
                    words_df = ["%s(%i)" % item for item in sorted(words_df, key=lambda x: -x[1])]
                    words_df = '\t'.join(words_df)
                    fout.write(utils.to_utf8("%i\t%i\t%s\n" % (tokenid, self.dfs.get(tokenid, 0), words_df)))
| 13,148 | 36.676218 | 120 | py |
poincare_glove | poincare_glove-master/gensim/corpora/wikicorpus.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Copyright (C) 2012 Lars Buitinck <larsmans@gmail.com>
# Copyright (C) 2018 Emmanouil Stergiadis <em.stergiadis@gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Construct a corpus from a Wikipedia (or other MediaWiki-based) database dump.
Notes
-----
If you have the `pattern` package installed, this module will use a fancy lemmatization to get a lemma
of each token (instead of plain alphabetic tokenizer). The package is available at [1]_ .
See :mod:`~gensim.scripts.make_wiki` for a canned (example) script based on this module.
References
----------
.. [1] https://github.com/clips/pattern
"""
import bz2
import logging
import multiprocessing
import re
import signal
from xml.etree.cElementTree import \
iterparse # LXML isn't faster, so let's go with the built-in solution
from gensim import utils
# cannot import whole gensim.corpora, because that imports wikicorpus...
from gensim.corpora.dictionary import Dictionary
from gensim.corpora.textcorpus import TextCorpus
logger = logging.getLogger(__name__)
ARTICLE_MIN_WORDS = 50
"""Ignore shorter articles (after full preprocessing)."""
# default thresholds for lengths of individual tokens
TOKEN_MIN_LEN = 2
TOKEN_MAX_LEN = 15
RE_P0 = re.compile(r'<!--.*?-->', re.DOTALL | re.UNICODE)
"""Comments."""
RE_P1 = re.compile(r'<ref([> ].*?)(</ref>|/>)', re.DOTALL | re.UNICODE)
"""Footnotes."""
RE_P2 = re.compile(r'(\n\[\[[a-z][a-z][\w-]*:[^:\]]+\]\])+$', re.UNICODE)
"""Links to languages."""
RE_P3 = re.compile(r'{{([^}{]*)}}', re.DOTALL | re.UNICODE)
"""Template."""
RE_P4 = re.compile(r'{{([^}]*)}}', re.DOTALL | re.UNICODE)
"""Template."""
RE_P5 = re.compile(r'\[(\w+):\/\/(.*?)(( (.*?))|())\]', re.UNICODE)
"""Remove URL, keep description."""
RE_P6 = re.compile(r'\[([^][]*)\|([^][]*)\]', re.DOTALL | re.UNICODE)
"""Simplify links, keep description."""
RE_P7 = re.compile(r'\n\[\[[iI]mage(.*?)(\|.*?)*\|(.*?)\]\]', re.UNICODE)
"""Keep description of images."""
RE_P8 = re.compile(r'\n\[\[[fF]ile(.*?)(\|.*?)*\|(.*?)\]\]', re.UNICODE)
"""Keep description of files."""
RE_P9 = re.compile(r'<nowiki([> ].*?)(</nowiki>|/>)', re.DOTALL | re.UNICODE)
"""External links."""
RE_P10 = re.compile(r'<math([> ].*?)(</math>|/>)', re.DOTALL | re.UNICODE)
"""Math content."""
RE_P11 = re.compile(r'<(.*?)>', re.DOTALL | re.UNICODE)
"""All other tags."""
RE_P12 = re.compile(r'\n(({\|)|(\|-)|(\|}))(.*?)(?=\n)', re.UNICODE)
"""Table formatting."""
RE_P13 = re.compile(r'\n(\||\!)(.*?\|)*([^|]*?)', re.UNICODE)
"""Table cell formatting."""
RE_P14 = re.compile(r'\[\[Category:[^][]*\]\]', re.UNICODE)
"""Categories."""
RE_P15 = re.compile(r'\[\[([fF]ile:|[iI]mage)[^]]*(\]\])', re.UNICODE)
"""Remove File and Image templates."""
RE_P16 = re.compile(r'\[{2}(.*?)\]{2}', re.UNICODE)
"""Capture interlinks text and article linked"""
IGNORED_NAMESPACES = [
'Wikipedia', 'Category', 'File', 'Portal', 'Template',
'MediaWiki', 'User', 'Help', 'Book', 'Draft', 'WikiProject',
'Special', 'Talk'
]
"""MediaWiki namespaces [2]_ that ought to be ignored.
References
----------
.. [2] https://www.mediawiki.org/wiki/Manual:Namespace
"""
def find_interlinks(raw):
    """Collect every interlink to another article found in the dump.

    Parameters
    ----------
    raw : str
        Unicode or utf-8 encoded string.

    Returns
    -------
    dict
        Mapping from the linked article title to the link text that was found.
    """
    # Strip the markup but keep link syntax intact so RE_P16 can still match it.
    stripped = filter_wiki(raw, promote_remaining=False, simplify_links=False)
    links = {}
    for match in re.findall(RE_P16, stripped):
        pieces = match.split('|')
        title = pieces[0]
        # "[[title|text]]" carries an explicit description; plain "[[title]]" does not.
        links[title] = pieces[1] if len(pieces) > 1 else title
    # Titles still containing brackets are artefacts of imperfect markup
    # parsing, not genuine article links -- drop them.
    return {title: text for title, text in links.items() if '[' not in title and ']' not in title}
def filter_wiki(raw, promote_remaining=True, simplify_links=True):
    """Strip wiki markup from `raw`, returning only the text content.

    Parameters
    ----------
    raw : str
        Unicode or utf-8 encoded string.
    promote_remaining : bool
        Whether uncaught markup should be promoted to plain text.
    simplify_links : bool
        Whether links should be simplified keeping only their description text.

    Returns
    -------
    str
        `raw` without markup.
    """
    # Parsing of the wiki markup is best-effort rather than exact; it is
    # sufficient for building a corpus. Normalize to unicode and resolve HTML
    # entities ('&nbsp;' --> '\xa0') before handing off to the markup remover.
    unicode_text = utils.to_unicode(raw, 'utf8', errors='ignore')
    return remove_markup(utils.decode_htmlentities(unicode_text), promote_remaining, simplify_links)
def remove_markup(text, promote_remaining=True, simplify_links=True):
    """Filter out wiki markup from `text`, leaving only text.

    Parameters
    ----------
    text : str
        String containing markup.
    promote_remaining : bool
        Whether uncaught markup should be promoted to plain text.
    simplify_links : bool
        Whether links should be simplified keeping only their description text.

    Returns
    -------
    str
        `text` without markup.
    """
    text = re.sub(RE_P2, '', text)  # remove the last list (=languages)
    # the wiki markup is recursive (markup inside markup etc)
    # instead of writing a recursive grammar, here we deal with that by removing
    # markup in a loop, starting with inner-most expressions and working outwards,
    # for as long as something changes.
    text = remove_template(text)
    text = remove_file(text)
    # Each pass of the loop below strips one nesting level of markup; the loop
    # is capped so a pathological page cannot make this run forever.
    iters = 0
    while True:
        old, iters = text, iters + 1
        text = re.sub(RE_P0, '', text)  # remove comments
        text = re.sub(RE_P1, '', text)  # remove footnotes
        text = re.sub(RE_P9, '', text)  # remove outside links
        text = re.sub(RE_P10, '', text)  # remove math content
        text = re.sub(RE_P11, '', text)  # remove all remaining tags
        text = re.sub(RE_P14, '', text)  # remove categories
        text = re.sub(RE_P5, '\\3', text)  # remove urls, keep description
        if simplify_links:
            text = re.sub(RE_P6, '\\2', text)  # simplify links, keep description only
        # remove table markup
        text = text.replace('||', '\n|')  # each table cell on a separate line
        text = re.sub(RE_P12, '\n', text)  # remove formatting lines
        text = re.sub(RE_P13, '\n\\3', text)  # leave only cell content
        # remove empty mark-up
        text = text.replace('[]', '')
        # stop if nothing changed between two iterations or after a fixed number of iterations
        if old == text or iters > 2:
            break
    if promote_remaining:
        text = text.replace('[', '').replace(']', '')  # promote all remaining markup to plain text
    return text
def remove_template(s):
    """Remove template wikimedia markup.

    Parameters
    ----------
    s : str
        String containing markup template.

    Returns
    -------
    str
        Copy of `s` with all the wikimedia markup template removed.

    Notes
    -----
    Templates can be nested, so they cannot be removed with a plain regular
    expression; instead the matching '{{' / '}}' pairs are found by counting
    braces in a single pass over the string.
    """
    # Record the start and end index of every top-level "{{ ... }}" span.
    opened, closed = 0, 0
    span_starts, span_ends = [], []
    inside = False
    previous = None
    for pos, ch in enumerate(s):
        if not inside and ch == '{' and previous == '{':
            # A "{{" pair opens a new top-level template span.
            span_starts.append(pos - 1)
            inside = True
            opened = 1
        if inside:
            if ch == '{':
                opened += 1
            elif ch == '}':
                closed += 1
                if opened == closed:
                    # All braces balanced: the template span closes here.
                    span_ends.append(pos)
                    inside = False
                    opened, closed = 0, 0
        previous = ch
    # Keep only the text lying between consecutive template spans. An
    # unterminated final template drops everything from its start onwards.
    kept = []
    for begin, end in zip(span_starts + [None], [-1] + span_ends):
        kept.append(s[end + 1:begin])
    return ''.join(kept)
def remove_file(s):
    """Remove the 'File:' and 'Image:' markup, keeping the file caption.

    Parameters
    ----------
    s : str
        String containing 'File:' and 'Image:' markup.

    Returns
    -------
    str
        Copy of `s` with every 'File:' / 'Image:' markup replaced by its caption.
    """
    # RE_P15 matches one complete "[[File:...]]" / "[[Image:...]]" block.
    markups = [match.group(0) for match in re.finditer(RE_P15, s)]
    for markup in markups:
        # The caption is the last '|'-separated field, with the trailing ']]' cut off.
        caption = markup[:-2].split('|')[-1]
        s = s.replace(markup, caption, 1)
    return s
def tokenize(content, token_min_len=TOKEN_MIN_LEN, token_max_len=TOKEN_MAX_LEN, lower=True):
    """Tokenize a piece of text from wikipedia.

    Set `token_min_len`, `token_max_len` as character length (not bytes!) thresholds for individual tokens.

    Parameters
    ----------
    content : str
        String without markup (see :func:`~gensim.corpora.wikicorpus.filter_wiki`).
    token_min_len : int
        Minimal token length.
    token_max_len : int
        Maximal token length.
    lower : bool
        If True - convert `content` to lower case.

    Returns
    -------
    list of str
        List of tokens from `content`.
    """
    # TODO maybe ignore tokens with non-latin characters? (no chinese, arabic, russian etc.)
    result = []
    for token in utils.tokenize(content, lower=lower, errors='ignore'):
        # Underscore-prefixed tokens and tokens outside the length window are dropped.
        if token.startswith('_'):
            continue
        if token_min_len <= len(token) <= token_max_len:
            result.append(utils.to_unicode(token))
    return result
def get_namespace(tag):
"""Get the namespace of tag.
Parameters
----------
tag : str
Namespace or tag.
Returns
-------
str
Matched namespace or tag.
"""
m = re.match("^{(.*?)}", tag)
namespace = m.group(1) if m else ""
if not namespace.startswith("http://www.mediawiki.org/xml/export-"):
raise ValueError("%s not recognized as MediaWiki dump namespace" % namespace)
return namespace
_get_namespace = get_namespace
def extract_pages(f, filter_namespaces=False):
    """Extract pages from a MediaWiki database dump.

    Parameters
    ----------
    f : file
        File-like object.
    filter_namespaces : list of str or bool
        Namespaces that will be extracted.

    Yields
    ------
    tuple of (str or None, str, str)
        Title, text and page id.
    """
    # Stream the XML lazily; only "end" events are needed since whole <page>
    # subtrees are inspected once they are fully parsed.
    elems = (elem for _, elem in iterparse(f, events=("end",)))
    # We can't rely on the namespace for database dumps, since it's changed
    # it every time a small modification to the format is made. So, determine
    # those from the first element we find, which will be part of the metadata,
    # and construct element paths.
    elem = next(elems)
    namespace = get_namespace(elem.tag)
    ns_mapping = {"ns": namespace}
    page_tag = "{%(ns)s}page" % ns_mapping
    text_path = "./{%(ns)s}revision/{%(ns)s}text" % ns_mapping
    title_path = "./{%(ns)s}title" % ns_mapping
    ns_path = "./{%(ns)s}ns" % ns_mapping
    pageid_path = "./{%(ns)s}id" % ns_mapping
    for elem in elems:
        if elem.tag == page_tag:
            title = elem.find(title_path).text
            text = elem.find(text_path).text
            if filter_namespaces:
                # Pages outside the requested namespaces are emitted with empty text.
                ns = elem.find(ns_path).text
                if ns not in filter_namespaces:
                    text = None
            pageid = elem.find(pageid_path).text
            yield title, text or "", pageid  # empty page will yield None
            # Prune the element tree, as per
            # http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
            # except that we don't need to prune backlinks from the parent
            # because we don't use LXML.
            # We do this only for <page>s, since we need to inspect the
            # ./revision/text element. The pages comprise the bulk of the
            # file, so in practice we prune away enough.
            elem.clear()


_extract_pages = extract_pages  # for backward compatibility
def process_article(args, tokenizer_func=tokenize, token_min_len=TOKEN_MIN_LEN,
                    token_max_len=TOKEN_MAX_LEN, lower=True):
    """Parse a wikipedia article, extract all tokens.

    Notes
    -----
    Set `tokenizer_func` (defaults is :func:`~gensim.corpora.wikicorpus.tokenize`) parameter for languages
    like japanese or thai to perform better tokenization.
    The `tokenizer_func` needs to take 4 parameters: (text: str, token_min_len: int, token_max_len: int, lower: bool).

    Parameters
    ----------
    args : (str, bool, str, int)
        Article text, lemmatize flag (if True, :func:`~gensim.utils.lemmatize` will be used), article title,
        page identificator.
    tokenizer_func : function
        Function for tokenization (defaults is :func:`~gensim.corpora.wikicorpus.tokenize`).
        Needs to have interface:
        tokenizer_func(text: str, token_min_len: int, token_max_len: int, lower: bool) -> list of str.
    token_min_len : int
        Minimal token length.
    token_max_len : int
        Maximal token length.
    lower : bool
        If True - convert article text to lower case.

    Returns
    -------
    (list of str, str, int)
        List of tokens from article, title and page id.
    """
    article_text, lemmatize, title, pageid = args
    # Markup is stripped first; only then is the plain text tokenized.
    plain_text = filter_wiki(article_text)
    result = (
        utils.lemmatize(plain_text)
        if lemmatize
        else tokenizer_func(plain_text, token_min_len, token_max_len, lower)
    )
    return result, title, pageid
def init_to_ignore_interrupt():
    """Enables interruption ignoring.

    Warnings
    --------
    Should only be used when master is prepared to handle termination of
    child processes.
    """
    # Worker processes ignore SIGINT so that Ctrl-C is handled solely by the
    # parent, which then shuts the pool down cleanly (see WikiCorpus.get_texts).
    signal.signal(signal.SIGINT, signal.SIG_IGN)
def _process_article(args):
    """Same as :func:`~gensim.corpora.wikicorpus.process_article`, but with args in list format.

    Parameters
    ----------
    args : [(str, bool, str, int), (function, int, int, bool)]
        First element - same as `args` from :func:`~gensim.corpora.wikicorpus.process_article`,
        second element is tokenizer function, token minimal length, token maximal length, lowercase flag.

    Returns
    -------
    (list of str, str, int)
        List of tokens from article, title and page id.

    Warnings
    --------
    Should not be called explicitly. Use :func:`~gensim.corpora.wikicorpus.process_article` instead.
    """
    # The tokenization settings ride along as the final element of `args`.
    article_args, (tokenizer_func, token_min_len, token_max_len, lower) = args[:-1], args[-1]
    return process_article(
        article_args, tokenizer_func=tokenizer_func, token_min_len=token_min_len,
        token_max_len=token_max_len, lower=lower
    )
class WikiCorpus(TextCorpus):
    """Treat a wikipedia articles dump as a **read-only** corpus.

    Supported dump formats:

    * <LANG>wiki-<YYYYMMDD>-pages-articles.xml.bz2
    * <LANG>wiki-latest-pages-articles.xml.bz2

    The documents are extracted on-the-fly, so that the whole (massive) dump can stay compressed on disk.

    Notes
    -----
    Dumps for English wikipedia can be founded `here <https://dumps.wikimedia.org/enwiki/>`_.

    Attributes
    ----------
    metadata : bool
        Whether to write articles titles to serialized corpus.

    Warnings
    --------
    "Multistream" archives are *not* supported in Python 2 due to `limitations in the core bz2 library
    <https://docs.python.org/2/library/bz2.html#de-compression-of-files>`_.

    Examples
    --------
    >>> from gensim.corpora import WikiCorpus, MmCorpus
    >>>
    >>> wiki = WikiCorpus('enwiki-20100622-pages-articles.xml.bz2')  # create word->word_id mapping, takes almost 8h
    >>> MmCorpus.serialize('wiki_en_vocab200k.mm', wiki)  # another 8h, creates a file in MatrixMarket format and mapping

    """
    def __init__(self, fname, processes=None, lemmatize=utils.has_pattern(), dictionary=None,
                 filter_namespaces=('0',), tokenizer_func=tokenize, article_min_tokens=ARTICLE_MIN_WORDS,
                 token_min_len=TOKEN_MIN_LEN, token_max_len=TOKEN_MAX_LEN, lower=True):
        """Initialize the corpus.

        Unless a dictionary is provided, this scans the corpus once,
        to determine its vocabulary.

        Parameters
        ----------
        fname : str
            Path to file with wikipedia dump.
        processes : int, optional
            Number of processes to run, defaults to **number of cpu - 1**.
        lemmatize : bool
            Whether to use lemmatization instead of simple regexp tokenization.
            Defaults to `True` if *pattern* package installed.
        dictionary : :class:`~gensim.corpora.dictionary.Dictionary`, optional
            Dictionary, if not provided, this scans the corpus once, to determine its vocabulary
            (this needs **really long time**).
        filter_namespaces : tuple of str
            Namespaces to consider.
        tokenizer_func : function, optional
            Function that will be used for tokenization. By default, use :func:`~gensim.corpora.wikicorpus.tokenize`.
            Need to support interface:
            tokenizer_func(text: str, token_min_len: int, token_max_len: int, lower: bool) -> list of str.
        article_min_tokens : int, optional
            Minimum tokens in article. Article will be ignored if number of tokens is less.
        token_min_len : int, optional
            Minimal token length.
        token_max_len : int, optional
            Maximal token length.
        lower : bool, optional
             If True - convert all text to lower case.

        """
        self.fname = fname
        self.filter_namespaces = filter_namespaces
        self.metadata = False
        if processes is None:
            # leave one core free for the master process
            processes = max(1, multiprocessing.cpu_count() - 1)
        self.processes = processes
        self.lemmatize = lemmatize
        self.tokenizer_func = tokenizer_func
        self.article_min_tokens = article_min_tokens
        self.token_min_len = token_min_len
        self.token_max_len = token_max_len
        self.lower = lower
        # Building the dictionary requires one full (slow) pass over the dump.
        self.dictionary = dictionary or Dictionary(self.get_texts())

    def get_texts(self):
        """Iterate over the dump, yielding list of tokens for each article.

        Notes
        -----
        This iterates over the **texts**. If you want vectors, just use the standard corpus interface
        instead of this method:

        >>> for vec in wiki_corpus:
        >>>     print(vec)

        Yields
        ------
        list of str
            If `metadata` is False, yield only list of token extracted from the article.
        (list of str, (int, str))
            List of tokens (extracted from the article), page id and article title otherwise.

        """
        articles, articles_all = 0, 0
        positions, positions_all = 0, 0
        tokenization_params = (self.tokenizer_func, self.token_min_len, self.token_max_len, self.lower)
        texts = \
            ((text, self.lemmatize, title, pageid, tokenization_params)
             for title, text, pageid
             in extract_pages(bz2.BZ2File(self.fname), self.filter_namespaces))
        pool = multiprocessing.Pool(self.processes, init_to_ignore_interrupt)
        try:
            # process the corpus in smaller chunks of docs, because multiprocessing.Pool
            # is dumb and would load the entire input into RAM at once...
            for group in utils.chunkize(texts, chunksize=10 * self.processes, maxsize=1):
                for tokens, title, pageid in pool.imap(_process_article, group):
                    articles_all += 1
                    positions_all += len(tokens)
                    # article redirects and short stubs are pruned here
                    if len(tokens) < self.article_min_tokens or \
                            any(title.startswith(ignore + ':') for ignore in IGNORED_NAMESPACES):
                        continue
                    articles += 1
                    positions += len(tokens)
                    if self.metadata:
                        yield (tokens, (pageid, title))
                    else:
                        yield tokens
        except KeyboardInterrupt:
            # fix: logger.warn is a deprecated alias of logger.warning
            logger.warning(
                "user terminated iteration over Wikipedia corpus after %i documents with %i positions "
                "(total %i articles, %i positions before pruning articles shorter than %i words)",
                articles, positions, articles_all, positions_all, ARTICLE_MIN_WORDS
            )
        else:
            logger.info(
                "finished iterating over Wikipedia corpus of %i documents with %i positions "
                "(total %i articles, %i positions before pruning articles shorter than %i words)",
                articles, positions, articles_all, positions_all, ARTICLE_MIN_WORDS
            )
            self.length = articles  # cache corpus length
        finally:
            # always release the worker processes, even on error/interrupt
            pool.terminate()
| 21,369 | 33.635332 | 120 | py |
poincare_glove | poincare_glove-master/gensim/models/lda_worker.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Worker ("slave") process used in computing distributed LDA. Run this script \
on every node in your cluster. If you wish, you may even run it multiple times \
on a single machine, to make better use of multiple cores (just beware that \
memory footprint increases accordingly).
Example: python -m gensim.models.lda_worker
"""
from __future__ import with_statement
import os
import sys
import logging
import threading
import tempfile
import argparse
try:
import Queue
except ImportError:
import queue as Queue
import Pyro4
from gensim.models import ldamodel
from gensim import utils
logger = logging.getLogger('gensim.models.lda_worker')
# periodically save intermediate models after every SAVE_DEBUG updates (0 for never)
SAVE_DEBUG = 0
LDA_WORKER_PREFIX = 'gensim.lda_worker'
class Worker(object):
    """A single node in a distributed LDA computation, driven remotely via Pyro4.

    The worker receives expectation-step jobs from the dispatcher, accumulates
    sufficient statistics in a local LdaModel, and hands its state back to the
    master on request.
    """

    def __init__(self):
        # The model is created lazily in `initialize`; `requestjob` refuses to
        # run until that has happened.
        self.model = None

    @Pyro4.expose
    def initialize(self, myid, dispatcher, **model_params):
        """Set up this worker: create its LdaModel and remember its dispatcher.

        `myid` is the id assigned by the dispatcher; `model_params` are passed
        straight to the LdaModel constructor.
        """
        self.lock_update = threading.Lock()
        self.jobsdone = 0  # how many jobs has this worker completed?
        # id of this worker in the dispatcher; just a convenience var for easy access/logging TODO remove?
        self.myid = myid
        self.dispatcher = dispatcher
        self.finished = False
        logger.info("initializing worker #%s", myid)
        self.model = ldamodel.LdaModel(**model_params)

    @Pyro4.expose
    @Pyro4.oneway
    def requestjob(self):
        """
        Request jobs from the dispatcher, in a perpetual loop until `getstate()` is called.
        """
        if self.model is None:
            raise RuntimeError("worker must be initialized before receiving jobs")
        job = None
        while job is None and not self.finished:
            try:
                job = self.dispatcher.getjob(self.myid)
            except Queue.Empty:
                # no new job: try again, unless we're finished with all work
                continue
        if job is not None:
            logger.info("worker #%s received job #%i", self.myid, self.jobsdone)
            self.processjob(job)
            # notify the dispatcher so it can hand out the next job
            self.dispatcher.jobdone(self.myid)
        else:
            logger.info("worker #%i stopping asking for jobs", self.myid)

    @utils.synchronous('lock_update')
    def processjob(self, job):
        """Run one E-step over `job` (a chunk of documents); serialized by `lock_update`."""
        logger.debug("starting to process job #%i", self.jobsdone)
        self.model.do_estep(job)
        self.jobsdone += 1
        if SAVE_DEBUG and self.jobsdone % SAVE_DEBUG == 0:
            # periodic debug snapshot of the model (disabled when SAVE_DEBUG == 0)
            fname = os.path.join(tempfile.gettempdir(), 'lda_worker.pkl')
            self.model.save(fname)
        logger.info("finished processing job #%i", self.jobsdone - 1)

    @Pyro4.expose
    def ping(self):
        """Liveness check used by the dispatcher."""
        return True

    @Pyro4.expose
    @utils.synchronous('lock_update')
    def getstate(self):
        """Return the accumulated LdaState to the master and mark this worker finished."""
        logger.info("worker #%i returning its state after %s jobs", self.myid, self.jobsdone)
        result = self.model.state
        assert isinstance(result, ldamodel.LdaState)
        self.model.clear()  # free up mem in-between two EM cycles
        self.finished = True
        return result

    @Pyro4.expose
    @utils.synchronous('lock_update')
    def reset(self, state):
        """Install a fresh `state` (from the master) and re-enable job processing."""
        assert state is not None
        logger.info("resetting worker #%i", self.myid)
        self.model.state = state
        self.model.sync_state()
        self.model.state.reset()
        self.finished = False

    @Pyro4.oneway
    def exit(self):
        """Terminate this worker process immediately (called remotely)."""
        logger.info("terminating worker #%i", self.myid)
        os._exit(0)
def _create_parser():
    """Build the command-line parser for the LDA worker script."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--host", default=None, help="Nameserver hostname (default: %(default)s)")
    parser.add_argument("--port", default=None, type=int, help="Nameserver port (default: %(default)s)")
    parser.add_argument(
        "--no-broadcast", action='store_const', default=True, const=False,
        help="Disable broadcast (default: %(default)s)"
    )
    parser.add_argument("--hmac", default=None, help="Nameserver hmac key (default: %(default)s)")
    parser.add_argument(
        '-v', '--verbose', action='store_const', dest="loglevel",
        const=logging.INFO, default=logging.WARNING, help='Verbose flag'
    )
    return parser


def main():
    """Parse CLI options, configure logging and register a Worker with Pyro."""
    args = _create_parser().parse_args()
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=args.loglevel)
    logger.info("running %s", " ".join(sys.argv))
    # Nameserver connection settings, forwarded to the Pyro daemon helper.
    ns_conf = {
        "broadcast": args.no_broadcast,
        "host": args.host,
        "port": args.port,
        "hmac_key": args.hmac
    }
    utils.pyro_daemon(LDA_WORKER_PREFIX, Worker(), random_suffix=True, ns_conf=ns_conf)
    logger.info("finished running %s", " ".join(sys.argv))


if __name__ == '__main__':
    main()
| 4,912 | 31.753333 | 106 | py |
poincare_glove | poincare_glove-master/gensim/models/ldamodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
**For a faster implementation of LDA (parallelized for multicore machines), see** :mod:`gensim.models.ldamulticore`.
Latent Dirichlet Allocation (LDA) in Python.
This module allows both LDA model estimation from a training corpus and inference of topic
distribution on new, unseen documents. The model can also be updated with new documents
for online training.
The core estimation code is based on the `onlineldavb.py` script by M. Hoffman [1]_, see
**Hoffman, Blei, Bach: Online Learning for Latent Dirichlet Allocation, NIPS 2010.**
The algorithm:
* is **streamed**: training documents may come in sequentially, no random access required,
* runs in **constant memory** w.r.t. the number of documents: size of the
training corpus does not affect memory footprint, can process corpora larger than RAM, and
* is **distributed**: makes use of a cluster of machines, if available, to
speed up model estimation.
.. [1] http://www.cs.princeton.edu/~mdhoffma
"""
import logging
import numbers
import os
import numpy as np
import six
from scipy.special import gammaln, psi # gamma function utils
from scipy.special import polygamma
from six.moves import xrange
from collections import defaultdict
from gensim import interfaces, utils, matutils
from gensim.matutils import (
kullback_leibler, hellinger, jaccard_distance, jensen_shannon,
dirichlet_expectation, logsumexp, mean_absolute_difference
)
from gensim.models import basemodel, CoherenceModel
from gensim.models.callbacks import Callback
logger = logging.getLogger('gensim.models.ldamodel')
DTYPE_TO_EPS = {
np.float16: 1e-5,
np.float32: 1e-35,
np.float64: 1e-100,
}
def update_dir_prior(prior, N, logphat, rho):
"""
Updates a given prior using Newton's method, described in
**Huang: Maximum Likelihood Estimation of Dirichlet Distribution Parameters.**
http://jonathan-huang.org/research/dirichlet/dirichlet.pdf
"""
dprior = np.copy(prior) # TODO: unused var???
gradf = N * (psi(np.sum(prior)) - psi(prior) + logphat)
c = N * polygamma(1, np.sum(prior))
q = -N * polygamma(1, prior)
b = np.sum(gradf / q) / (1 / c + np.sum(1 / q))
dprior = -(gradf - b) / q
if all(rho * dprior + prior > 0):
prior += rho * dprior
else:
logger.warning("updated prior not positive")
return prior
class LdaState(utils.SaveLoad):
    """
    Encapsulate information for distributed computation of LdaModel objects.

    Objects of this class are sent over the network, so try to keep them lean to
    reduce traffic.
    """

    def __init__(self, eta, shape, dtype=np.float32):
        # eta: prior on the topic-word distribution, cast to the state's dtype.
        self.eta = eta.astype(dtype, copy=False)
        # sstats: sufficient statistics (num_topics x num_terms) accumulated
        # during the E step; starts at zero.
        self.sstats = np.zeros(shape, dtype=dtype)
        self.numdocs = 0
        self.dtype = dtype

    def reset(self):
        """
        Prepare the state for a new EM iteration (reset sufficient stats).
        """
        self.sstats[:] = 0.0
        self.numdocs = 0

    def merge(self, other):
        """
        Merge the result of an E step from one node with that of another node
        (summing up sufficient statistics).

        The merging is trivial and after merging all cluster nodes, we have the
        exact same result as if the computation was run on a single node (no
        approximation).
        """
        assert other is not None
        self.sstats += other.sstats
        self.numdocs += other.numdocs

    def blend(self, rhot, other, targetsize=None):
        """
        Given LdaState `other`, merge it with the current state. Stretch both to
        `targetsize` documents before merging, so that they are of comparable
        magnitude.

        Merging is done by average weighting: in the extremes, `rhot=0.0` means
        `other` is completely ignored; `rhot=1.0` means `self` is completely ignored.

        This procedure corresponds to the stochastic gradient update from Hoffman
        et al., algorithm 2 (eq. 14).
        """
        assert other is not None
        if targetsize is None:
            targetsize = self.numdocs
        # stretch the current model's expected n*phi counts to target size
        if self.numdocs == 0 or targetsize == self.numdocs:
            scale = 1.0
        else:
            scale = 1.0 * targetsize / self.numdocs
        # NOTE: self.sstats must be down-weighted *before* other's stats are
        # added below -- the order of these two updates matters.
        self.sstats *= (1.0 - rhot) * scale
        # stretch the incoming n*phi counts to target size
        if other.numdocs == 0 or targetsize == other.numdocs:
            scale = 1.0
        else:
            logger.info("merging changes from %i documents into a model of %i documents", other.numdocs, targetsize)
            scale = 1.0 * targetsize / other.numdocs
        self.sstats += rhot * scale * other.sstats
        self.numdocs = targetsize

    def blend2(self, rhot, other, targetsize=None):
        """
        Alternative, more simple blend.
        """
        assert other is not None
        if targetsize is None:
            targetsize = self.numdocs
        # merge the two matrices by summing
        self.sstats += other.sstats
        self.numdocs = targetsize

    def get_lambda(self):
        """Return the posterior over the topic-word distribution: prior + sufficient stats."""
        return self.eta + self.sstats

    def get_Elogbeta(self):
        """Return the expectation of log(beta) under the current variational posterior."""
        return dirichlet_expectation(self.get_lambda())

    @classmethod
    def load(cls, fname, *args, **kwargs):
        """Load a previously saved LdaState, back-filling `dtype` for old models."""
        result = super(LdaState, cls).load(fname, *args, **kwargs)

        # dtype could be absent in old models
        if not hasattr(result, 'dtype'):
            result.dtype = np.float64  # float64 was implicitly used before (cause it's default in numpy)
            logging.info("dtype was not set in saved %s file %s, assuming np.float64", result.__class__.__name__, fname)

        return result
# endclass LdaState
class LdaModel(interfaces.TransformationABC, basemodel.BaseTopicModel):
"""
The constructor estimates Latent Dirichlet Allocation model parameters based
on a training corpus:
>>> lda = LdaModel(corpus, num_topics=10)
You can then infer topic distributions on new, unseen documents, with
>>> doc_lda = lda[doc_bow]
The model can be updated (trained) with new documents via
>>> lda.update(other_corpus)
Model persistency is achieved through its `load`/`save` methods.
"""
    def __init__(self, corpus=None, num_topics=100, id2word=None,
                 distributed=False, chunksize=2000, passes=1, update_every=1,
                 alpha='symmetric', eta=None, decay=0.5, offset=1.0, eval_every=10,
                 iterations=50, gamma_threshold=0.001, minimum_probability=0.01,
                 random_state=None, ns_conf=None, minimum_phi_value=0.01,
                 per_word_topics=False, callbacks=None, dtype=np.float32):
        """
        If given, start training from the iterable `corpus` straight away. If not given,
        the model is left untrained (presumably because you want to call `update()` manually).

        `num_topics` is the number of requested latent topics to be extracted from
        the training corpus.

        `id2word` is a mapping from word ids (integers) to words (strings). It is
        used to determine the vocabulary size, as well as for debugging and topic
        printing.

        `alpha` and `eta` are hyperparameters that affect sparsity of the document-topic
        (theta) and topic-word (lambda) distributions. Both default to a symmetric
        1.0/num_topics prior.

        `alpha` can be set to an explicit array = prior of your choice. It also
        support special values of 'asymmetric' and 'auto': the former uses a fixed
        normalized asymmetric 1.0/topicno prior, the latter learns an asymmetric
        prior directly from your data.

        `eta` can be a scalar for a symmetric prior over topic/word
        distributions, or a vector of shape num_words, which can be used to
        impose (user defined) asymmetric priors over the word distribution.
        It also supports the special value 'auto', which learns an asymmetric
        prior over words directly from your data. `eta` can also be a matrix
        of shape num_topics x num_words, which can be used to impose
        asymmetric priors over the word distribution on a per-topic basis
        (can not be learned from data).

        Turn on `distributed` to force distributed computing
        (see the `web tutorial <http://radimrehurek.com/gensim/distributed.html>`_
        on how to set up a cluster of machines for gensim).

        Calculate and log perplexity estimate from the latest mini-batch every
        `eval_every` model updates (setting this to 1 slows down training ~2x;
        default is 10 for better performance). Set to None to disable perplexity estimation.

        `decay` and `offset` parameters are the same as Kappa and Tau_0 in
        Hoffman et al, respectively.

        `minimum_probability` controls filtering the topics returned for a document (bow).

        `random_state` can be a np.random.RandomState object or the seed for one.

        `callbacks` a list of metric callbacks to log/visualize evaluation metrics of topic model during training.

        `dtype` is data-type to use during calculations inside model. All inputs are also converted to this dtype.
        Available types: `numpy.float16`, `numpy.float32`, `numpy.float64`.

        Example:

        >>> lda = LdaModel(corpus, num_topics=100)  # train model
        >>> print(lda[doc_bow]) # get topic probability distribution for a document
        >>> lda.update(corpus2) # update the LDA model with additional documents
        >>> print(lda[doc_bow])

        >>> lda = LdaModel(corpus, num_topics=50, alpha='auto', eval_every=5)  # train asymmetric alpha from data

        """
        # Only dtypes with a known numeric-stability epsilon are supported.
        if dtype not in DTYPE_TO_EPS:
            raise ValueError(
                "Incorrect 'dtype', please choose one of {}".format(
                    ", ".join("numpy.{}".format(tp.__name__) for tp in sorted(DTYPE_TO_EPS))))

        self.dtype = dtype

        # store user-supplied parameters
        self.id2word = id2word
        if corpus is None and self.id2word is None:
            raise ValueError(
                'at least one of corpus/id2word must be specified, to establish input space dimensionality'
            )

        if self.id2word is None:
            logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
            self.id2word = utils.dict_from_corpus(corpus)
            self.num_terms = len(self.id2word)
        elif len(self.id2word) > 0:
            # vocabulary size = highest word id + 1 (ids need not be contiguous)
            self.num_terms = 1 + max(self.id2word.keys())
        else:
            self.num_terms = 0

        if self.num_terms == 0:
            raise ValueError("cannot compute LDA over an empty collection (no terms)")

        self.distributed = bool(distributed)
        self.num_topics = int(num_topics)
        self.chunksize = chunksize
        self.decay = decay
        self.offset = offset
        self.minimum_probability = minimum_probability
        self.num_updates = 0

        self.passes = passes
        self.update_every = update_every
        self.eval_every = eval_every
        self.minimum_phi_value = minimum_phi_value
        self.per_word_topics = per_word_topics
        self.callbacks = callbacks

        self.alpha, self.optimize_alpha = self.init_dir_prior(alpha, 'alpha')

        assert self.alpha.shape == (self.num_topics,), \
            "Invalid alpha shape. Got shape %s, but expected (%d, )" % (str(self.alpha.shape), self.num_topics)

        if isinstance(eta, six.string_types):
            if eta == 'asymmetric':
                raise ValueError("The 'asymmetric' option cannot be used for eta")

        self.eta, self.optimize_eta = self.init_dir_prior(eta, 'eta')

        self.random_state = utils.get_random_state(random_state)

        assert self.eta.shape == (self.num_terms,) or self.eta.shape == (self.num_topics, self.num_terms), (
            "Invalid eta shape. Got shape %s, but expected (%d, 1) or (%d, %d)" %
            (str(self.eta.shape), self.num_terms, self.num_topics, self.num_terms))

        # VB constants
        self.iterations = iterations
        self.gamma_threshold = gamma_threshold

        # set up distributed environment if necessary
        if not distributed:
            logger.info("using serial LDA version on this node")
            self.dispatcher = None
            self.numworkers = 1
        else:
            if self.optimize_alpha:
                raise NotImplementedError("auto-optimizing alpha not implemented in distributed LDA")
            # set up distributed version
            try:
                import Pyro4
                if ns_conf is None:
                    ns_conf = {}

                with utils.getNS(**ns_conf) as ns:
                    from gensim.models.lda_dispatcher import LDA_DISPATCHER_PREFIX
                    self.dispatcher = Pyro4.Proxy(ns.list(prefix=LDA_DISPATCHER_PREFIX)[LDA_DISPATCHER_PREFIX])
                    logger.debug("looking for dispatcher at %s" % str(self.dispatcher._pyroUri))
                    self.dispatcher.initialize(
                        id2word=self.id2word, num_topics=self.num_topics, chunksize=chunksize,
                        alpha=alpha, eta=eta, distributed=False
                    )
                    self.numworkers = len(self.dispatcher.getworkers())
                    logger.info("using distributed version with %i workers", self.numworkers)
            except Exception as err:
                logger.error("failed to initialize distributed LDA (%s)", err)
                raise RuntimeError("failed to initialize distributed LDA (%s)" % err)

        # Initialize the variational distribution q(beta|lambda)
        self.state = LdaState(self.eta, (self.num_topics, self.num_terms), dtype=self.dtype)
        self.state.sstats[...] = self.random_state.gamma(100., 1. / 100., (self.num_topics, self.num_terms))
        self.expElogbeta = np.exp(dirichlet_expectation(self.state.sstats))

        # Check that we haven't accidentally fall back to np.float64
        assert self.eta.dtype == self.dtype
        assert self.expElogbeta.dtype == self.dtype

        # if a training corpus was provided, start estimating the model right away
        if corpus is not None:
            use_numpy = self.dispatcher is not None
            self.update(corpus, chunks_as_numpy=use_numpy)
def init_dir_prior(self, prior, name):
"""Initialize a Dirichlet prior vector.

Args:
prior: None (treated as 'symmetric'), one of the strings
'symmetric' / 'asymmetric' / 'auto', a list or np array of scalars,
or a single scalar broadcast to every component.
name (str): 'alpha' (per-document topic prior, length num_topics) or
'eta' (per-topic word prior, length num_terms).

Returns:
tuple: (init_prior, is_auto) where `init_prior` is a np array of
``self.dtype`` and `is_auto` flags whether the prior should be
auto-optimized during training.

Raises:
ValueError: if `name` is not 'alpha'/'eta', or `prior` is an
unrecognized string.
"""
if prior is None:
prior = 'symmetric'
# the prior vector length depends on which prior is being initialized
if name == 'alpha':
prior_shape = self.num_topics
elif name == 'eta':
prior_shape = self.num_terms
else:
raise ValueError("'name' must be 'alpha' or 'eta'")
is_auto = False
if isinstance(prior, six.string_types):
if prior == 'symmetric':
# uniform prior; NOTE(review): uses 1/num_topics for eta as well as
# alpha (not 1/prior_shape) -- presumably intentional, confirm upstream
logger.info("using symmetric %s at %s", name, 1.0 / self.num_topics)
init_prior = np.asarray([1.0 / self.num_topics for i in xrange(prior_shape)], dtype=self.dtype)
elif prior == 'asymmetric':
# decreasing prior: component i gets weight 1/(i + sqrt(K)), then normalized
init_prior = \
np.asarray([1.0 / (i + np.sqrt(prior_shape)) for i in xrange(prior_shape)], dtype=self.dtype)
init_prior /= init_prior.sum()
logger.info("using asymmetric %s %s", name, list(init_prior))
elif prior == 'auto':
# start symmetric; the training loop re-estimates this prior from data
is_auto = True
init_prior = np.asarray([1.0 / self.num_topics for i in xrange(prior_shape)], dtype=self.dtype)
if name == 'alpha':
logger.info("using autotuned %s, starting with %s", name, list(init_prior))
else:
raise ValueError("Unable to determine proper %s value given '%s'" % (name, prior))
elif isinstance(prior, list):
init_prior = np.asarray(prior, dtype=self.dtype)
elif isinstance(prior, np.ndarray):
# avoid a copy when the array already has the right dtype
init_prior = prior.astype(self.dtype, copy=False)
elif isinstance(prior, np.number) or isinstance(prior, numbers.Real):
# scalar: broadcast to a full-length vector
init_prior = np.asarray([prior] * prior_shape, dtype=self.dtype)
else:
raise ValueError("%s must be either a np array of scalars, list of scalars, or scalar" % name)
return init_prior, is_auto
def __str__(self):
"""Return a human-readable summary of the model's key hyperparameters."""
return "LdaModel(num_terms=%s, num_topics=%s, decay=%s, chunksize=%s)" % (
self.num_terms, self.num_topics, self.decay, self.chunksize
)
def sync_state(self):
    """Recompute ``self.expElogbeta`` from the current model state.

    Keeps the cached exponentiated topic-word expectations in sync with
    ``self.state`` after the state has been updated.
    """
    elogbeta = self.state.get_Elogbeta()
    self.expElogbeta = np.exp(elogbeta)
    # the cache must keep the model's configured dtype
    assert self.expElogbeta.dtype == self.dtype
def clear(self):
    """Clear model state (free up some memory). Used in the distributed algo."""
    for attr in ('state', 'Elogbeta'):
        setattr(self, attr, None)
def inference(self, chunk, collect_sstats=False):
"""
Given a chunk of sparse document vectors, estimate gamma (parameters
controlling the topic weights) for each document in the chunk.
This function does not modify the model (=is read-only aka const). The
whole input chunk of document is assumed to fit in RAM; chunking of a
large corpus must be done earlier in the pipeline.
If `collect_sstats` is True, also collect sufficient statistics needed
to update the model's topic-word distributions, and return a 2-tuple
`(gamma, sstats)`. Otherwise, return `(gamma, None)`. `gamma` is of shape
`len(chunk) x self.num_topics`.
Avoids computing the `phi` variational parameter directly using the
optimization presented in **Lee, Seung: Algorithms for non-negative matrix factorization, NIPS 2001**.

Args:
chunk: iterable of documents, each a list of `(term_id, count)` pairs.
collect_sstats (bool): whether to also accumulate sufficient statistics.

Returns:
tuple: `(gamma, sstats)` where `sstats` is None unless
`collect_sstats` is True.
"""
try:
len(chunk)
except TypeError:
# convert iterators/generators to plain list, so we have len() etc.
chunk = list(chunk)
if len(chunk) > 1:
logger.debug("performing inference on a chunk of %i documents", len(chunk))
# Initialize the variational distribution q(theta|gamma) for the chunk
gamma = self.random_state.gamma(100., 1. / 100., (len(chunk), self.num_topics)).astype(self.dtype, copy=False)
Elogtheta = dirichlet_expectation(gamma)
expElogtheta = np.exp(Elogtheta)
assert Elogtheta.dtype == self.dtype
assert expElogtheta.dtype == self.dtype
if collect_sstats:
sstats = np.zeros_like(self.expElogbeta, dtype=self.dtype)
else:
sstats = None
converged = 0
# Now, for each document d update that document's gamma and phi
# Inference code copied from Hoffman's `onlineldavb.py` (esp. the
# Lee&Seung trick which speeds things up by an order of magnitude, compared
# to Blei's original LDA-C code, cool!).
for d, doc in enumerate(chunk):
if len(doc) > 0 and not isinstance(doc[0][0], six.integer_types + (np.integer,)):
# make sure the term IDs are ints, otherwise np will get upset
ids = [int(idx) for idx, _ in doc]
else:
ids = [idx for idx, _ in doc]
cts = np.array([cnt for _, cnt in doc], dtype=self.dtype)
gammad = gamma[d, :]
Elogthetad = Elogtheta[d, :]
expElogthetad = expElogtheta[d, :]
expElogbetad = self.expElogbeta[:, ids]
# The optimal phi_{dwk} is proportional to expElogthetad_k * expElogbetad_w.
# phinorm is the normalizer.
# TODO treat zeros explicitly, instead of adding epsilon?
eps = DTYPE_TO_EPS[self.dtype]
phinorm = np.dot(expElogthetad, expElogbetad) + eps
# Iterate between gamma and phi until convergence
for _ in xrange(self.iterations):
lastgamma = gammad
# We represent phi implicitly to save memory and time.
# Substituting the value of the optimal phi back into
# the update for gamma gives this update. Cf. Lee&Seung 2001.
gammad = self.alpha + expElogthetad * np.dot(cts / phinorm, expElogbetad.T)
Elogthetad = dirichlet_expectation(gammad)
expElogthetad = np.exp(Elogthetad)
phinorm = np.dot(expElogthetad, expElogbetad) + eps
# If gamma hasn't changed much, we're done.
meanchange = mean_absolute_difference(gammad, lastgamma)
if meanchange < self.gamma_threshold:
converged += 1
break
gamma[d, :] = gammad
assert gammad.dtype == self.dtype
if collect_sstats:
# Contribution of document d to the expected sufficient
# statistics for the M step.
sstats[:, ids] += np.outer(expElogthetad.T, cts / phinorm)
if len(chunk) > 1:
logger.debug("%i/%i documents converged within %i iterations", converged, len(chunk), self.iterations)
if collect_sstats:
# This step finishes computing the sufficient statistics for the
# M step, so that
# sstats[k, w] = \sum_d n_{dw} * phi_{dwk}
# = \sum_d n_{dw} * exp{Elogtheta_{dk} + Elogbeta_{kw}} / phinorm_{dw}.
sstats *= self.expElogbeta
assert sstats.dtype == self.dtype
assert gamma.dtype == self.dtype
return gamma, sstats
def do_estep(self, chunk, state=None):
    """Perform inference on a chunk of documents and accumulate the
    collected sufficient statistics in `state` (or `self.state` if None).

    Returns the inferred per-document gamma matrix for the chunk.
    """
    target = self.state if state is None else state
    gamma, sstats = self.inference(chunk, collect_sstats=True)
    target.sstats += sstats
    # gamma has one row per document; its shape avoids len() on a generator
    target.numdocs += gamma.shape[0]
    assert gamma.dtype == self.dtype
    return gamma
def update_alpha(self, gammat, rho):
"""
Update parameters for the Dirichlet prior on the per-document
topic weights `alpha` given the last `gammat`.

Args:
gammat: sequence of per-document gamma vectors from the last E step.
rho (float): step size for the online prior update.

Returns:
The updated `self.alpha` array.
"""
N = float(len(gammat))
# average of E[log theta_d] over the documents in this batch
logphat = sum(dirichlet_expectation(gamma) for gamma in gammat) / N
assert logphat.dtype == self.dtype
self.alpha = update_dir_prior(self.alpha, N, logphat, rho)
logger.info("optimized alpha %s", list(self.alpha))
assert self.alpha.dtype == self.dtype
return self.alpha
def update_eta(self, lambdat, rho):
"""
Update parameters for the Dirichlet prior on the per-topic
word weights `eta` given the last `lambdat`.

Args:
lambdat: topic-word matrix (num_topics x num_terms) from the state.
rho (float): step size for the online prior update.

Returns:
The updated `self.eta` array.
"""
N = float(lambdat.shape[0])
# average of E[log beta_k] over topics, flattened to a vocabulary-sized vector
logphat = (sum(dirichlet_expectation(lambda_) for lambda_ in lambdat) / N).reshape((self.num_terms,))
assert logphat.dtype == self.dtype
self.eta = update_dir_prior(self.eta, N, logphat, rho)
assert self.eta.dtype == self.dtype
return self.eta
def log_perplexity(self, chunk, total_docs=None):
"""
Calculate and return per-word likelihood bound, using the `chunk` of
documents as evaluation corpus. Also output the calculated statistics, incl.
perplexity=2^(-bound), to log at INFO level.

Args:
chunk: list of documents (bag-of-words) used as held-out evaluation data.
total_docs (int): total corpus size the chunk was sampled from, used to
scale the bound; defaults to `len(chunk)` (i.e. no subsampling).

Returns:
float: the per-word variational bound.
"""
if total_docs is None:
total_docs = len(chunk)
corpus_words = sum(cnt for document in chunk for _, cnt in document)
# compensate for evaluating only a sample of the full corpus
subsample_ratio = 1.0 * total_docs / len(chunk)
perwordbound = self.bound(chunk, subsample_ratio=subsample_ratio) / (subsample_ratio * corpus_words)
logger.info(
"%.3f per-word bound, %.1f perplexity estimate based on a held-out corpus of %i documents with %i words",
perwordbound, np.exp2(-perwordbound), len(chunk), corpus_words
)
return perwordbound
def update(self, corpus, chunksize=None, decay=None, offset=None,
passes=None, update_every=None, eval_every=None, iterations=None,
gamma_threshold=None, chunks_as_numpy=False):
"""
Train the model with new documents, by EM-iterating over `corpus` until
the topics converge (or until the maximum number of allowed iterations
is reached). `corpus` must be an iterable (repeatable stream of documents),
In distributed mode, the E step is distributed over a cluster of machines.
This update also supports updating an already trained model (`self`)
with new documents from `corpus`; the two models are then merged in
proportion to the number of old vs. new documents. This feature is still
experimental for non-stationary input streams.
For stationary input (no topic drift in new documents), on the other hand,
this equals the online update of Hoffman et al. and is guaranteed to
converge for any `decay` in (0.5, 1.0>. Additionally, for smaller
`corpus` sizes, an increasing `offset` may be beneficial (see
Table 1 in Hoffman et al.)
Args:
corpus (gensim corpus): The corpus with which the LDA model should be updated.
chunks_as_numpy (bool): Whether each chunk passed to `.inference` should be a np
array of not. np can in some settings turn the term IDs
into floats, these will be converted back into integers in
inference, which incurs a performance hit. For distributed
computing it may be desirable to keep the chunks as np
arrays.
For other parameter settings, see :class:`LdaModel` constructor.
"""
# use parameters given in constructor, unless user explicitly overrode them
if decay is None:
decay = self.decay
if offset is None:
offset = self.offset
if passes is None:
passes = self.passes
if update_every is None:
update_every = self.update_every
if eval_every is None:
eval_every = self.eval_every
if iterations is None:
iterations = self.iterations
if gamma_threshold is None:
gamma_threshold = self.gamma_threshold
try:
lencorpus = len(corpus)
except Exception:
# NOTE: counting this way exhausts one-shot generators; a RuntimeError
# is raised further below if the corpus can't be iterated again
logger.warning("input corpus stream has no len(); counting documents")
lencorpus = sum(1 for _ in corpus)
if lencorpus == 0:
logger.warning("LdaModel.update() called with an empty corpus")
return
if chunksize is None:
chunksize = min(lencorpus, self.chunksize)
self.state.numdocs += lencorpus
# decide between online (mini-batch) and batch-style updates
if update_every:
updatetype = "online"
if passes == 1:
updatetype += " (single-pass)"
else:
updatetype += " (multi-pass)"
updateafter = min(lencorpus, update_every * self.numworkers * chunksize)
else:
updatetype = "batch"
updateafter = lencorpus
evalafter = min(lencorpus, (eval_every or 0) * self.numworkers * chunksize)
updates_per_pass = max(1, lencorpus / updateafter)
logger.info(
"running %s LDA training, %s topics, %i passes over "
"the supplied corpus of %i documents, updating model once "
"every %i documents, evaluating perplexity every %i documents, "
"iterating %ix with a convergence threshold of %f",
updatetype, self.num_topics, passes, lencorpus,
updateafter, evalafter, iterations,
gamma_threshold
)
if updates_per_pass * passes < 10:
logger.warning(
"too few updates, training might not converge; "
"consider increasing the number of passes or iterations to improve accuracy"
)
# rho is the "speed" of updating; TODO try other fncs
# pass_ + num_updates handles increasing the starting t for each pass,
# while allowing it to "reset" on the first pass of each update
def rho():
return pow(offset + pass_ + (self.num_updates / chunksize), -decay)
if self.callbacks:
# pass the list of input callbacks to Callback class
callback = Callback(self.callbacks)
callback.set_model(self)
# initialize metrics list to store metric values after every epoch
self.metrics = defaultdict(list)
for pass_ in xrange(passes):
if self.dispatcher:
logger.info('initializing %s workers', self.numworkers)
self.dispatcher.reset(self.state)
else:
# `other` accumulates sufficient statistics for the next M step
other = LdaState(self.eta, self.state.sstats.shape, self.dtype)
dirty = False
reallen = 0
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize, as_numpy=chunks_as_numpy)):
reallen += len(chunk) # keep track of how many documents we've processed so far
if eval_every and ((reallen == lencorpus) or ((chunk_no + 1) % (eval_every * self.numworkers) == 0)):
self.log_perplexity(chunk, total_docs=lencorpus)
if self.dispatcher:
# add the chunk to dispatcher's job queue, so workers can munch on it
logger.info(
"PROGRESS: pass %i, dispatching documents up to #%i/%i",
pass_, chunk_no * chunksize + len(chunk), lencorpus
)
# this will eventually block until some jobs finish, because the queue has a small finite length
self.dispatcher.putjob(chunk)
else:
logger.info(
"PROGRESS: pass %i, at document #%i/%i",
pass_, chunk_no * chunksize + len(chunk), lencorpus
)
gammat = self.do_estep(chunk, other)
if self.optimize_alpha:
self.update_alpha(gammat, rho())
dirty = True
del chunk
# perform an M step. determine when based on update_every, don't do this after every chunk
if update_every and (chunk_no + 1) % (update_every * self.numworkers) == 0:
if self.dispatcher:
# distributed mode: wait for all workers to finish
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
other = self.dispatcher.getstate()
self.do_mstep(rho(), other, pass_ > 0)
del other # frees up memory
if self.dispatcher:
logger.info('initializing workers')
self.dispatcher.reset(self.state)
else:
other = LdaState(self.eta, self.state.sstats.shape, self.dtype)
dirty = False
# endfor single corpus iteration
if reallen != lencorpus:
raise RuntimeError("input corpus size changed during training (don't use generators as input)")
# append current epoch's metric values
if self.callbacks:
current_metrics = callback.on_epoch_end(pass_)
for metric, value in current_metrics.items():
self.metrics[metric].append(value)
if dirty:
# finish any remaining updates
if self.dispatcher:
# distributed mode: wait for all workers to finish
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
other = self.dispatcher.getstate()
self.do_mstep(rho(), other, pass_ > 0)
del other
dirty = False
# endfor entire corpus update
def do_mstep(self, rho, other, extra_pass=False):
"""
M step: use linear interpolation between the existing topics and
collected sufficient statistics in `other` to update the topics.

Args:
rho (float): interpolation weight ("learning rate") used when blending.
other (LdaState): state holding the sufficient statistics accumulated
during the preceding E step.
extra_pass (bool): True when this M step belongs to an additional pass
over the same data; the update counter is not advanced in that case.
"""
logger.debug("updating topics")
# update self with the new blend; also keep track of how much did
# the topics change through this update, to assess convergence
diff = np.log(self.expElogbeta)
self.state.blend(rho, other)
diff -= self.state.get_Elogbeta()
self.sync_state()
# print out some debug info at the end of each EM iteration
self.print_topics(5)
logger.info("topic diff=%f, rho=%f", np.mean(np.abs(diff)), rho)
if self.optimize_eta:
self.update_eta(self.state.get_lambda(), rho)
if not extra_pass:
# only update if this isn't an additional pass
self.num_updates += other.numdocs
def bound(self, corpus, gamma=None, subsample_ratio=1.0):
"""
Estimate the variational bound of documents from `corpus`:
E_q[log p(corpus)] - E_q[log q(corpus)]
Args:
corpus: documents to infer variational bounds from.
gamma: the variational parameters on topic weights for each `corpus`
document (=2d matrix=what comes out of `inference()`).
If not supplied, will be inferred from the model.
subsample_ratio (float): If `corpus` is a sample of the whole corpus,
pass this to inform on what proportion of the corpus it represents.
This is used as a multiplicative factor to scale the likelihood
appropriately.
Returns:
The variational bound score calculated.
"""
score = 0.0
_lambda = self.state.get_lambda()
Elogbeta = dirichlet_expectation(_lambda)
for d, doc in enumerate(corpus): # stream the input doc-by-doc, in case it's too large to fit in RAM
if d % self.chunksize == 0:
logger.debug("bound: at document #%i", d)
if gamma is None:
# infer the per-document topic weights on the fly
gammad, _ = self.inference([doc])
else:
gammad = gamma[d]
Elogthetad = dirichlet_expectation(gammad)
assert gammad.dtype == self.dtype
assert Elogthetad.dtype == self.dtype
# E[log p(doc | theta, beta)]
# NOTE(review): np.sum over a generator relies on a fallback that is
# deprecated in newer numpy versions -- confirm before upgrading numpy
score += np.sum(cnt * logsumexp(Elogthetad + Elogbeta[:, int(id)]) for id, cnt in doc)
# E[log p(theta | alpha) - log q(theta | gamma)]; assumes alpha is a vector
score += np.sum((self.alpha - gammad) * Elogthetad)
score += np.sum(gammaln(gammad) - gammaln(self.alpha))
score += gammaln(np.sum(self.alpha)) - gammaln(np.sum(gammad))
# Compensate likelihood for when `corpus` above is only a sample of the whole corpus. This ensures
# that the likelihood is always rougly on the same scale.
score *= subsample_ratio
# E[log p(beta | eta) - log q (beta | lambda)]; assumes eta is a scalar
score += np.sum((self.eta - _lambda) * Elogbeta)
score += np.sum(gammaln(_lambda) - gammaln(self.eta))
if np.ndim(self.eta) == 0:
sum_eta = self.eta * self.num_terms
else:
sum_eta = np.sum(self.eta)
score += np.sum(gammaln(sum_eta) - gammaln(np.sum(_lambda, 1)))
return score
def show_topics(self, num_topics=10, num_words=10, log=False, formatted=True):
"""
Args:
num_topics (int): show results for first `num_topics` topics.
Unlike LSA, there is no natural ordering between the topics in LDA.
The returned `num_topics <= self.num_topics` subset of all topics is
therefore arbitrary and may change between two LDA training runs.
num_words (int): include top `num_words` with highest probabilities in topic.
log (bool): If True, log output in addition to returning it.
formatted (bool): If True, format topics as strings, otherwise return them as
`(word, probability)` 2-tuples.
Returns:
list: `num_words` most significant words for `num_topics` number of topics
(10 words for top 10 topics, by default).
"""
# a negative or too-large num_topics means "show everything, in index order"
if num_topics < 0 or num_topics >= self.num_topics:
num_topics = self.num_topics
chosen_topics = range(num_topics)
else:
num_topics = min(num_topics, self.num_topics)
# add a little random jitter, to randomize results around the same alpha
sort_alpha = self.alpha + 0.0001 * self.random_state.rand(len(self.alpha))
# random_state.rand returns float64, but converting back to dtype won't speed up anything
sorted_topics = list(matutils.argsort(sort_alpha))
# pick half the topics from the low-alpha end and half from the high-alpha end
chosen_topics = sorted_topics[:num_topics // 2] + sorted_topics[-num_topics // 2:]
shown = []
topic = self.state.get_lambda()
for i in chosen_topics:
topic_ = topic[i]
topic_ = topic_ / topic_.sum() # normalize to probability distribution
bestn = matutils.argsort(topic_, num_words, reverse=True)
topic_ = [(self.id2word[id], topic_[id]) for id in bestn]
if formatted:
topic_ = ' + '.join(['%.3f*"%s"' % (v, k) for k, v in topic_])
shown.append((i, topic_))
if log:
logger.info("topic #%i (%.3f): %s", i, self.alpha[i], topic_)
return shown
def show_topic(self, topicid, topn=10):
    """Return the `topn` most probable words of topic `topicid` as a list of
    `(word, probability)` 2-tuples, mapping term ids through `self.id2word`.

    Args:
    topn (int): Only return 2-tuples for the topn most probable words
    (ignore the rest).
    """
    pairs = []
    for term_id, prob in self.get_topic_terms(topicid, topn):
        pairs.append((self.id2word[term_id], prob))
    return pairs
def get_topics(self):
    """Return the term-topic matrix learned during inference.

    Returns:
    np.ndarray: `num_topics` x `vocabulary_size` array of floats
    (self.dtype), each row normalized to a probability distribution.
    """
    lambda_ = self.state.get_lambda()
    row_totals = lambda_.sum(axis=1)
    return lambda_ / row_totals[:, None]
def get_topic_terms(self, topicid, topn=10):
"""
Args:
topn (int): Only return 2-tuples for the topn most probable words
(ignore the rest).
Returns:
list: `(word_id, probability)` 2-tuples for the most probable words
in topic with id `topicid`.
"""
topic = self.get_topics()[topicid]
# get_topics() already normalizes rows; this re-normalization is a no-op
# safeguard that keeps the result a probability distribution
topic = topic / topic.sum() # normalize to probability distribution
bestn = matutils.argsort(topic, topn, reverse=True)
return [(idx, topic[idx]) for idx in bestn]
def top_topics(self, corpus=None, texts=None, dictionary=None, window_size=None,
coherence='u_mass', topn=20, processes=-1):
"""
Calculate the coherence for each topic; default is Umass coherence.
See the :class:`gensim.models.CoherenceModel` constructor for more info on the
parameters and the different coherence metrics.
Returns:
list: tuples with `(topic_repr, coherence_score)`, where `topic_repr` is a list
of representations of the `topn` terms for the topic. The terms are represented
as tuples of `(membership_in_topic, token)`. The `coherence_score` is a float.
The list is sorted by descending coherence.
"""
# delegate coherence computation entirely to CoherenceModel
cm = CoherenceModel(
model=self, corpus=corpus, texts=texts, dictionary=dictionary,
window_size=window_size, coherence=coherence, topn=topn,
processes=processes
)
coherence_scores = cm.get_coherence_per_topic()
str_topics = []
for topic in self.get_topics(): # topic = array of vocab_size floats, one per term
bestn = matutils.argsort(topic, topn=topn, reverse=True) # top terms for topic
beststr = [(topic[_id], self.id2word[_id]) for _id in bestn] # membership, token
str_topics.append(beststr) # list of topn (float membership, token) tuples
scored_topics = zip(str_topics, coherence_scores)
return sorted(scored_topics, key=lambda tup: tup[1], reverse=True)
def get_document_topics(self, bow, minimum_probability=None, minimum_phi_value=None,
per_word_topics=False):
"""
Args:
bow (list): Bag-of-words representation of the document to get topics for.
minimum_probability (float): Ignore topics with probability below this value
(None by default). If set to None, a value of 1e-8 is used to prevent 0s.
per_word_topics (bool): If True, also returns a list of topics, sorted in
descending order of most likely topics for that word. It also returns a list
of word_ids and each words corresponding topics' phi_values, multiplied by
feature length (i.e, word count).
minimum_phi_value (float): if `per_word_topics` is True, this represents a lower
bound on the term probabilities that are included (None by default). If set
to None, a value of 1e-8 is used to prevent 0s.
Returns:
topic distribution for the given document `bow`, as a list of
`(topic_id, topic_probability)` 2-tuples; when `per_word_topics` is
True, a `(document_topics, word_topic, word_phi)` 3-tuple instead.
"""
if minimum_probability is None:
minimum_probability = self.minimum_probability
minimum_probability = max(minimum_probability, 1e-8) # never allow zero values in sparse output
if minimum_phi_value is None:
# NOTE(review): falls back to minimum_probability, not a dedicated
# minimum_phi default -- presumably intentional, confirm upstream
minimum_phi_value = self.minimum_probability
minimum_phi_value = max(minimum_phi_value, 1e-8) # never allow zero values in sparse output
# if the input vector is a corpus, return a transformed corpus
is_corpus, corpus = utils.is_corpus(bow)
if is_corpus:
kwargs = dict(
per_word_topics=per_word_topics,
minimum_probability=minimum_probability,
minimum_phi_value=minimum_phi_value
)
return self._apply(corpus, **kwargs)
gamma, phis = self.inference([bow], collect_sstats=per_word_topics)
topic_dist = gamma[0] / sum(gamma[0]) # normalize distribution
document_topics = [
(topicid, topicvalue) for topicid, topicvalue in enumerate(topic_dist)
if topicvalue >= minimum_probability
]
if not per_word_topics:
return document_topics
word_topic = [] # contains word and corresponding topic
word_phi = [] # contains word and phi values
for word_type, weight in bow:
phi_values = [] # contains (phi_value, topic) pairing to later be sorted
phi_topic = [] # contains topic and corresponding phi value to be returned 'raw' to user
for topic_id in range(0, self.num_topics):
if phis[topic_id][word_type] >= minimum_phi_value:
# appends phi values for each topic for that word
# these phi values are scaled by feature length
phi_values.append((phis[topic_id][word_type], topic_id))
phi_topic.append((topic_id, phis[topic_id][word_type]))
# list with ({word_id => [(topic_0, phi_value), (topic_1, phi_value) ...]).
word_phi.append((word_type, phi_topic))
# sorts the topics based on most likely topic
# returns a list like ({word_id => [topic_id_most_probable, topic_id_second_most_probable, ...]).
sorted_phi_values = sorted(phi_values, reverse=True)
topics_sorted = [x[1] for x in sorted_phi_values]
word_topic.append((word_type, topics_sorted))
return document_topics, word_topic, word_phi # returns 2-tuple
def get_term_topics(self, word_id, minimum_probability=None):
    """Return the most relevant topics for `word_id` as a list of
    `(topic_id, term_probability)` 2-tuples.

    Args:
    word_id (int): ID of the word to get topic probabilities for; a plain
    word string is also accepted and looked up via `self.id2word`.
    minimum_probability (float): Only include topic probabilities above this
    value (None by default). If set to None, use 1e-8 to prevent including 0s.
    """
    if minimum_probability is None:
        minimum_probability = self.minimum_probability
    # never allow zero values in sparse output
    threshold = max(minimum_probability, 1e-8)
    # if user enters word instead of id in vocab, change to get id
    if isinstance(word_id, str):
        word_id = self.id2word.doc2bow([word_id])[0][0]
    return [
        (topic_id, self.expElogbeta[topic_id][word_id])
        for topic_id in range(self.num_topics)
        if self.expElogbeta[topic_id][word_id] >= threshold
    ]
def diff(self, other, distance="kullback_leibler", num_words=100,
    n_ann_terms=10, diagonal=False, annotation=True, normed=True):
    """Calculate topic-to-topic differences between this model and `other`.

    `other` should be an instance of the same model class (`LdaModel` /
    `LdaMulticore`).

    Args:
    other: model to compare against.
    distance (str): name of the pairwise distance function; one of
    `kullback_leibler`, `hellinger`, `jaccard`, `jensen_shannon`.
    num_words (int): number of most relevant words per topic; used when
    distance == `jaccard` and for annotation.
    n_ann_terms (int): max number of words reported in the
    intersection / symmetric difference between topics (annotation).
    diagonal (bool): if True, only compare identical topic numbers and
    return the diagonal of the difference matrix.
    annotation (bool): whether word intersections/differences between
    topic pairs should also be returned.
    normed (bool): if True, the difference matrix is normalized by its max.

    Returns:
    tuple: `(z, annotation_terms)` where `z` has shape
    `(m1.num_topics, m2.num_topics)` (or `(num_topics,)` when `diagonal`)
    and `annotation_terms[i][j] = [[int_1, ...], [diff_1, ...]]`
    (None when `annotation` is False).

    Raises:
    ValueError: for an unknown `distance`, or when `other` is not an
    instance of this model's class.

    Example:
    >>> m1, m2 = LdaMulticore.load(path_1), LdaMulticore.load(path_2)
    >>> mdiff, annotation = m1.diff(m2)

    Note: this ignores difference in model dtypes.
    """
    distances = {
        "kullback_leibler": kullback_leibler,
        "hellinger": hellinger,
        "jaccard": jaccard_distance,
        "jensen_shannon": jensen_shannon
    }
    if distance not in distances:
        valid_keys = ", ".join("`{}`".format(x) for x in distances.keys())
        raise ValueError("Incorrect distance, valid only {}".format(valid_keys))
    if not isinstance(other, self.__class__):
        # BUGFIX: was `self.__name__`, which raises AttributeError on instances;
        # the class name lives on the type object, not the instance.
        raise ValueError("The parameter `other` must be of type `{}`".format(self.__class__.__name__))
    distance_func = distances[distance]
    d1, d2 = self.get_topics(), other.get_topics()
    t1_size, t2_size = d1.shape[0], d2.shape[0]
    annotation_terms = None
    # top-word sets per topic, for the jaccard distance and for annotation
    fst_topics = [{w for (w, _) in self.show_topic(topic, topn=num_words)} for topic in xrange(t1_size)]
    snd_topics = [{w for (w, _) in other.show_topic(topic, topn=num_words)} for topic in xrange(t2_size)]
    if distance == "jaccard":
        d1, d2 = fst_topics, snd_topics
    if diagonal:
        assert t1_size == t2_size, \
            "Both input models should have same no. of topics, " \
            "as the diagonal will only be valid in a square matrix"
        # initialize z and annotation array
        z = np.zeros(t1_size)
        if annotation:
            annotation_terms = np.zeros(t1_size, dtype=list)
    else:
        # initialize z and annotation matrix
        z = np.zeros((t1_size, t2_size))
        if annotation:
            annotation_terms = np.zeros((t1_size, t2_size), dtype=list)
    # iterate over each cell in the initialized z and annotation
    for topic in np.ndindex(z.shape):
        topic1 = topic[0]
        if diagonal:
            topic2 = topic1
        else:
            topic2 = topic[1]
        z[topic] = distance_func(d1[topic1], d2[topic2])
        if annotation:
            pos_tokens = fst_topics[topic1] & snd_topics[topic2]
            neg_tokens = fst_topics[topic1].symmetric_difference(snd_topics[topic2])
            pos_tokens = list(pos_tokens)[:min(len(pos_tokens), n_ann_terms)]
            neg_tokens = list(neg_tokens)[:min(len(neg_tokens), n_ann_terms)]
            annotation_terms[topic] = [pos_tokens, neg_tokens]
    if normed:
        if np.abs(np.max(z)) > 1e-8:
            z /= np.max(z)
    return z, annotation_terms
def __getitem__(self, bow, eps=None):
"""
Return the topic distribution for the given document `bow`, as a list of
`(topic_id, topic_probability)` 2-tuples.

Args:
bow (list): Bag-of-words representation of a document.
eps (float): Ignore topics with probability below `eps`.
"""
# delegates to get_document_topics using the model's configured defaults
return self.get_document_topics(bow, eps, self.minimum_phi_value, self.per_word_topics)
def save(self, fname, ignore=('state', 'dispatcher'), separately=None, *args, **kwargs):
"""
Save the model to file.
Large internal arrays may be stored into separate files, with `fname` as prefix.
`separately` can be used to define which arrays should be stored in separate files.
`ignore` parameter can be used to define which variables should be ignored, i.e. left
out from the pickled lda model. By default the internal `state` is ignored as it uses
its own serialisation not the one provided by `LdaModel`. The `state` and `dispatcher`
will be added to any ignore parameter defined.
Note: do not save as a compressed file if you intend to load the file back with `mmap`.
Note: If you intend to use models across Python 2/3 versions there are a few things to
keep in mind:
1. The pickled Python dictionaries will not work across Python versions
2. The `save` method does not automatically save all np arrays using np, only
those ones that exceed `sep_limit` set in `gensim.utils.SaveLoad.save`. The main
concern here is the `alpha` array if for instance using `alpha='auto'`.
Please refer to the wiki recipes section (goo.gl/qoje24)
for an example on how to work around these issues.
"""
# the state has its own serialization and goes into a sibling `.state` file
if self.state is not None:
self.state.save(utils.smart_extension(fname, '.state'), *args, **kwargs)
# Save the dictionary separately if not in 'ignore'.
if 'id2word' not in ignore:
utils.pickle(self.id2word, utils.smart_extension(fname, '.id2word'))
# make sure 'state', 'id2word' and 'dispatcher' are ignored from the pickled object, even if
# someone sets the ignore list themselves
if ignore is not None and ignore:
if isinstance(ignore, six.string_types):
ignore = [ignore]
ignore = [e for e in ignore if e] # make sure None and '' are not in the list
ignore = list({'state', 'dispatcher', 'id2word'} | set(ignore))
else:
ignore = ['state', 'dispatcher', 'id2word']
# make sure 'expElogbeta' and 'sstats' are ignored from the pickled object, even if
# someone sets the separately list themselves.
separately_explicit = ['expElogbeta', 'sstats']
# Also add 'alpha' and 'eta' to separately list if they are set 'auto' or some
# array manually.
if (isinstance(self.alpha, six.string_types) and self.alpha == 'auto') or \
(isinstance(self.alpha, np.ndarray) and len(self.alpha.shape) != 1):
separately_explicit.append('alpha')
if (isinstance(self.eta, six.string_types) and self.eta == 'auto') or \
(isinstance(self.eta, np.ndarray) and len(self.eta.shape) != 1):
separately_explicit.append('eta')
# Merge separately_explicit with separately.
if separately:
if isinstance(separately, six.string_types):
separately = [separately]
separately = [e for e in separately if e] # make sure None and '' are not in the list
separately = list(set(separately_explicit) | set(separately))
else:
separately = separately_explicit
super(LdaModel, self).save(fname, ignore=ignore, separately=separately, *args, **kwargs)
@classmethod
def load(cls, fname, *args, **kwargs):
    """Load a previously saved :class:`LdaModel` from `fname` (see `save`).

    Large arrays can be memmap'ed back as read-only (shared memory) by
    setting `mmap='r'`:

    >>> LdaModel.load(fname, mmap='r')
    """
    # default to no memory-mapping unless the caller asked for it
    kwargs.setdefault('mmap', None)
    result = super(LdaModel, cls).load(fname, *args, **kwargs)

    # Models saved with gensim < 0.13.2 have no `random_state` attribute;
    # fall back to the library default in that case.
    if not hasattr(result, 'random_state'):
        result.random_state = utils.get_random_state(None)
        logging.warning("random_state not set so using default value")

    # `dtype` may be absent in old models; float64 was the implicit default.
    if not hasattr(result, 'dtype'):
        result.dtype = np.float64
        logging.info("dtype was not set in saved %s file %s, assuming np.float64", result.__class__.__name__, fname)

    # The model state lives in a side file; load it best-effort.
    state_fname = utils.smart_extension(fname, '.state')
    try:
        result.state = LdaState.load(state_fname, *args, **kwargs)
    except Exception as e:
        logging.warning("failed to load state from %s: %s", state_fname, e)

    # Models saved with gensim >= 0.13.2 pickle the dictionary separately;
    # for older models `id2word` was already restored by the main pickle above.
    id2word_fname = utils.smart_extension(fname, '.id2word')
    if os.path.isfile(id2word_fname):
        try:
            result.id2word = utils.unpickle(id2word_fname)
        except Exception as e:
            logging.warning("failed to load id2word dictionary from %s: %s", id2word_fname, e)
    return result
| 55,980 | 43.079528 | 120 | py |
poincare_glove | poincare_glove-master/gensim/models/normmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
import logging
from gensim import interfaces, matutils
logger = logging.getLogger(__name__)
class NormModel(interfaces.TransformationABC):
    """Objects of this class realize the explicit normalization of vectors (l1 and l2)."""

    def __init__(self, corpus=None, norm='l2'):
        """Set up the transformation and optionally pre-normalize a corpus.

        If :math:`v_{i,j}` is the 'i'th component of the vector representing document 'j',
        the l1 normalization rescales by the sum of absolute component values and
        the l2 normalization by the Euclidean length of the document vector.

        Parameters
        ----------
        corpus : iterable of iterable of (int, number), optional
            Input corpus; when given, all documents are normalized immediately.
        norm : {'l1', 'l2'}, optional
            Norm used to normalize.
        """
        self.norm = norm
        if corpus is not None:
            self.calc_norm(corpus)

    def __str__(self):
        return "NormModel(num_docs=%s, num_nnz=%s, norm=%s)" % (self.num_docs, self.num_nnz, self.norm)

    def calc_norm(self, corpus):
        """Normalize every document of `corpus` and record corpus statistics.

        Parameters
        ----------
        corpus : iterable of iterable of (int, number)
            Input corpus.
        """
        logger.info("Performing %s normalization...", self.norm)
        doc_count = 0
        nnz_count = 0
        normalized = []
        # single pass over `corpus`, so one-shot iterators are supported
        for doc_count, bow in enumerate(corpus, 1):
            nnz_count += len(bow)
            normalized.append(matutils.unitvec(bow, self.norm))
        self.num_docs = doc_count
        self.num_nnz = nnz_count
        self.norms = normalized

    def normalize(self, bow):
        """Normalize a single document in BoW format.

        Parameters
        ----------
        bow : list of (int, number)
            Document in BoW format.

        Returns
        -------
        list of (int, number)
            Normalized document.
        """
        return matutils.unitvec(bow, self.norm)

    def __getitem__(self, bow):
        """Alias for :meth:`normalize`, enabling the `model[bow]` syntax."""
        return self.normalize(bow)
| 2,676 | 25.77 | 114 | py |
poincare_glove | poincare_glove-master/gensim/models/callbacks.py | import gensim
import logging
import copy
import sys
import numpy as np
if sys.version_info[0] >= 3:
from queue import Queue
else:
from Queue import Queue
# Visdom is used for training stats visualization
try:
from visdom import Visdom
VISDOM_INSTALLED = True
except ImportError:
VISDOM_INSTALLED = False
class Metric(object):
    """Abstract base class for topic-model evaluation metrics.

    Subclasses override :meth:`get_value`; this base provides a common string
    representation and a helper for bulk attribute assignment.
    """

    def __str__(self):
        """Return the user-supplied title, or the class name without its
        'Metric' suffix when no title was set."""
        if self.title is None:
            # e.g. 'CoherenceMetric' -> 'Coherence'
            return type(self).__name__[:-6]
        return self.title

    def set_parameters(self, **parameters):
        """Attach every keyword argument to ``self`` as an attribute."""
        for name in parameters:
            setattr(self, name, parameters[name])

    def get_value(self):
        """Compute the metric value; must be implemented by subclasses."""
        pass
class CoherenceMetric(Metric):
    """Metric that scores topic coherence at the end of each epoch."""

    def __init__(self, corpus=None, texts=None, dictionary=None, coherence=None,
                 window_size=None, topn=10, logger=None, viz_env=None, title=None):
        """
        Args:
            corpus : Gensim document corpus (needed for 'u_mass' coherence).
            texts : tokenized texts; needed for coherence measures that use a
                sliding-window probability estimator ('c_v', 'c_uci', 'c_npmi').
            dictionary : Gensim id-to-word mapping; not needed when the model
                already carries `id2word`. If both exist, `dictionary` wins.
            window_size : size of the boolean sliding window for the estimator;
                ignored by 'u_mass'. Defaults per measure: 'c_v' 110,
                'c_uci' 10, 'c_npmi' 10.
            coherence : coherence measure, one of 'u_mass', 'c_v', 'c_uci'
                (a.k.a. c_pmi), 'c_npmi'.
            topn : number of top words extracted from each topic.
            logger : "shell" to print the value, "visdom" to plot it in Visdom.
            viz_env : Visdom environment used for plotting.
            title : title of the graph plot.
        """
        self.set_parameters(
            corpus=corpus, texts=texts, dictionary=dictionary, coherence=coherence,
            window_size=window_size, topn=topn, logger=logger, viz_env=viz_env, title=title
        )

    def get_value(self, **kwargs):
        """Compute coherence for the current epoch.

        Args:
            model : pre-trained topic model (LdaModel, LdaMallet or
                LdaVowpalWabbit wrapper); exactly one of `model`/`topics`
                should be provided.
            topics : list of tokenized topics, for unsupported models.
        """
        # reset both so a stale value from a previous call cannot leak through;
        # only one of them is expected in kwargs
        self.model = None
        self.topics = None
        self.set_parameters(**kwargs)
        cm = gensim.models.CoherenceModel(
            model=self.model, topics=self.topics, texts=self.texts, corpus=self.corpus,
            dictionary=self.dictionary, window_size=self.window_size,
            coherence=self.coherence, topn=self.topn
        )
        return cm.get_coherence()
class PerplexityMetric(Metric):
    """Metric reporting corpus perplexity of the model after each epoch."""

    def __init__(self, corpus=None, logger=None, viz_env=None, title=None):
        """
        Args:
            corpus : Gensim document corpus used for the perplexity estimate.
            logger : "shell" to print the value, "visdom" to plot it in Visdom.
            viz_env : Visdom environment used for plotting.
            title : title of the graph plot.
        """
        self.set_parameters(corpus=corpus, logger=logger, viz_env=viz_env, title=title)

    def get_value(self, **kwargs):
        """Compute perplexity of the trained model over `self.corpus`.

        Args:
            model : trained topic model.
        """
        self.set_parameters(**kwargs)
        # total token weight of the corpus, to turn the bound into a per-word value
        token_total = sum(weight for document in self.corpus for _, weight in document)
        per_word_bound = self.model.bound(self.corpus) / token_total
        # bound is a log2 quantity; exponentiate back to perplexity
        return np.exp2(-per_word_bound)
class DiffMetric(Metric):
    """Metric reporting per-topic difference between two model states."""

    def __init__(self, distance="jaccard", num_words=100, n_ann_terms=10, diagonal=True,
                 annotation=False, normed=True, logger=None, viz_env=None, title=None):
        """
        Args:
            distance : distance measure for topic pairs, one of
                `kullback_leibler`, `hellinger`, `jaccard`.
            num_words : number of most relevant words used when distance is
                `jaccard` (also used for annotation).
            n_ann_terms : max number of words in intersection/symmetric
                difference between topics (used for annotation).
            diagonal : compare only identical topic numbers.
            annotation : compute word intersection/difference between topics.
            normed (bool) : if True, the resulting matrix/array is normalized.
            logger : "shell" to print the value, "visdom" to plot it in Visdom.
            viz_env : Visdom environment used for plotting.
            title : title of the graph plot.
        """
        self.set_parameters(
            distance=distance, num_words=num_words, n_ann_terms=n_ann_terms,
            diagonal=diagonal, annotation=annotation, normed=normed,
            logger=logger, viz_env=viz_env, title=title
        )

    def get_value(self, **kwargs):
        """Return the diff diagonal between two model states.

        Args:
            model : trained topic model.
            other_model : second model instance to diff against.
        """
        self.set_parameters(**kwargs)
        diagonal_values, _ = self.model.diff(
            self.other_model, self.distance, self.num_words, self.n_ann_terms,
            self.diagonal, self.annotation, self.normed
        )
        return diagonal_values
class ConvergenceMetric(Metric):
    """Metric reporting a scalar convergence value (summed topic diffs)."""

    def __init__(self, distance="jaccard", num_words=100, n_ann_terms=10, diagonal=True,
                 annotation=False, normed=True, logger=None, viz_env=None, title=None):
        """
        Args:
            distance : distance measure for topic pairs, one of
                `kullback_leibler`, `hellinger`, `jaccard`.
            num_words : number of most relevant words used when distance is
                `jaccard` (also used for annotation).
            n_ann_terms : max number of words in intersection/symmetric
                difference between topics (used for annotation).
            diagonal : compare only identical topic numbers.
            annotation : compute word intersection/difference between topics.
            normed (bool) : if True, the resulting matrix/array is normalized.
            logger : "shell" to print the value, "visdom" to plot it in Visdom.
            viz_env : Visdom environment used for plotting.
            title : title of the graph plot.
        """
        self.set_parameters(
            distance=distance, num_words=num_words, n_ann_terms=n_ann_terms,
            diagonal=diagonal, annotation=annotation, normed=normed,
            logger=logger, viz_env=viz_env, title=title
        )

    def get_value(self, **kwargs):
        """Return the summed diff diagonal between two model states.

        Args:
            model : trained topic model.
            other_model : second model instance to diff against.
        """
        self.set_parameters(**kwargs)
        diagonal_values, _ = self.model.diff(
            self.other_model, self.distance, self.num_words, self.n_ann_terms,
            self.diagonal, self.annotation, self.normed
        )
        # scalar convergence value: sum over all per-topic differences
        return np.sum(diagonal_values)
class Callback(object):
    """
    Used to log/visualize the evaluation metrics during training. The values are stored at the end of each epoch.
    """

    def __init__(self, metrics):
        """
        Args:
            metrics : a list of callbacks. Possible values:
                "CoherenceMetric"
                "PerplexityMetric"
                "DiffMetric"
                "ConvergenceMetric"
        """
        # list of metrics to be plot
        self.metrics = metrics

    def set_model(self, model):
        """
        Save the model instance and initialize any required variables which would be updated throughout training
        """
        self.model = model
        self.previous = None
        # check for any metric which need model state from previous epoch
        if any(isinstance(metric, (DiffMetric, ConvergenceMetric)) for metric in self.metrics):
            self.previous = copy.deepcopy(model)
            # store diff diagonals of previous epochs
            self.diff_mat = Queue()
        if any(metric.logger == "visdom" for metric in self.metrics):
            if not VISDOM_INSTALLED:
                raise ImportError("Please install Visdom for visualization")
            self.viz = Visdom()
            # store initial plot windows of every metric (same window will be updated with increasing epochs)
            self.windows = []
        if any(metric.logger == "shell" for metric in self.metrics):
            # set logger for current topic model
            self.log_type = logging.getLogger('gensim.models.ldamodel')

    def on_epoch_end(self, epoch, topics=None):
        """
        Log or visualize current epoch's metric value
        Args:
            epoch : current epoch no.
            topics : topic distribution from current epoch (required for coherence of unsupported topic models)
        """
        # stores current epoch's metric values
        current_metrics = {}
        # plot all metrics in current epoch
        for i, metric in enumerate(self.metrics):
            label = str(metric)
            value = metric.get_value(topics=topics, model=self.model, other_model=self.previous)
            current_metrics[label] = value
            if metric.logger == "visdom":
                if epoch == 0:
                    # first epoch: create the plot window (heatmap for array-valued
                    # metrics such as diff diagonals, line plot for scalar metrics)
                    if value.ndim > 0:
                        diff_mat = np.array([value])
                        viz_metric = self.viz.heatmap(
                            X=diff_mat.T, env=metric.viz_env, opts=dict(xlabel='Epochs', ylabel=label, title=label)
                        )
                        # store current epoch's diff diagonal
                        self.diff_mat.put(diff_mat)
                        # saving initial plot window
                        self.windows.append(copy.deepcopy(viz_metric))
                    else:
                        viz_metric = self.viz.line(
                            Y=np.array([value]), X=np.array([epoch]), env=metric.viz_env,
                            opts=dict(xlabel='Epochs', ylabel=label, title=label)
                        )
                        # saving initial plot window
                        self.windows.append(copy.deepcopy(viz_metric))
                else:
                    # later epochs: update the existing window for this metric
                    if value.ndim > 0:
                        # concatenate with previous epoch's diff diagonals
                        diff_mat = np.concatenate((self.diff_mat.get(), np.array([value])))
                        self.viz.heatmap(
                            X=diff_mat.T, env=metric.viz_env, win=self.windows[i],
                            opts=dict(xlabel='Epochs', ylabel=label, title=label)
                        )
                        self.diff_mat.put(diff_mat)
                    else:
                        # NOTE(review): `updateTrace` was removed in newer Visdom
                        # releases -- confirm the pinned visdom version supports it.
                        self.viz.updateTrace(
                            Y=np.array([value]), X=np.array([epoch]), env=metric.viz_env, win=self.windows[i]
                        )
            if metric.logger == "shell":
                statement = "".join(("Epoch ", str(epoch), ": ", label, " estimate: ", str(value)))
                self.log_type.info(statement)
            # check for any metric which need model state from previous epoch
            if isinstance(metric, (DiffMetric, ConvergenceMetric)):
                self.previous = copy.deepcopy(self.model)
        return current_metrics
class CallbackAny2Vec(object):
    """Base class to build callbacks. Callbacks are used to apply custom functions over the model at specific points
    during training (epoch start, batch end etc.). To implement a Callback, subclass
    :class:`~gensim.models.callbacks.CallbackAny2Vec`, look at the example below
    which creates a callback to save a training model after each epoch:
    >>> from gensim.test.utils import common_texts as sentences
    >>> from gensim.models.callbacks import CallbackAny2Vec
    >>> from gensim.models import Word2Vec
    >>> from gensim.test.utils import get_tmpfile
    >>>
    >>> class EpochSaver(CallbackAny2Vec):
    ...     "Callback to save model after every epoch"
    ...     def __init__(self, path_prefix):
    ...         self.path_prefix = path_prefix
    ...         self.epoch = 0
    ...     def on_epoch_end(self, model):
    ...         output_path = '{}_epoch{}.model'.format(self.path_prefix, self.epoch)
    ...         print("Save model to {}".format(output_path))
    ...         model.save(output_path)
    ...         self.epoch += 1
    ...
    >>>
    >>> class EpochLogger(CallbackAny2Vec):
    ...     "Callback to log information about training"
    ...     def __init__(self):
    ...         self.epoch = 0
    ...     def on_epoch_begin(self, model):
    ...         print("Epoch #{} start".format(self.epoch))
    ...     def on_epoch_end(self, model):
    ...         print("Epoch #{} end".format(self.epoch))
    ...         self.epoch += 1
    ...
    >>> epoch_saver = EpochSaver(get_tmpfile("temporary_model"))
    >>> epoch_logger = EpochLogger()
    >>> w2v_model = Word2Vec(sentences, iter=5, size=10, min_count=0, seed=42, callbacks=[epoch_saver, epoch_logger])
    """

    def on_epoch_begin(self, model):
        """Method called on the start of epoch.
        Parameters
        ----------
        model : class:`~gensim.models.base_any2vec.BaseWordEmbeddingsModel`
            Current model.
        """
        pass

    def on_epoch_end(self, model):
        """Method called on the end of epoch.
        Parameters
        ----------
        model : class:`~gensim.models.base_any2vec.BaseWordEmbeddingsModel`
            Current model.
        """
        pass

    def on_batch_begin(self, model):
        """Method called on the start of batch.
        Parameters
        ----------
        model : class:`~gensim.models.base_any2vec.BaseWordEmbeddingsModel`
            Current model.
        """
        pass

    def on_batch_end(self, model):
        """Method called on the end of batch.
        Parameters
        ----------
        model : class:`~gensim.models.base_any2vec.BaseWordEmbeddingsModel`
            Current model.
        """
        pass

    def on_train_begin(self, model):
        """Method called on the start of training process.
        Parameters
        ----------
        model : class:`~gensim.models.base_any2vec.BaseWordEmbeddingsModel`
            Current model.
        """
        pass

    def on_train_end(self, model):
        """Method called on the end of training process.
        Parameters
        ----------
        model : class:`~gensim.models.base_any2vec.BaseWordEmbeddingsModel`
            Current model.
        """
        pass
class LossLogger(CallbackAny2Vec):
    """Callback that appends, after every epoch, the training loss and the
    Spearman correlations on three word-similarity benchmarks (Stanford Rare
    Word, WordSim-353, SimLex-999) to a log file.
    """

    def __init__(self, log_file, print_delay=10):
        # number of epochs completed so far; incremented after each write
        self.epoch = 0
        # NOTE(review): `print_delay` is stored but never read in this class --
        # confirm whether batch-level logging was intended.
        self.print_delay = print_delay
        self.log_file = log_file

    def on_epoch_end(self, model):
        # Evaluate the current embeddings on the three benchmarks.
        # NOTE(review): benchmark file paths are hard-coded relative paths, so
        # this callback assumes a specific working directory -- verify at call site.
        _, rw_spearman_corr, _ = model.wv.evaluate_word_pairs('../data/Stanford Rare Word/rw_processed.txt')
        _, wordsim_spearman, _ = model.wv.evaluate_word_pairs('../msc_tifreaa/gensim/test/test_data/wordsim353.tsv')
        _, simlex_spearman, _ = model.wv.evaluate_word_pairs('../msc_tifreaa/gensim/test/test_data/simlex999.txt')
        # Append (not overwrite) so successive epochs accumulate in one file.
        with open(self.log_file, "a") as f:
            f.write(
                "EPOCH - %d : EPOCH END: epoch_loss %.4f, RareWord_corr %.5f, WordSim_corr %.5f, SimLex_corr %.5f\n" %
                ((self.epoch + 1), model.get_latest_training_loss(), rw_spearman_corr[0], wordsim_spearman[0],
                 simlex_spearman[0])
            )
        self.epoch += 1
class WordEmbCheckpointSaver(CallbackAny2Vec):
    """Persist a model's word-embedding checkpoints to disk when training ends."""

    def __init__(self, ckpt_filename):
        # destination file for the pickled checkpoints
        self.ckpt_filename = ckpt_filename

    def on_train_begin(self, model):
        """Prepare the model's checkpoint container, if it has one."""
        if not hasattr(model, "word_checkpoints"):
            return
        # Convert back from numpy array to python list.
        model.word_checkpoints.convert_to_list()
        model.word_checkpoints.add_checkpoints()

    def on_train_end(self, model):
        """Write the accumulated checkpoints to `ckpt_filename` in binary mode."""
        if not hasattr(model, "word_checkpoints"):
            return
        with open(self.ckpt_filename, "wb") as f:
            model.word_checkpoints.save(f)
class LossSetter(CallbackAny2Vec):
    """Zero out the model's running loss accumulator at the start of each epoch,
    so `get_latest_training_loss()` reports a per-epoch (not cumulative) value."""

    def on_epoch_begin(self, model):
        """Reset the running training loss before the epoch starts."""
        model.running_training_loss = 0.0
class VectorNormLogger(CallbackAny2Vec):
    """Callback that logs, after every epoch, the average L2 norm of target and
    context vectors over several "most frequent N words" buckets.
    """

    def __init__(self, log_file):
        # start at -1 so the extra on_epoch_end() issued from on_train_begin()
        # logs the pre-training state as "EPOCH - 0"
        self.epoch = -1
        self.log_file = log_file
        # frequency-rank boundaries; entry i+1 is the "most frequent N words" cutoff
        self.word_freq_buckets = [0, 10, 100, 500, 1000, 5000, 10000, 50000, 100000]
        self.vocab_size = 0
        self.num_words = 0

    def on_train_begin(self, model):
        # Setup required to be able to compute norms for each bucket.
        self.vocab_size = len(model.wv.index2entity)
        # Clamp the first boundary exceeding the vocabulary size to vocab_size
        # and drop all later boundaries.
        for i, b in enumerate(self.word_freq_buckets):
            if b > self.vocab_size:
                self.word_freq_buckets[i] = self.vocab_size
                self.word_freq_buckets = self.word_freq_buckets[:i+1]
                break
        self.num_words = self.word_freq_buckets[-1]
        # log the norms of the freshly initialized vectors before training starts
        self.on_epoch_end(model)

    def on_epoch_end(self, model):
        # Compute the average norm for each word frequency bucket, for both target vectors and context vectors.
        target_norms = np.array([np.linalg.norm(model.wv.word_vec(w)) for w in model.wv.index2entity[:self.num_words]])
        context_norms = np.array([np.linalg.norm(model.trainables.syn1neg[idx])
                                  for idx in range(self.vocab_size)[:self.num_words]])
        # per-bucket sums of norms over the rank slice [start, end)
        target_emb_sums = np.array([np.sum(target_norms[start:end])
                                    for start, end in zip(self.word_freq_buckets[:-1], self.word_freq_buckets[1:])])
        context_emb_sums = np.array([np.sum(context_norms[start:end])
                                     for start, end in zip(self.word_freq_buckets[:-1], self.word_freq_buckets[1:])])
        # cumulative averages: entry i averages over ALL words up to boundary
        # i+1 ("most frequent N words"), not just the words in one bucket
        target_emb_avgs = [np.sum(target_emb_sums[:i+1]) / self.word_freq_buckets[i+1]
                           for i in range(len(self.word_freq_buckets)-1)]
        context_emb_avgs = [np.sum(context_emb_sums[:i+1]) / self.word_freq_buckets[i+1]
                            for i in range(len(self.word_freq_buckets)-1)]
        with open(self.log_file, "a") as f:
            for i, bucket in enumerate(self.word_freq_buckets[1:]):
                f.write(
                    "EPOCH - %d : Average target and context vector norm for most frequent %d words: %.4f / %.4f\n" %
                    ((self.epoch + 1), bucket, target_emb_avgs[i], context_emb_avgs[i])
                )
        self.epoch += 1
| 20,848 | 38.18985 | 120 | py |
poincare_glove | poincare_glove-master/gensim/models/lsimodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Module for `Latent Semantic Analysis (aka Latent Semantic Indexing)
<https://en.wikipedia.org/wiki/Latent_semantic_analysis#Latent_semantic_indexing>`_.
Implements fast truncated SVD (Singular Value Decomposition). The SVD decomposition can be updated with new observations
at any time, for an online, incremental, memory-efficient training.
This module actually contains several algorithms for decomposition of large corpora, a
combination of which effectively and transparently allows building LSI models for:
* corpora much larger than RAM: only constant memory is needed, independent of
the corpus size
* corpora that are streamed: documents are only accessed sequentially, no
random access
* corpora that cannot be even temporarily stored: each document can only be
seen once and must be processed immediately (one-pass algorithm)
* distributed computing for very large corpora, making use of a cluster of
machines
Wall-clock `performance on the English Wikipedia <http://radimrehurek.com/gensim/wiki.html>`_
(2G corpus positions, 3.2M documents, 100K features, 0.5G non-zero entries in the final TF-IDF matrix),
requesting the top 400 LSI factors:
====================================================== ============ ==================
algorithm serial distributed
====================================================== ============ ==================
one-pass merge algorithm 5h14m 1h41m
multi-pass stochastic algo (with 2 power iterations) 5h39m N/A [1]_
====================================================== ============ ==================
*serial* = Core 2 Duo MacBook Pro 2.53Ghz, 4GB RAM, libVec
*distributed* = cluster of four logical nodes on three physical machines, each
with dual core Xeon 2.0GHz, 4GB RAM, ATLAS
Examples
--------
>>> from gensim.test.utils import common_dictionary, common_corpus
>>> from gensim.models import LsiModel
>>>
>>> model = LsiModel(common_corpus, id2word=common_dictionary)
>>> vectorized_corpus = model[common_corpus] # vectorize input copus in BoW format
.. [1] The stochastic algo could be distributed too, but most time is already spent
reading/decompressing the input from disk in its 4 passes. The extra network
traffic due to data distribution across cluster nodes would likely make it
*slower*.
"""
import logging
import sys
import numpy as np
import scipy.linalg
import scipy.sparse
from scipy.sparse import sparsetools
from six import iterkeys
from six.moves import xrange
from gensim import interfaces, matutils, utils
from gensim.models import basemodel
logger = logging.getLogger(__name__)
# accuracy defaults for the multi-pass stochastic algo
P2_EXTRA_DIMS = 100 # set to `None` for dynamic P2_EXTRA_DIMS=k
P2_EXTRA_ITERS = 2
def clip_spectrum(s, k, discard=0.001):
    """Decide how many factors are worth keeping, dropping numerically
    insignificant (tiny, unstable) trailing factors.

    Parameters
    ----------
    s : list of float
        Eigenvalues of the original matrix.
    k : int
        Maximum desired rank (number of factors).
    discard : float
        Fraction of the spectrum's energy that may be discarded.

    Returns
    -------
    int
        Rank (number of factors) of the reduced matrix, at most `k`.
    """
    # fraction of total spectral energy remaining beyond each successive factor
    tail_energy = np.abs(1.0 - np.cumsum(s / np.sum(s)))
    # discard at most `discard` of the energy, but never more than 1/k of it
    threshold = min(discard, 1.0 / k)
    significant = 1 + int((tail_energy > threshold).sum())
    k = min(k, significant)  # clip against k
    logger.info("keeping %i factors (discarding %.3f%% of energy spectrum)", k, 100 * tail_energy[k - 1])
    return k
def asfarray(a, name=''):
    """Return `a` laid out in Fortran (column-major) memory order.

    Arrays that are already F-contiguous are returned unchanged (no copy);
    any other layout is converted via `np.asfortranarray`.

    Parameters
    ----------
    a : numpy.ndarray
        Input array.
    name : str, optional
        Array name, used only in the debug log message.

    Returns
    -------
    np.ndarray
        `a` itself, or an F-ordered copy of its contents.
    """
    if a.flags.f_contiguous:
        return a
    logger.debug("converting %s array %s to FORTRAN order", a.shape, name)
    return np.asfortranarray(a)
def ascarray(a, name=''):
    """Return `a` laid out contiguously in memory (C, row-major order).

    Arrays that are already contiguous are returned unchanged (no copy);
    any other layout is converted via `np.ascontiguousarray`.

    Parameters
    ----------
    a : numpy.ndarray
        Input array.
    name : str, optional
        Array name, used only in the debug log message.

    Returns
    -------
    np.ndarray
        `a` itself, or a C-ordered copy of its contents.
    """
    if a.flags.contiguous:
        return a
    logger.debug("converting %s array %s to C order", a.shape, name)
    return np.ascontiguousarray(a)
class Projection(utils.SaveLoad):
    """Lower dimension projections of a Term-Passage matrix.
    This is the class taking care of the 'core math': interfacing with corpora, splitting large corpora into chunks
    and merging them etc. This done through the higher-level :class:`~gensim.models.lsimodel.LsiModel` class.
    Notes
    -----
    The projection can be later updated by merging it with another :class:`~gensim.models.lsimodel.Projection`
    via :meth:`~gensim.models.lsimodel.Projection.merge`.
    """

    def __init__(self, m, k, docs=None, use_svdlibc=False, power_iters=P2_EXTRA_ITERS,
                 extra_dims=P2_EXTRA_DIMS, dtype=np.float64):
        """Construct the (U, S) projection from a corpus.
        Parameters
        ----------
        m : int
            Number of features (terms) in the corpus.
        k : int
            Desired rank of the decomposed matrix.
        docs : {iterable of list of (int, float), scipy.sparse.csc}
            Corpus in BoW format or as sparse matrix.
        use_svdlibc : bool, optional
            If True - will use `sparsesvd library <https://pypi.python.org/pypi/sparsesvd/>`_,
            otherwise - our own version will be used.
        power_iters: int, optional
            Number of power iteration steps to be used. Tune to improve accuracy.
        extra_dims : int, optional
            Extra samples to be used besides the rank `k`. Tune to improve accuracy.
        dtype : numpy.dtype, optional
            Enforces a type for elements of the decomposed matrix.
        """
        self.m, self.k = m, k
        self.power_iters = power_iters
        self.extra_dims = extra_dims
        if docs is not None:
            # base case decomposition: given a job `docs`, compute its decomposition,
            # *in-core*.
            if not use_svdlibc:
                u, s = stochastic_svd(
                    docs, k, chunksize=sys.maxsize,
                    num_terms=m, power_iters=self.power_iters,
                    extra_dims=self.extra_dims, dtype=dtype)
            else:
                try:
                    import sparsesvd
                except ImportError:
                    raise ImportError("`sparsesvd` module requested but not found; run `easy_install sparsesvd`")
                logger.info("computing sparse SVD of %s matrix", str(docs.shape))
                if not scipy.sparse.issparse(docs):
                    docs = matutils.corpus2csc(docs)
                # ask for extra factors, because for some reason SVDLIBC sometimes returns fewer factors than requested
                ut, s, vt = sparsesvd.sparsesvd(docs, k + 30)
                u = ut.T
                del ut, vt
            # keep only the numerically significant part of the spectrum
            k = clip_spectrum(s ** 2, self.k)
            self.u = u[:, :k].copy()
            self.s = s[:k].copy()
        else:
            # empty projection; populated later via merge()
            self.u, self.s = None, None

    def empty_like(self):
        """Get an empty Projection with the same parameters as the current object.
        Returns
        -------
        :class:`~gensim.models.lsimodel.Projection`
            An empty copy (without corpus) of the current projection.
        """
        return Projection(self.m, self.k, power_iters=self.power_iters, extra_dims=self.extra_dims)

    def merge(self, other, decay=1.0):
        """Merge current :class:`~gensim.models.lsimodel.Projection` instance with another.
        Warnings
        --------
        The content of `other` is destroyed in the process, so pass this function a copy of `other`
        if you need it further. The `other` :class:`~gensim.models.lsimodel.Projection` is expected to contain
        the same number of features.
        Parameters
        ----------
        other : :class:`~gensim.models.lsimodel.Projection`
            The Projection object to be merged into the current one. It will be destroyed after merging.
        decay : float, optional
            Weight of existing observations relatively to new ones.
            Setting `decay` < 1.0 causes re-orientation towards new data trends in the input document stream,
            by giving less emphasis to old observations. This allows LSA to gradually "forget" old observations
            (documents) and give more preference to new ones.
        """
        if other.u is None:
            # the other projection is empty => do nothing
            return
        if self.u is None:
            # we are empty => result of merge is the other projection, whatever it is
            self.u = other.u.copy()
            self.s = other.s.copy()
            return
        if self.m != other.m:
            raise ValueError(
                "vector space mismatch: update is using %s features, expected %s" % (other.m, self.m)
            )
        logger.info("merging projections: %s + %s", str(self.u.shape), str(other.u.shape))
        m, n1, n2 = self.u.shape[0], self.u.shape[1], other.u.shape[1]
        # TODO Maybe keep the bases as elementary reflectors, without
        # forming explicit matrices with ORGQR.
        # The only operation we ever need is basis^T*basis ond basis*component.
        # But how to do that in scipy? And is it fast(er)?
        # find component of u2 orthogonal to u1
        logger.debug("constructing orthogonal component")
        # F-order for the BLAS-heavy dot below, then back to C-order
        self.u = asfarray(self.u, 'self.u')
        c = np.dot(self.u.T, other.u)
        self.u = ascarray(self.u, 'self.u')
        other.u -= np.dot(self.u, c)
        other.u = [other.u]  # do some reference magic and call qr_destroy, to save RAM
        q, r = matutils.qr_destroy(other.u)  # q, r = QR(component)
        assert not other.u
        # find the rotation that diagonalizes r
        k = np.bmat([
            [np.diag(decay * self.s), np.multiply(c, other.s)],
            [matutils.pad(np.array([]).reshape(0, 0), min(m, n2), n1), np.multiply(r, other.s)]
        ])
        logger.debug("computing SVD of %s dense matrix", k.shape)
        try:
            # in np < 1.1.0, running SVD sometimes results in "LinAlgError: SVD did not converge'.
            # for these early versions of np, catch the error and try to compute
            # SVD again, but over k*k^T.
            # see http://www.mail-archive.com/np-discussion@scipy.org/msg07224.html and
            # bug ticket http://projects.scipy.org/np/ticket/706
            # sdoering: replaced np's linalg.svd with scipy's linalg.svd:
            # TODO *ugly overkill*!! only need first self.k SVD factors... but there is no LAPACK wrapper
            # for partial svd/eigendecomp in np :( //sdoering: maybe there is one in scipy?
            u_k, s_k, _ = scipy.linalg.svd(k, full_matrices=False)
        except scipy.linalg.LinAlgError:
            logger.error("SVD(A) failed; trying SVD(A * A^T)")
            # if this fails too, give up with an exception
            u_k, s_k, _ = scipy.linalg.svd(np.dot(k, k.T), full_matrices=False)
            s_k = np.sqrt(s_k)  # go back from eigen values to singular values
        k = clip_spectrum(s_k ** 2, self.k)
        u1_k, u2_k, s_k = np.array(u_k[:n1, :k]), np.array(u_k[n1:, :k]), s_k[:k]
        # update & rotate current basis U = [U, U']*[U1_k, U2_k]
        logger.debug("updating orthonormal basis U")
        self.s = s_k
        self.u = ascarray(self.u, 'self.u')
        self.u = np.dot(self.u, u1_k)
        q = ascarray(q, 'q')
        q = np.dot(q, u2_k)
        self.u += q
        # make each column of U start with a non-negative number (to force canonical decomposition)
        if self.u.shape[0] > 0:
            for i in xrange(self.u.shape[1]):
                if self.u[0, i] < 0.0:
                    self.u[:, i] *= -1.0
class LsiModel(interfaces.TransformationABC, basemodel.BaseTopicModel):
    """Model for `Latent Semantic Indexing
    <https://en.wikipedia.org/wiki/Latent_semantic_analysis#Latent_semantic_indexing>`_.

    Algorithm of decomposition described in `"Fast and Faster: A Comparison of Two Streamed
    Matrix Decomposition Algorithms" <https://nlp.fi.muni.cz/~xrehurek/nips/rehurek_nips.pdf>`_.

    Notes
    -----
    * :attr:`gensim.models.lsimodel.LsiModel.projection.u` - left singular vectors,
    * :attr:`gensim.models.lsimodel.LsiModel.projection.s` - singular values,
    * ``model[training_corpus]`` - right singular vectors (can be reconstructed if needed).

    See Also
    --------
    `FAQ about LSI matrices
    <https://github.com/piskvorky/gensim/wiki/Recipes-&-FAQ#q4-how-do-you-output-the-u-s-vt-matrices-of-lsi>`_.

    Examples
    --------
    >>> from gensim.test.utils import common_corpus, common_dictionary, get_tmpfile
    >>> from gensim.models import LsiModel
    >>>
    >>> model = LsiModel(common_corpus[:3], id2word=common_dictionary)  # train model
    >>> vector = model[common_corpus[4]]  # apply model to BoW document
    >>> model.add_documents(common_corpus[4:])  # update model with new documents
    >>> tmp_fname = get_tmpfile("lsi.model")
    >>> model.save(tmp_fname)  # save model
    >>> loaded_model = LsiModel.load(tmp_fname)  # load model

    """

    def __init__(self, corpus=None, num_topics=200, id2word=None, chunksize=20000,
                 decay=1.0, distributed=False, onepass=True,
                 power_iters=P2_EXTRA_ITERS, extra_samples=P2_EXTRA_DIMS, dtype=np.float64):
        """Construct an `LsiModel` object.

        Either `corpus` or `id2word` must be supplied in order to train the model.

        Parameters
        ----------
        corpus : {iterable of list of (int, float), scipy.sparse.csc}, optional
            Stream of document vectors or sparse matrix of shape (`num_terms`, `num_documents`).
        num_topics : int, optional
            Number of requested factors (latent dimensions)
        id2word : dict of {int: str}, optional
            ID to word mapping, optional.
        chunksize : int, optional
            Number of documents to be used in each training chunk.
        decay : float, optional
            Weight of existing observations relatively to new ones.
        distributed : bool, optional
            If True - distributed mode (parallel execution on several machines) will be used.
        onepass : bool, optional
            Whether the one-pass algorithm should be used for training.
            Pass `False` to force a multi-pass stochastic algorithm.
        power_iters: int, optional
            Number of power iteration steps to be used.
            Increasing the number of power iterations improves accuracy, but lowers performance
        extra_samples : int, optional
            Extra samples to be used besides the rank `k`. Can improve accuracy.
        dtype : type, optional
            Enforces a type for elements of the decomposed matrix.

        """
        self.id2word = id2word
        self.num_topics = int(num_topics)
        self.chunksize = int(chunksize)
        self.decay = float(decay)
        if distributed:
            # the distributed code path only supports the one-pass algorithm
            if not onepass:
                logger.warning("forcing the one-pass algorithm for distributed LSA")
                onepass = True
        self.onepass = onepass
        self.extra_samples, self.power_iters = extra_samples, power_iters
        self.dtype = dtype

        if corpus is None and self.id2word is None:
            raise ValueError(
                'at least one of corpus/id2word must be specified, to establish input space dimensionality'
            )

        if self.id2word is None:
            logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
            self.id2word = utils.dict_from_corpus(corpus)
            self.num_terms = len(self.id2word)
        else:
            # largest seen token id + 1 determines the dimensionality of the input space
            self.num_terms = 1 + (max(self.id2word.keys()) if self.id2word else -1)

        self.docs_processed = 0
        self.projection = Projection(
            self.num_terms, self.num_topics, power_iters=self.power_iters, extra_dims=self.extra_samples, dtype=dtype
        )

        self.numworkers = 1
        if not distributed:
            logger.info("using serial LSI version on this node")
            self.dispatcher = None
        else:
            if not onepass:
                raise NotImplementedError(
                    "distributed stochastic LSA not implemented yet; "
                    "run either distributed one-pass, or serial randomized."
                )
            try:
                import Pyro4
                # locate the dispatcher process via the Pyro name server
                dispatcher = Pyro4.Proxy('PYRONAME:gensim.lsi_dispatcher')
                logger.debug("looking for dispatcher at %s", str(dispatcher._pyroUri))
                dispatcher.initialize(
                    id2word=self.id2word, num_topics=num_topics, chunksize=chunksize, decay=decay,
                    power_iters=self.power_iters, extra_samples=self.extra_samples, distributed=False, onepass=onepass
                )
                self.dispatcher = dispatcher
                self.numworkers = len(dispatcher.getworkers())
                logger.info("using distributed version with %i workers", self.numworkers)
            except Exception as err:
                # distributed version was specifically requested, so this is an error state
                logger.error("failed to initialize distributed LSI (%s)", err)
                raise RuntimeError("failed to initialize distributed LSI (%s)" % err)
        if corpus is not None:
            self.add_documents(corpus)

    def add_documents(self, corpus, chunksize=None, decay=None):
        """Update model with new `corpus`.

        Parameters
        ----------
        corpus : {iterable of list of (int, float), scipy.sparse.csc}
            Stream of document vectors or sparse matrix of shape (`num_terms`, num_documents).
        chunksize : int, optional
            Number of documents to be used in each training chunk, will use `self.chunksize` if not specified.
        decay : float, optional
            Weight of existing observations relatively to new ones, will use `self.decay` if not specified.

        Notes
        -----
        Training proceeds in chunks of `chunksize` documents at a time. The size of `chunksize` is a tradeoff
        between increased speed (bigger `chunksize`) vs. lower memory footprint (smaller `chunksize`).
        If the distributed mode is on, each chunk is sent to a different worker/computer.

        """
        logger.info("updating model with new documents")

        # get computation parameters; if not specified, use the ones from constructor
        if chunksize is None:
            chunksize = self.chunksize
        if decay is None:
            decay = self.decay

        if not scipy.sparse.issparse(corpus):
            if not self.onepass:
                # we are allowed multiple passes over the input => use a faster, randomized two-pass algo
                update = Projection(self.num_terms, self.num_topics, None, dtype=self.dtype)
                update.u, update.s = stochastic_svd(
                    corpus, self.num_topics,
                    num_terms=self.num_terms, chunksize=chunksize,
                    extra_dims=self.extra_samples, power_iters=self.power_iters, dtype=self.dtype
                )
                self.projection.merge(update, decay=decay)
                self.docs_processed += len(corpus) if hasattr(corpus, '__len__') else 0
            else:
                # the one-pass algo
                doc_no = 0
                if self.dispatcher:
                    logger.info('initializing %s workers', self.numworkers)
                    self.dispatcher.reset()
                for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
                    logger.info("preparing a new chunk of documents")
                    nnz = sum(len(doc) for doc in chunk)
                    # construct the job as a sparse matrix, to minimize memory overhead
                    # definitely avoid materializing it as a dense matrix!
                    logger.debug("converting corpus to csc format")
                    job = matutils.corpus2csc(
                        chunk, num_docs=len(chunk), num_terms=self.num_terms, num_nnz=nnz, dtype=self.dtype)
                    del chunk
                    doc_no += job.shape[1]
                    if self.dispatcher:
                        # distributed version: add this job to the job queue, so workers can work on it
                        logger.debug("creating job #%i", chunk_no)
                        # put job into queue; this will eventually block, because the queue has a small finite size
                        self.dispatcher.putjob(job)
                        del job
                        logger.info("dispatched documents up to #%s", doc_no)
                    else:
                        # serial version, there is only one "worker" (myself) => process the job directly
                        update = Projection(
                            self.num_terms, self.num_topics, job, extra_dims=self.extra_samples,
                            power_iters=self.power_iters, dtype=self.dtype
                        )
                        del job
                        self.projection.merge(update, decay=decay)
                        del update
                        logger.info("processed documents up to #%s", doc_no)
                        self.print_topics(5)

                # wait for all workers to finish (distributed version only)
                if self.dispatcher:
                    logger.info("reached the end of input; now waiting for all remaining jobs to finish")
                    self.projection = self.dispatcher.getstate()
                self.docs_processed += doc_no
        else:
            # the whole input corpus is already an in-memory sparse matrix => single direct update
            assert not self.dispatcher, "must be in serial mode to receive jobs"
            update = Projection(
                self.num_terms, self.num_topics, corpus.tocsc(), extra_dims=self.extra_samples,
                power_iters=self.power_iters, dtype=self.dtype
            )
            self.projection.merge(update, decay=decay)
            logger.info("processed sparse job of %i documents", corpus.shape[1])
            self.docs_processed += corpus.shape[1]

    def __str__(self):
        """Get a human readable representation of model.

        Returns
        -------
        str
            A human readable string of the current objects parameters.

        """
        return "LsiModel(num_terms=%s, num_topics=%s, decay=%s, chunksize=%s)" % (
            self.num_terms, self.num_topics, self.decay, self.chunksize
        )

    def __getitem__(self, bow, scaled=False, chunksize=512):
        """Get the latent representation for `bow`.

        Parameters
        ----------
        bow : {list of (int, int), iterable of list of (int, int)}
            Document or corpus in BoW representation.
        scaled : bool, optional
            If True - topics will be scaled by the inverse of singular values.
        chunksize : int, optional
            Number of documents to be used in each applying chunk.

        Returns
        -------
        list of (int, float)
            Latent representation of topics in BoW format for document **OR**
        :class:`gensim.matutils.Dense2Corpus`
            Latent representation of corpus in BoW format if `bow` is corpus.

        """
        assert self.projection.u is not None, "decomposition not initialized yet"

        # if the input vector is in fact a corpus, return a transformed corpus as a result
        is_corpus, bow = utils.is_corpus(bow)
        if is_corpus and chunksize:
            # by default, transform `chunksize` documents at once, when called as `lsi[corpus]`.
            # this chunking is completely transparent to the user, but it speeds
            # up internal computations (one mat * mat multiplication, instead of
            # `chunksize` smaller mat * vec multiplications).
            return self._apply(bow, chunksize=chunksize)

        if not is_corpus:
            bow = [bow]

        # convert input to scipy.sparse CSC, then do "sparse * dense = dense" multiplication
        vec = matutils.corpus2csc(bow, num_terms=self.num_terms, dtype=self.projection.u.dtype)
        topic_dist = (vec.T * self.projection.u[:, :self.num_topics]).T  # (x^T * u).T = u^-1 * x

        # # convert input to dense, then do dense * dense multiplication
        # # ± same performance as above (BLAS dense * dense is better optimized than scipy.sparse),
        # but consumes more memory
        # vec = matutils.corpus2dense(bow, num_terms=self.num_terms, num_docs=len(bow))
        # topic_dist = np.dot(self.projection.u[:, :self.num_topics].T, vec)

        # # use np's advanced indexing to simulate sparse * dense
        # # ± same speed again
        # u = self.projection.u[:, :self.num_topics]
        # topic_dist = np.empty((u.shape[1], len(bow)), dtype=u.dtype)
        # for vecno, vec in enumerate(bow):
        #     indices, data = zip(*vec) if vec else ([], [])
        #     topic_dist[:, vecno] = np.dot(u.take(indices, axis=0).T, np.array(data, dtype=u.dtype))

        if not is_corpus:
            # convert back from matrix into a 1d vec
            topic_dist = topic_dist.reshape(-1)

        if scaled:
            topic_dist = (1.0 / self.projection.s[:self.num_topics]) * topic_dist  # s^-1 * u^-1 * x

        # convert a np array to gensim sparse vector = tuples of (feature_id, feature_weight),
        # with no zero weights.
        if not is_corpus:
            # lsi[single_document]
            result = matutils.full2sparse(topic_dist)
        else:
            # lsi[chunk of documents]
            result = matutils.Dense2Corpus(topic_dist)
        return result

    def get_topics(self):
        """Get the topic vectors.

        Notes
        -----
        The number of topics can actually be smaller than `self.num_topics`, if there were not enough factors
        (real rank of input matrix smaller than `self.num_topics`).

        Returns
        -------
        np.ndarray
            The term topic matrix with shape (`num_topics`, `vocabulary_size`)

        """
        projections = self.projection.u.T
        num_topics = len(projections)
        topics = []
        for i in range(num_topics):
            c = np.asarray(projections[i, :]).flatten()
            # L2-normalize each topic vector
            norm = np.sqrt(np.sum(np.dot(c, c)))
            topics.append(1.0 * c / norm)
        return np.array(topics)

    def show_topic(self, topicno, topn=10):
        """Get the words that define a topic along with their contribution.

        This is actually the left singular vector of the specified topic. The most important words in defining the topic
        (in both directions) are included in the string, along with their contribution to the topic.

        Parameters
        ----------
        topicno : int
            The topics id number.
        topn : int
            Number of words to be included to the result.

        Returns
        -------
        list of (str, float)
            Topic representation in BoW format.

        """
        # size of the projection matrix can actually be smaller than `self.num_topics`,
        # if there were not enough factors (real rank of input matrix smaller than
        # `self.num_topics`). in that case, return an empty string
        if topicno >= len(self.projection.u.T):
            return ''
        c = np.asarray(self.projection.u.T[topicno, :]).flatten()
        norm = np.sqrt(np.sum(np.dot(c, c)))
        # rank words by absolute contribution, keep the sign in the output weights
        most = matutils.argsort(np.abs(c), topn, reverse=True)
        return [(self.id2word[val], 1.0 * c[val] / norm) for val in most]

    def show_topics(self, num_topics=-1, num_words=10, log=False, formatted=True):
        """Get the most significant topics.

        Parameters
        ----------
        num_topics : int, optional
            The number of topics to be selected, if -1 - all topics will be in result (ordered by significance).
        num_words : int, optional
            The number of words to be included per topics (ordered by significance).
        log : bool, optional
            If True - log topics with logger.
        formatted : bool, optional
            If True - each topic represented as string, otherwise - in BoW format.

        Returns
        -------
        list of (int, str)
            If `formatted=True`, return sequence with (topic_id, string representation of topics) **OR**
        list of (int, list of (str, float))
            Otherwise, return sequence with (topic_id, [(word, value), ... ]).

        """
        shown = []
        if num_topics < 0:
            num_topics = self.num_topics
        for i in xrange(min(num_topics, self.num_topics)):
            if i < len(self.projection.s):
                if formatted:
                    topic = self.print_topic(i, topn=num_words)
                else:
                    topic = self.show_topic(i, topn=num_words)
                shown.append((i, topic))
                if log:
                    logger.info("topic #%i(%.3f): %s", i, self.projection.s[i], topic)
        return shown

    def print_debug(self, num_topics=5, num_words=10):
        """Print (to log) the most salient words of the first `num_topics` topics.

        Unlike :meth:`~gensim.models.lsimodel.LsiModel.print_topics`, this looks for words that are significant for
        a particular topic *and* not for others. This *should* result in a
        more human-interpretable description of topics.

        Alias for :func:`~gensim.models.lsimodel.print_debug`.

        Parameters
        ----------
        num_topics : int, optional
            The number of topics to be selected (ordered by significance).
        num_words : int, optional
            The number of words to be included per topics (ordered by significance).

        """
        # only wrap the module-level fnc
        print_debug(
            self.id2word, self.projection.u, self.projection.s,
            range(min(num_topics, len(self.projection.u.T))),
            num_words=num_words
        )

    def save(self, fname, *args, **kwargs):
        """Save the model to a file.

        Notes
        -----
        Large internal arrays may be stored into separate files, with `fname` as prefix.

        Warnings
        --------
        Do not save as a compressed file if you intend to load the file back with `mmap`.

        Parameters
        ----------
        fname : str
            Path to output file.
        *args
            Variable length argument list, see :meth:`gensim.utils.SaveLoad.save`.
        **kwargs
            Arbitrary keyword arguments, see :meth:`gensim.utils.SaveLoad.save`.

        See Also
        --------
        :meth:`~gensim.models.lsimodel.LsiModel.load`

        """
        # the projection is stored in a companion '.projection' file and excluded
        # from the main pickle (along with the unpicklable Pyro dispatcher proxy)
        if self.projection is not None:
            self.projection.save(utils.smart_extension(fname, '.projection'), *args, **kwargs)
        super(LsiModel, self).save(fname, *args, ignore=['projection', 'dispatcher'], **kwargs)

    @classmethod
    def load(cls, fname, *args, **kwargs):
        """Load a previously saved object using :meth:`~gensim.models.lsimodel.LsiModel.save` from file.

        Notes
        -----
        Large arrays can be memmap'ed back as read-only (shared memory) by setting `mmap='r'`:

        Parameters
        ----------
        fname : str
            Path to file that contains LsiModel.
        *args
            Variable length argument list, see :meth:`gensim.utils.SaveLoad.load`.
        **kwargs
            Arbitrary keyword arguments, see :meth:`gensim.utils.SaveLoad.load`.

        See Also
        --------
        :meth:`~gensim.models.lsimodel.LsiModel.save`

        Returns
        -------
        :class:`~gensim.models.lsimodel.LsiModel`
            Loaded instance.

        Raises
        ------
        IOError
            When methods are called on instance (should be called from class).

        """
        kwargs['mmap'] = kwargs.get('mmap', None)
        result = super(LsiModel, cls).load(fname, *args, **kwargs)
        projection_fname = utils.smart_extension(fname, '.projection')
        try:
            result.projection = super(LsiModel, cls).load(projection_fname, *args, **kwargs)
        except Exception as e:
            # best-effort: a model without its projection is still partially usable
            logging.warning("failed to load projection from %s: %s", projection_fname, e)
        return result
def print_debug(id2token, u, s, topics, num_words=10, num_neg=None):
    """Log the most salient words per topic.

    Parameters
    ----------
    id2token : :class:`~gensim.corpora.dictionary.Dictionary`
        Mapping from ID to word in the Dictionary.
    u : np.ndarray
        The 2D U decomposition matrix.
    s : np.ndarray
        The 1D reduced array of eigenvalues used for decomposition.
    topics : list of int
        Sequence of topic IDs to be printed
    num_words : int, optional
        Number of words to be included for each topic.
    num_neg : int, optional
        Number of words with a negative contribution to a topic that should be included.

    """
    if num_neg is None:
        # by default, print half as many salient negative words as positive.
        # use floor division: `num_neg` is a count threshold; under Python 3
        # (or `from __future__ import division`) `/` would yield a float.
        num_neg = num_words // 2

    logger.info('computing word-topic salience for %i topics', len(topics))
    topics, result = set(topics), {}
    # TODO speed up by block computation
    for uvecno, uvec in enumerate(u):
        uvec = np.abs(np.asarray(uvec).flatten())
        # salience of this word for each topic = normalized magnitude in U
        udiff = uvec / np.sqrt(np.sum(np.dot(uvec, uvec)))
        for topic in topics:
            result.setdefault(topic, []).append((udiff[topic], uvecno))

    logger.debug("printing %i+%i salient words", num_words, num_neg)
    for topic in sorted(iterkeys(result)):
        weights = sorted(result[topic], key=lambda x: -abs(x[0]))
        _, most = weights[0]
        if u[most, topic] < 0.0:  # the most significant word has a negative sign => flip sign of u[most]
            normalize = -1.0
        else:
            normalize = 1.0

        # order features according to salience; ignore near-zero entries in u
        pos, neg = [], []
        for weight, uvecno in weights:
            if normalize * u[uvecno, topic] > 0.0001:
                pos.append('%s(%.3f)' % (id2token[uvecno], u[uvecno, topic]))
                if len(pos) >= num_words:
                    break

        for weight, uvecno in weights:
            if normalize * u[uvecno, topic] < -0.0001:
                neg.append('%s(%.3f)' % (id2token[uvecno], u[uvecno, topic]))
                if len(neg) >= num_neg:
                    break

        logger.info('topic #%s(%.3f): %s, ..., %s', topic, s[topic], ', '.join(pos), ', '.join(neg))
def stochastic_svd(corpus, rank, num_terms, chunksize=20000, extra_dims=None,
                   power_iters=0, dtype=np.float64, eps=1e-6):
    """Run truncated Singular Value Decomposition (SVD) on a sparse input.

    Parameters
    ----------
    corpus : {iterable of list of (int, float), scipy.sparse}
        Input corpus as a stream (does not have to fit in RAM)
        or a sparse matrix of shape (`num_terms`, num_documents).
    rank : int
        Desired number of factors to be retained after decomposition.
    num_terms : int
        The number of features (terms) in `corpus`.
    chunksize :  int, optional
        Number of documents to be used in each training chunk.
    extra_dims : int, optional
        Extra samples to be used besides the rank `k`. Can improve accuracy.
    power_iters: int, optional
        Number of power iteration steps to be used. Increasing the number of power iterations improves accuracy,
        but lowers performance.
    dtype : numpy.dtype, optional
        Enforces a type for elements of the decomposed matrix.
    eps: float, optional
        Percentage of the spectrum's energy to be discarded.

    Notes
    -----
    The corpus may be larger than RAM (iterator of vectors), if `corpus` is a `scipy.sparse.csc` instead,
    it is assumed the whole corpus fits into core memory and a different (more efficient) code path is chosen.
    This may return less than the requested number of top `rank` factors, in case the input itself is of lower rank.
    The `extra_dims` (oversampling) and especially `power_iters` (power iterations) parameters affect accuracy of the
    decomposition.

    This algorithm uses `2 + power_iters` passes over the input data. In case you can only afford a single pass,
    set `onepass=True` in :class:`~gensim.models.lsimodel.LsiModel` and avoid using this function directly.

    The decomposition algorithm is based on `"Finding structure with randomness:
    Probabilistic algorithms for constructing approximate matrix decompositions" <https://arxiv.org/abs/0909.4061>`_.

    Returns
    -------
    (np.ndarray 2D, np.ndarray 1D)
        The left singular vectors and the singular values of the `corpus`.

    """
    rank = int(rank)
    if extra_dims is None:
        samples = max(10, 2 * rank)  # use more samples than requested factors, to improve accuracy
    else:
        samples = rank + int(extra_dims)
    logger.info("using %i extra samples and %i power iterations", samples - rank, power_iters)

    num_terms = int(num_terms)

    # first phase: construct the orthonormal action matrix Q = orth(Y) = orth((A * A.T)^q * A * O)
    # build Y in blocks of `chunksize` documents (much faster than going one-by-one
    # and more memory friendly than processing all documents at once)
    y = np.zeros(dtype=dtype, shape=(num_terms, samples))
    logger.info("1st phase: constructing %s action matrix", str(y.shape))

    if scipy.sparse.issparse(corpus):
        # in-memory code path: the whole corpus is a single sparse matrix
        m, n = corpus.shape
        assert num_terms == m, "mismatch in number of features: %i in sparse matrix vs. %i parameter" % (m, num_terms)
        o = np.random.normal(0.0, 1.0, (n, samples)).astype(y.dtype)  # draw a random gaussian matrix
        sparsetools.csc_matvecs(m, n, samples, corpus.indptr, corpus.indices,
                                corpus.data, o.ravel(), y.ravel())  # y = corpus * o
        del o

        # unlike np, scipy.sparse `astype()` copies everything, even if there is no change to dtype!
        # so check for equal dtype explicitly, to avoid the extra memory footprint if possible
        if y.dtype != dtype:
            y = y.astype(dtype)

        logger.info("orthonormalizing %s action matrix", str(y.shape))
        y = [y]
        q, _ = matutils.qr_destroy(y)  # orthonormalize the range

        logger.debug("running %i power iterations", power_iters)
        for _ in xrange(power_iters):
            # one power iteration: Q <- orth(A * A.T * Q), sharpens the spectrum
            q = corpus.T * q
            q = [corpus * q]
            q, _ = matutils.qr_destroy(q)  # orthonormalize the range after each power iteration step
    else:
        # streamed code path: accumulate Y chunk by chunk, then re-scan the corpus per power iteration
        num_docs = 0
        for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
            logger.info('PROGRESS: at document #%i', (chunk_no * chunksize))
            # construct the chunk as a sparse matrix, to minimize memory overhead
            # definitely avoid materializing it as a dense (num_terms x chunksize) matrix!
            s = sum(len(doc) for doc in chunk)
            chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype)  # documents = columns of sparse CSC
            m, n = chunk.shape
            assert m == num_terms
            assert n <= chunksize  # the very last chunk of A is allowed to be smaller in size
            num_docs += n
            logger.debug("multiplying chunk * gauss")
            o = np.random.normal(0.0, 1.0, (n, samples)).astype(dtype)  # draw a random gaussian matrix
            sparsetools.csc_matvecs(
                m, n, samples, chunk.indptr, chunk.indices,  # y = y + chunk * o
                chunk.data, o.ravel(), y.ravel()
            )
            del chunk, o
        y = [y]
        q, _ = matutils.qr_destroy(y)  # orthonormalize the range

        for power_iter in xrange(power_iters):
            logger.info("running power iteration #%i", power_iter + 1)
            yold = q.copy()
            q[:] = 0.0
            for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
                logger.info('PROGRESS: at document #%i/%i', chunk_no * chunksize, num_docs)
                # documents = columns of sparse CSC
                chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype)
                tmp = chunk.T * yold
                tmp = chunk * tmp
                del chunk
                q += tmp
            del yold
            q = [q]
            q, _ = matutils.qr_destroy(q)  # orthonormalize the range

    qt = q[:, :samples].T.copy()
    del q

    if scipy.sparse.issparse(corpus):
        # second phase (in-memory): project the corpus and run a dense SVD on the small projected matrix
        b = qt * corpus
        logger.info("2nd phase: running dense svd on %s matrix", str(b.shape))
        u, s, vt = scipy.linalg.svd(b, full_matrices=False)
        del b, vt
    else:
        # second phase: construct the covariance matrix X = B * B.T, where B = Q.T * A
        # again, construct X incrementally, in chunks of `chunksize` documents from the streaming
        # input corpus A, to avoid using O(number of documents) memory
        x = np.zeros(shape=(qt.shape[0], qt.shape[0]), dtype=dtype)
        logger.info("2nd phase: constructing %s covariance matrix", str(x.shape))
        for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
            logger.info('PROGRESS: at document #%i/%i', chunk_no * chunksize, num_docs)
            chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=qt.dtype)
            b = qt * chunk  # dense * sparse matrix multiply
            del chunk
            x += np.dot(b, b.T)  # TODO should call the BLAS routine SYRK, but there is no SYRK wrapper in scipy :(
            del b

        # now we're ready to compute decomposition of the small matrix X
        logger.info("running dense decomposition on %s covariance matrix", str(x.shape))
        # could use linalg.eigh, but who cares... and svd returns the factors already sorted :)
        u, s, vt = scipy.linalg.svd(x)
        # sqrt to go back from singular values of X to singular values of B = singular values of the corpus
        s = np.sqrt(s)
    q = qt.T.copy()
    del qt

    logger.info("computing the final decomposition")
    # discard the near-zero tail of the spectrum (keeps at most `rank` factors)
    keep = clip_spectrum(s ** 2, rank, discard=eps)
    u = u[:, :keep].copy()
    s = s[:keep]
    u = np.dot(q, u)
    return u.astype(dtype), s.astype(dtype)
| 43,382 | 41.826259 | 120 | py |
poincare_glove | poincare_glove-master/gensim/models/rpmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Random Projections (also known as Random Indexing).
For theoretical background on Random Projections, see [1]_.
Examples
--------
>>> from gensim.models import RpModel
>>> from gensim.corpora import Dictionary
>>> from gensim.test.utils import common_texts, temporary_file
>>>
>>> dictionary = Dictionary(common_texts) # fit dictionary
>>> corpus = [dictionary.doc2bow(text) for text in common_texts] # convert texts to BoW format
>>>
>>> model = RpModel(corpus, id2word=dictionary) # fit model
>>> result = model[corpus[3]] # apply model to document, result is vector in BoW format
>>>
>>> with temporary_file("model_file") as fname:
... model.save(fname) # save model to file
... loaded_model = RpModel.load(fname) # load model
References
----------
.. [1] Kanerva et al., 2000, Random indexing of text samples for Latent Semantic Analysis,
https://cloudfront.escholarship.org/dist/prd/content/qt5644k0w6/qt5644k0w6.pdf
"""
import logging
import numpy as np
from gensim import interfaces, matutils, utils
logger = logging.getLogger('gensim.models.rpmodel')
class RpModel(interfaces.TransformationABC):
    """Random Projection model: project BoW vectors into a lower-dimensional
    random subspace via a fixed +1/-1 projection matrix."""

    def __init__(self, corpus, id2word=None, num_topics=300):
        """
        Parameters
        ----------
        corpus : iterable of iterable of (int, int)
            Input corpus.
        id2word : {dict of (int, str), :class:`~gensim.corpora.dictionary.Dictionary`}, optional
            Mapping `token_id` -> `token`, will be determine from corpus if `id2word == None`.
        num_topics : int, optional
            Number of topics.

        """
        self.id2word = id2word
        self.num_topics = num_topics
        if corpus is not None:
            self.initialize(corpus)

    def __str__(self):
        # NOTE: `num_terms` only exists after `initialize()` has run
        return "RpModel(num_terms=%s, num_topics=%s)" % (self.num_terms, self.num_topics)

    def initialize(self, corpus):
        """Initialize the random projection matrix.

        Parameters
        ----------
        corpus : iterable of iterable of (int, int)
            Input corpus.

        """
        if self.id2word is None:
            logger.info("no word id mapping provided; initializing from corpus, assuming identity")
            self.id2word = utils.dict_from_corpus(corpus)
            self.num_terms = len(self.id2word)
        else:
            # materialize the keys view: on Python 3, `dict.keys()` cannot be
            # concatenated to a list directly (raises TypeError);
            # the [-1] sentinel handles an empty id2word mapping
            self.num_terms = 1 + max([-1] + list(self.id2word.keys()))

        shape = self.num_topics, self.num_terms
        logger.info("constructing %s random matrix", str(shape))
        # Now construct the projection matrix itself.
        # Here i use a particular form, derived in "Achlioptas: Database-friendly random projection",
        # and his (1) scenario of Theorem 1.1 in particular (all entries are +1/-1).
        randmat = 1 - 2 * np.random.binomial(1, 0.5, shape)  # convert from 0/1 to +1/-1
        # convert from int32 to floats, for faster multiplications
        self.projection = np.asfortranarray(randmat, dtype=np.float32)
        # TODO: check whether the Fortran-order shenanigans still make sense. In the original
        # code (~2010), this made a BIG difference for np BLAS implementations; perhaps now the wrappers
        # are smarter and this is no longer needed?

    def __getitem__(self, bow):
        """Get random-projection representation of the input vector or corpus.

        Parameters
        ----------
        bow : {list of (int, int), iterable of list of (int, int)}
            Input document or corpus.

        Returns
        -------
        list of (int, float)
            if `bow` is document OR
        :class:`~gensim.interfaces.TransformedCorpus`
            if `bow` is corpus.

        """
        # if the input vector is in fact a corpus, return a transformed corpus as result
        is_corpus, bow = utils.is_corpus(bow)
        if is_corpus:
            return self._apply(bow)

        if getattr(self, 'freshly_loaded', False):
            # This is a hack to work around a bug in np, where a FORTRAN-order array
            # unpickled from disk segfaults on using it.
            self.freshly_loaded = False
            self.projection = self.projection.copy('F')  # simply making a fresh copy fixes the broken array

        # densify the sparse BoW vector; pre-scale by 1/sqrt(num_topics) so the
        # projected coordinates keep comparable magnitude
        vec = matutils.sparse2full(bow, self.num_terms).reshape(self.num_terms, 1) / np.sqrt(self.num_topics)
        vec = np.asfortranarray(vec, dtype=np.float32)
        topic_dist = np.dot(self.projection, vec)  # (k, d) * (d, 1) = (k, 1)
        # emit only finite, non-zero coordinates in gensim sparse format
        return [
            (topicid, float(topicvalue)) for topicid, topicvalue in enumerate(topic_dist.flat)
            if np.isfinite(topicvalue) and not np.allclose(topicvalue, 0.0)
        ]

    def __setstate__(self, state):
        """Sets the internal state and updates freshly_loaded to True, called when unpicked.

        Parameters
        ----------
        state : dict
            State of the class.

        """
        self.__dict__ = state
        # see __getitem__: flags that the projection matrix must be re-copied before use
        self.freshly_loaded = True
| 5,706 | 35.120253 | 119 | py |
poincare_glove | poincare_glove-master/gensim/models/word2vec.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Shiva Manne <manneshiva@gmail.com>
# Copyright (C) 2018 RaRe Technologies s.r.o.
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Produce word vectors with deep learning via word2vec's "skip-gram and CBOW models", using either
hierarchical softmax or negative sampling [1]_ [2]_.
NOTE: There are more ways to get word vectors in Gensim than just Word2Vec.
See FastText and wrappers for VarEmbed and WordRank.
The training algorithms were originally ported from the C package https://code.google.com/p/word2vec/
and extended with additional functionality.
For a blog tutorial on gensim word2vec, with an interactive web app trained on GoogleNews,
visit http://radimrehurek.com/2014/02/word2vec-tutorial/
**Make sure you have a C compiler before installing gensim, to use optimized (compiled) word2vec training**
(70x speedup compared to plain NumPy implementation [3]_).
Initialize a model with e.g.::
>>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)
Persist a model to disk with::
>>> model.save(fname)
>>> model = Word2Vec.load(fname) # you can continue training with the loaded model!
The word vectors are stored in a KeyedVectors instance in model.wv.
This separates the read-only word vector lookup operations in KeyedVectors from the training code in Word2Vec::
>>> model.wv['computer'] # numpy vector of a word
array([-0.00449447, -0.00310097, 0.02421786, ...], dtype=float32)
The word vectors can also be instantiated from an existing file on disk in the word2vec C format
as a KeyedVectors instance.
NOTE: It is impossible to continue training the vectors loaded from the C format because hidden weights,
vocabulary frequency and the binary tree is missing::
>>> from gensim.models import KeyedVectors
>>> word_vectors = KeyedVectors.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
>>> word_vectors = KeyedVectors.load_word2vec_format('/tmp/vectors.bin', binary=True) # C binary format
You can perform various NLP word tasks with the model. Some of them
are already built-in::
>>> model.wv.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
>>> model.wv.most_similar_cosmul(positive=['woman', 'king'], negative=['man'])
[('queen', 0.71382287), ...]
>>> model.wv.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
>>> model.wv.similarity('woman', 'man')
0.73723527
Probability of a text under the model::
>>> model.score(["The fox jumped over a lazy dog".split()])
0.2158356
Correlation with human opinion on word similarity::
>>> model.wv.evaluate_word_pairs(os.path.join(module_path, 'test_data','wordsim353.tsv'))
0.51, 0.62, 0.13
And on analogies::
>>> model.wv.accuracy(os.path.join(module_path, 'test_data', 'questions-words.txt'))
and so on.
If you're finished training a model (i.e. no more updates, only querying),
then switch to the :mod:`gensim.models.KeyedVectors` instance in wv
>>> word_vectors = model.wv
>>> del model
to trim unneeded model memory = use much less RAM.
Note that there is a :mod:`gensim.models.phrases` module which lets you automatically
detect phrases longer than one word. Using phrases, you can learn a word2vec model
where "words" are actually multiword expressions, such as `new_york_times` or `financial_crisis`:
>>> bigram_transformer = gensim.models.Phrases(sentences)
>>> model = Word2Vec(bigram_transformer[sentences], size=100, ...)
.. [1] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean.
Efficient Estimation of Word Representations in Vector Space. In Proceedings of Workshop at ICLR, 2013.
.. [2] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean.
Distributed Representations of Words and Phrases and their Compositionality. In Proceedings of NIPS, 2013.
.. [3] Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/
"""
from __future__ import division # py3 "true division"
import logging
import sys
import os
import heapq
from timeit import default_timer
from copy import deepcopy
from collections import defaultdict, Counter
import threading
import itertools
import warnings
from gensim.utils import keep_vocab_item, call_on_class_only
from gensim.models.keyedvectors import Vocab, Word2VecKeyedVectors, PoincareWordEmbeddingsKeyedVectors
from gensim.models.base_any2vec import BaseWordEmbeddingsModel
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
from numpy import exp, dot, zeros, random, dtype, float32 as REAL, float64 as DOUBLE,\
uint32, seterr, array, uint8, vstack, fromstring, sqrt,\
empty, sum as np_sum, ones, logaddexp, log, outer, maximum, minimum, tanh, sign, average
from numpy.linalg import norm
from scipy.special import expit
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from gensim.models.keyedvectors import PoincareWordEmbeddingsKeyedVectors
from gensim.utils import deprecated
from six import iteritems, itervalues, string_types
from six.moves import xrange
logger = logging.getLogger(__name__)
# Prefer the compiled (cython) training/scoring kernels; when the C extension
# cannot be imported, fall back to the pure Python implementations defined in
# the `except` block below.
try:
    from gensim.models.word2vec_inner import train_batch_sg, train_batch_sg_dbl, train_batch_cbow
    from gensim.models.word2vec_inner import score_sentence_sg, score_sentence_cbow
    from gensim.models.word2vec_inner import FAST_VERSION, MAX_WORDS_IN_BATCH
except ImportError:
    # failed... fall back to plain numpy (20-80x slower training than the above)
    FAST_VERSION = -1  # negative sentinel: the C extension is unavailable
    MAX_WORDS_IN_BATCH = 10000  # batch truncation limit mirrored from the cython code
def train_batch_sg(model, sentences, alpha, work=None, compute_loss=False):
"""
Update skip-gram model by training on a sequence of sentences.
Each sentence is a list of string tokens, which are looked up in the model's
vocab dictionary. Called internally from `Word2Vec.train()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
result = 0
for sentence in sentences:
word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab and
model.wv.vocab[w].sample_int > model.random.rand() * 2 ** 32]
for pos, word in enumerate(word_vocabs):
reduced_window = model.random.randint(model.window) # `b` in the original word2vec code
# now go over all words from the (reduced) window, predicting each one in turn
start = max(0, pos - model.window + reduced_window)
for pos2, word2 in enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start):
# don't train on the `word` itself
if pos2 != pos:
train_sg_pair(
model, model.wv.index2word[word.index], word2.index, alpha, compute_loss=compute_loss
)
result += len(word_vocabs)
return result, 0.0
def train_batch_cbow(model, sentences, alpha, work=None, neu1=None, compute_loss=False):
"""
Update CBOW model by training on a sequence of sentences.
Each sentence is a list of string tokens, which are looked up in the model's
vocab dictionary. Called internally from `Word2Vec.train()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
result = 0
for sentence in sentences:
word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab and
model.wv.vocab[w].sample_int > model.random.rand() * 2 ** 32] # XXX: this sample_int might be useful to figure out what word is
for pos, word in enumerate(word_vocabs):
reduced_window = model.random.randint(model.window) # `b` in the original word2vec code
start = max(0, pos - model.window + reduced_window)
window_pos = enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start)
word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
l1 = np_sum(model.wv.syn0[word2_indices], axis=0) # 1 x vector_size
if word2_indices and model.cbow_mean:
l1 /= len(word2_indices)
train_cbow_pair(model, word, word2_indices, l1, alpha, compute_loss=compute_loss)
result += len(word_vocabs)
return result
def score_sentence_sg(model, sentence, work=None):
"""
Obtain likelihood score for a single sentence in a fitted skip-gram representaion.
The sentence is a list of Vocab objects (or None, when the corresponding
word is not in the vocabulary). Called internally from `Word2Vec.score()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
log_prob_sentence = 0.0
if model.negative:
raise RuntimeError("scoring is only available for HS=True")
word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab]
for pos, word in enumerate(word_vocabs):
if word is None:
continue # OOV word in the input sentence => skip
# now go over all words from the window, predicting each one in turn
start = max(0, pos - model.window)
for pos2, word2 in enumerate(word_vocabs[start: pos + model.window + 1], start):
# don't train on OOV words and on the `word` itself
if word2 is not None and pos2 != pos:
log_prob_sentence += score_sg_pair(model, word, word2)
return log_prob_sentence
def score_sentence_cbow(model, sentence, work=None, neu1=None):
"""
Obtain likelihood score for a single sentence in a fitted CBOW representaion.
The sentence is a list of Vocab objects (or None, where the corresponding
word is not in the vocabulary. Called internally from `Word2Vec.score()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
log_prob_sentence = 0.0
if model.negative:
raise RuntimeError("scoring is only available for HS=True")
word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab]
for pos, word in enumerate(word_vocabs):
if word is None:
continue # OOV word in the input sentence => skip
start = max(0, pos - model.window)
window_pos = enumerate(word_vocabs[start:(pos + model.window + 1)], start)
word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
l1 = np_sum(model.wv.syn0[word2_indices], axis=0) # 1 x layer1_size
if word2_indices and model.cbow_mean:
l1 /= len(word2_indices)
log_prob_sentence += score_cbow_pair(model, word, l1)
return log_prob_sentence
# Note: We did not change this function to support Euclidean distance-based similarity during training because training
# is too slow with the Python implementation anyway. We only changed the Cython code.
def train_sg_pair(model, word, context_index, alpha, learn_vectors=True, learn_hidden=True,
                  context_vectors=None, context_locks=None, compute_loss=False, is_ft=False):
    """Train one (context -> target) skip-gram example; return the accumulated
    input-side error gradient `neu1e`.

    `word` is the target word (a string key into `model.wv.vocab`);
    `context_index` selects the input vector to update.  For fastText
    (`is_ft=True`) `context_index` appears to be a sequence whose first entry
    is a vocab index and the rest are ngram indices — TODO confirm with caller.
    Mutates `model.syn1`/`model.syn1neg` when `learn_hidden` is True and the
    input vectors when `learn_vectors` is True.  When `compute_loss` is True,
    accumulates into `model.epoch_training_loss`.
    """
    if context_vectors is None:
        if is_ft:
            context_vectors_vocab = model.wv.syn0_vocab
            context_vectors_ngrams = model.wv.syn0_ngrams
        else:
            context_vectors = model.wv.syn0
    if context_locks is None:
        if is_ft:
            context_locks_vocab = model.syn0_vocab_lockf
            context_locks_ngrams = model.syn0_ngrams_lockf
        else:
            context_locks = model.syn0_lockf
    if word not in model.wv.vocab:
        return
    predict_word = model.wv.vocab[word]  # target word (NN output)
    if is_ft:
        l1_vocab = context_vectors_vocab[context_index[0]]
        l1_ngrams = np_sum(context_vectors_ngrams[context_index[1:]], axis=0)
        # NOTE(review): if `context_index` is empty here, `l1` is never bound and
        # `zeros(l1.shape)` below would raise — presumably callers never pass an
        # empty context; confirm.
        if context_index:
            l1 = np_sum([l1_vocab, l1_ngrams], axis=0) / len(context_index)
    else:
        l1 = context_vectors[context_index]  # input word (NN input/projection layer)
        lock_factor = context_locks[context_index]
    neu1e = zeros(l1.shape)
    if model.hs:
        # work on the entire tree at once, to push as much work into numpy's C routines as possible (performance)
        l2a = deepcopy(model.syn1[predict_word.point])  # 2d matrix, codelen x layer1_size
        prod_term = dot(l1, l2a.T)
        fa = expit(prod_term)  # propagate hidden -> output
        ga = (1 - predict_word.code - fa) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1[predict_word.point] += outer(ga, l1)  # learn hidden -> output
        neu1e += dot(ga, l2a)  # save error
        # loss component corresponding to hierarchical softmax
        if compute_loss:
            sgn = (-1.0) ** predict_word.code  # `ch` function, 0 -> 1, 1 -> -1
            lprob = -log(expit(-sgn * prod_term))
            model.epoch_training_loss += sum(lprob)
    if model.negative:
        # use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
        word_indices = [predict_word.index]
        while len(word_indices) < model.negative + 1:
            w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
            if w != predict_word.index:
                word_indices.append(w)
        l2b = model.syn1neg[word_indices]  # 2d matrix, k+1 x layer1_size
        prod_term = dot(l1, l2b.T)  # k+1 x 1;
        fb = expit(prod_term)  # propagate hidden -> output
        # gb are gradients wrt to the distance function; i.e. dL/d<h,v_w>; to obtain the actual gradients wrt to the
        # context and word vectors, we need to multiply gb by d<h,v_w>/dh and d<h,v_w>/dv_w respectively.
        gb = (model.neg_labels - fb) * alpha  # vector of error gradients multiplied by the learning rate; k+1 x 1
        if learn_hidden:
            model.syn1neg[word_indices] += outer(gb, l1)  # learn hidden -> output; k+1 x layer1_size
        neu1e += dot(gb, l2b)  # save error; layer1_size x 1
        # loss component corresponding to negative sampling
        if compute_loss:
            model.epoch_training_loss -= sum(log(expit(-1 * prod_term[1:])))  # for the sampled words
            model.epoch_training_loss -= log(expit(prod_term[0]))  # for the output word
    if learn_vectors:
        if is_ft:
            model.wv.syn0_vocab[context_index[0]] += neu1e * context_locks_vocab[context_index[0]]
            for i in context_index[1:]:
                model.wv.syn0_ngrams[i] += neu1e * context_locks_ngrams[i]
        else:
            l1 += neu1e * lock_factor  # learn input -> hidden (mutates model.wv.syn0[word2.index], if that is l1)
    return neu1e
def train_cbow_pair(model, word, input_word_indices, l1, alpha, learn_vectors=True, learn_hidden=True,
                    compute_loss=False, context_vectors=None, context_locks=None, is_ft=False):
    """Train one CBOW example (summed/averaged context `l1` -> target `word`);
    return the accumulated input-side error gradient `neu1e`.

    `word` is a vocab entry (with `.point`/`.code`/`.index`);
    `input_word_indices` lists the context rows to update (for fastText,
    `is_ft=True`, it appears to be a pair of (vocab indices, ngram indices) —
    TODO confirm with caller).  Mutates `model.syn1`/`model.syn1neg` when
    `learn_hidden` is True and the context vectors when `learn_vectors` is
    True.  When `compute_loss` is True, accumulates into
    `model.epoch_training_loss`.
    """
    if context_vectors is None:
        if is_ft:
            context_vectors_vocab = model.wv.syn0_vocab
            context_vectors_ngrams = model.wv.syn0_ngrams
        else:
            context_vectors = model.wv.syn0
    if context_locks is None:
        if is_ft:
            context_locks_vocab = model.syn0_vocab_lockf
            context_locks_ngrams = model.syn0_ngrams_lockf
        else:
            context_locks = model.syn0_lockf
    neu1e = zeros(l1.shape)
    if model.hs:
        l2a = model.syn1[word.point]  # 2d matrix, codelen x layer1_size
        prod_term = dot(l1, l2a.T)
        fa = expit(prod_term)  # propagate hidden -> output
        ga = (1. - word.code - fa) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1[word.point] += outer(ga, l1)  # learn hidden -> output
        neu1e += dot(ga, l2a)  # save error
        # loss component corresponding to hierarchical softmax
        if compute_loss:
            sgn = (-1.0) ** word.code  # ch function, 0-> 1, 1 -> -1
            model.epoch_training_loss += sum(-log(expit(-sgn * prod_term)))
    if model.negative:
        # use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
        word_indices = [word.index]
        while len(word_indices) < model.negative + 1:
            w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
            if w != word.index:
                word_indices.append(w)
        l2b = model.syn1neg[word_indices]  # 2d matrix, k+1 x layer1_size
        prod_term = dot(l1, l2b.T)
        fb = expit(prod_term)  # propagate hidden -> output
        gb = (model.neg_labels - fb) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1neg[word_indices] += outer(gb, l1)  # learn hidden -> output
        neu1e += dot(gb, l2b)  # save error
        # loss component corresponding to negative sampling
        if compute_loss:
            model.epoch_training_loss -= sum(log(expit(-1 * prod_term[1:])))  # for the sampled words
            model.epoch_training_loss -= log(expit(prod_term[0]))  # for the output word
    if learn_vectors:
        # learn input -> hidden, here for all words in the window separately
        if is_ft:
            if not model.cbow_mean and input_word_indices:
                neu1e /= (len(input_word_indices[0]) + len(input_word_indices[1]))
            for i in input_word_indices[0]:
                context_vectors_vocab[i] += neu1e * context_locks_vocab[i]
            for i in input_word_indices[1]:
                context_vectors_ngrams[i] += neu1e * context_locks_ngrams[i]
        else:
            if not model.cbow_mean and input_word_indices:
                neu1e /= len(input_word_indices)
            for i in input_word_indices:
                context_vectors[i] += neu1e * context_locks[i]
    return neu1e
def score_sg_pair(model, word, word2):
    """Return the hierarchical-softmax log-probability of predicting target
    `word` from context `word2`.

    `word` and `word2` are vocab entries: `word2.index` selects the input
    vector from `model.wv.syn0`; `word.point`/`word.code` give the Huffman
    tree nodes and their bit codes in `model.syn1`.
    """
    l1 = model.wv.syn0[word2.index]
    # Fancy indexing with `word.point` already returns a fresh array, and the
    # rows are only read here, so the deepcopy the original code made was
    # redundant work per call.
    l2a = model.syn1[word.point]  # 2d matrix, codelen x layer1_size
    sgn = (-1.0) ** word.code  # ch function, 0 -> 1, 1 -> -1
    lprob = -logaddexp(0, -sgn * dot(l1, l2a.T))
    return sum(lprob)
def score_cbow_pair(model, word, l1):
    """Return the hierarchical-softmax log-probability of predicting target
    `word` from the projection-layer vector `l1` (the summed/averaged context).
    """
    hidden_rows = model.syn1[word.point]  # 2d matrix, codelen x layer1_size
    signs = (-1.0) ** word.code  # ch function: code bit 0 -> +1, 1 -> -1
    activations = dot(l1, hidden_rows.T)
    return sum(-logaddexp(0, -signs * activations))
class InitializationConfig:
    """
    Class that stores information about the way we want to initialize the embeddings. It is used when initializing a
    model, before training, from pre-trained embeddings. Particularly, we use this to initialize Poincare embeddings
    from a pre-trained Euclidean model that we then project onto the Poincare ball. We use either the identity map or
    the exponential map for the projection. We scale the embeddings such that we control how close to the origin they
    are initialized.
    """
    def __init__(self, pretrained_model_filename, config_str):
        # NOTE(review): splitting on the literal character '0' assumes config
        # strings look like "exp0.5" / "id0.01" (i.e. a scaling factor < 1 that
        # starts with "0."); a factor such as "1.0" would break this parse —
        # confirm the accepted config format.
        self.euclid2hyp_str, self.scaling_factor = config_str.split('0', 1)
        self.scaling_factor = float(self.scaling_factor)
        if self.euclid2hyp_str == "exp":
            euclid2hyp = InitializationConfig.exp_map_0
        elif self.euclid2hyp_str == "id":
            euclid2hyp = InitializationConfig.identity
        else:
            raise RuntimeError("Unknown conversion from Euclidean space to hyperbolic space")
        # Load the pre-trained Euclidean model whose weights seed the new model.
        model = Word2Vec.load(pretrained_model_filename)
        self.vocab_size = len(model.wv.vocab)
        # Largest row norms of the input and output embedding matrices.
        sorted_norms0 = sorted(norm(model.wv.vectors, axis=1))
        sorted_norms1 = sorted(norm(model.trainables.syn1neg, axis=1))
        # Scale embeddings to be inside the unit ball, and then scale them again using the scaling_factor
        self.init_vectors = euclid2hyp(model.wv.vectors / sorted_norms0[-1] * self.scaling_factor)
        self.init_syn1neg = euclid2hyp(model.trainables.syn1neg / sorted_norms1[-1] * self.scaling_factor)
    @staticmethod
    def exp_map_0(X):
        # X is a matrix that has embeddings as rows
        # NOTE(review): a zero-norm row divides by zero below (NaN coef) —
        # presumably pre-trained rows are never exactly zero; confirm.
        MAX_TANH_ARG = 15.0
        norm_x = norm(X, axis=1)
        # Clamp the tanh argument to avoid saturation/overflow in float math.
        coef = tanh(minimum(maximum(norm_x, -MAX_TANH_ARG), MAX_TANH_ARG)) / norm_x
        Y = X * coef[:, None]
        return PoincareWordEmbeddingsKeyedVectors.moebius_add_mat(X, Y)
    @staticmethod
    def identity(X):
        # Identity projection: reuse the (scaled) Euclidean embeddings as-is.
        return X
    def __str__(self):
        # Tag used in filenames/logs to identify this initialization scheme.
        return "INIT" + self.euclid2hyp_str + str(self.scaling_factor)
class Word2Vec(BaseWordEmbeddingsModel):
"""Class for training, using and evaluating neural networks described in https://code.google.com/p/word2vec/
If you're finished training a model (=no more updates, only querying)
then switch to the :mod:`gensim.models.KeyedVectors` instance in wv
The model can be stored/loaded via its :meth:`~gensim.models.word2vec.Word2Vec.save()` and
:meth:`~gensim.models.word2vec.Word2Vec.load()` methods, or stored/loaded in a format
compatible with the original word2vec implementation via `wv.save_word2vec_format()`
and `Word2VecKeyedVectors.load_word2vec_format()`.
"""
    def __init__(self, sentences=None, size=100, alpha=0.025, window=5, min_count=5,
                 max_vocab_size=None, sample=1e-3, seed=1, workers=3, min_alpha=0.0001, l2reg_coef=0.0, optimizer=None,
                 sg=0, hs=0, is_nll=False, burnin_epochs=0, negative=5, normalized=False, euclid=0, poincare=0, torus=0,
                 cbow_mean=1, hashfxn=hash, iter=5, null_word=0, trim_rule=None, sorted_vocab=1, sim_func=None,
                 cosh_dist_pow=0, batch_words=MAX_WORDS_IN_BATCH, compute_loss=False, with_bias=False,
                 init_near_border=False, initialization_config=None, ckpt_word_list=None, debug=False, callbacks=()):
        """
        Initialize the model from an iterable of `sentences`. Each sentence is a
        list of words (unicode strings) that will be used for training.
        Parameters
        ----------
        sentences : iterable of iterables
            The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
            consider an iterable that streams the sentences directly from disk/network.
            See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
            or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
            If you don't supply `sentences`, the model is left uninitialized -- use if you plan to initialize it
            in some other way.
        sg : int {1, 0}
            Defines the training algorithm. If 1, skip-gram is employed; otherwise, CBOW is used.
        size : int
            Dimensionality of the feature vectors.
        window : int
            The maximum distance between the current and predicted word within a sentence.
        alpha : float
            The initial learning rate.
        min_alpha : float
            Learning rate will linearly drop to `min_alpha` as training progresses.
        l2reg_coef : float
            Regularization coefficient. The weight of the penalty applied by the regularizer. Currently we regularize
            the norm of the input embedding of some frequent words.
        optimizer : str
            The name of the optimizer that we want to use to train the model (e.g. SGD, RSGD, AdaGrad etc)
        seed : int
            Seed for the random number generator. Initial vectors for each word are seeded with a hash of
            the concatenation of word + `str(seed)`. Note that for a fully deterministically-reproducible run,
            you must also limit the model to a single worker thread (`workers=1`), to eliminate ordering jitter
            from OS thread scheduling. (In Python 3, reproducibility between interpreter launches also requires
            use of the `PYTHONHASHSEED` environment variable to control hash randomization).
        min_count : int
            Ignores all words with total frequency lower than this.
        max_vocab_size : int
            Limits the RAM during vocabulary building; if there are more unique
            words than this, then prune the infrequent ones. Every 10 million word types need about 1GB of RAM.
            Set to `None` for no limit.
        sample : float
            The threshold for configuring which higher-frequency words are randomly downsampled,
            useful range is (0, 1e-5).
        workers : int
            Use these many worker threads to train the model (=faster training with multicore machines).
        hs : int {1,0}
            If 1, hierarchical softmax will be used for model training.
            If set to 0, and `negative` is non-zero, negative sampling will be used.
        is_nll : bool
            If true, then use NLL loss for SkipGram, otherwise use Negative Sampling loss.
        burnin_epochs : int
            Number of epochs of the burn-in stage, before training starts.
        negative : int
            If > 0, negative sampling will be used, the int for negative specifies how many "noise words"
            should be drawn (usually between 5-20).
            If set to 0, no negative sampling is used.
        normalized : bool
            If true, then normalize the word vectors to unit norm after each update.
        euclid : int
            If = 0 then dot product between word embeddings is used during training. Otherwise, we use some
            custom similarity function based on the Euclidean distance between the two word embeddings.
        poincare : int
            If = 1 then we embed the words in the Poincare ball model of hyperbolic space and use the Moebius gyrovector
            space to compute parallel transport and distances inside the manifold.
        torus : int
            If = 1 then we embed the words on an n-torus, where n is given by `size`.
        cbow_mean : int {1,0}
            If 0, use the sum of the context word vectors. If 1, use the mean, only applies when cbow is used.
        hashfxn : function
            Hash function to use to randomly initialize weights, for increased training reproducibility.
        iter : int
            Number of iterations (epochs) over the corpus.
        trim_rule : function
            Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
            be trimmed away, or handled using the default (discard if word count < min_count).
            Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
            or a callable that accepts parameters (word, count, min_count) and returns either
            :attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
            Note: The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as part
            of the model.
        sorted_vocab : int {1,0}
            If 1, sort the vocabulary by descending frequency before assigning word indexes.
        sim_func : str
            Similarity function used in the loss function during training.
        cosh_dist_pow : int
            Power, used if we use COSH_DIST_POW_K during training.
        batch_words : int
            Target size (in words) for batches of examples passed to worker threads (and
            thus cython routines).(Larger batches will be passed if individual
            texts are longer than 10000 words, but the standard cython code truncates to that maximum.)
        compute_loss : bool
            If True, computes and stores loss value which can be retrieved using `model.get_latest_training_loss()`.
        with_bias : bool
            If True, then use a model with biases. Otherwise, only have input/output embeddings for each word as
            parameters.
        init_near_border : bool
            If True, initialize word embeddings close to the Poincare ball border, instead of close to the origin. Only
            works for Poincare embeddings.
        initialization_config : InitializationConfig
            The configuration that will be used to initialize the embeddings from a pretrained model.
        ckpt_word_list : list of strings
            List of words for which we store checkpoints of their embedding during training for vizualisation purposes.
        debug : bool
            If True, then it will run in debug mode.
        callbacks : :obj: `list` of :obj: `~gensim.models.callbacks.CallbackAny2Vec`
            List of callbacks that need to be executed/run at specific stages during training.
        Examples
        --------
        Initialize and train a `Word2Vec` model
        >>> from gensim.models import Word2Vec
        >>> sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
        >>>
        >>> model = Word2Vec(sentences, min_count=1)
        >>> say_vector = model['say']  # get vector for word
        """
        self.callbacks = callbacks
        # Disallow calling `load` on an instance; it is a class-level operation.
        self.load = call_on_class_only
        # The Poincare model trains in float64 (DOUBLE); all others use float32 (REAL).
        vector_dtype = DOUBLE if poincare == 1 else REAL
        # Sanity checks.
        if euclid + poincare + torus > 1:
            raise RuntimeError("You must select only one model type. Given euclid {}, poincare {}, torus {}".format(
                euclid, poincare, torus
            ))
        self.optimizer = optimizer
        # Default optimizer depends on the geometry of the embedding space.
        if not optimizer:
            if poincare == 1:
                self.optimizer = "rsgd"
            else:
                self.optimizer = "full_rsgd"
        self.sim_func = sim_func
        self.cosh_dist_pow = cosh_dist_pow
        self.vocabulary = Word2VecVocab(
            max_vocab_size=max_vocab_size, min_count=min_count, sample=sample,
            sorted_vocab=bool(sorted_vocab), null_word=null_word)
        self.trainables = Word2VecTrainables(
            seed=seed, vector_size=size, vector_dtype=vector_dtype, hashfxn=hashfxn, optimizer=self.optimizer,
            initialization_config=initialization_config)
        self.debug = debug
        self.is_nll = is_nll
        self.l2reg_coef = l2reg_coef
        self.wv = None
        # Pick the keyed-vectors container matching the embedding geometry.
        if poincare == 1:
            self.wv = PoincareWordEmbeddingsKeyedVectors(size, vector_dtype, self.trainables, init_near_border)
        else:
            self.wv = Word2VecKeyedVectors(size, vector_dtype)
        if ckpt_word_list:
            self.word_checkpoints = WordEmbeddingCheckpoints(ckpt_word_list, self)
        if poincare == 1:
            # counter for projections back onto the Poincare ball during training
            self.num_projections = 0
        # The base class drives vocabulary building and the training loop.
        super(Word2Vec, self).__init__(
            sentences=sentences, workers=workers, vector_size=size, vector_dtype=vector_dtype, epochs=iter,
            callbacks=callbacks, batch_words=batch_words, trim_rule=trim_rule, sg=sg, alpha=alpha, window=window,
            seed=seed, hs=hs, negative=negative, normalized=normalized, euclid=euclid, poincare=poincare, torus=torus,
            burnin_epochs=burnin_epochs, cbow_mean=cbow_mean, min_alpha=min_alpha, compute_loss=compute_loss,
            fast_version=FAST_VERSION, with_bias=with_bias)
def _log_progress(self, job_queue, progress_queue, cur_epoch, example_count, total_examples,
raw_word_count, total_words, trained_word_count, elapsed):
if total_examples:
# examples-based progress %
logger.info(
"EPOCH %i - PROGRESS: at %.2f%% examples, %.0f words/s, in_qsize %i, out_qsize %i, batch loss %f",
cur_epoch + 1, 100.0 * example_count / total_examples, trained_word_count / elapsed,
utils.qsize(job_queue), utils.qsize(progress_queue), float(self.batch_training_loss)
)
else:
# words-based progress %
logger.info(
"EPOCH %i - PROGRESS: at %.2f%% words, %.0f words/s, in_qsize %i, out_qsize %i",
cur_epoch + 1, 100.0 * raw_word_count / total_words, trained_word_count / elapsed,
utils.qsize(job_queue), utils.qsize(progress_queue)
)
def _log_epoch_end(self, cur_epoch, example_count, total_examples, raw_word_count, total_words,
trained_word_count, elapsed):
logger.info(
"EPOCH - %i : training on %i raw words (%i effective words) took %.1fs, %.0f effective words, batch loss %f, epoch loss %f",
cur_epoch + 1, raw_word_count, trained_word_count, elapsed, trained_word_count / elapsed,
float(self.batch_training_loss), float(self.get_latest_training_loss())
)
# check that the input corpus hasn't changed during iteration
if total_examples and total_examples != example_count:
logger.warning(
"EPOCH - %i : supplied example count (%i) did not equal expected count (%i)", cur_epoch + 1,
example_count, total_examples
)
if total_words and total_words != raw_word_count:
logger.warning(
"EPOCH - %i : supplied raw word count (%i) did not equal expected count (%i)", cur_epoch + 1,
raw_word_count, total_words
)
def _do_train_job(self, sentences, alpha, inits, thread_id):
"""
Train a single batch of sentences. Return 2-tuple `(effective word count after
ignoring unknown words and sentence length trimming, total word count)`.
"""
work, neu1 = inits
tally = 0
if self.sg:
if self.poincare == 1:
curr_tally, batch_loss = train_batch_sg_dbl(self, sentences, alpha, work, self.compute_loss)
tally += curr_tally
if thread_id == 0:
self.batch_training_loss = batch_loss
else:
curr_tally, batch_loss = train_batch_sg(self, sentences, alpha, work, self.compute_loss)
tally += curr_tally
if thread_id == 0:
self.batch_training_loss = batch_loss
else:
tally += train_batch_cbow(self, sentences, alpha, work, neu1, self.compute_loss)
return tally, self._raw_word_count(sentences)
def _clear_post_train(self):
"""Resets certain properties of the model, post training."""
self.wv.vectors_norm = None
def _set_train_params(self, **kwargs):
if 'compute_loss' in kwargs:
self.compute_loss = kwargs['compute_loss']
self.running_training_loss = 0
    def train(self, sentences, total_examples=None, total_words=None,
              epochs=None, start_alpha=None, end_alpha=None, word_count=0,
              queue_factor=2, report_delay=1.0, compute_loss=False, callbacks=()):
        """Update the model's neural weights from a sequence of sentences (can be a once-only generator stream).
        For Word2Vec, each sentence must be a list of unicode strings. (Subclasses may accept other examples.)
        To support linear learning-rate decay from (initial) alpha to min_alpha, and accurate
        progress-percentage logging, either total_examples (count of sentences) or total_words (count of
        raw words in sentences) **MUST** be provided (if the corpus is the same as was provided to
        :meth:`~gensim.models.word2vec.Word2Vec.build_vocab()`, the count of examples in that corpus
        will be available in the model's :attr:`corpus_count` property).
        To avoid common mistakes around the model's ability to do multiple training passes itself, an
        explicit `epochs` argument **MUST** be provided. In the common and recommended case,
        where :meth:`~gensim.models.word2vec.Word2Vec.train()` is only called once,
        the model's cached `iter` value should be supplied as `epochs` value.
        Parameters
        ----------
        sentences : iterable of iterables
            The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
            consider an iterable that streams the sentences directly from disk/network.
            See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
            or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
        total_examples : int
            Count of sentences.
        total_words : int
            Count of raw words in sentences.
        epochs : int
            Number of iterations (epochs) over the corpus.
        start_alpha : float
            Initial learning rate.
        end_alpha : float
            Final learning rate. Drops linearly from `start_alpha`.
        word_count : int
            Count of words already trained. Set this to 0 for the usual
            case of training on all words in sentences.
        queue_factor : int
            Multiplier for size of queue (number of workers * queue_factor).
        report_delay : float
            Seconds to wait before reporting progress.
        compute_loss: bool
            If True, computes and stores loss value which can be retrieved using `model.get_latest_training_loss()`.
        callbacks : :obj: `list` of :obj: `~gensim.models.callbacks.CallbackAny2Vec`
            List of callbacks that need to be executed/run at specific stages during training.
        Examples
        --------
        >>> from gensim.models import Word2Vec
        >>> sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
        >>>
        >>> model = Word2Vec(min_count=1)
        >>> model.build_vocab(sentences)
        >>> model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
        """
        # The actual training loop lives in the base class; this override only
        # documents the Word2Vec-specific contract and forwards all arguments.
        return super(Word2Vec, self).train(
            sentences, total_examples=total_examples, total_words=total_words,
            epochs=epochs, start_alpha=start_alpha, end_alpha=end_alpha, word_count=word_count,
            queue_factor=queue_factor, report_delay=report_delay, compute_loss=compute_loss, callbacks=callbacks)
def score(self, sentences, total_sentences=int(1e6), chunksize=100, queue_factor=2, report_delay=1):
"""Score the log probability for a sequence of sentences (can be a once-only generator stream).
Each sentence must be a list of unicode strings.
This does not change the fitted model in any way (see Word2Vec.train() for that).
We have currently only implemented score for the hierarchical softmax scheme,
so you need to have run word2vec with hs=1 and negative=0 for this to work.
Note that you should specify total_sentences; we'll run into problems if you ask to
score more than this number of sentences but it is inefficient to set the value too high.
See the article by [#taddy]_ and the gensim demo at [#deepir]_ for examples of
how to use such scores in document classification.
.. [#taddy] Taddy, Matt. Document Classification by Inversion of Distributed Language Representations,
in Proceedings of the 2015 Conference of the Association of Computational Linguistics.
.. [#deepir] https://github.com/piskvorky/gensim/blob/develop/docs/notebooks/deepir.ipynb
Parameters
----------
sentences : iterable of iterables
The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
total_sentences : int
Count of sentences.
chunksize : int
Chunksize of jobs
queue_factor : int
Multiplier for size of queue (number of workers * queue_factor).
report_delay : float
Seconds to wait before reporting progress.
"""
if FAST_VERSION < 0:
warnings.warn(
"C extension compilation failed, scoring will be slow. "
"Install a C compiler and reinstall gensim for fastness."
)
logger.info(
"scoring sentences with %i workers on %i vocabulary and %i features, "
"using sg=%s hs=%s sample=%s and negative=%s",
self.workers, len(self.wv.vocab), self.trainables.layer1_size, self.sg, self.hs,
self.vocabulary.sample, self.negative
)
if not self.wv.vocab:
raise RuntimeError("you must first build vocabulary before scoring new data")
if not self.hs:
raise RuntimeError(
"We have currently only implemented score for the hierarchical softmax scheme, "
"so you need to have run word2vec with hs=1 and negative=0 for this to work."
)
def worker_loop():
"""Compute log probability for each sentence, lifting lists of sentences from the jobs queue."""
work = zeros(1, dtype=self.vector_dtype) # for sg hs, we actually only need one memory loc (running sum)
neu1 = matutils.zeros_aligned(self.trainables.layer1_size, dtype=self.vector_dtype)
while True:
job = job_queue.get()
if job is None: # signal to finish
break
ns = 0
for sentence_id, sentence in job:
if sentence_id >= total_sentences:
break
if self.sg:
score = score_sentence_sg(self, sentence, work)
else:
score = score_sentence_cbow(self, sentence, work, neu1)
sentence_scores[sentence_id] = score
ns += 1
progress_queue.put(ns) # report progress
start, next_report = default_timer(), 1.0
# buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
job_queue = Queue(maxsize=queue_factor * self.workers)
progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)
workers = [threading.Thread(target=worker_loop) for _ in xrange(self.workers)]
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
sentence_count = 0
sentence_scores = matutils.zeros_aligned(total_sentences, dtype=self.vector_dtype)
push_done = False
done_jobs = 0
jobs_source = enumerate(utils.grouper(enumerate(sentences), chunksize))
# fill jobs queue with (id, sentence) job items
while True:
try:
job_no, items = next(jobs_source)
if (job_no - 1) * chunksize > total_sentences:
logger.warning(
"terminating after %i sentences (set higher total_sentences if you want more).",
total_sentences
)
job_no -= 1
raise StopIteration()
logger.debug("putting job #%i in the queue", job_no)
job_queue.put(items)
except StopIteration:
logger.info("reached end of input; waiting to finish %i outstanding jobs", job_no - done_jobs + 1)
for _ in xrange(self.workers):
job_queue.put(None) # give the workers heads up that they can finish -- no more work!
push_done = True
try:
while done_jobs < (job_no + 1) or not push_done:
ns = progress_queue.get(push_done) # only block after all jobs pushed
sentence_count += ns
done_jobs += 1
elapsed = default_timer() - start
if elapsed >= next_report:
logger.info(
"PROGRESS: at %.2f%% sentences, %.0f sentences/s",
100.0 * sentence_count, sentence_count / elapsed
)
next_report = elapsed + report_delay # don't flood log, wait report_delay seconds
else:
# loop ended by job count; really done
break
except Empty:
pass # already out of loop; continue to next push
elapsed = default_timer() - start
self.clear_sims()
logger.info(
"scoring %i sentences took %.1fs, %.0f sentences/s",
sentence_count, elapsed, sentence_count / elapsed
)
return sentence_scores[:sentence_count]
def clear_sims(self):
"""Removes all L2-normalized vectors for words from the model.
You will have to recompute them using init_sims method.
"""
self.wv.vectors_norm = None
    def intersect_word2vec_format(self, fname, lockf=0.0, binary=False, encoding='utf8', unicode_errors='strict'):
        """Merge the input-hidden weight matrix from the original C word2vec-tool format
        given, where it intersects with the current vocabulary. (No words are added to the
        existing vocabulary, but intersecting words adopt the file's weights, and
        non-intersecting words are left alone.)

        Parameters
        ----------
        fname : str
            Path to the file, in the original C word2vec format, to read vectors from.
        binary : bool
            If True, `fname` is read as a binary word2vec file, otherwise as plain text.
        lockf : float
            Lock-factor value to be set for any imported word-vectors; the
            default value of 0.0 prevents further updating of the vector during subsequent
            training. Use 1.0 to allow further training updates of merged vectors.
        encoding : str
            Encoding used to decode words from the file.
        unicode_errors : str
            Error handling scheme for the unicode decoder (e.g. 'strict', 'ignore').
        """
        overlap_count = 0
        logger.info("loading projection weights from %s", fname)
        with utils.smart_open(fname) as fin:
            # first line of the C format is "<vocab_size> <vector_size>"
            header = utils.to_unicode(fin.readline(), encoding=encoding)
            vocab_size, vector_size = (int(x) for x in header.split())  # throws for invalid file format
            if not vector_size == self.wv.vector_size:
                raise ValueError("incompatible vector size %d in file %s" % (vector_size, fname))
                # TOCONSIDER: maybe mismatched vectors still useful enough to merge (truncating/padding)?
            if binary:
                binary_len = dtype(self.vector_dtype).itemsize * vector_size
                for _ in xrange(vocab_size):
                    # mixed text and binary: read text first, then binary
                    word = []
                    while True:
                        ch = fin.read(1)
                        if ch == b' ':
                            break
                        if ch != b'\n':  # ignore newlines in front of words (some binary files have)
                            word.append(ch)
                    word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)
                    weights = fromstring(fin.read(binary_len), dtype=self.vector_dtype)
                    if word in self.wv.vocab:
                        overlap_count += 1
                        self.wv.vectors[self.wv.vocab[word].index] = weights
                        self.trainables.vectors_lockf[self.wv.vocab[word].index] = lockf  # lock-factor: 0.0=no changes
            else:
                # text format: one "<word> <v1> <v2> ..." line per vector
                for line_no, line in enumerate(fin):
                    parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
                    if len(parts) != vector_size + 1:
                        raise ValueError("invalid vector on line %s (is this really the text format?)" % line_no)
                    word, weights = parts[0], [self.vector_dtype(x) for x in parts[1:]]
                    if word in self.wv.vocab:
                        overlap_count += 1
                        self.wv.vectors[self.wv.vocab[word].index] = weights
                        self.trainables.vectors_lockf[self.wv.vocab[word].index] = lockf  # lock-factor: 0.0=no changes
        logger.info("merged %d vectors into %s matrix from %s", overlap_count, self.wv.vectors.shape, fname)
@deprecated("Method will be removed in 4.0.0, use self.wv.__getitem__() instead")
def __getitem__(self, words):
"""
Deprecated. Use self.wv.__getitem__() instead.
Refer to the documentation for `gensim.models.keyedvectors.Word2VecKeyedVectors.__getitem__`
"""
return self.wv.__getitem__(words)
@deprecated("Method will be removed in 4.0.0, use self.wv.__contains__() instead")
def __contains__(self, word):
"""
Deprecated. Use self.wv.__contains__() instead.
Refer to the documentation for `gensim.models.keyedvectors.Word2VecKeyedVectors.__contains__`
"""
return self.wv.__contains__(word)
    def predict_output_word(self, context_words_list, topn=10):
        """Report the probability distribution of the center word given the context words
        as input to the trained model.

        Only implemented for negative-sampling models (requires `negative > 0`).

        Parameters
        ----------
        context_words_list : :obj: `list` of :obj: `str`
            List of context words
        topn: int
            Return `topn` words and their probabilities

        Returns
        -------
        :obj: `list` of :obj: `tuple`
            `topn` length list of tuples of (word, probability), or None if none of
            the context words are in the vocabulary.
        """
        if not self.negative:
            raise RuntimeError(
                "We have currently only implemented predict_output_word for the negative sampling scheme, "
                "so you need to have run word2vec with negative > 0 for this to work."
            )
        if not hasattr(self.wv, 'vectors') or not hasattr(self.trainables, 'syn1neg'):
            raise RuntimeError("Parameters required for predicting the output words not found.")
        # keep only in-vocabulary context words; out-of-vocabulary ones are silently dropped
        word_vocabs = [self.wv.vocab[w] for w in context_words_list if w in self.wv.vocab]
        if not word_vocabs:
            warnings.warn("All the input context words are out-of-vocabulary for the current model.")
            return None
        word2_indices = [word.index for word in word_vocabs]
        # CBOW-style projection: sum of context vectors, optionally averaged (cbow_mean)
        l1 = np_sum(self.wv.vectors[word2_indices], axis=0)
        if word2_indices and self.cbow_mean:
            l1 /= len(word2_indices)
        # propagate hidden -> output and take softmax to get probabilities
        prob_values = exp(dot(l1, self.trainables.syn1neg.T))
        prob_values /= sum(prob_values)
        top_indices = matutils.argsort(prob_values, topn=topn, reverse=True)
        # returning the most probable output words with their probabilities
        return [(self.wv.index2word[index1], prob_values[index1]) for index1 in top_indices]
def init_sims(self, replace=False):
"""
init_sims() resides in KeyedVectors because it deals with syn0/vectors mainly, but because syn1 is not an
attribute of KeyedVectors, it has to be deleted in this class, and the normalizing of syn0/vectors happens
inside of KeyedVectors
"""
if replace and hasattr(self.trainables, 'syn1'):
del self.trainables.syn1
return self.wv.init_sims(replace)
def reset_from(self, other_model):
"""Borrow shareable pre-built structures (like vocab) from the other_model. Useful
if testing multiple models in parallel on the same corpus.
"""
self.wv.vocab = other_model.wv.vocab
self.wv.index2word = other_model.wv.index2word
self.vocabulary.cum_table = other_model.vocabulary.cum_table
self.corpus_count = other_model.corpus_count
self.trainables.reset_weights(self.hs, self.negative, self.wv, other_model.with_bias)
    @staticmethod
    def log_accuracy(section):
        # Thin delegation to the KeyedVectors implementation; kept here for API compatibility.
        return Word2VecKeyedVectors.log_accuracy(section)
    @deprecated("Method will be removed in 4.0.0, use self.wv.accuracy() instead")
    def accuracy(self, questions, restrict_vocab=30000, most_similar=None, case_insensitive=True):
        """Deprecated: delegates to `self.wv.accuracy`, defaulting the similarity
        function to `Word2VecKeyedVectors.most_similar` when none is given."""
        most_similar = most_similar or Word2VecKeyedVectors.most_similar
        return self.wv.accuracy(questions, restrict_vocab, most_similar, case_insensitive)
def __str__(self):
return "%s(vocab=%s, size=%s, alpha=%s)" % (
self.__class__.__name__, len(self.wv.index2word), self.wv.vector_size, self.alpha
)
    def delete_temporary_training_data(self, replace_word_vectors_with_normalized=False):
        """Discard parameters that are used in training and score. Use if you're sure you're done training a model.

        If `replace_word_vectors_with_normalized` is set, forget the original vectors and only keep the normalized
        ones = saves lots of memory!
        """
        if replace_word_vectors_with_normalized:
            # destructive: the raw (un-normalized) vectors cannot be recovered afterwards
            self.init_sims(replace=True)
        self._minimize_model()
    def save(self, *args, **kwargs):
        """Save the model. This saved model can be loaded again using :func:`~gensim.models.word2vec.Word2Vec.load`,
        which supports online training and getting vectors for vocabulary words.

        Parameters
        ----------
        fname : str
            Path to the file.
        """
        # don't bother storing the cached normalized vectors, recalculable table;
        # a caller-supplied 'ignore' kwarg takes precedence over this default
        kwargs['ignore'] = kwargs.get('ignore', ['vectors_norm', 'cum_table'])
        super(Word2Vec, self).save(*args, **kwargs)
    def get_batch_training_loss(self):
        # Loss value accumulated per batch -- presumably populated during training when
        # loss tracking is enabled; confirm against the training loop that sets it.
        return self.batch_training_loss
    def get_latest_training_loss(self):
        # Running loss total since training began -- presumably populated during training
        # when loss tracking is enabled; confirm against the training loop that sets it.
        return self.running_training_loss
@deprecated(
"Method will be removed in 4.0.0, keep just_word_vectors = model.wv to retain just the KeyedVectors instance"
)
def _minimize_model(self, save_syn1=False, save_syn1neg=False, save_vectors_lockf=False):
if save_syn1 and save_syn1neg and save_vectors_lockf:
return
if hasattr(self.trainables, 'syn1') and not save_syn1:
del self.trainables.syn1
if hasattr(self.trainables, 'syn1neg') and not save_syn1neg:
del self.trainables.syn1neg
if hasattr(self.trainables, 'vectors_lockf') and not save_vectors_lockf:
del self.trainables.vectors_lockf
self.model_trimmed_post_training = True
    @classmethod
    def load_word2vec_format(
            cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
            limit=None, datatype=REAL):
        """Deprecated. Use gensim.models.KeyedVectors.load_word2vec_format instead."""
        # NOTE: intentionally raises (rather than warns) -- this entry point is fully removed.
        raise DeprecationWarning("Deprecated. Use gensim.models.KeyedVectors.load_word2vec_format instead.")
    def save_word2vec_format(self, fname, fvocab=None, binary=False):
        """Deprecated. Use model.wv.save_word2vec_format instead."""
        # NOTE: intentionally raises (rather than warns) -- this entry point is fully removed.
        raise DeprecationWarning("Deprecated. Use model.wv.save_word2vec_format instead.")
    @classmethod
    def load(cls, *args, **kwargs):
        """Loads a previously saved `Word2Vec` model. Also see `save()`.

        Parameters
        ----------
        fname : str
            Path to the saved file.

        Returns
        -------
        :obj: `~gensim.models.word2vec.Word2Vec`
            Returns the loaded model as an instance of :class: `~gensim.models.word2vec.Word2Vec`.
        """
        try:
            return super(Word2Vec, cls).load(*args, **kwargs)
        except AttributeError:
            # models pickled by an older gensim lack attributes the new code expects;
            # fall back to the compatibility loader that upgrades them
            logger.info('Model saved using code from earlier Gensim Version. Re-loading old model in a compatible way.')
            from gensim.models.deprecated.word2vec import load_old_word2vec
            return load_old_word2vec(*args, **kwargs)
class BrownCorpus(object):
    """Stream sentences from the Brown corpus (part of NLTK data).

    Each yielded sentence is a list of "word/POS" strings, with the word lowercased
    and the POS tag truncated to its first two characters.
    """

    def __init__(self, dirname):
        self.dirname = dirname

    def __iter__(self):
        for entry in os.listdir(self.dirname):
            path = os.path.join(self.dirname, entry)
            if not os.path.isfile(path):
                continue
            for raw_line in utils.smart_open(path):
                line = utils.to_unicode(raw_line)
                # each file line is a single sentence; tokens look like WORD/POS_TAG
                words = []
                for token in line.split():
                    pieces = token.split('/')
                    if len(pieces) != 2:
                        continue
                    word, tag = pieces
                    # keep only alphabetic tag prefixes; drops punctuation tags like "," or "!"
                    if tag[:2].isalpha():
                        words.append("%s/%s" % (word.lower(), tag[:2]))
                if not words:  # don't bother sending out empty sentences
                    continue
                yield words
class Text8Corpus(object):
    """Iterate over sentences from the "text8" corpus, unzipped from http://mattmahoney.net/dc/text8.zip ."""

    def __init__(self, fname, max_sentence_length=MAX_WORDS_IN_BATCH):
        # fname: path to the text8 file; max_sentence_length: tokens per yielded pseudo-sentence
        self.fname = fname
        self.max_sentence_length = max_sentence_length

    def __iter__(self):
        # the entire corpus is one gigantic line -- there are no sentence marks at all
        # so just split the sequence of tokens arbitrarily: 1 sentence = 1000 tokens
        sentence, rest = [], b''
        with utils.smart_open(self.fname) as fin:
            while True:
                text = rest + fin.read(8192)  # avoid loading the entire file (=1 line) into RAM
                if text == rest:  # EOF
                    words = utils.to_unicode(text).split()
                    sentence.extend(words)  # return the last chunk of words, too (may be shorter/longer)
                    if sentence:
                        yield sentence
                    break
                last_token = text.rfind(b' ')  # last token may have been split in two... keep for next iteration
                # everything up to the last space is tokenized now; the tail is carried over in `rest`
                words, rest = (utils.to_unicode(text[:last_token]).split(),
                               text[last_token:].strip()) if last_token >= 0 else ([], text)
                sentence.extend(words)
                # emit full-size pseudo-sentences as soon as enough tokens have accumulated
                while len(sentence) >= self.max_sentence_length:
                    yield sentence[:self.max_sentence_length]
                    sentence = sentence[self.max_sentence_length:]
class LineSentence(object):
    """Simple format: one sentence = one line; words already preprocessed and separated by whitespace.
    """

    def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):
        """
        `source` can be either a string or a file object. Clip the file to the first
        `limit` lines (or not clipped if limit is None, the default).

        Example::

            sentences = LineSentence('myfile.txt')

        Or for compressed files::

            sentences = LineSentence('compressed_text.txt.bz2')
            sentences = LineSentence('compressed_text.txt.gz')

        """
        self.source = source
        self.max_sentence_length = max_sentence_length
        self.limit = limit

    def _line_chunks(self, stream):
        """Yield token lists of at most `max_sentence_length` words from `stream`'s lines.

        Shared by both branches of __iter__; long lines are split into several chunks.
        """
        for line in itertools.islice(stream, self.limit):
            tokens = utils.to_unicode(line).split()
            for i in range(0, len(tokens), self.max_sentence_length):
                yield tokens[i: i + self.max_sentence_length]

    def __iter__(self):
        """Iterate through the lines in the source."""
        try:
            # Assume `source` is a file-like object; objects without seek() raise
            # AttributeError, which selects the filename branch below.
            self.source.seek(0)
            for chunk in self._line_chunks(self.source):
                yield chunk
        except AttributeError:
            # If it didn't work like a file, use it as a string filename
            with utils.smart_open(self.source) as fin:
                for chunk in self._line_chunks(fin):
                    yield chunk
class PathLineSentences(object):
    """Works like word2vec.LineSentence, but will process all files in a directory in alphabetical order by filename.

    The directory can only contain files that can be read by LineSentence: .bz2, .gz, and text files.
    Any file not ending with .bz2 or .gz is assumed to be a text file. Does not work with subdirectories.

    The format of files (either text, or compressed text files) in the path is one sentence = one line,
    with words already preprocessed and separated by whitespace.
    """

    def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):
        """
        `source` should be a path to a directory (as a string) where all files can be opened by the
        LineSentence class. Each file will be read up to `limit` lines (or not clipped if limit is None, the default).

        Example::

            sentences = PathLineSentences(os.getcwd() + '\\corpus\\')

        The files in the directory should be either text files, .bz2 files, or .gz files.
        """
        self.source = source
        self.max_sentence_length = max_sentence_length
        self.limit = limit
        if os.path.isfile(self.source):
            # degenerate case: a single file still works, but LineSentence is the better tool
            logger.debug('single file given as source, rather than a directory of files')
            logger.debug('consider using models.word2vec.LineSentence for a single file')
            self.input_files = [self.source]  # force code compatibility with list of files
        elif os.path.isdir(self.source):
            self.source = os.path.join(self.source, '')  # ensures os-specific slash at end of path
            logger.info('reading directory %s', self.source)
            self.input_files = os.listdir(self.source)
            self.input_files = [self.source + filename for filename in self.input_files]  # make full paths
            self.input_files.sort()  # makes sure it happens in filename order
        else:  # not a file or a directory, then we can't do anything with it
            raise ValueError('input is neither a file nor a path')
        logger.info('files read into PathLineSentences:%s', '\n'.join(self.input_files))

    def __iter__(self):
        """iterate through the files, splitting long lines into max_sentence_length-token chunks"""
        for file_name in self.input_files:
            logger.info('reading file %s', file_name)
            with utils.smart_open(file_name) as fin:
                for line in itertools.islice(fin, self.limit):
                    line = utils.to_unicode(line).split()
                    i = 0
                    while i < len(line):
                        yield line[i:i + self.max_sentence_length]
                        i += self.max_sentence_length
class Word2VecVocab(utils.SaveLoad):
    """Vocabulary-building helper for Word2Vec: scans corpora for word counts,
    applies `min_count` / `sample` trimming, and builds the Huffman tree (for
    hierarchical softmax) and the cumulative table (for negative sampling)."""

    def __init__(self, max_vocab_size=None, min_count=5, sample=1e-3, sorted_vocab=True, null_word=0):
        self.max_vocab_size = max_vocab_size
        self.min_count = min_count
        self.sample = sample
        self.sorted_vocab = sorted_vocab
        self.null_word = null_word
        self.cum_table = None  # for negative sampling
        self.raw_vocab = None

    def scan_vocab(self, sentences, progress_per=10000, trim_rule=None):
        """Do an initial scan of all words appearing in sentences.

        Leaves the raw counts in `self.raw_vocab` and returns
        ``(total_words, corpus_count)``.
        """
        logger.info("collecting all words and their counts")
        sentence_no = -1
        total_words = 0
        min_reduce = 1
        vocab = defaultdict(int)
        checked_string_types = 0
        for sentence_no, sentence in enumerate(sentences):
            if not checked_string_types:
                # warn once if the caller passed strings instead of token lists
                if isinstance(sentence, string_types):
                    logger.warning(
                        "Each 'sentences' item should be a list of words (usually unicode strings). "
                        "First item here is instead plain %s.",
                        type(sentence)
                    )
                checked_string_types += 1
            if sentence_no % progress_per == 0:
                logger.info(
                    "PROGRESS: at sentence #%i, processed %i words, keeping %i word types",
                    sentence_no, total_words, len(vocab)
                )
            for word in sentence:
                vocab[word] += 1
            total_words += len(sentence)
            if self.max_vocab_size and len(vocab) > self.max_vocab_size:
                # bound memory: periodically prune words rarer than min_reduce
                utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)
                min_reduce += 1
        logger.info(
            "collected %i word types from a corpus of %i raw words and %i sentences",
            len(vocab), total_words, sentence_no + 1
        )
        corpus_count = sentence_no + 1
        self.raw_vocab = vocab
        return total_words, corpus_count

    def sort_vocab(self, wv):
        """Sort the vocabulary so the most frequent words have the lowest indexes."""
        if len(wv.vectors):
            raise RuntimeError("cannot sort vocabulary after model weights already initialized.")
        wv.index2word.sort(key=lambda word: wv.vocab[word].count, reverse=True)
        for i, word in enumerate(wv.index2word):
            wv.vocab[word].index = i

    def prepare_vocab(self, hs, negative, wv, update=False, keep_raw_vocab=False, trim_rule=None,
                      min_count=None, sample=None, dry_run=False):
        """Apply vocabulary settings for `min_count` (discarding less-frequent words)
        and `sample` (controlling the downsampling of more-frequent words).

        Calling with `dry_run=True` will only simulate the provided settings and
        report the size of the retained vocabulary, effective corpus length, and
        estimated memory requirements. Results are both printed via logging and
        returned as a dict.

        Delete the raw vocabulary after the scaling is done to free up RAM,
        unless `keep_raw_vocab` is set.
        """
        min_count = min_count or self.min_count
        sample = sample or self.sample
        drop_total = drop_unique = 0
        if not update:
            logger.info("Loading a fresh vocabulary")
            retain_total, retain_words = 0, []
            # Discard words less-frequent than min_count
            if not dry_run:
                wv.index2word = []
                # make stored settings match these applied settings
                self.min_count = min_count
                self.sample = sample
                wv.vocab = {}
            for word, v in iteritems(self.raw_vocab):
                if keep_vocab_item(word, v, min_count, trim_rule=trim_rule):
                    retain_words.append(word)
                    retain_total += v
                    if not dry_run:
                        wv.vocab[word] = Vocab(count=v, index=len(wv.index2word))
                        wv.index2word.append(word)
                else:
                    drop_unique += 1
                    drop_total += v
            original_unique_total = len(retain_words) + drop_unique
            retain_unique_pct = len(retain_words) * 100 / max(original_unique_total, 1)
            logger.info(
                "min_count=%d retains %i unique words (%i%% of original %i, drops %i)",
                min_count, len(retain_words), retain_unique_pct, original_unique_total, drop_unique
            )
            original_total = retain_total + drop_total
            retain_pct = retain_total * 100 / max(original_total, 1)
            logger.info(
                "min_count=%d leaves %i word corpus (%i%% of original %i, drops %i)",
                min_count, retain_total, retain_pct, original_total, drop_total
            )
        else:
            logger.info("Updating model with new vocabulary")
            new_total = pre_exist_total = 0
            # BUGFIX: these must be two DISTINCT lists. The previous
            # `new_words = pre_exist_words = []` aliased both names to one list,
            # double-counting every word in the stats below and duplicating every
            # word in retain_words (inflating the downsampling estimates).
            new_words, pre_exist_words = [], []
            for word, v in iteritems(self.raw_vocab):
                if keep_vocab_item(word, v, min_count, trim_rule=trim_rule):
                    if word in wv.vocab:
                        pre_exist_words.append(word)
                        pre_exist_total += v
                        if not dry_run:
                            wv.vocab[word].count += v
                    else:
                        new_words.append(word)
                        new_total += v
                        if not dry_run:
                            wv.vocab[word] = Vocab(count=v, index=len(wv.index2word))
                            wv.index2word.append(word)
                else:
                    drop_unique += 1
                    drop_total += v
            original_unique_total = len(pre_exist_words) + len(new_words) + drop_unique
            pre_exist_unique_pct = len(pre_exist_words) * 100 / max(original_unique_total, 1)
            new_unique_pct = len(new_words) * 100 / max(original_unique_total, 1)
            logger.info(
                "New added %i unique words (%i%% of original %i) "
                "and increased the count of %i pre-existing words (%i%% of original %i)",
                len(new_words), new_unique_pct, original_unique_total, len(pre_exist_words),
                pre_exist_unique_pct, original_unique_total
            )
            retain_words = new_words + pre_exist_words
            retain_total = new_total + pre_exist_total
        # Precalculate each vocabulary item's threshold for sampling
        if not sample:
            # no words downsampled
            threshold_count = retain_total
        elif sample < 1.0:
            # traditional meaning: set parameter as proportion of total
            threshold_count = sample * retain_total
        else:
            # new shorthand: sample >= 1 means downsample all words with higher count than sample
            threshold_count = int(sample * (3 + sqrt(5)) / 2)
        downsample_total, downsample_unique = 0, 0
        for w in retain_words:
            v = self.raw_vocab[w]
            word_probability = (sqrt(v / threshold_count) + 1) * (threshold_count / v)
            if word_probability < 1.0:
                downsample_unique += 1
                downsample_total += word_probability * v
            else:
                word_probability = 1.0
                downsample_total += v
            if not dry_run:
                # store as a uint32-scaled probability for fast comparison during training
                wv.vocab[w].sample_int = int(round(word_probability * 2**32))
        if not dry_run and not keep_raw_vocab:
            logger.info("deleting the raw counts dictionary of %i items", len(self.raw_vocab))
            self.raw_vocab = defaultdict(int)
        logger.info("sample=%g downsamples %i most-common words", sample, downsample_unique)
        logger.info(
            "downsampling leaves estimated %i word corpus (%.1f%% of prior %i)",
            downsample_total, downsample_total * 100.0 / max(retain_total, 1), retain_total
        )
        # return from each step: words-affected, resulting-corpus-size, extra memory estimates
        report_values = {
            'drop_unique': drop_unique, 'retain_total': retain_total, 'downsample_unique': downsample_unique,
            'downsample_total': int(downsample_total), 'num_retained_words': len(retain_words)
        }
        if self.null_word:
            # create null pseudo-word for padding when using concatenative L1 (run-of-words)
            # this word is only ever input - never predicted - so count, huffman-point, etc doesn't matter
            self.add_null_word(wv)
        if self.sorted_vocab and not update:
            self.sort_vocab(wv)
        # Create index2freq. This needs to be done after we sort the vocabulary, so after we settle the indexes.
        for word in wv.index2word:
            wv.index2freq.append(wv.vocab[word].count)
        wv.index2freq = array(wv.index2freq, dtype=uint32)
        if hs:
            # add info about each word's Huffman encoding
            self.create_binary_tree(wv)
        if negative:
            # build the table for drawing random words (for negative sampling)
            self.make_cum_table(wv)
        return report_values

    def add_null_word(self, wv):
        # '\0' can never collide with a real token; sample_int=0 means never downsampled-in
        word, v = '\0', Vocab(count=1, sample_int=0)
        v.index = len(wv.vocab)
        wv.index2word.append(word)
        wv.vocab[word] = v

    def create_binary_tree(self, wv):
        """Create a binary Huffman tree using stored vocabulary word counts. Frequent words
        will have shorter binary codes. Called internally from `build_vocab()`.
        """
        logger.info("constructing a huffman tree from %i words", len(wv.vocab))
        # build the huffman tree
        heap = list(itervalues(wv.vocab))
        heapq.heapify(heap)
        for i in xrange(len(wv.vocab) - 1):
            min1, min2 = heapq.heappop(heap), heapq.heappop(heap)
            heapq.heappush(
                heap, Vocab(count=min1.count + min2.count, index=i + len(wv.vocab), left=min1, right=min2)
            )
        # recurse over the tree, assigning a binary code to each vocabulary word
        if heap:
            max_depth, stack = 0, [(heap[0], [], [])]
            while stack:
                node, codes, points = stack.pop()
                if node.index < len(wv.vocab):
                    # leaf node => store its path from the root
                    node.code, node.point = codes, points
                    max_depth = max(len(codes), max_depth)
                else:
                    # inner node => continue recursion
                    points = array(list(points) + [node.index - len(wv.vocab)], dtype=uint32)
                    stack.append((node.left, array(list(codes) + [0], dtype=uint8), points))
                    stack.append((node.right, array(list(codes) + [1], dtype=uint8), points))
            logger.info("built huffman tree with maximum node depth %i", max_depth)

    def make_cum_table(self, wv, power=0.75, domain=2**31 - 1):
        """Create a cumulative-distribution table using stored vocabulary word counts for
        drawing random words in the negative-sampling training routines.

        To draw a word index, choose a random integer up to the maximum value in the
        table (cum_table[-1]), then finding that integer's sorted insertion point
        (as if by bisect_left or ndarray.searchsorted()). That insertion point is the
        drawn index, coming up in proportion equal to the increment at that slot.

        Called internally from 'build_vocab()'.
        """
        vocab_size = len(wv.index2word)
        self.cum_table = zeros(vocab_size, dtype=uint32)
        # compute sum of all power (Z in paper)
        train_words_pow = 0.0
        for word_index in xrange(vocab_size):
            train_words_pow += wv.vocab[wv.index2word[word_index]].count**power
        cumulative = 0.0
        for word_index in xrange(vocab_size):
            cumulative += wv.vocab[wv.index2word[word_index]].count**power
            self.cum_table[word_index] = round(cumulative / train_words_pow * domain)
        if len(self.cum_table) > 0:
            assert self.cum_table[-1] == domain
class MyVocab(Word2VecVocab):
    # Vocabulary scanner that additionally collects word co-occurrence counts
    # within a symmetric window, stored in `self.cooccurrence_matrix` (a Counter
    # keyed by word pairs).
    #
    # NOTE(review): this override adds a required positional `window_size` argument,
    # so it is NOT call-compatible with Word2VecVocab.scan_vocab -- confirm all
    # callers pass window_size.
    def scan_vocab(self, sentences, window_size, progress_per=10000, trim_rule=None):
        """Do an initial scan of all words appearing in sentences."""
        logger.info("collecting all words and their counts")
        sentence_no = -1
        total_words = 0
        min_reduce = 1
        vocab = defaultdict(int)
        checked_string_types = 0
        self.cooccurrence_matrix = Counter()
        for sentence_no, sentence in enumerate(sentences):
            if not checked_string_types:
                if isinstance(sentence, string_types):
                    logger.warning(
                        "Each 'sentences' item should be a list of words (usually unicode strings). "
                        "First item here is instead plain %s.",
                        type(sentence)
                    )
                checked_string_types += 1
            if sentence_no % progress_per == 0:
                logger.info(
                    "PROGRESS: at sentence #%i, processed %i words, keeping %i word types",
                    sentence_no, total_words, len(vocab)
                )
            sentence_len = len(sentence)
            for i, word in enumerate(sentence):
                vocab[word] += 1
                # co-occurrence window: [i-window_size, i+window_size], clipped to the sentence
                start = max(0, i-window_size)
                end = min(sentence_len, i+window_size+1)
                # Look in context.
                for j in range(start, end):
                    if j == i:
                        # Skip current word.
                        continue
                    # combinations([a, b], 2) yields the single ordered pair (a, b);
                    # each unordered pair is therefore counted once per direction
                    self.cooccurrence_matrix.update(itertools.combinations([word, sentence[j]], 2))
            total_words += len(sentence)
            if self.max_vocab_size and len(vocab) > self.max_vocab_size:
                utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)
                min_reduce += 1
        logger.info(
            "collected %i word types from a corpus of %i raw words and %i sentences",
            len(vocab), total_words, sentence_no + 1
        )
        corpus_count = sentence_no + 1
        self.raw_vocab = vocab
        return total_words, corpus_count
class Word2VecTrainables(utils.SaveLoad):
def __init__(self, vector_size=100, vector_dtype=REAL, seed=1, hashfxn=hash, optimizer=None,
initialization_config=None):
self.hashfxn = hashfxn
self.layer1_size = vector_size
self.vector_dtype = vector_dtype
self.seed = seed
self.optimizer = optimizer
self.initialization_config = initialization_config
def prepare_weights(self, hs, negative, wv, update=False, vocabulary=None, with_bias=False, torus=False):
"""Build tables and model weights based on final vocabulary settings."""
# If a model_filename is provided, then load the model and use the embeddings in that model as initialization.
# set initial input/projection and hidden weights
if not update:
self.reset_weights(hs, negative, wv, with_bias)
else:
self.update_weights(hs, negative, wv, with_bias)
def seeded_vector(self, seed_string, vector_size, max_abs_value=None):
"""Create one 'random' vector (but deterministic by seed_string)"""
# Note: built-in hash() may vary by Python version or even (in Py3.x) per launch
once = random.RandomState(self.hashfxn(seed_string) & 0xffffffff)
if max_abs_value == None:
return (once.rand(vector_size) - 0.5) / vector_size
else:
return once.rand(vector_size) * 2 * max_abs_value - max_abs_value
def seeded_bounded_vector(self, seed_string, vector_size, min_value, max_value):
once = random.RandomState(self.hashfxn(seed_string) & 0xffffffff)
vec = once.uniform(low=min_value, high=max_value, size=vector_size)
return vec * sign(once.rand(vector_size) - 0.5)
def reset_weights(self, hs, negative, wv, with_bias=False):
"""Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary."""
logger.info("resetting layer weights")
wv.vectors = empty((len(wv.vocab), wv.vector_size), dtype=self.vector_dtype)
if not self.initialization_config:
# randomize weights vector by vector, rather than materializing a huge random matrix in RAM at once
for i in xrange(len(wv.vocab)):
# construct deterministic seed from word AND seed argument
if isinstance(wv, PoincareWordEmbeddingsKeyedVectors) and wv.init_near_border:
wv.vectors[i] = self.seeded_vector(
wv.index2word[i] + str(self.seed),
wv.vector_size,
max_abs_value=0.001)
# vector_norm = random.uniform(low=0.87, high=0.89)
vector_norm = random.uniform(low=0.8, high=0.85)
wv.vectors[i] = wv.vectors[i] / norm(wv.vectors[i]) * vector_norm
else:
wv.vectors[i] = self.seeded_vector(
wv.index2word[i] + str(self.seed),
wv.vector_size,
max_abs_value=0.001)
norms = norm(wv.vectors, axis=1)
print("[Target vector init] Average/Min/Max norm is {} / {} / {}".format(
average(norms), min(norms), max(norms)
))
if hs:
self.syn1 = zeros((len(wv.vocab), self.layer1_size), dtype=self.vector_dtype)
if negative:
if isinstance(wv, PoincareWordEmbeddingsKeyedVectors) and wv.init_near_border:
self.syn1neg = empty((len(wv.vocab), self.layer1_size), dtype=self.vector_dtype)
for i in xrange(len(wv.vocab)):
self.syn1neg[i] = self.seeded_vector(
wv.index2word[i] + str(self.seed),
wv.vector_size,
max_abs_value=0.001)
vector_norm = random.uniform(low=0.5, high=0.6)
self.syn1neg[i] = self.syn1neg[i] / norm(self.syn1neg[i]) * vector_norm
norms = norm(self.syn1neg, axis=1)
print("[Context vector init] Average/Min/Max norm is {} / {} / {}".format(
average(norms), min(norms), max(norms)
))
else:
self.syn1neg = zeros((len(wv.vocab), self.layer1_size), dtype=self.vector_dtype)
else:
if hs:
raise RuntimeError("Only negative sampling models can be currently initialized from pretrained embeddings")
if self.initialization_config.vocab_size != len(wv.vocab):
raise RuntimeError("Mismatch in vocabulary size between pretrained model and current model")
# Initialize with pretrained embeddings.
wv.vectors = self.initialization_config.init_vectors.astype(DOUBLE)
self.syn1neg = self.initialization_config.init_syn1neg.astype(DOUBLE)
wv.vectors_norm = None
# Only implemented model with bias for SGNS (negative sampling model).
if negative and with_bias:
self.b0 = zeros((len(wv.vocab), 1), dtype=self.vector_dtype)
self.b1 = zeros((len(wv.vocab), 1), dtype=self.vector_dtype)
# For RMSprop, define and initialize the arrays for accumulating historical square gradients.
if self.optimizer == "rmsprop":
# Accumulate gradients of input embeddings. vocab_size * (vector_size + 1) vector
self.Gsyn0 = zeros((len(wv.vocab) * self.layer1_size, 1), dtype=self.vector_dtype)
# Accumulate gradients of output embeddings. vocab_size * (vector_size + 1) vector
self.Gsyn1neg = zeros((len(wv.vocab) * self.layer1_size, 1), dtype=self.vector_dtype)
if with_bias:
# Accumulate gradients for biases.
self.Gb0 = zeros(len(wv.vocab), dtype=self.vector_dtype)
self.Gb1 = zeros(len(wv.vocab), dtype=self.vector_dtype)
self.vectors_lockf = ones(len(wv.vocab), dtype=self.vector_dtype) # zeros suppress learning
def update_weights(self, hs, negative, wv, with_bias=False):
    """Copy all the existing weights, and reset the weights for the newly added vocabulary.

    Parameters
    ----------
    hs : int
        Non-zero if the model uses hierarchical softmax (grows `syn1`).
    negative : int
        Non-zero if the model uses negative sampling (grows `syn1neg`).
    wv : keyed-vectors instance
        Holds the already-extended vocabulary and the existing input vectors.
    with_bias : bool, optional
        If True, also grow the per-word bias arrays `b0`/`b1`.

    Raises
    ------
    RuntimeError
        If called before any initial vocabulary/training exists.
    """
    logger.info("updating layer weights")
    # Fail fast on an online update without prior vocabulary, before allocating
    # or seeding any new arrays (the original raised only after the seeding loop).
    if not len(wv.vectors):
        raise RuntimeError(
            "You cannot do an online vocabulary-update of a model which has no prior vocabulary. "
            "First build the vocabulary of your model with a corpus before doing an online update."
        )
    gained_vocab = len(wv.vocab) - len(wv.vectors)
    newvectors = empty((gained_vocab, wv.vector_size), dtype=self.vector_dtype)
    # randomize the remaining words
    for i in xrange(len(wv.vectors), len(wv.vocab)):
        # construct deterministic seed from word AND seed argument
        newvectors[i - len(wv.vectors)] = self.seeded_vector(
            wv.index2word[i] + str(self.seed),
            wv.vector_size,
            max_abs_value=0.001)
    wv.vectors = vstack([wv.vectors, newvectors])
    if hs:
        self.syn1 = vstack([self.syn1, zeros((gained_vocab, self.layer1_size), dtype=self.vector_dtype)])
    if negative:
        self.syn1neg = vstack([self.syn1neg, zeros((gained_vocab, self.layer1_size), dtype=self.vector_dtype)])
        if with_bias:
            # BUG FIX: the original used `self.b0 + zeros((gained_vocab, 1))`, an
            # element-wise addition; with mismatched first dimensions numpy raises
            # a broadcasting error instead of appending rows for the new words.
            # Stack zero-valued bias rows instead, mirroring syn1/syn1neg above.
            self.b0 = vstack([self.b0, zeros((gained_vocab, 1), dtype=self.vector_dtype)])
            self.b1 = vstack([self.b1, zeros((gained_vocab, 1), dtype=self.vector_dtype)])
    wv.vectors_norm = None
    # do not suppress learning for already learned words
    self.vectors_lockf = ones(len(wv.vocab), dtype=self.vector_dtype)  # zeros suppress learning
    # TODO: we should update AdaGrad arrays here too, when AdaGrad will be functional
# Example: ./word2vec.py -train data.txt -output vec.txt -size 200 -window 5 -sample 1e-4 \
# -negative 5 -hs 0 -binary 0 -cbow 1 -iter 3
if __name__ == "__main__":
    import argparse

    logging.basicConfig(
        format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s',
        level=logging.INFO
    )
    logger.info("running %s", " ".join(sys.argv))
    logger.info("using optimization %s", FAST_VERSION)

    # check and process cmdline input
    program = os.path.basename(sys.argv[0])
    if len(sys.argv) < 2:
        print(globals()['__doc__'] % locals())
        sys.exit(1)

    from gensim.models.word2vec import Word2Vec  # noqa:F811 avoid referencing __main__ in pickle

    seterr(all='raise')  # don't ignore numpy errors

    parser = argparse.ArgumentParser()
    parser.add_argument("-train", help="Use text data from file TRAIN to train the model", required=True)
    parser.add_argument("-output", help="Use file OUTPUT to save the resulting word vectors")
    parser.add_argument("-window", help="Set max skip length WINDOW between words; default is 5", type=int, default=5)
    parser.add_argument("-size", help="Set size of word vectors; default is 100", type=int, default=100)
    parser.add_argument(
        "-sample",
        help="Set threshold for occurrence of words. "
             "Those that appear with higher frequency in the training data will be randomly down-sampled;"
             " default is 1e-3, useful range is (0, 1e-5)",
        type=float, default=1e-3
    )
    parser.add_argument(
        "-hs", help="Use Hierarchical Softmax; default is 0 (not used)",
        type=int, default=0, choices=[0, 1]
    )
    parser.add_argument(
        "-negative", help="Number of negative examples; default is 5, common values are 3 - 10 (0 = not used)",
        type=int, default=5
    )
    parser.add_argument("-threads", help="Use THREADS threads (default 12)", type=int, default=12)
    parser.add_argument("-iter", help="Run more training iterations (default 5)", type=int, default=5)
    parser.add_argument(
        "-min_count", help="This will discard words that appear less than MIN_COUNT times; default is 5",
        type=int, default=5
    )
    parser.add_argument(
        "-cbow", help="Use the continuous bag of words model; default is 1 (use 0 for skip-gram model)",
        type=int, default=1, choices=[0, 1]
    )
    parser.add_argument(
        "-binary", help="Save the resulting vectors in binary mode; default is 0 (off)",
        type=int, default=0, choices=[0, 1]
    )
    parser.add_argument("-accuracy", help="Use questions from file ACCURACY to evaluate the model")

    args = parser.parse_args()

    # -cbow 0 selects the skip-gram architecture; anything else selects CBOW.
    skipgram = 1 if args.cbow == 0 else 0

    corpus = LineSentence(args.train)

    model = Word2Vec(
        corpus, size=args.size, min_count=args.min_count, workers=args.threads,
        window=args.window, sample=args.sample, sg=skipgram, hs=args.hs,
        negative=args.negative, cbow_mean=1, iter=args.iter
    )

    # Pick the output stem: an explicit -output path, or derive it from -train.
    if args.output:
        outfile = args.output
        model.wv.save_word2vec_format(outfile, binary=args.binary)
    else:
        outfile = args.train
        model.save(outfile + '.model')

    use_binary = args.binary == 1
    model_suffix = '.model.bin' if use_binary else '.model.txt'
    model.wv.save_word2vec_format(outfile + model_suffix, binary=use_binary)

    if args.accuracy:
        model.accuracy(args.accuracy)

    logger.info("finished running %s", program)
class WordEmbeddingCheckpoints(utils.SaveLoad):
    """Record periodic snapshots ("checkpoints") of the embeddings of a fixed word list.

    Attributes
    ----------
    checkpoints : dict or None
        Maps each tracked word to the list of vector snapshots taken so far
        (``None`` until the first call to :meth:`add_checkpoints`).
    word_list : list of str
        Words to track; filtered against the model vocabulary on first use.
    model : model instance
        Model providing ``wv.vocab`` and ``wv.vectors``.
    is_word_list_filtered : bool
        Whether `word_list` has already been filtered against the vocabulary.
    """

    def __init__(self, word_list, model):
        self.checkpoints = None
        self.word_list = word_list
        self.model = model
        self.is_word_list_filtered = False

    def add_checkpoints(self):
        """Snapshot the current embedding of every tracked word.

        On the first call, drops words missing from the model vocabulary and
        initializes one snapshot list per remaining word.
        """
        if not self.is_word_list_filtered:
            self.is_word_list_filtered = True
            self.word_list = list(filter(lambda w: w in self.model.wv.vocab, self.word_list))
            # BUG FIX: the original built the dict with `[[]] * len(word_list)`,
            # which makes every word share the *same* list object; a dict
            # comprehension gives each word its own independent snapshot list.
            self.checkpoints = {word: [] for word in self.word_list}
            print("[Checkpoints] Will save checkpoints for {} words".format(len(self.word_list)))
        for word in self.checkpoints:
            index = self.model.wv.vocab[word].index
            # Copy the row so later training updates don't mutate the snapshot.
            self.checkpoints[word].append(array(self.model.wv.vectors[index]))

    def convert_to_list(self):
        """Convert snapshot arrays (as stored by :meth:`save`) back into plain lists."""
        if not self.checkpoints:
            return
        for word in self.checkpoints:
            self.checkpoints[word] = self.checkpoints[word].tolist()

    def save(self, file):
        """Persist checkpoints to `file`, packing each word's snapshots into one numpy array.

        Note: this mutates `self.checkpoints` in place (lists become arrays);
        use :meth:`convert_to_list` to undo the conversion after loading.
        """
        for word in self.checkpoints:
            self.checkpoints[word] = array(self.checkpoints[word])
        super(WordEmbeddingCheckpoints, self).save(file)
| 90,707 | 46.993651 | 155 | py |
poincare_glove | poincare_glove-master/gensim/models/lsi_dispatcher.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
""":class:`~gensim.models.lsi_dispatcher.Dispatcher` process which orchestrates
distributed :class:`~gensim.models.lsimodel.LsiModel` computations.
Run this script only once, on the master node in your cluster.
Notes
-----
The dispatches expects to find worker scripts already running. Make sure you run as many workers as you like on
your machines **before** launching the dispatcher.
Warnings
--------
Requires installed `Pyro4 <https://pythonhosted.org/Pyro4/>`_.
Distributed version works only in local network.
How to use distributed :class:`~gensim.models.lsimodel.LsiModel`
----------------------------------------------------------------
#. Install needed dependencies (Pyro4) ::
pip install gensim[distributed]
#. Setup serialization (on each machine) ::
export PYRO_SERIALIZERS_ACCEPTED=pickle
export PYRO_SERIALIZER=pickle
#. Run nameserver ::
python -m Pyro4.naming -n 0.0.0.0 &
#. Run workers (on each machine) ::
python -m gensim.models.lsi_worker &
#. Run dispatcher ::
python -m gensim.models.lsi_dispatcher &
#. Run :class:`~gensim.models.lsimodel.LsiModel` in distributed mode ::
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.models import LsiModel
>>>
>>> model = LsiModel(common_corpus, id2word=common_dictionary, distributed=True)
Command line arguments
----------------------
.. program-output:: python -m gensim.models.lsi_dispatcher --help
:ellipsis: 0, -5
"""
from __future__ import with_statement
import os
import sys
import logging
import argparse
import threading
import time
from six import iteritems, itervalues
try:
from Queue import Queue
except ImportError:
from queue import Queue
import Pyro4
from gensim import utils
logger = logging.getLogger(__name__)
# How many jobs (=chunks of N documents) to keep "pre-fetched" in a queue?
# A small number is usually enough, unless iteration over the corpus is very very
# slow (slower than the actual computation of LSI), in which case you can override
# this value from command line. ie. run "python ./lsi_dispatcher.py 100"
MAX_JOBS_QUEUE = 10
# timeout for the Queue object put/get blocking methods.
# it should really be infinity, but then keyboard interrupts don't work.
# so this is really just a hack, see http://bugs.python.org/issue1360
HUGE_TIMEOUT = 365 * 24 * 60 * 60 # one year
class Dispatcher(object):
    """Dispatcher object that communicates and coordinates individual workers.

    Warnings
    --------
    There should never be more than one dispatcher running at any one time.
    """

    def __init__(self, maxsize=0):
        """Partly initializes the dispatcher.

        A full initialization (including initialization of the workers) requires a call to
        :meth:`~gensim.models.lsi_dispatcher.Dispatcher.initialize`

        Parameters
        ----------
        maxsize : int, optional
            Maximum number of jobs to be kept pre-fetched in the queue.
        """
        self.maxsize = maxsize
        self.workers = {}
        self.callback = None  # a pyro proxy to this object (unknown at init time, but will be set later)

    @Pyro4.expose
    def initialize(self, **model_params):
        """Fully initializes the dispatcher and all its workers.

        Parameters
        ----------
        **model_params
            Keyword parameters used to initialize individual workers, see :class:`~gensim.models.lsimodel.LsiModel`.

        Raises
        ------
        RuntimeError
            When no workers are found (the `gensim.scripts.lsi_worker` script must be ran beforehand).
        """
        self.jobs = Queue(maxsize=self.maxsize)
        self.lock_update = threading.Lock()
        self._jobsdone = 0
        self._jobsreceived = 0
        # locate all available workers and store their proxies, for subsequent RMI calls
        self.workers = {}
        with utils.getNS() as ns:
            self.callback = Pyro4.Proxy('PYRONAME:gensim.lsi_dispatcher')  # = self
            for name, uri in iteritems(ns.list(prefix='gensim.lsi_worker')):
                try:
                    worker = Pyro4.Proxy(uri)
                    workerid = len(self.workers)
                    # make time consuming methods work asynchronously
                    logger.info("registering worker #%i from %s", workerid, uri)
                    worker.initialize(workerid, dispatcher=self.callback, **model_params)
                    self.workers[workerid] = worker
                except Pyro4.errors.PyroError:
                    # BUG FIX: pass `uri` as a lazy logging argument instead of
                    # eagerly %-formatting the message string.
                    logger.exception("unresponsive worker at %s, deleting it from the name server", uri)
                    ns.remove(name)
        if not self.workers:
            raise RuntimeError('no workers found; run some lsi_worker scripts on your machines first!')

    @Pyro4.expose
    def getworkers(self):
        """Get pyro URIs of all registered workers.

        Returns
        -------
        list of URIs
            The pyro URIs for each worker.
        """
        return [worker._pyroUri for worker in itervalues(self.workers)]

    @Pyro4.expose
    def getjob(self, worker_id):
        """Atomically pops a job from the queue.

        Parameters
        ----------
        worker_id : int
            The worker that requested the job.

        Returns
        -------
        iterable of iterable of (int, float)
            The corpus in BoW format.
        """
        logger.info("worker #%i requesting a new job", worker_id)
        job = self.jobs.get(block=True, timeout=1)
        logger.info("worker #%i got a new job (%i left)", worker_id, self.jobs.qsize())
        return job

    @Pyro4.expose
    def putjob(self, job):
        """Atomically add a job to the queue.

        Parameters
        ----------
        job : iterable of iterable of (int, float)
            The corpus in BoW format.
        """
        self._jobsreceived += 1
        self.jobs.put(job, block=True, timeout=HUGE_TIMEOUT)
        logger.info("added a new job (len(queue)=%i items)", self.jobs.qsize())

    @Pyro4.expose
    def getstate(self):
        """Merge projections from across all workers and get the final projection.

        Returns
        -------
        :class:`~gensim.models.lsimodel.Projection`
            The current projection of the total model.
        """
        logger.info("end of input, assigning all remaining jobs")
        logger.debug("jobs done: %s, jobs received: %s", self._jobsdone, self._jobsreceived)
        while self._jobsdone < self._jobsreceived:
            time.sleep(0.5)  # check every half a second
        # TODO: merge in parallel, so that we're done in `log_2(workers)` merges,
        # and not `workers - 1` merges!
        # but merging only takes place once, after all input data has been processed,
        # so the overall effect would be small... compared to the amount of coding :-)
        logger.info("merging states from %i workers", len(self.workers))
        workers = list(self.workers.items())
        result = workers[0][1].getstate()
        for workerid, worker in workers[1:]:
            logger.info("pulling state from worker %s", workerid)
            result.merge(worker.getstate())
        logger.info("sending out merged projection")
        return result

    @Pyro4.expose
    def reset(self):
        """Re-initialize all workers for a new decomposition."""
        for workerid, worker in iteritems(self.workers):
            logger.info("resetting worker %s", workerid)
            worker.reset()
            worker.requestjob()
        self._jobsdone = 0
        self._jobsreceived = 0

    @Pyro4.expose
    @Pyro4.oneway
    @utils.synchronous('lock_update')
    def jobdone(self, workerid):
        """Callback used by workers to notify when their job is done.

        The job done event is logged and then control is asynchronously transfered back to the worker
        (who can then request another job). In this way, control flow basically oscillates between
        :meth:`gensim.models.lsi_dispatcher.Dispatcher.jobdone` and
        :meth:`gensim.models.lsi_worker.Worker.requestjob`.

        Parameters
        ----------
        workerid : int
            The ID of the worker that finished the job (used for logging).
        """
        self._jobsdone += 1
        logger.info("worker #%s finished job #%i", workerid, self._jobsdone)
        worker = self.workers[workerid]
        worker.requestjob()  # tell the worker to ask for another job, asynchronously (one-way)

    def jobsdone(self):
        """Wrap :attr:`~gensim.models.lsi_dispatcher.Dispatcher._jobsdone`, needed for remote access through proxies.

        Returns
        -------
        int
            Number of jobs already completed.
        """
        return self._jobsdone

    @Pyro4.oneway
    def exit(self):
        """Terminate all registered workers and then the dispatcher."""
        for workerid, worker in iteritems(self.workers):
            logger.info("terminating worker %s", workerid)
            worker.exit()
        logger.info("terminating dispatcher")
        os._exit(0)  # exit the whole process (not just this thread ala sys.exit())
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
    parser = argparse.ArgumentParser(description=__doc__[:-135], formatter_class=argparse.RawTextHelpFormatter)
    # BUG FIX: `maxsize` must be declared with nargs='?'. For a positional
    # argument, argparse only applies `default` when nargs='?'; without it the
    # argument is mandatory and the documented no-argument invocation
    # (`python -m gensim.models.lsi_dispatcher &`) exits with a usage error.
    parser.add_argument(
        'maxsize', nargs='?', type=int,
        help='Maximum number of jobs to be kept pre-fetched in the queue.', default=MAX_JOBS_QUEUE
    )
    args = parser.parse_args()
    logger.info("running %s", " ".join(sys.argv))
    utils.pyro_daemon('gensim.lsi_dispatcher', Dispatcher(maxsize=args.maxsize))
    logger.info("finished running %s", parser.prog)
| 9,992 | 32.874576 | 119 | py |
poincare_glove | poincare_glove-master/gensim/models/poincare.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Jayant Jain <jayantjain1992@gmail.com>
# Copyright (C) 2017 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Python implementation of Poincaré Embeddings [1]_, an embedding that is better at capturing latent hierarchical
information than traditional Euclidean embeddings. The method is described in more detail in [1]_.
The main use-case is to automatically learn hierarchical representations of nodes from a tree-like structure,
such as a Directed Acyclic Graph, using a transitive closure of the relations. Representations of nodes in a
symmetric graph can also be learned, using an iterable of the direct relations in the graph.
This module allows training a Poincaré Embedding from a training file containing relations of graph in a
csv-like format, or a Python iterable of relations.
.. [1] Maximilian Nickel, Douwe Kiela - "Poincaré Embeddings for Learning Hierarchical Representations"
https://arxiv.org/abs/1705.08039
Examples
--------
Initialize and train a model from a list:
>>> from gensim.models.poincare import PoincareModel
>>> relations = [('kangaroo', 'marsupial'), ('kangaroo', 'mammal'), ('gib', 'cat')]
>>> model = PoincareModel(relations, negative=2)
>>> model.train(epochs=50)
Initialize and train a model from a file containing one relation per line:
>>> from gensim.models.poincare import PoincareModel, PoincareRelations
>>> from gensim.test.utils import datapath
>>> file_path = datapath('poincare_hypernyms.tsv')
>>> model = PoincareModel(PoincareRelations(file_path), negative=2)
>>> model.train(epochs=50)
"""
import csv
import logging
import sys
import time
import numpy as np
from collections import defaultdict, Counter
from numpy import random as np_random
from scipy.stats import spearmanr
from six import string_types
from smart_open import smart_open
from gensim import utils, matutils
from gensim.models.keyedvectors import Vocab, BaseKeyedVectors
from gensim.models.utils_any2vec import _save_word2vec_format, _load_word2vec_format
from numpy import float32 as REAL
try:
from autograd import grad # Only required for optionally verifying gradients while training
from autograd import numpy as grad_np
AUTOGRAD_PRESENT = True
except ImportError:
AUTOGRAD_PRESENT = False
logger = logging.getLogger(__name__)
class PoincareModel(utils.SaveLoad):
"""Class for training, using and evaluating Poincare Embeddings.
The model can be stored/loaded via its :meth:`~gensim.models.poincare.PoincareModel.save`
and :meth:`~gensim.models.poincare.PoincareModel.load` methods, or stored/loaded in the word2vec format
via `model.kv.save_word2vec_format` and :meth:`~gensim.models.poincare.PoincareKeyedVectors.load_word2vec_format`.
Note that training cannot be resumed from a model loaded via `load_word2vec_format`, if you wish to train further,
use :meth:`~gensim.models.poincare.PoincareModel.save` and :meth:`~gensim.models.poincare.PoincareModel.load`
methods instead.
"""
def __init__(self, train_data, size=50, alpha=0.1, negative=10, workers=1, epsilon=1e-5, regularization_coeff=1.0,
             burn_in=10, burn_in_alpha=0.01, init_range=(-0.001, 0.001), dtype=np.float64, seed=0):
    """Initialize and train a Poincare embedding model from an iterable of relations.

    Parameters
    ----------
    train_data : iterable of (str, str)
        Ordered relation pairs, e.g. a list of tuples or a PoincareRelations instance
        streaming from a file. A pair (a, b) does *not* imply (b, a); for symmetric
        data supply both directions explicitly.
    size : int, optional
        Dimensionality of the trained vectors.
    alpha : float, optional
        Learning rate used during the main training phase.
    negative : int, optional
        Number of negative samples drawn per positive relation.
    workers : int, optional
        Number of training threads (only 1 is currently supported).
    epsilon : float, optional
        Constant used when clipping embeddings back inside the unit ball.
    regularization_coeff : float, optional
        l2-regularization coefficient (0 disables regularization).
    burn_in : int, optional
        Number of burn-in epochs run before real training (0 disables burn-in).
    burn_in_alpha : float, optional
        Learning rate during burn-in; ignored when `burn_in` is 0.
    init_range : 2-tuple (float, float)
        Range for the uniform random initialization of the vectors.
    dtype : numpy.dtype
        Floating-point dtype of the vectors (lower precision trades accuracy
        for speed and memory).
    seed : int, optional
        Seed for the internal RNG, for reproducibility.

    Examples
    --------
    Initialize a model from a list:

    >>> from gensim.models.poincare import PoincareModel
    >>> relations = [('kangaroo', 'marsupial'), ('kangaroo', 'mammal'), ('gib', 'cat')]
    >>> model = PoincareModel(relations, negative=2)

    Initialize a model from a file containing one relation per line:

    >>> from gensim.models.poincare import PoincareModel, PoincareRelations
    >>> from gensim.test.utils import datapath
    >>> file_path = datapath('poincare_hypernyms.tsv')
    >>> model = PoincareModel(PoincareRelations(file_path), negative=2)

    See :class:`~gensim.models.poincare.PoincareRelations` for more options.
    """
    # Data and resulting keyed vectors.
    self.train_data = train_data
    self.kv = PoincareKeyedVectors(size)
    self.size = size
    # Learning-rate bookkeeping: `alpha` always holds the rate currently in
    # effect, while train_alpha/burn_in_alpha remember the configured rates.
    self.train_alpha = alpha
    self.burn_in_alpha = burn_in_alpha
    self.alpha = alpha
    # Optimization hyper-parameters.
    self.negative = negative
    self.workers = workers
    self.epsilon = epsilon
    self.regularization_coeff = regularization_coeff
    self.burn_in = burn_in
    self._burn_in_done = False
    self.dtype = dtype
    # Seeded RNG for reproducible sampling and initialization.
    self.seed = seed
    self._np_random = np_random.RandomState(seed)
    self.init_range = init_range
    self._loss_grad = None  # autograd gradient fn, created lazily when needed
    self._load_relations()
    self._init_embeddings()
def _load_relations(self):
    """Scan the training relations once, building the vocabulary and sampling tables."""
    vocab = {}
    index2word = []
    all_relations = []  # every (node_1_index, node_2_index) pair, in input order
    node_relations = defaultdict(set)  # node index -> indices of its related nodes
    logger.info("Loading relations from train data..")
    for relation in self.train_data:
        if len(relation) != 2:
            raise ValueError('Relation pair "%s" should have exactly two items' % repr(relation))
        pair_indices = []
        for item in relation:
            if item not in vocab:
                vocab[item] = Vocab(count=0, index=len(index2word))
                index2word.append(item)
            vocab[item].count += 1
            pair_indices.append(vocab[item].index)
        left_index, right_index = pair_indices
        node_relations[left_index].add(right_index)
        all_relations.append((left_index, right_index))
    logger.info("Loaded %d relations from train data, %d nodes", len(all_relations), len(vocab))
    self.kv.vocab = vocab
    self.kv.index2word = index2word
    self.indices_set = set(range(len(index2word)))  # all node indices, for fast membership tests
    self.indices_array = np.array(range(len(index2word)))
    # Unigram distribution over nodes, proportional to occurrence counts.
    counts = np.array([self.kv.vocab[index2word[i]].count for i in range(len(index2word))], dtype=np.float64)
    self._node_probabilities = counts / counts.sum()
    self._node_probabilities_cumsum = np.cumsum(self._node_probabilities)
    self.all_relations = all_relations
    self.node_relations = node_relations
    # Pre-drawn negative samples are cached here to amortize sampling cost.
    self._negatives_buffer = NegativesBuffer([])
    self._negatives_buffer_size = 2000
def _init_embeddings(self):
    """Randomly initialize a vector for every vocabulary item within `self.init_range`."""
    low, high = self.init_range
    dims = (len(self.kv.index2word), self.size)
    self.kv.syn0 = self._np_random.uniform(low, high, dims).astype(self.dtype)
def _get_candidate_negatives(self):
    """Pop `self.negative` candidate negative node indices off the internal buffer.

    Returns
    -------
    numpy.array
        Array of shape (`self.negative`,) containing indices of negative nodes.
    """
    if self._negatives_buffer.num_items() < self.negative:
        # Refill the buffer with one vectorized draw from the unigram distribution.
        # Note: np.random.choice is much slower than this for large populations,
        # hence sampling via searchsorted over the cumulative distribution.
        uniform_draws = self._np_random.random_sample(self._negatives_buffer_size)
        sampled_indices = np.searchsorted(self._node_probabilities_cumsum, uniform_draws)
        self._negatives_buffer = NegativesBuffer(sampled_indices)
    return self._negatives_buffer.get_items(self.negative)
def _sample_negatives(self, node_index):
    """Return a sample of negatives for the given node.

    Parameters
    ----------
    node_index : int
        Index of the positive node for which negative samples are to be returned.

    Returns
    -------
    list of int
        List of length `self.negative` containing indices of negative nodes for the given node index.
    """
    node_relations = self.node_relations[node_index]
    num_remaining_nodes = len(self.kv.vocab) - len(node_relations)
    if num_remaining_nodes < self.negative:
        # Not enough unconnected nodes to draw `self.negative` distinct negatives.
        raise ValueError(
            'Cannot sample %d negative nodes from a set of %d negative nodes for %s' %
            (self.negative, num_remaining_nodes, self.kv.index2word[node_index])
        )
    positive_fraction = float(len(node_relations)) / len(self.kv.vocab)
    if positive_fraction < 0.01:
        # If number of positive relations is a small fraction of total nodes
        # re-sample till no positively connected nodes are chosen
        indices = self._get_candidate_negatives()
        unique_indices = set(indices)
        times_sampled = 1
        # Rejection sampling: retry whenever the draw contains duplicates or any
        # node positively related to `node_index`; rejections are rare because
        # positive_fraction < 0.01 here.
        while (len(indices) != len(unique_indices)) or (unique_indices & node_relations):
            times_sampled += 1
            indices = self._get_candidate_negatives()
            unique_indices = set(indices)
        if times_sampled > 1:
            logger.debug('Sampled %d times, positive fraction %.5f', times_sampled, positive_fraction)
    else:
        # If number of positive relations is a significant fraction of total nodes
        # subtract positively connected nodes from set of choices and sample from the remaining
        valid_negatives = np.array(list(self.indices_set - node_relations))
        # Fancy indexing returns a copy, so the in-place division below does not
        # modify self._node_probabilities.
        probs = self._node_probabilities[valid_negatives]
        probs /= probs.sum()
        indices = self._np_random.choice(valid_negatives, size=self.negative, p=probs, replace=False)
    return list(indices)
@staticmethod
def _loss_fn(matrix, regularization_coeff=1.0):
    """Compute the Poincare loss for one positive pair plus its negative samples.

    Parameters
    ----------
    matrix : numpy.array
        Row 0 holds the vector for u, row 1 the vector for the positive example v,
        and the remaining rows the negative samples; shape (2 + negative_size, dim).
    regularization_coeff : float
        Coefficient to use for l2-regularization (applied to the positive v only).

    Returns
    -------
    float
        Computed loss value.

    Warnings
    --------
    Only used for autograd gradients, since autograd requires a specific function signature.
    """
    u_vec = matrix[0]
    v_vecs = matrix[1:]
    u_norm = grad_np.linalg.norm(u_vec)
    v_norms = grad_np.linalg.norm(v_vecs, axis=1)
    euclidean = grad_np.linalg.norm(u_vec - v_vecs, axis=1)
    # Poincare distance between u and every v, via the arccosh formula.
    poincare = grad_np.arccosh(
        1 + 2 * (
            (euclidean ** 2) / ((1 - u_norm ** 2) * (1 - v_norms ** 2))
        )
    )
    exp_neg_dists = grad_np.exp(-poincare)
    # l2 penalty on the positive example's vector only.
    reg_term = regularization_coeff * grad_np.linalg.norm(v_vecs[0]) ** 2
    # Negative log-likelihood of the positive pair under a softmax over distances.
    return -grad_np.log(exp_neg_dists[0] / (exp_neg_dists.sum())) + reg_term
@staticmethod
def _clip_vectors(vectors, epsilon):
    """Clip vectors to have a norm of less than one.

    Parameters
    ----------
    vectors : numpy.array
        Can be 1-D,or 2-D (in which case the norm for each row is checked).
    epsilon : float
        Parameter for numerical stability, each dimension of the vector is reduced by `epsilon`
        if the norm of the vector is greater than or equal to 1.

    Returns
    -------
    numpy.array
        Array with norms clipped below 1.

    Notes
    -----
    In the 2-D case, rows at or above the threshold are modified *in place*
    (the same array object is also returned); callers must not rely on the
    input being left untouched.
    """
    one_d = len(vectors.shape) == 1
    threshold = 1 - epsilon
    if one_d:
        norm = np.linalg.norm(vectors)
        if norm < threshold:
            return vectors
        else:
            # Rescale to unit norm, then nudge each component towards zero by
            # epsilon so the result lies strictly inside the unit ball.
            # NOTE(review): this branch rescales by `norm` (to length 1) while
            # the 2-D branch rescales to `threshold`; the results differ
            # slightly — confirm the asymmetry is intended.
            return vectors / norm - (np.sign(vectors) * epsilon)
    else:
        norms = np.linalg.norm(vectors, axis=1)
        if (norms < threshold).all():
            return vectors
        else:
            # Only rows whose norm reaches the threshold are rescaled; this
            # mutates `vectors` in place. The mask is computed from the
            # original norms, so the second statement targets the same rows.
            vectors[norms >= threshold] *= (threshold / norms[norms >= threshold])[:, np.newaxis]
            vectors[norms >= threshold] -= np.sign(vectors[norms >= threshold]) * epsilon
            return vectors
def save(self, *args, **kwargs):
    """Save complete model to disk, inherited from :class:`gensim.utils.SaveLoad`."""
    # The autograd gradient function cannot be pickled; drop it here — it is
    # re-created lazily by _check_gradients when needed again.
    self._loss_grad = None
    super(PoincareModel, self).save(*args, **kwargs)
@classmethod
def load(cls, *args, **kwargs):
    """Load a previously saved model from disk, inherited from :class:`~gensim.utils.SaveLoad`."""
    return super(PoincareModel, cls).load(*args, **kwargs)
def _prepare_training_batch(self, relations, all_negatives, check_gradients=False):
    """Creates training batch and computes gradients and loss for the batch.

    Parameters
    ----------
    relations : list of tuples
        List of tuples of positive examples of the form (node_1_index, node_2_index).
    all_negatives : list of lists
        List of lists of negative samples for each node_1 in the positive examples.
    check_gradients : bool, optional
        Whether to compare the computed gradients to autograd gradients for this batch.

    Returns
    -------
    :class:`~gensim.models.poincare.PoincareBatch`
        Contains node indices, computed gradients and loss for the batch.
    """
    batch_size = len(relations)
    indices_u, indices_v = [], []
    # indices_v is laid out per relation as [v, negative_1, ..., negative_k],
    # i.e. (1 + self.negative) entries per positive pair.
    for relation, negatives in zip(relations, all_negatives):
        u, v = relation
        indices_u.append(u)
        indices_v.append(v)
        indices_v.extend(negatives)
    vectors_u = self.kv.syn0[indices_u]
    # (batch_size * (1 + negative), size) -> (batch_size, 1 + negative, size)
    vectors_v = self.kv.syn0[indices_v].reshape((batch_size, 1 + self.negative, self.size))
    # -> (1 + negative, size, batch_size), the layout PoincareBatch works with.
    vectors_v = vectors_v.swapaxes(0, 1).swapaxes(1, 2)
    batch = PoincareBatch(vectors_u, vectors_v, indices_u, indices_v, self.regularization_coeff)
    batch.compute_all()
    if check_gradients:
        self._check_gradients(relations, all_negatives, batch)
    return batch
def _check_gradients(self, relations, all_negatives, batch, tol=1e-8):
    """Compare the analytically computed gradients for `batch` against autograd's.

    Parameters
    ----------
    relations : list of tuples
        Positive examples of the form (node_1_index, node_2_index).
    all_negatives : list of lists
        Negative samples for each node_1 in the positive examples.
    batch : PoincareBatch instance
        Batch whose computed gradients are to be checked.
    tol : float, optional
        Maximum allowed absolute difference before the check fails.
    """
    if not AUTOGRAD_PRESENT:
        logger.warning('autograd could not be imported, cannot do gradient checking')
        logger.warning('please install autograd to enable gradient checking')
        return
    if self._loss_grad is None:
        self._loss_grad = grad(PoincareModel._loss_fn)
    diffs = []
    for i, (relation, negatives) in enumerate(zip(relations, all_negatives)):
        u, v = relation
        auto_gradients = self._loss_grad(
            np.vstack((self.kv.syn0[u], self.kv.syn0[[v] + negatives])), self.regularization_coeff)
        computed_gradients = np.vstack((batch.gradients_u[:, i], batch.gradients_v[:, :, i]))
        diffs.append(np.abs(auto_gradients - computed_gradients).max())
    max_diff = max(diffs) if diffs else 0.0
    logger.info('Max difference between computed gradients and autograd gradients: %.10f', max_diff)
    assert max_diff < tol, (
        'Max difference between computed gradients and autograd gradients %.10f, '
        'greater than tolerance %.10f' % (max_diff, tol))
def _sample_negatives_batch(self, nodes):
    """Draw negative examples for each node in `nodes`.

    Parameters
    ----------
    nodes : list
        Node indices for which negative samples are to be returned.

    Returns
    -------
    list of lists
        One list of negative samples per input node.
    """
    return [self._sample_negatives(node) for node in nodes]
def _train_on_batch(self, relations, check_gradients=False):
    """Run one training step on a single batch of positive relations.

    Parameters
    ----------
    relations : list of tuples
        Positive examples of the form (node_1_index, node_2_index).
    check_gradients : bool, optional
        Whether to compare the computed gradients to autograd gradients for this batch.

    Returns
    -------
    :class:`~gensim.models.poincare.PoincareBatch`
        The batch that was just trained on, with its computed loss.
    """
    source_nodes = [relation[0] for relation in relations]
    all_negatives = self._sample_negatives_batch(source_nodes)
    batch = self._prepare_training_batch(relations, all_negatives, check_gradients)
    self._update_vectors_batch(batch)
    return batch
@staticmethod
def _handle_duplicates(vector_updates, node_indices):
    """Merge multiple updates aimed at the same node into a single row update.

    Parameters
    ----------
    vector_updates : numpy.array
        One row of updates per entry in `node_indices`; mutated in place.
    node_indices : list
        Node index targeted by each row of `vector_updates`.

    Notes
    -----
    Required because `vectors[[2, 1, 2]] += np.array([-0.5, 1.0, 0.5])` applies
    only the last update for a repeated index. All updates for a node are
    therefore summed into its last occurrence, and the earlier rows zeroed.
    """
    for node_index, occurrences in Counter(node_indices).items():
        if occurrences < 2:
            continue
        rows = [pos for pos, index in enumerate(node_indices) if index == node_index]
        vector_updates[rows[-1]] = vector_updates[rows].sum(axis=0)
        vector_updates[rows[:-1]] = 0
def _update_vectors_batch(self, batch):
    """Updates vectors for nodes in the given batch.

    Parameters
    ----------
    batch : :class:`~gensim.models.poincare.PoincareBatch`
        Batch containing computed gradients and node indices of the batch for which updates are to be done.

    Notes
    -----
    The `(batch.alpha ** 2) / 4` and `(batch.beta ** 2) / 4` factors scale the
    Euclidean gradients into Riemannian ones — presumably `batch.alpha`/`batch.beta`
    hold the `(1 - ||x||^2)` terms for the u and v vectors (defined in
    PoincareBatch, outside this view) — TODO confirm.
    """
    grad_u, grad_v = batch.gradients_u, batch.gradients_v
    indices_u, indices_v = batch.indices_u, batch.indices_v
    batch_size = len(indices_u)
    u_updates = (self.alpha * (batch.alpha ** 2) / 4 * grad_u).T
    # Collapse repeated occurrences of the same node into one update; otherwise
    # the fancy-indexed `-=` below would apply only the last occurrence.
    self._handle_duplicates(u_updates, indices_u)
    # XXX: here is uses the retraction to project the Riemannian gradient on the manifold
    self.kv.syn0[indices_u] -= u_updates
    self.kv.syn0[indices_u] = self._clip_vectors(self.kv.syn0[indices_u], self.epsilon)
    v_updates = self.alpha * (batch.beta ** 2)[:, np.newaxis] / 4 * grad_v
    # (1 + negative, size, batch) -> (batch, 1 + negative, size) -> one flat row
    # per (relation, candidate) pair, matching the layout of indices_v.
    v_updates = v_updates.swapaxes(1, 2).swapaxes(0, 1)
    v_updates = v_updates.reshape(((1 + self.negative) * batch_size, self.size))
    self._handle_duplicates(v_updates, indices_v)
    # XXX: here is uses the retraction to project the Riemannian gradient on the manifold
    self.kv.syn0[indices_v] -= v_updates
    self.kv.syn0[indices_v] = self._clip_vectors(self.kv.syn0[indices_v], self.epsilon)
def train(self, epochs, batch_size=10, print_every=1000, check_gradients_every=None):
    """Trains Poincare embeddings using loaded data and model parameters.

    Parameters
    ----------
    batch_size : int, optional
        Number of examples to train on in a single batch.
    epochs : int
        Number of iterations (epochs) over the corpus.
    print_every : int, optional
        Prints progress and average loss after every `print_every` batches.
    check_gradients_every : int or None, optional
        Compares computed gradients and autograd gradients after every `check_gradients_every` batches.
        Useful for debugging, doesn't compare by default.

    Examples
    --------
    >>> from gensim.models.poincare import PoincareModel
    >>> relations = [('kangaroo', 'marsupial'), ('kangaroo', 'mammal'), ('gib', 'cat')]
    >>> model = PoincareModel(relations, negative=2)
    >>> model.train(epochs=50)
    """
    if self.workers > 1:
        raise NotImplementedError("Multi-threaded version not implemented yet")
    # Some divide-by-zero results are handled explicitly
    old_settings = np.seterr(divide='ignore', invalid='ignore')
    logger.info(
        "training model of size %d with %d workers on %d relations for %d epochs and %d burn-in epochs, "
        "using lr=%.5f burn-in lr=%.5f negative=%d",
        self.size, self.workers, len(self.all_relations), epochs, self.burn_in,
        self.alpha, self.burn_in_alpha, self.negative
    )
    if self.burn_in > 0 and not self._burn_in_done:
        # Burn-in phase: train for `burn_in` epochs at the burn-in learning rate.
        # Runs at most once per model (guarded by _burn_in_done).
        logger.info("Starting burn-in (%d epochs)----------------------------------------", self.burn_in)
        self.alpha = self.burn_in_alpha
        self._train_batchwise(
            epochs=self.burn_in, batch_size=batch_size, print_every=print_every,
            check_gradients_every=check_gradients_every)
        self._burn_in_done = True
        logger.info("Burn-in finished")
    # Restore main-phase learning rate (burn-in above may have lowered self.alpha).
    self.alpha = self.train_alpha
    logger.info("Starting training (%d epochs)----------------------------------------", epochs)
    self._train_batchwise(
        epochs=epochs, batch_size=batch_size, print_every=print_every,
        check_gradients_every=check_gradients_every)
    logger.info("Training finished")
    # Restore the caller's numpy floating-point error settings.
    np.seterr(**old_settings)
def _train_batchwise(self, epochs, batch_size=10, print_every=1000, check_gradients_every=None):
    """Trains Poincare embeddings using specified parameters.

    Parameters
    ----------
    epochs : int
        Number of iterations (epochs) over the corpus.
    batch_size : int, optional
        Number of examples to train on in a single batch.
    print_every : int, optional
        Prints progress and average loss after every `print_every` batches.
    check_gradients_every : int or None, optional
        Compares computed gradients and autograd gradients after every `check_gradients_every` batches.
        Useful for debugging, doesn't compare by default.
    """
    if self.workers > 1:
        raise NotImplementedError("Multi-threaded version not implemented yet")
    for epoch in range(1, epochs + 1):
        # Visit the relations in a fresh random order each epoch.
        indices = list(range(len(self.all_relations)))
        self._np_random.shuffle(indices)
        avg_loss = 0.0
        last_time = time.time()
        for batch_num, i in enumerate(range(0, len(indices), batch_size), start=1):
            should_print = not (batch_num % print_every)
            check_gradients = bool(check_gradients_every) and (batch_num % check_gradients_every) == 0
            batch_indices = indices[i:i + batch_size]
            relations = [self.all_relations[idx] for idx in batch_indices]
            result = self._train_on_batch(relations, check_gradients=check_gradients)
            avg_loss += result.loss
            if should_print:
                # Report loss averaged over the last `print_every` batches, then
                # reset the accumulation window.
                avg_loss /= print_every
                time_taken = time.time() - last_time
                speed = print_every * batch_size / time_taken
                logger.info(
                    'Training on epoch %d, examples #%d-#%d, loss: %.2f'
                    % (epoch, i, i + batch_size, avg_loss))
                logger.info(
                    'Time taken for %d examples: %.2f s, %.2f examples / s'
                    % (print_every * batch_size, time_taken, speed))
                last_time = time.time()
                avg_loss = 0.0
class PoincareBatch(object):
    """Compute Poincare distances, gradients and loss for a training batch.

    Class for computing Poincare distances, gradients and loss for a training batch,
    and storing intermediate state to avoid recomputing multiple times.
    All computations are lazy: each ``compute_*`` method is a no-op after its
    first invocation.
    """

    def __init__(self, vectors_u, vectors_v, indices_u, indices_v, regularization_coeff=1.0):
        """Initialize instance with sets of vectors for which distances are to be computed.

        Parameters
        ----------
        vectors_u : numpy.array
            Vectors of all nodes `u` in the batch.
            Expected shape (batch_size, dim).
        vectors_v : numpy.array
            Vectors of all positively related nodes `v` and negatively sampled nodes `v'`,
            for each node `u` in the batch.
            Expected shape (1 + neg_size, dim, batch_size).
        indices_u : list
            List of node indices for each of the vectors in `vectors_u`.
        indices_v : list
            Nested list of lists, each of which is a list of node indices
            for each of the vectors in `vectors_v` for a specific node `u`.
        regularization_coeff : float
            Coefficient to use for l2-regularization.
        """
        self.vectors_u = vectors_u.T[np.newaxis, :, :]  # (1, dim, batch_size)
        self.vectors_v = vectors_v  # (1 + neg_size, dim, batch_size)
        self.indices_u = indices_u
        self.indices_v = indices_v
        self.regularization_coeff = regularization_coeff

        # Intermediate results, filled in lazily by the compute_* methods below.
        self.poincare_dists = None
        self.euclidean_dists = None
        self.norms_u = None
        self.norms_v = None
        self.alpha = None
        self.beta = None
        self.gamma = None
        self.gradients_u = None
        self.distance_gradients_u = None
        self.gradients_v = None
        self.distance_gradients_v = None
        self.loss = None

        self._distances_computed = False
        self._gradients_computed = False
        self._distance_gradients_computed = False
        self._loss_computed = False

    def compute_all(self):
        """Convenience method to perform all computations."""
        self.compute_distances()
        self.compute_distance_gradients()
        self.compute_gradients()
        self.compute_loss()

    def compute_distances(self):
        """Compute and store norms, euclidean distances and poincare distances between input vectors."""
        if self._distances_computed:
            return
        euclidean_dists = np.linalg.norm(self.vectors_u - self.vectors_v, axis=1)  # (1 + neg_size, batch_size)
        norms_u = np.linalg.norm(self.vectors_u, axis=1)  # (1, batch_size)
        norms_v = np.linalg.norm(self.vectors_v, axis=1)  # (1 + neg_size, batch_size)
        alpha = 1 - norms_u ** 2  # (1, batch_size)
        beta = 1 - norms_v ** 2  # (1 + neg_size, batch_size)
        gamma = 1 + 2 * (
            (euclidean_dists ** 2) / (alpha * beta)
        )  # (1 + neg_size, batch_size)
        poincare_dists = np.arccosh(gamma)  # (1 + neg_size, batch_size)
        exp_negative_distances = np.exp(-poincare_dists)  # (1 + neg_size, batch_size)
        Z = exp_negative_distances.sum(axis=0)  # (batch_size)

        self.euclidean_dists = euclidean_dists
        self.poincare_dists = poincare_dists
        self.exp_negative_distances = exp_negative_distances
        self.Z = Z
        self.norms_u = norms_u
        self.norms_v = norms_v
        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma

        self._distances_computed = True

    def compute_gradients(self):
        """Compute and store gradients of loss function for all input vectors."""
        if self._gradients_computed:
            return
        self.compute_distances()
        self.compute_distance_gradients()

        # Softmax-weighted distance gradients for all `v` (positive + negatives).
        # (1 + neg_size, dim, batch_size)
        gradients_v = -self.exp_negative_distances[:, np.newaxis, :] * self.distance_gradients_v
        gradients_v /= self.Z  # (1 + neg_size, dim, batch_size)
        # The positive `v` additionally gets the direct distance gradient
        # and the l2-regularization gradient.
        gradients_v[0] += self.distance_gradients_v[0]
        gradients_v[0] += self.regularization_coeff * 2 * self.vectors_v[0]

        # (1 + neg_size, dim, batch_size)
        gradients_u = -self.exp_negative_distances[:, np.newaxis, :] * self.distance_gradients_u
        gradients_u /= self.Z  # (1 + neg_size, dim, batch_size)
        gradients_u = gradients_u.sum(axis=0)  # (dim, batch_size)
        gradients_u += self.distance_gradients_u[0]

        assert not np.isnan(gradients_u).any()
        assert not np.isnan(gradients_v).any()
        self.gradients_u = gradients_u
        self.gradients_v = gradients_v

        self._gradients_computed = True

    def compute_distance_gradients(self):
        """Compute and store partial derivatives of poincare distance d(u, v) w.r.t all u and all v."""
        if self._distance_gradients_computed:
            return
        self.compute_distances()

        euclidean_dists_squared = self.euclidean_dists ** 2  # (1 + neg_size, batch_size)
        # (1 + neg_size, 1, batch_size)
        c_ = (4 / (self.alpha * self.beta * np.sqrt(self.gamma ** 2 - 1)))[:, np.newaxis, :]
        # (1 + neg_size, 1, batch_size)
        u_coeffs = ((euclidean_dists_squared + self.alpha) / self.alpha)[:, np.newaxis, :]
        distance_gradients_u = u_coeffs * self.vectors_u - self.vectors_v  # (1 + neg_size, dim, batch_size)
        distance_gradients_u *= c_  # (1 + neg_size, dim, batch_size)

        # gamma == 1 means u == v, where the distance gradient is undefined
        # (c_ divides by sqrt(gamma^2 - 1) == 0 above), so zero those entries.
        nan_gradients = self.gamma == 1  # (1 + neg_size, batch_size)
        if nan_gradients.any():
            distance_gradients_u.swapaxes(1, 2)[nan_gradients] = 0
        self.distance_gradients_u = distance_gradients_u

        # (1 + neg_size, 1, batch_size)
        v_coeffs = ((euclidean_dists_squared + self.beta) / self.beta)[:, np.newaxis, :]
        distance_gradients_v = v_coeffs * self.vectors_v - self.vectors_u  # (1 + neg_size, dim, batch_size)
        distance_gradients_v *= c_  # (1 + neg_size, dim, batch_size)
        if nan_gradients.any():
            distance_gradients_v.swapaxes(1, 2)[nan_gradients] = 0
        self.distance_gradients_v = distance_gradients_v

        self._distance_gradients_computed = True

    def compute_loss(self):
        """Compute and store loss value for the given batch of examples."""
        if self._loss_computed:
            return
        self.compute_distances()

        # Negative log-softmax of the positive example's distance.
        self.loss = -np.log(self.exp_negative_distances[0] / self.Z).sum()  # scalar
        self._loss_computed = True
class PoincareKeyedVectors(BaseKeyedVectors):
    """Class to contain vectors and vocab for the :class:`~gensim.models.poincare.PoincareModel` training class.

    Used to perform operations on the vectors such as vector lookup, distance etc.
    """

    def __init__(self, vector_size):
        super(PoincareKeyedVectors, self).__init__(vector_size)
        self.max_distance = 0
        self.index2word = []

    @property
    def vectors(self):
        # Alias for the underlying `syn0` array.
        return self.syn0

    @vectors.setter
    def vectors(self, value):
        self.syn0 = value

    @property
    def index2entity(self):
        # Alias for `index2word`.
        return self.index2word

    @index2entity.setter
    def index2entity(self, value):
        self.index2word = value

    def word_vec(self, word):
        """
        Accept a single word as input.
        Returns the word's representations in vector space, as a 1D numpy array.

        Example::

          >>> trained_model.word_vec('office')
          array([ -1.40128313e-02, ...])

        """
        return super(PoincareKeyedVectors, self).get_vector(word)

    def words_closer_than(self, w1, w2):
        """
        Returns all words that are closer to `w1` than `w2` is to `w1`.

        Parameters
        ----------
        w1 : str
            Input word.
        w2 : str
            Input word.

        Returns
        -------
        list (str)
            List of words that are closer to `w1` than `w2` is to `w1`.

        Examples
        --------
        >>> model.words_closer_than('carnivore.n.01', 'mammal.n.01')
        ['dog.n.01', 'canine.n.02']

        """
        return super(PoincareKeyedVectors, self).closer_than(w1, w2)

    def save_word2vec_format(self, fname, fvocab=None, binary=False, total_vec=None):
        """
        Store the input-hidden weight matrix in the same format used by the original
        C word2vec-tool, for compatibility.

        `fname` is the file used to save the vectors in
        `fvocab` is an optional file used to save the vocabulary
        `binary` is an optional boolean indicating whether the data is to be saved
        in binary word2vec format (default: False)
        `total_vec` is an optional parameter to explicitly specify total no. of vectors
        (in case word vectors are appended with document vectors afterwards)

        """
        _save_word2vec_format(fname, self.vocab, self.syn0, fvocab=fvocab, binary=binary, total_vec=total_vec)

    @classmethod
    def load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
                             limit=None, datatype=REAL):
        """
        Load the input-hidden weight matrix from the original C word2vec-tool format.

        Note that the information stored in the file is incomplete (the binary tree is missing),
        so while you can query for word similarity etc., you cannot continue training
        with a model loaded this way.

        `binary` is a boolean indicating whether the data is in binary word2vec format.
        `norm_only` is a boolean indicating whether to only store normalised word2vec vectors in memory.
        Word counts are read from `fvocab` filename, if set (this is the file generated
        by `-save-vocab` flag of the original C tool).

        If you trained the C model using non-utf8 encoding for words, specify that
        encoding in `encoding`.

        `unicode_errors`, default 'strict', is a string suitable to be passed as the `errors`
        argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source
        file may include word tokens truncated in the middle of a multibyte unicode character
        (as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.

        `limit` sets a maximum number of word-vectors to read from the file. The default,
        None, means read all.

        `datatype` (experimental) can coerce dimensions to a non-default float type (such
        as np.float16) to save memory. (Such types may result in much slower bulk operations
        or incompatibility with optimized routines.)

        """
        return _load_word2vec_format(
            PoincareKeyedVectors, fname, fvocab=fvocab, binary=binary, encoding=encoding,
            unicode_errors=unicode_errors, limit=limit, datatype=datatype)

    @staticmethod
    def vector_distance(vector_1, vector_2):
        """
        Return poincare distance between two input vectors. Convenience method over `vector_distance_batch`.

        Parameters
        ----------
        vector_1 : numpy.array
            input vector
        vector_2 : numpy.array
            input vector

        Returns
        -------
        numpy.float
            Poincare distance between `vector_1` and `vector_2`.

        """
        return PoincareKeyedVectors.vector_distance_batch(vector_1, vector_2[np.newaxis, :])[0]

    @staticmethod
    def vector_distance_batch(vector_1, vectors_all):
        """
        Return poincare distances between one vector and a set of other vectors.

        Parameters
        ----------
        vector_1 : numpy.array
            vector from which Poincare distances are to be computed.
            expected shape (dim,)
        vectors_all : numpy.array
            for each row in vectors_all, distance from vector_1 is computed.
            expected shape (num_vectors, dim)

        Returns
        -------
        numpy.array
            Contains Poincare distance between vector_1 and each row in vectors_all.
            shape (num_vectors,)

        """
        euclidean_dists = np.linalg.norm(vector_1 - vectors_all, axis=1)
        norm = np.linalg.norm(vector_1)
        all_norms = np.linalg.norm(vectors_all, axis=1)
        return np.arccosh(
            1 + 2 * (
                (euclidean_dists ** 2) / ((1 - norm ** 2) * (1 - all_norms ** 2))
            )
        )

    def closest_child(self, node):
        """
        Returns the node closest to `node` that is lower in the hierarchy than `node`.

        Parameters
        ----------
        node : str or int
            Key for node for which closest child is to be found.

        Returns
        -------
        str or None
            Node closest to `node` that is lower in the hierarchy than `node`.
            If there are no nodes lower in the hierarchy, None is returned.

        """
        all_distances = self.distances(node)
        all_norms = np.linalg.norm(self.syn0, axis=1)
        node_norm = all_norms[self.vocab[node].index]
        # Nodes with norm <= node's norm are at the same level or higher in the
        # hierarchy (see `norm`: smaller norm == higher), so mask them out.
        mask = node_norm >= all_norms
        if mask.all():  # No nodes lower in the hierarchy
            return None
        all_distances = np.ma.array(all_distances, mask=mask)
        closest_child_index = np.ma.argmin(all_distances)
        return self.index2word[closest_child_index]

    def closest_parent(self, node):
        """
        Returns the node closest to `node` that is higher in the hierarchy than `node`.

        Parameters
        ----------
        node : str or int
            Key for node for which closest parent is to be found.

        Returns
        -------
        str or None
            Node closest to `node` that is higher in the hierarchy than `node`.
            If there are no nodes higher in the hierarchy, None is returned.

        """
        all_distances = self.distances(node)
        all_norms = np.linalg.norm(self.syn0, axis=1)
        node_norm = all_norms[self.vocab[node].index]
        # Mask out nodes at the same level or lower in the hierarchy.
        mask = node_norm <= all_norms
        if mask.all():  # No nodes higher in the hierarchy
            return None
        all_distances = np.ma.array(all_distances, mask=mask)
        closest_parent_index = np.ma.argmin(all_distances)
        return self.index2word[closest_parent_index]

    def descendants(self, node, max_depth=5):
        """
        Returns the list of recursively closest children from the given node, upto a max depth of `max_depth`.

        Parameters
        ----------
        node : str or int
            Key for node for which descendants are to be found.
        max_depth : int
            Maximum number of descendants to return.

        Returns
        -------
        list (str)
            Descendant nodes from the node `node`.

        """
        # NOTE(review): if `closest_child` returns None before max_depth is
        # reached, the next iteration will fail on a None lookup — confirm
        # callers never request more levels than the hierarchy provides.
        depth = 0
        descendants = []
        current_node = node
        while depth < max_depth:
            descendants.append(self.closest_child(current_node))
            current_node = descendants[-1]
            depth += 1
        return descendants

    def ancestors(self, node):
        """
        Returns the list of recursively closest parents from the given node.

        Parameters
        ----------
        node : str or int
            Key for node for which ancestors are to be found.

        Returns
        -------
        list (str)
            Ancestor nodes of the node `node`.

        """
        ancestors = []
        current_node = node
        ancestor = self.closest_parent(current_node)
        while ancestor is not None:
            ancestors.append(ancestor)
            ancestor = self.closest_parent(ancestors[-1])
        return ancestors

    def distance(self, w1, w2):
        """
        Return Poincare distance between vectors for nodes `w1` and `w2`.

        Parameters
        ----------
        w1 : str or int
            Key for first node.
        w2 : str or int
            Key for second node.

        Returns
        -------
        float
            Poincare distance between the vectors for nodes `w1` and `w2`.

        Examples
        --------
        >>> model.distance('mammal.n.01', 'carnivore.n.01')
        2.13

        Notes
        -----
        Raises KeyError if either of `w1` and `w2` is absent from vocab.

        """
        vector_1 = self.word_vec(w1)
        vector_2 = self.word_vec(w2)
        return self.vector_distance(vector_1, vector_2)

    def similarity(self, w1, w2):
        """
        Return similarity based on Poincare distance between vectors for nodes `w1` and `w2`.

        Parameters
        ----------
        w1 : str or int
            Key for first node.
        w2 : str or int
            Key for second node.

        Returns
        -------
        float
            Similarity between the between the vectors for nodes `w1` and `w2` (between 0 and 1).

        Examples
        --------
        >>> model.similarity('mammal.n.01', 'carnivore.n.01')
        0.73

        Notes
        -----
        Raises KeyError if either of `w1` and `w2` is absent from vocab.
        Similarity lies between 0 and 1.

        """
        return 1 / (1 + self.distance(w1, w2))

    def most_similar(self, node_or_vector, topn=10, restrict_vocab=None):
        """
        Find the top-N most similar nodes to the given node or vector, sorted in increasing order of distance.

        Parameters
        ----------
        node_or_vector : str/int or numpy.array
            node key or vector for which similar nodes are to be found.
        topn : int or None, optional
            number of similar nodes to return, if `None`, returns all.
        restrict_vocab : int or None, optional
            Optional integer which limits the range of vectors which are searched for most-similar values.
            For example, restrict_vocab=10000 would only check the first 10000 node vectors in the vocabulary order.
            This may be meaningful if vocabulary is sorted by descending frequency.

        Returns
        --------
        list of tuples (str, float)
            List of tuples containing (node, distance) pairs in increasing order of distance.

        Examples
        --------
        >>> vectors.most_similar('lion.n.01')
        [('lion_cub.n.01', 0.4484), ('lionet.n.01', 0.6552), ...]

        """
        if not restrict_vocab:
            all_distances = self.distances(node_or_vector)
        else:
            nodes_to_use = self.index2word[:restrict_vocab]
            all_distances = self.distances(node_or_vector, nodes_to_use)

        if isinstance(node_or_vector, string_types + (int,)):
            node_index = self.vocab[node_or_vector].index
        else:
            node_index = None
        if not topn:
            closest_indices = matutils.argsort(all_distances)
        else:
            closest_indices = matutils.argsort(all_distances, topn=1 + topn)
        result = [
            (self.index2word[index], float(all_distances[index]))
            # Exclude the input node itself from the results. The check must be
            # `is None` (not truthiness): a node at vocab index 0 is falsy and
            # would otherwise be returned as its own nearest neighbour.
            for index in closest_indices if (node_index is None or index != node_index)
        ]
        if topn:
            result = result[:topn]
        return result

    def distances(self, node_or_vector, other_nodes=()):
        """
        Compute Poincare distances from given node or vector to all nodes in `other_nodes`.
        If `other_nodes` is empty, return distance between `node_or_vector` and all nodes in vocab.

        Parameters
        ----------
        node_or_vector : str/int or numpy.array
            Node key or vector from which distances are to be computed.
        other_nodes : iterable of str/int or None
            For each node in `other_nodes` distance from `node_or_vector` is computed.
            If None or empty, distance of `node_or_vector` from all nodes in vocab is computed (including itself).

        Returns
        -------
        numpy.array
            Array containing distances to all nodes in `other_nodes` from input `node_or_vector`,
            in the same order as `other_nodes`.

        Examples
        --------
        >>> model.distances('mammal.n.01', ['carnivore.n.01', 'dog.n.01'])
        np.array([2.1199, 2.0710]
        >>> model.distances('mammal.n.01')
        np.array([0.43753847, 3.67973852, ..., 6.66172886])

        Notes
        -----
        Raises KeyError if either `node_or_vector` or any node in `other_nodes` is absent from vocab.

        """
        if isinstance(node_or_vector, string_types):
            input_vector = self.word_vec(node_or_vector)
        else:
            input_vector = node_or_vector
        if not other_nodes:
            other_vectors = self.syn0
        else:
            other_indices = [self.vocab[node].index for node in other_nodes]
            other_vectors = self.syn0[other_indices]
        return self.vector_distance_batch(input_vector, other_vectors)

    def norm(self, node_or_vector):
        """
        Return absolute position in hierarchy of input node or vector.
        Values range between 0 and 1. A lower value indicates the input node or vector is higher in the hierarchy.

        Parameters
        ----------
        node_or_vector : str/int or numpy.array
            Input node key or vector for which position in hierarchy is to be returned.

        Returns
        -------
        float
            Absolute position in the hierarchy of the input vector or node.

        Examples
        --------
        >>> model.norm('mammal.n.01')
        0.9

        Notes
        -----
        The position in hierarchy is based on the norm of the vector for the node.

        """
        if isinstance(node_or_vector, string_types):
            input_vector = self.word_vec(node_or_vector)
        else:
            input_vector = node_or_vector
        return np.linalg.norm(input_vector)

    def difference_in_hierarchy(self, node_or_vector_1, node_or_vector_2):
        """
        Relative position in hierarchy of `node_or_vector_1` relative to `node_or_vector_2`.
        A positive value indicates `node_or_vector_1` is higher in the hierarchy than `node_or_vector_2`.

        Parameters
        ----------
        node_or_vector_1 : str/int or numpy.array
            Input node key or vector.
        node_or_vector_2 : str/int or numpy.array
            Input node key or vector.

        Returns
        -------
        float
            Relative position in hierarchy of `node_or_vector_1` relative to `node_or_vector_2`.

        Examples
        --------
        >>> model.difference_in_hierarchy('mammal.n.01', 'dog.n.01')
        0.51
        >>> model.difference_in_hierarchy('dog.n.01', 'mammal.n.01')
        -0.51

        Notes
        -----
        The returned value can be positive or negative, depending on whether `node_or_vector_1` is higher
        or lower in the hierarchy than `node_or_vector_2`.

        """
        return self.norm(node_or_vector_2) - self.norm(node_or_vector_1)
class PoincareRelations(object):
    """Class to stream relations for `PoincareModel` from a tsv-like file."""

    def __init__(self, file_path, encoding='utf8', delimiter='\t'):
        """Initialize instance from file containing a pair of nodes (a relation) per line.

        Parameters
        ----------
        file_path : str
            Path to file containing a pair of nodes (a relation) per line, separated by `delimiter`.
        encoding : str, optional
            Character encoding of the input file.
        delimiter : str, optional
            Delimiter character for each relation.
        """
        self.file_path = file_path
        self.encoding = encoding
        self.delimiter = delimiter

    def __iter__(self):
        """Streams relations from self.file_path decoded into unicode strings.

        Yields
        -------
        2-tuple (unicode, unicode)
            Relation from input file.
        """
        is_py2 = sys.version_info[0] < 3
        with smart_open(self.file_path) as file_obj:
            # csv.reader requires bytestring input in python2, unicode input in python3
            if is_py2:
                source = file_obj
            else:
                source = (raw_line.decode(self.encoding) for raw_line in file_obj)
            for fields in csv.reader(source, delimiter=self.delimiter):
                if is_py2:
                    fields = [field.decode(self.encoding) for field in fields]
                yield tuple(fields)
class NegativesBuffer(object):
    """Class to buffer and return negative samples."""

    def __init__(self, items):
        """Initialize instance from list or numpy array of samples.

        Parameters
        ----------
        items : list/numpy.array
            List or array containing negative samples.
        """
        self._items = items
        self._current_index = 0

    def num_items(self):
        """Returns number of items remaining in the buffer.

        Returns
        -------
        int
            Number of items in the buffer that haven't been consumed yet.
        """
        return len(self._items) - self._current_index

    def get_items(self, num_items):
        """Returns next `num_items` from buffer.

        Parameters
        ----------
        num_items : int
            Number of items to fetch.

        Returns
        -------
        numpy.array or list
            Slice containing `num_items` items from the original data.

        Notes
        -----
        No error is raised if less than `num_items` items are remaining,
        simply all the remaining items are returned.
        """
        begin = self._current_index
        self._current_index = begin + num_items
        # Slicing never raises on over-length requests; it just truncates.
        return self._items[begin:begin + num_items]
class ReconstructionEvaluation(object):
    """Evaluating reconstruction on given network for given embedding."""

    def __init__(self, file_path, embedding):
        """Initialize evaluation instance with tsv file containing relation pairs and embedding to be evaluated.

        Parameters
        ----------
        file_path : str
            Path to tsv file containing relation pairs.
        embedding : PoincareKeyedVectors instance
            Embedding to be evaluated.
        """
        items = set()
        embedding_vocab = embedding.vocab
        # Maps each node index to the set of indices it is positively related to.
        relations = defaultdict(set)
        with smart_open(file_path, 'r') as f:
            reader = csv.reader(f, delimiter='\t')
            for row in reader:
                assert len(row) == 2, 'Hypernym pair has more than two items'
                item_1_index = embedding_vocab[row[0]].index
                item_2_index = embedding_vocab[row[1]].index
                relations[item_1_index].add(item_2_index)
                items.update([item_1_index, item_2_index])
        self.items = items
        self.relations = relations
        self.embedding = embedding

    @staticmethod
    def get_positive_relation_ranks_and_avg_prec(all_distances, positive_relations):
        """
        Given a numpy array of all distances from an item and indices of its positive relations,
        compute ranks and Average Precision of positive relations.

        Parameters
        ----------
        all_distances : numpy.array (float)
            Array of all distances (floats) for a specific item.
        positive_relations : list
            List of indices of positive relations for the item.

        Returns
        -------
        tuple (list, float)
            The list contains ranks (int) of positive relations in the same order as `positive_relations`.
            The float is the Average Precision of the ranking.
            e.g. ([1, 2, 3, 20], 0.610).
        """
        positive_relation_distances = all_distances[positive_relations]
        negative_relation_distances = np.ma.array(all_distances, mask=False)
        # Mask out the positives so they are not counted as negatives below.
        negative_relation_distances.mask[positive_relations] = True
        # Compute how many negative relation distances are less than each positive relation distance, plus 1 for rank
        ranks = (negative_relation_distances < positive_relation_distances[:, np.newaxis]).sum(axis=1) + 1
        # Offset the i-th smallest negatives-only rank by the i positives that
        # precede it, giving each positive's rank in the full ranking.
        map_ranks = np.sort(ranks) + np.arange(len(ranks))
        avg_precision = ((np.arange(1, len(map_ranks) + 1) / np.sort(map_ranks)).mean())
        return list(ranks), avg_precision

    def evaluate(self, max_n=None):
        """Evaluate all defined metrics for the reconstruction task.

        Parameters
        ----------
        max_n : int or None
            Maximum number of positive relations to evaluate, all if `max_n` is None.

        Returns
        -------
        dict
            Contains (metric_name, metric_value) pairs.
            e.g. {'mean_rank': 50.3, 'MAP': 0.31}.
        """
        mean_rank, map_ = self.evaluate_mean_rank_and_map(max_n)
        return {'mean_rank': mean_rank, 'MAP': map_}

    def evaluate_mean_rank_and_map(self, max_n=None):
        """Evaluate mean rank and MAP for reconstruction.

        Parameters
        ----------
        max_n : int or None
            Maximum number of positive relations to evaluate, all if `max_n` is None.

        Returns
        -------
        tuple (float, float)
            Contains (mean_rank, MAP).
            e.g (50.3, 0.31)
        """
        ranks = []
        avg_precision_scores = []
        for i, item in enumerate(self.items, start=1):
            if item not in self.relations:
                continue
            item_relations = list(self.relations[item])
            item_term = self.embedding.index2word[item]
            item_distances = self.embedding.distances(item_term)
            positive_relation_ranks, avg_precision = \
                self.get_positive_relation_ranks_and_avg_prec(item_distances, item_relations)
            ranks += positive_relation_ranks
            avg_precision_scores.append(avg_precision)
            # NOTE(review): the bound is checked after processing, so up to
            # max_n + 1 items are actually evaluated — confirm intended.
            if max_n is not None and i > max_n:
                break
        return np.mean(ranks), np.mean(avg_precision_scores)
class LinkPredictionEvaluation(object):
    """Evaluating reconstruction on given network for given embedding."""

    def __init__(self, train_path, test_path, embedding):
        """Initialize evaluation instance with tsv file containing relation pairs and embedding to be evaluated.

        Parameters
        ----------
        train_path : str
            Path to tsv file containing relation pairs used for training.
        test_path : str
            Path to tsv file containing relation pairs to evaluate.
        embedding : PoincareKeyedVectors instance
            Embedding to be evaluated.
        """
        items = set()
        embedding_vocab = embedding.vocab
        # 'known' relations come from the training file, 'unknown' from the test
        # file; each maps a node index to the set of related node indices.
        relations = {'known': defaultdict(set), 'unknown': defaultdict(set)}
        data_files = {'known': train_path, 'unknown': test_path}
        for relation_type, data_file in data_files.items():
            with smart_open(data_file, 'r') as f:
                reader = csv.reader(f, delimiter='\t')
                for row in reader:
                    assert len(row) == 2, 'Hypernym pair has more than two items'
                    item_1_index = embedding_vocab[row[0]].index
                    item_2_index = embedding_vocab[row[1]].index
                    relations[relation_type][item_1_index].add(item_2_index)
                    items.update([item_1_index, item_2_index])
        self.items = items
        self.relations = relations
        self.embedding = embedding

    @staticmethod
    def get_unknown_relation_ranks_and_avg_prec(all_distances, unknown_relations, known_relations):
        """
        Given a numpy array of distances and indices of known and unknown positive relations,
        compute ranks and Average Precision of unknown positive relations.

        Parameters
        ----------
        all_distances : numpy.array (float)
            Array of all distances for a specific item.
        unknown_relations : list
            List of indices of unknown positive relations.
        known_relations : list
            List of indices of known positive relations.

        Returns
        -------
        tuple (list, float)
            The list contains ranks (int) of positive relations in the same order as `positive_relations`.
            The float is the Average Precision of the ranking.
            e.g. ([1, 2, 3, 20], 0.610).
        """
        unknown_relation_distances = all_distances[unknown_relations]
        negative_relation_distances = np.ma.array(all_distances, mask=False)
        # Both unknown (evaluated) and known (training) positives are excluded
        # from the negative set.
        negative_relation_distances.mask[unknown_relations] = True
        negative_relation_distances.mask[known_relations] = True
        # Compute how many negative relation distances are less than each unknown relation distance, plus 1 for rank
        ranks = (negative_relation_distances < unknown_relation_distances[:, np.newaxis]).sum(axis=1) + 1
        # Offset the i-th smallest negatives-only rank by the i positives that
        # precede it, giving each positive's rank in the full ranking.
        map_ranks = np.sort(ranks) + np.arange(len(ranks))
        avg_precision = ((np.arange(1, len(map_ranks) + 1) / np.sort(map_ranks)).mean())
        return list(ranks), avg_precision

    def evaluate(self, max_n=None):
        """Evaluate all defined metrics for the link prediction task.

        Parameters
        ----------
        max_n : int or None
            Maximum number of positive relations to evaluate, all if `max_n` is None.

        Returns
        -------
        dict
            Contains (metric_name, metric_value) pairs.
            e.g. {'mean_rank': 50.3, 'MAP': 0.31}.
        """
        mean_rank, map_ = self.evaluate_mean_rank_and_map(max_n)
        return {'mean_rank': mean_rank, 'MAP': map_}

    def evaluate_mean_rank_and_map(self, max_n=None):
        """Evaluate mean rank and MAP for link prediction.

        Parameters
        ----------
        max_n : int or None
            Maximum number of positive relations to evaluate, all if `max_n` is None.

        Returns
        -------
        tuple (float, float)
            Contains (mean_rank, MAP).
            e.g (50.3, 0.31).
        """
        ranks = []
        avg_precision_scores = []
        for i, item in enumerate(self.items, start=1):
            if item not in self.relations['unknown']:  # No positive relations to predict for this node
                continue
            unknown_relations = list(self.relations['unknown'][item])
            known_relations = list(self.relations['known'][item])
            item_term = self.embedding.index2word[item]
            item_distances = self.embedding.distances(item_term)
            unknown_relation_ranks, avg_precision = \
                self.get_unknown_relation_ranks_and_avg_prec(item_distances, unknown_relations, known_relations)
            ranks += unknown_relation_ranks
            avg_precision_scores.append(avg_precision)
            # NOTE(review): the bound is checked after processing, so up to
            # max_n + 1 items are actually evaluated — confirm intended.
            if max_n is not None and i > max_n:
                break
        return np.mean(ranks), np.mean(avg_precision_scores)
class LexicalEntailmentEvaluation(object):
"""Evaluating reconstruction on given network for any embedding."""
def __init__(self, filepath):
    """Initialize evaluation instance with HyperLex text file containing relation pairs.

    Parameters
    ----------
    filepath : str
        Path to HyperLex text file.
    """
    scores = {}
    with smart_open(filepath, 'r') as f:
        for record in csv.DictReader(f, delimiter=' '):
            pair = (record['WORD1'], record['WORD2'])
            scores[pair] = float(record['AVG_SCORE'])
    self.scores = scores
    self.alpha = 1000
def score_function(self, embedding, trie, term_1, term_2):
"""
Given an embedding and two terms, return the predicted score for them -
extent to which `term_1` is a type of `term_2`.
Parameters
----------
embedding : PoincareKeyedVectors instance
Embedding to use for computing predicted score.
trie : pygtrie.Trie instance
Trie to use for finding matching vocab terms for input terms.
term_1 : str
Input term.
term_2 : str
Input term.
Returns
-------
float
Predicted score (the extent to which `term_1` is a type of `term_2`).
"""
try:
word_1_terms = self.find_matching_terms(trie, term_1)
word_2_terms = self.find_matching_terms(trie, term_2)
except KeyError:
raise ValueError("No matching terms found for either %s or %s" % (term_1, term_2))
min_distance = np.inf
min_term_1, min_term_2 = None, None
for term_1 in word_1_terms:
for term_2 in word_2_terms:
distance = embedding.distance(term_1, term_2)
if distance < min_distance:
min_term_1, min_term_2 = term_1, term_2
min_distance = distance
assert min_term_1 is not None and min_term_2 is not None
vector_1, vector_2 = embedding.word_vec(min_term_1), embedding.word_vec(min_term_2)
norm_1, norm_2 = np.linalg.norm(vector_1), np.linalg.norm(vector_2)
return -1 * (1 + self.alpha * (norm_2 - norm_1)) * min_distance
@staticmethod
def find_matching_terms(trie, word):
"""
Given a trie and a word, find terms in the trie beginning with the word.
Parameters
----------
trie : pygtrie.Trie instance
Trie to use for finding matching terms.
word : str
Input word to use for prefix search.
Returns
-------
list (str)
List of matching terms.
"""
matches = trie.items('%s.' % word)
matching_terms = [''.join(key_chars) for key_chars, value in matches]
return matching_terms
@staticmethod
def create_vocab_trie(embedding):
"""Create trie with vocab terms of the given embedding to enable quick prefix searches.
Parameters
----------
embedding : PoincareKeyedVectors instance
Embedding for which trie is to be created.
Returns
-------
pygtrie.Trie instance
Trie containing vocab terms of the input embedding.
"""
try:
from pygtrie import Trie
except ImportError:
raise ImportError(
'pygtrie could not be imported, please install pygtrie in order to use LexicalEntailmentEvaluation')
vocab_trie = Trie()
for key in embedding.vocab:
vocab_trie[key] = True
return vocab_trie
def evaluate_spearman(self, embedding):
"""Evaluate spearman scores for lexical entailment for given embedding.
Parameters
----------
embedding : PoincareKeyedVectors instance
Embedding for which evaluation is to be done.
Returns
-------
float
Spearman correlation score for the task for input embedding.
"""
predicted_scores = []
expected_scores = []
skipped = 0
count = 0
vocab_trie = self.create_vocab_trie(embedding)
for (word_1, word_2), expected_score in self.scores.items():
try:
predicted_score = self.score_function(embedding, vocab_trie, word_1, word_2)
except ValueError:
skipped += 1
continue
count += 1
predicted_scores.append(predicted_score)
expected_scores.append(expected_score)
print('Skipped pairs: %d out of %d' % (skipped, len(self.scores)))
spearman = spearmanr(expected_scores, predicted_scores)
return spearman.correlation
| 66,285 | 37.718458 | 120 | py |
poincare_glove | poincare_glove-master/gensim/models/logentropy_model.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module allows simple Bag of Words (BoW) represented corpus to be transformed into log entropy space.
It implements Log Entropy Model that produces entropy-weighted logarithmic term frequency representation.
Empirical study by Lee et al. 2015 [1]_ suggests log entropy-weighted model yields better results among other forms of
representation.
References
----------
.. [1] Lee et al. 2005. An Empirical Evaluation of Models of Text Document Similarity.
https://escholarship.org/uc/item/48g155nq
"""
import logging
import math
from gensim import interfaces, matutils, utils
logger = logging.getLogger(__name__)
class LogEntropyModel(interfaces.TransformationABC):
    """Objects of this class realize the transformation between word-document co-occurrence matrix (int)
    into a locally/globally weighted matrix (positive floats).

    This is done by a log entropy normalization, optionally normalizing the resulting documents to unit length.
    The following formulas explain how to compute the log entropy weight for term :math:`i` in document :math:`j`:

    .. math::

        local\_weight_{i,j} = log(frequency_{i,j} + 1)

        P_{i,j} = \\frac{frequency_{i,j}}{\sum_j frequency_{i,j}}

        global\_weight_i = 1 + \\frac{\sum_j P_{i,j} * log(P_{i,j})}{log(number\_of\_documents + 1)}

        final\_weight_{i,j} = local\_weight_{i,j} * global\_weight_i

    Examples
    --------
    >>> from gensim.models import LogEntropyModel
    >>> from gensim.test.utils import common_texts
    >>> from gensim.corpora import Dictionary
    >>>
    >>> dct = Dictionary(common_texts)  # fit dictionary
    >>> corpus = [dct.doc2bow(row) for row in common_texts]  # convert to BoW format
    >>> model = LogEntropyModel(corpus)  # fit model
    >>> vector = model[corpus[1]]  # apply model to document

    """

    def __init__(self, corpus, normalize=True):
        """
        Parameters
        ----------
        corpus : iterable of iterable of (int, int)
            Input corpus in BoW format.
        normalize : bool, optional
            If True, the resulted log entropy weighted vector will be normalized to length of 1,
            If False - do nothing.
        """
        self.normalize = normalize
        # Corpus statistics, filled in by initialize().
        self.n_docs = 0
        self.n_words = 0
        # Per-term global weight: term_id -> global_weight_i from the class formula.
        self.entr = {}
        if corpus is not None:
            self.initialize(corpus)

    def __str__(self):
        return "LogEntropyModel(n_docs=%s, n_words=%s)" % (self.n_docs, self.n_words)

    def initialize(self, corpus):
        """Calculates the global weighting for all terms in a given corpus and transforms the simple
        count representation into the log entropy normalized space.

        Parameters
        ----------
        corpus : iterable of iterable of (int, int)
            Corpus is BoW format

        Notes
        -----
        Iterates over `corpus` twice (once for global frequencies, once for
        entropies), so a one-shot generator is rejected with a ValueError.
        """
        logger.info("calculating counts")
        # First pass: global frequency of every term and total number of non-zeros.
        glob_freq = {}
        glob_num_words, doc_no = 0, -1
        for doc_no, bow in enumerate(corpus):
            if doc_no % 10000 == 0:
                logger.info("PROGRESS: processing document #%i", doc_no)
            glob_num_words += len(bow)
            for term_id, term_count in bow:
                glob_freq[term_id] = glob_freq.get(term_id, 0) + term_count
        # keep some stats about the training corpus
        self.n_docs = doc_no + 1
        self.n_words = glob_num_words
        # and finally compute the global weights
        logger.info(
            "calculating global log entropy weights for %i documents and %i features (%i matrix non-zeros)",
            self.n_docs, len(glob_freq), self.n_words
        )
        logger.debug('iterating over corpus')
        # Second pass: accumulate sum_j P_{i,j} * log(P_{i,j}) per term.
        for doc_no2, bow in enumerate(corpus):
            for key, freq in bow:
                p = (float(freq) / glob_freq[key]) * math.log(float(freq) / glob_freq[key])
                self.entr[key] = self.entr.get(key, 0.0) + p
        # If the second pass saw fewer documents than the first, `corpus` was a
        # one-shot generator that is now exhausted — the entropies are wrong.
        if doc_no2 != doc_no:
            raise ValueError("LogEntropyModel doesn't support generators as training data")
        logger.debug('iterating over keys')
        # Turn the accumulated entropies into the final global weights.
        for key in self.entr:
            self.entr[key] = 1 + self.entr[key] / math.log(self.n_docs + 1)

    def __getitem__(self, bow):
        """Get log entropy representation of the input vector and/or corpus.

        Parameters
        ----------
        bow : list of (int, int)
            Document in BoW format.

        Returns
        -------
        list of (int, float)
            Log-entropy vector for passed `bow`.
        """
        # if the input vector is in fact a corpus, return a transformed corpus
        is_corpus, bow = utils.is_corpus(bow)
        if is_corpus:
            return self._apply(bow)
        # unknown (new) terms will be given zero weight (NOT infinity/huge)
        vector = [
            (term_id, math.log(tf + 1) * self.entr.get(term_id))
            for term_id, tf in bow
            if term_id in self.entr
        ]
        if self.normalize:
            vector = matutils.unitvec(vector)
        return vector
| 5,158 | 33.858108 | 118 | py |
poincare_glove | poincare_glove-master/gensim/models/atmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <radimrehurek@seznam.cz>
# Copyright (C) 2016 Olavur Mortensen <olavurmortensen@gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Author-topic model in Python.
This module trains the author-topic model on documents and corresponding author-document
dictionaries. The training is online and is constant in memory w.r.t. the number of
documents. The model is *not* constant in memory w.r.t. the number of authors.
The model can be updated with additional documents after training has been completed. It is
also possible to continue training on the existing data.
The model is closely related to Latent Dirichlet Allocation. The AuthorTopicModel class
inherits the LdaModel class, and its usage is thus similar.
Distributed computation and multiprocessing is not implemented at the moment, but may be
coming in the future.
The model was introduced by Rosen-Zvi and co-authors in 2004
(https://mimno.infosci.cornell.edu/info6150/readings/398.pdf).
A tutorial can be found at
https://github.com/RaRe-Technologies/gensim/tree/develop/docs/notebooks/atmodel_tutorial.ipynb.
"""
# TODO: this class inherits LdaModel and overwrites some methods. There is some code
# duplication still, and a refactor could be made to avoid this. Comments with "TODOs"
# are included in the code where this is the case, for example in the log_perplexity
# and do_estep methods.
import logging
import numpy as np # for arrays, array broadcasting etc.
from copy import deepcopy
from shutil import copyfile
from os.path import isfile
from os import remove
from gensim import utils
from gensim.models import LdaModel
from gensim.models.ldamodel import LdaState
from gensim.matutils import dirichlet_expectation
from gensim.corpora import MmCorpus
from itertools import chain
from scipy.special import gammaln # gamma function utils
from six.moves import xrange
import six
logger = logging.getLogger('gensim.models.atmodel')
class AuthorTopicState(LdaState):
    """State container for :class:`AuthorTopicModel`.

    NOTE: distributed mode is not available yet in the author-topic model.
    This class is kept separate so that, when distributed computation is
    implemented, state objects can be sent over the network — keep them
    lean to reduce traffic.
    """

    def __init__(self, eta, lambda_shape, gamma_shape):
        self.eta = eta
        self.numdocs = 0
        # LdaState-compatible dtype marker.
        self.dtype = np.float64
        # Sufficient statistics for the topic-word distributions.
        self.sstats = np.zeros(lambda_shape)
        # Variational author-topic parameters.
        self.gamma = np.zeros(gamma_shape)
def construct_doc2author(corpus, author2doc):
    """Make a mapping from document IDs to author IDs.

    Parameters
    ----------
    corpus : iterable
        Corpus of documents; only enumerated to produce the document IDs.
    author2doc : dict
        Mapping of author name to an iterable of the document IDs (ints)
        that author contributed to.

    Returns
    -------
    dict
        Maps each document ID to the list of its authors, in `author2doc`
        iteration order; documents with no authors map to an empty list.
    """
    # Convert each author's document list to a set once, so the per-document
    # membership test below is O(1) instead of O(len(doc_ids)) per author.
    author_doc_sets = {a: set(a_doc_ids) for a, a_doc_ids in author2doc.items()}
    doc2author = {}
    for d, _ in enumerate(corpus):
        doc2author[d] = [a for a, a_doc_ids in author_doc_sets.items() if d in a_doc_ids]
    return doc2author
def construct_author2doc(doc2author):
    """Make a mapping from author IDs to document IDs.

    Parameters
    ----------
    doc2author : dict
        Mapping of document ID (int) to the list of its author names.

    Returns
    -------
    dict
        Maps each author to the list of documents they appear in, with
        document IDs appended in `doc2author` iteration order.
    """
    # Single pass over doc2author instead of the original quadratic
    # "for every author, rescan every document" approach.
    author2doc = {}
    for d, a_ids in doc2author.items():
        # dict.fromkeys de-duplicates repeated mentions of an author within
        # one document, so each document is appended at most once per author
        # (matching the original membership-test semantics).
        for a in dict.fromkeys(a_ids):
            author2doc.setdefault(a, []).append(d)
    return author2doc
class AuthorTopicModel(LdaModel):
"""
The constructor estimates the author-topic model parameters based
on a training corpus:
>>> model = AuthorTopicModel(corpus, num_topics=10, author2doc=author2doc, id2word=id2word)
The model can be updated (trained) with new documents via
>>> model.update(other_corpus, other_author2doc)
Model persistency is achieved through its `load`/`save` methods.
"""
    def __init__(self, corpus=None, num_topics=100, id2word=None, author2doc=None, doc2author=None,
                 chunksize=2000, passes=1, iterations=50, decay=0.5, offset=1.0,
                 alpha='symmetric', eta='symmetric', update_every=1, eval_every=10,
                 gamma_threshold=0.001, serialized=False, serialization_path=None,
                 minimum_probability=0.01, random_state=None):
        """
        If the iterable corpus and one of author2doc/doc2author dictionaries are given,
        start training straight away. If not given, the model is left untrained
        (presumably because you want to call the `update` method manually).

        `num_topics` is the number of requested latent topics to be extracted from
        the training corpus.

        `id2word` is a mapping from word ids (integers) to words (strings). It is
        used to determine the vocabulary size, as well as for debugging and topic
        printing.

        `author2doc` is a dictionary where the keys are the names of authors, and the
        values are lists of documents that the author contributes to.

        `doc2author` is a dictionary where the keys are document IDs (indexes to corpus)
        and the values are lists of author names. I.e. this is the reverse mapping of
        `author2doc`. Only one of the two, `author2doc` and `doc2author` have to be
        supplied.

        `passes` is the number of times the model makes a pass over the entire training
        data.

        `iterations` is the maximum number of times the model loops over each document
        (M-step). The iterations stop when convergence is reached.

        `chunksize` controls the size of the mini-batches.

        `alpha` and `eta` are hyperparameters that affect sparsity of the author-topic
        (theta) and topic-word (lambda) distributions. Both default to a symmetric
        1.0/num_topics prior.

        `alpha` can be set to an explicit array = prior of your choice. It also
        support special values of 'asymmetric' and 'auto': the former uses a fixed
        normalized asymmetric 1.0/topicno prior, the latter learns an asymmetric
        prior directly from your data.

        `eta` can be a scalar for a symmetric prior over topic/word
        distributions, or a vector of shape num_words, which can be used to
        impose (user defined) asymmetric priors over the word distribution.
        It also supports the special value 'auto', which learns an asymmetric
        prior over words directly from your data. `eta` can also be a matrix
        of shape num_topics x num_words, which can be used to impose
        asymmetric priors over the word distribution on a per-topic basis
        (can not be learned from data).

        Calculate and log perplexity estimate from the latest mini-batch every
        `eval_every` model updates. Set to None to disable perplexity estimation.

        `decay` and `offset` parameters are the same as Kappa and Tau_0 in
        Hoffman et al, respectively. `decay` controls how quickly old documents are
        forgotten, while `offset` down-weights early iterations.

        `minimum_probability` controls filtering the topics returned for a document (bow).

        `random_state` can be an integer or a numpy.random.RandomState object. Set the
        state of the random number generator inside the author-topic model, to ensure
        reproducibility of your experiments, for example.

        `serialized` indicates whether the input corpora to the model are simple
        in-memory lists (`serialized = False`) or saved to the hard-drive
        (`serialized = True`). Note that this behaviour is quite different from
        other Gensim models. If your data is too large to fit in to memory, use
        this functionality. Note that calling `AuthorTopicModel.update` with new
        data may be cumbersome as it requires all the existing data to be
        re-serialized.

        `serialization_path` must be set to a filepath, if `serialized = True` is
        used. Use, for example, `serialization_path = /tmp/serialized_model.mm` or use your
        working directory by setting `serialization_path = serialized_model.mm`. An existing
        file *cannot* be overwritten; either delete the old file or choose a different
        name.

        Example:

        >>> model = AuthorTopicModel(corpus, num_topics=100, author2doc=author2doc, id2word=id2word)  # train model
        >>> model.update(corpus2)  # update the author-topic model with additional documents

        >>> model = AuthorTopicModel(
        ...     corpus, num_topics=50, author2doc=author2doc, id2word=id2word, alpha='auto', eval_every=5)
        """
        # NOTE: this doesn't call constructor of a base class, but duplicates most of this code
        # so we have to set dtype to float64 default here
        self.dtype = np.float64

        # NOTE: as distributed version of this model is not implemented, "distributed" is set to false. Some of the
        # infrastructure to implement a distributed author-topic model is already in place,
        # such as the AuthorTopicState.
        distributed = False
        self.dispatcher = None
        self.numworkers = 1

        self.id2word = id2word
        if corpus is None and self.id2word is None:
            raise ValueError(
                "at least one of corpus/id2word must be specified, to establish input space dimensionality"
            )

        # Derive the vocabulary size from id2word (or build id2word from the corpus).
        if self.id2word is None:
            logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
            self.id2word = utils.dict_from_corpus(corpus)
            self.num_terms = len(self.id2word)
        elif len(self.id2word) > 0:
            self.num_terms = 1 + max(self.id2word.keys())
        else:
            self.num_terms = 0

        if self.num_terms == 0:
            raise ValueError("cannot compute the author-topic model over an empty collection (no terms)")
        logger.info('Vocabulary consists of %d words.', self.num_terms)

        # Author mappings start empty; they are filled in (and extended) by update().
        self.author2doc = {}
        self.doc2author = {}

        self.distributed = distributed
        self.num_topics = num_topics
        self.num_authors = 0
        self.chunksize = chunksize
        self.decay = decay
        self.offset = offset
        self.minimum_probability = minimum_probability
        self.num_updates = 0
        self.total_docs = 0

        self.passes = passes
        self.update_every = update_every
        self.eval_every = eval_every

        self.author2id = {}
        self.id2author = {}

        self.serialized = serialized
        if serialized and not serialization_path:
            raise ValueError(
                "If serialized corpora are used, a the path to a folder "
                "where the corpus should be saved must be provided (serialized_path)."
            )
        if serialized and serialization_path:
            # Refuse to clobber an existing serialized corpus.
            assert not isfile(serialization_path), \
                "A file already exists at the serialization_path path; " \
                "choose a different serialization_path, or delete the file."
        self.serialization_path = serialization_path

        # Initialize an empty self.corpus.
        self.init_empty_corpus()

        self.alpha, self.optimize_alpha = self.init_dir_prior(alpha, 'alpha')

        assert self.alpha.shape == (self.num_topics,), \
            "Invalid alpha shape. Got shape %s, but expected (%d, )" % (str(self.alpha.shape), self.num_topics)

        if isinstance(eta, six.string_types):
            if eta == 'asymmetric':
                raise ValueError("The 'asymmetric' option cannot be used for eta")

        self.eta, self.optimize_eta = self.init_dir_prior(eta, 'eta')

        self.random_state = utils.get_random_state(random_state)

        assert (self.eta.shape == (self.num_terms,) or self.eta.shape == (self.num_topics, self.num_terms)), (
            "Invalid eta shape. Got shape %s, but expected (%d, 1) or (%d, %d)" %
            (str(self.eta.shape), self.num_terms, self.num_topics, self.num_terms)
        )

        # VB constants
        self.iterations = iterations
        self.gamma_threshold = gamma_threshold

        # Initialize the variational distributions q(beta|lambda) and q(theta|gamma)
        self.state = AuthorTopicState(self.eta, (self.num_topics, self.num_terms), (self.num_authors, self.num_topics))
        self.state.sstats = self.random_state.gamma(100., 1. / 100., (self.num_topics, self.num_terms))
        self.expElogbeta = np.exp(dirichlet_expectation(self.state.sstats))

        # if a training corpus was provided, start estimating the model right away
        if corpus is not None and (author2doc is not None or doc2author is not None):
            use_numpy = self.dispatcher is not None
            self.update(corpus, author2doc, doc2author, chunks_as_numpy=use_numpy)
def __str__(self):
return "AuthorTopicModel(num_terms=%s, num_topics=%s, num_authors=%s, decay=%s, chunksize=%s)" % \
(self.num_terms, self.num_topics, self.num_authors, self.decay, self.chunksize)
def init_empty_corpus(self):
"""
Initialize an empty corpus. If the corpora are to be treated as lists, simply
initialize an empty list. If serialization is used, initialize an empty corpus
of the class `gensim.corpora.MmCorpus`.
"""
if self.serialized:
# Initialize the corpus as a serialized empty list.
# This corpus will be extended in self.update.
MmCorpus.serialize(self.serialization_path, []) # Serialize empty corpus.
self.corpus = MmCorpus(self.serialization_path) # Store serialized corpus object in self.corpus.
else:
# All input corpora are assumed to just be lists.
self.corpus = []
    def extend_corpus(self, corpus):
        """
        Add new documents in `corpus` to `self.corpus`. If serialization is used,
        then the entire corpus (`self.corpus`) is re-serialized and the new documents
        are added in the process. If serialization is not used, the corpus, as a list
        of documents, is simply extended.

        NOTE: in the serialized branch the statement order is load-bearing:
        copy to .tmp -> repoint old corpus at .tmp -> serialize combined stream
        to the original path -> reopen -> remove .tmp.
        """
        if self.serialized:
            # Re-serialize the entire corpus while appending the new documents.
            if isinstance(corpus, MmCorpus):
                # Check that we are not attempting to overwrite the serialized corpus.
                assert self.corpus.input != corpus.input, \
                    'Input corpus cannot have the same file path as the model corpus (serialization_path).'
            corpus_chain = chain(self.corpus, corpus)  # A generator with the old and new documents.
            # Make a temporary copy of the file where the corpus is serialized.
            copyfile(self.serialization_path, self.serialization_path + '.tmp')
            self.corpus.input = self.serialization_path + '.tmp'  # Point the old corpus at this temporary file.
            # Re-serialize the old corpus, and extend it with the new corpus.
            MmCorpus.serialize(self.serialization_path, corpus_chain)
            self.corpus = MmCorpus(self.serialization_path)  # Store the new serialized corpus object in self.corpus.
            remove(self.serialization_path + '.tmp')  # Remove the temporary file again.
        else:
            # self.corpus and corpus are just lists, just extend the list.
            # First check that corpus is actually a list.
            assert isinstance(corpus, list), "If serialized == False, all input corpora must be lists."
            self.corpus.extend(corpus)
def compute_phinorm(self, expElogthetad, expElogbetad):
"""Efficiently computes the normalizing factor in phi."""
expElogtheta_sum = expElogthetad.sum(axis=0)
phinorm = expElogtheta_sum.dot(expElogbetad) + 1e-100
return phinorm
    def inference(self, chunk, author2doc, doc2author, rhot, collect_sstats=False, chunk_doc_idx=None):
        """
        Given a chunk of sparse document vectors, update gamma (parameters
        controlling the topic weights) for each author corresponding to the
        documents in the chunk.

        The whole input chunk of document is assumed to fit in RAM; chunking of
        a large corpus must be done earlier in the pipeline.

        If `collect_sstats` is True, also collect sufficient statistics needed
        to update the model's topic-word distributions, and return a 2-tuple
        `(gamma_chunk, sstats)`. Otherwise, return `(gamma_chunk, None)`.
        `gamma_cunk` is of shape `len(chunk_authors) x self.num_topics`, where
        `chunk_authors` is the number of authors in the documents in the
        current chunk.

        Avoids computing the `phi` variational parameter directly using the
        optimization presented in **Lee, Seung: Algorithms for non-negative matrix factorization, NIPS 2001**.
        """
        try:
            len(chunk)
        except TypeError:
            # convert iterators/generators to plain list, so we have len() etc.
            chunk = list(chunk)
        if len(chunk) > 1:
            logger.debug("performing inference on a chunk of %i documents", len(chunk))

        # Initialize the variational distribution q(theta|gamma) for the chunk
        if collect_sstats:
            sstats = np.zeros_like(self.expElogbeta)
        else:
            sstats = None
        converged = 0

        # Stack all the computed gammas into this output array.
        gamma_chunk = np.zeros((0, self.num_topics))

        # Now, for each document d update gamma and phi w.r.t. all authors in those documents.
        for d, doc in enumerate(chunk):
            # Map chunk-local index to global document ID, so doc2author can be consulted.
            if chunk_doc_idx is not None:
                doc_no = chunk_doc_idx[d]
            else:
                doc_no = d
            # Get the IDs and counts of all the words in the current document.
            # TODO: this is duplication of code in LdaModel. Refactor.
            if doc and not isinstance(doc[0][0], six.integer_types + (np.integer,)):
                # make sure the term IDs are ints, otherwise np will get upset
                ids = [int(idx) for idx, _ in doc]
            else:
                ids = [idx for idx, _ in doc]
            cts = np.array([cnt for _, cnt in doc])

            # Get all authors in current document, and convert the author names to integer IDs.
            authors_d = [self.author2id[a] for a in self.doc2author[doc_no]]

            gammad = self.state.gamma[authors_d, :]  # gamma of document d before update.
            tilde_gamma = gammad.copy()  # gamma that will be updated.

            # Compute the expectation of the log of the Dirichlet parameters theta and beta.
            Elogthetad = dirichlet_expectation(tilde_gamma)
            expElogthetad = np.exp(Elogthetad)
            expElogbetad = self.expElogbeta[:, ids]

            # Compute the normalizing constant of phi for the current document.
            phinorm = self.compute_phinorm(expElogthetad, expElogbetad)

            # Iterate between gamma and phi until convergence
            for _ in xrange(self.iterations):
                lastgamma = tilde_gamma.copy()

                # Update gamma.
                # phi is computed implicitly below,
                for ai, a in enumerate(authors_d):
                    # Each author's update is scaled by the number of documents that author has.
                    tilde_gamma[ai, :] = self.alpha + len(self.author2doc[self.id2author[a]])\
                        * expElogthetad[ai, :] * np.dot(cts / phinorm, expElogbetad.T)

                # Update gamma.
                # Interpolation between document d's "local" gamma (tilde_gamma),
                # and "global" gamma (gammad).
                tilde_gamma = (1 - rhot) * gammad + rhot * tilde_gamma

                # Update Elogtheta and Elogbeta, since gamma and lambda have been updated.
                Elogthetad = dirichlet_expectation(tilde_gamma)
                expElogthetad = np.exp(Elogthetad)

                # Update the normalizing constant in phi.
                phinorm = self.compute_phinorm(expElogthetad, expElogbetad)

                # Check for convergence.
                # Criterion is mean change in "local" gamma.
                meanchange_gamma = np.mean(abs(tilde_gamma - lastgamma))
                gamma_condition = meanchange_gamma < self.gamma_threshold
                if gamma_condition:
                    converged += 1
                    break
            # End of iterations loop.

            # Store the updated gammas in the model state.
            self.state.gamma[authors_d, :] = tilde_gamma

            # Stack the new gammas into the output array.
            gamma_chunk = np.vstack([gamma_chunk, tilde_gamma])

            if collect_sstats:
                # Contribution of document d to the expected sufficient
                # statistics for the M step.
                expElogtheta_sum_a = expElogthetad.sum(axis=0)
                sstats[:, ids] += np.outer(expElogtheta_sum_a.T, cts / phinorm)

        if len(chunk) > 1:
            logger.debug(
                "%i/%i documents converged within %i iterations",
                converged, len(chunk), self.iterations
            )

        if collect_sstats:
            # This step finishes computing the sufficient statistics for the
            # M step, so that
            # sstats[k, w] = \sum_d n_{dw} * \sum_a phi_{dwak}
            # = \sum_d n_{dw} * exp{Elogtheta_{ak} + Elogbeta_{kw}} / phinorm_{dw}.
            sstats *= self.expElogbeta
        return gamma_chunk, sstats
def do_estep(self, chunk, author2doc, doc2author, rhot, state=None, chunk_doc_idx=None):
"""
Perform inference on a chunk of documents, and accumulate the collected
sufficient statistics in `state` (or `self.state` if None).
"""
# TODO: this method is somewhat similar to the one in LdaModel. Refactor if possible.
if state is None:
state = self.state
gamma, sstats = self.inference(
chunk, author2doc, doc2author, rhot,
collect_sstats=True, chunk_doc_idx=chunk_doc_idx
)
state.sstats += sstats
state.numdocs += len(chunk)
return gamma
def log_perplexity(self, chunk, chunk_doc_idx=None, total_docs=None):
"""
Calculate and return per-word likelihood bound, using the `chunk` of
documents as evaluation corpus. Also output the calculated statistics. incl.
perplexity=2^(-bound), to log at INFO level.
"""
# TODO: This method is very similar to the one in LdaModel. Refactor.
if total_docs is None:
total_docs = len(chunk)
corpus_words = sum(cnt for document in chunk for _, cnt in document)
subsample_ratio = 1.0 * total_docs / len(chunk)
perwordbound = self.bound(chunk, chunk_doc_idx, subsample_ratio=subsample_ratio) / \
(subsample_ratio * corpus_words)
logger.info(
"%.3f per-word bound, %.1f perplexity estimate based on a corpus of %i documents with %i words",
perwordbound, np.exp2(-perwordbound), len(chunk), corpus_words
)
return perwordbound
def update(self, corpus=None, author2doc=None, doc2author=None, chunksize=None, decay=None, offset=None,
passes=None, update_every=None, eval_every=None, iterations=None,
gamma_threshold=None, chunks_as_numpy=False):
"""
Train the model with new documents, by EM-iterating over `corpus` until
the topics converge (or until the maximum number of allowed iterations
is reached). `corpus` must be an iterable (repeatable stream of documents),
This update also supports updating an already trained model (`self`)
with new documents from `corpus`; the two models are then merged in
proportion to the number of old vs. new documents. This feature is still
experimental for non-stationary input streams.
For stationary input (no topic drift in new documents), on the other hand,
this equals the online update of Hoffman et al. and is guaranteed to
converge for any `decay` in (0.5, 1.0>. Additionally, for smaller
`corpus` sizes, an increasing `offset` may be beneficial (see
Table 1 in Hoffman et al.)
If update is called with authors that already exist in the model, it will
resume training on not only new documents for that author, but also the
previously seen documents. This is necessary for those authors' topic
distributions to converge.
Every time `update(corpus, author2doc)` is called, the new documents are
to appended to all the previously seen documents, and author2doc is
combined with the previously seen authors.
To resume training on all the data seen by the model, simply call
`update()`.
It is not possible to add new authors to existing documents, as all
documents in `corpus` are assumed to be new documents.
Args:
corpus (gensim corpus): The corpus with which the author-topic model should be updated.
author2doc (dict): author to document mapping corresponding to indexes in input
corpus.
doc2author (dict): document to author mapping corresponding to indexes in input
corpus.
chunks_as_numpy (bool): Whether each chunk passed to `.inference` should be a np
array of not. np can in some settings turn the term IDs
into floats, these will be converted back into integers in
inference, which incurs a performance hit. For distributed
computing it may be desirable to keep the chunks as np
arrays.
For other parameter settings, see :class:`AuthorTopicModel` constructor.
"""
# use parameters given in constructor, unless user explicitly overrode them
if decay is None:
decay = self.decay
if offset is None:
offset = self.offset
if passes is None:
passes = self.passes
if update_every is None:
update_every = self.update_every
if eval_every is None:
eval_every = self.eval_every
if iterations is None:
iterations = self.iterations
if gamma_threshold is None:
gamma_threshold = self.gamma_threshold
# TODO: if deepcopy is not used here, something goes wrong. When unit tests are run (specifically "testPasses"),
# the process simply gets killed.
author2doc = deepcopy(author2doc)
doc2author = deepcopy(doc2author)
# TODO: it is not possible to add new authors to an existing document (all input documents are treated
# as completely new documents). Perhaps this functionality could be implemented.
# If it's absolutely necessary, the user can delete the documents that have new authors, and call update
# on them with the new and old authors.
if corpus is None:
# Just keep training on the already available data.
# Assumes self.update() has been called before with input documents and corresponding authors.
assert self.total_docs > 0, 'update() was called with no documents to train on.'
train_corpus_idx = [d for d in xrange(self.total_docs)]
num_input_authors = len(self.author2doc)
else:
if doc2author is None and author2doc is None:
raise ValueError(
'at least one of author2doc/doc2author must be specified, to establish input space dimensionality'
)
# If either doc2author or author2doc is missing, construct them from the other.
if doc2author is None:
doc2author = construct_doc2author(corpus, author2doc)
elif author2doc is None:
author2doc = construct_author2doc(doc2author)
# Number of authors that need to be updated.
num_input_authors = len(author2doc)
try:
len_input_corpus = len(corpus)
except TypeError:
logger.warning("input corpus stream has no len(); counting documents")
len_input_corpus = sum(1 for _ in corpus)
if len_input_corpus == 0:
logger.warning("AuthorTopicModel.update() called with an empty corpus")
return
self.total_docs += len_input_corpus
# Add new documents in corpus to self.corpus.
self.extend_corpus(corpus)
# Obtain a list of new authors.
new_authors = []
# Sorting the author names makes the model more reproducible.
for a in sorted(author2doc.keys()):
if not self.author2doc.get(a):
new_authors.append(a)
num_new_authors = len(new_authors)
# Add new authors do author2id/id2author dictionaries.
for a_id, a_name in enumerate(new_authors):
self.author2id[a_name] = a_id + self.num_authors
self.id2author[a_id + self.num_authors] = a_name
# Increment the number of total authors seen.
self.num_authors += num_new_authors
# Initialize the variational distributions q(theta|gamma)
gamma_new = self.random_state.gamma(100., 1. / 100., (num_new_authors, self.num_topics))
self.state.gamma = np.vstack([self.state.gamma, gamma_new])
# Combine author2doc with self.author2doc.
# First, increment the document IDs by the number of previously seen documents.
for a, doc_ids in author2doc.items():
doc_ids = [d + self.total_docs - len_input_corpus for d in doc_ids]
# For all authors in the input corpus, add the new documents.
for a, doc_ids in author2doc.items():
if self.author2doc.get(a):
# This is not a new author, append new documents.
self.author2doc[a].extend(doc_ids)
else:
# This is a new author, create index.
self.author2doc[a] = doc_ids
# Add all new documents to self.doc2author.
for d, a_list in doc2author.items():
self.doc2author[d] = a_list
# Train on all documents of authors in input_corpus.
train_corpus_idx = []
for _ in author2doc.keys(): # For all authors in input corpus.
for doc_ids in self.author2doc.values(): # For all documents in total corpus.
train_corpus_idx.extend(doc_ids)
# Make the list of training documents unique.
train_corpus_idx = list(set(train_corpus_idx))
# train_corpus_idx is only a list of indexes, so "len" is valid.
lencorpus = len(train_corpus_idx)
if chunksize is None:
chunksize = min(lencorpus, self.chunksize)
self.state.numdocs += lencorpus
if update_every:
updatetype = "online"
updateafter = min(lencorpus, update_every * self.numworkers * chunksize)
else:
updatetype = "batch"
updateafter = lencorpus
evalafter = min(lencorpus, (eval_every or 0) * self.numworkers * chunksize)
updates_per_pass = max(1, lencorpus / updateafter)
logger.info(
"running %s author-topic training, %s topics, %s authors, "
"%i passes over the supplied corpus of %i documents, updating model once "
"every %i documents, evaluating perplexity every %i documents, "
"iterating %ix with a convergence threshold of %f",
updatetype, self.num_topics, num_input_authors, passes, lencorpus, updateafter,
evalafter, iterations, gamma_threshold
)
if updates_per_pass * passes < 10:
logger.warning(
"too few updates, training might not converge; "
"consider increasing the number of passes or iterations to improve accuracy"
)
# rho is the "speed" of updating; TODO try other fncs
# pass_ + num_updates handles increasing the starting t for each pass,
# while allowing it to "reset" on the first pass of each update
def rho():
return pow(offset + pass_ + (self.num_updates / chunksize), -decay)
for pass_ in xrange(passes):
if self.dispatcher:
logger.info('initializing %s workers', self.numworkers)
self.dispatcher.reset(self.state)
else:
# gamma is not needed in "other", thus its shape is (0, 0).
other = AuthorTopicState(self.eta, self.state.sstats.shape, (0, 0))
dirty = False
reallen = 0
for chunk_no, chunk_doc_idx in enumerate(
utils.grouper(train_corpus_idx, chunksize, as_numpy=chunks_as_numpy)):
chunk = [self.corpus[d] for d in chunk_doc_idx]
reallen += len(chunk) # keep track of how many documents we've processed so far
if eval_every and ((reallen == lencorpus) or ((chunk_no + 1) % (eval_every * self.numworkers) == 0)):
# log_perplexity requires the indexes of the documents being evaluated, to know what authors
# correspond to the documents.
self.log_perplexity(chunk, chunk_doc_idx, total_docs=lencorpus)
if self.dispatcher:
# add the chunk to dispatcher's job queue, so workers can munch on it
logger.info(
"PROGRESS: pass %i, dispatching documents up to #%i/%i",
pass_, chunk_no * chunksize + len(chunk), lencorpus
)
# this will eventually block until some jobs finish, because the queue has a small finite length
self.dispatcher.putjob(chunk)
else:
logger.info(
"PROGRESS: pass %i, at document #%i/%i",
pass_, chunk_no * chunksize + len(chunk), lencorpus
)
# do_estep requires the indexes of the documents being trained on, to know what authors
# correspond to the documents.
gammat = self.do_estep(chunk, self.author2doc, self.doc2author, rho(), other, chunk_doc_idx)
if self.optimize_alpha:
self.update_alpha(gammat, rho())
dirty = True
del chunk
# perform an M step. determine when based on update_every, don't do this after every chunk
if update_every and (chunk_no + 1) % (update_every * self.numworkers) == 0:
if self.dispatcher:
# distributed mode: wait for all workers to finish
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
other = self.dispatcher.getstate()
self.do_mstep(rho(), other, pass_ > 0)
del other # frees up memory
if self.dispatcher:
logger.info('initializing workers')
self.dispatcher.reset(self.state)
else:
other = AuthorTopicState(self.eta, self.state.sstats.shape, (0, 0))
dirty = False
# endfor single corpus iteration
if reallen != lencorpus:
raise RuntimeError("input corpus size changed during training (don't use generators as input)")
if dirty:
# finish any remaining updates
if self.dispatcher:
# distributed mode: wait for all workers to finish
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
other = self.dispatcher.getstate()
self.do_mstep(rho(), other, pass_ > 0)
del other
    def bound(self, chunk, chunk_doc_idx=None, subsample_ratio=1.0, author2doc=None, doc2author=None):
        """
        Estimate the variational bound of documents from `corpus`:
        E_q[log p(corpus)] - E_q[log q(corpus)]
        There are basically two use cases of this method:
        1. `chunk` is a subset of the training corpus, and `chunk_doc_idx` is provided,
        indicating the indexes of the documents in the training corpus.
        2. `chunk` is a test set (held-out data), and author2doc and doc2author
        corresponding to this test set are provided. There must not be any new authors
        passed to this method. `chunk_doc_idx` is not needed in this case.
        To obtain the per-word bound, compute:
        >>> corpus_words = sum(cnt for document in corpus for _, cnt in document)
        >>> model.bound(corpus, author2doc=author2doc, doc2author=doc2author) / corpus_words
        """
        # TODO: enable evaluation of documents with new authors. One could, for example, make it
        # possible to pass a list of documents to self.inference with no author dictionaries,
        # assuming all the documents correspond to one (unseen) author, learn the author's
        # gamma, and return gamma (without adding it to self.state.gamma). Of course,
        # collect_sstats should be set to false, so that the model is not updated w.r.t. these
        # new documents.
        # Expected log of the topic-word distributions under the variational Dirichlet.
        _lambda = self.state.get_lambda()
        Elogbeta = dirichlet_expectation(_lambda)
        expElogbeta = np.exp(Elogbeta)
        gamma = self.state.gamma
        if author2doc is None and doc2author is None:
            # Evaluating on training documents (chunk of self.corpus).
            author2doc = self.author2doc
            doc2author = self.doc2author
            if not chunk_doc_idx:
                # If author2doc and doc2author are not provided, chunk is assumed to be a subset of
                # self.corpus, and chunk_doc_idx is thus required.
                # NOTE(review): truthiness test — an empty list and None are treated alike here;
                # a numpy array passed as chunk_doc_idx would raise. Callers appear to pass lists.
                raise ValueError(
                    'Either author dictionaries or chunk_doc_idx must be provided. '
                    'Consult documentation of bound method.'
                )
        elif author2doc is not None and doc2author is not None:
            # Training on held-out documents (documents not seen during training).
            # All authors in dictionaries must still be seen during training.
            for a in author2doc.keys():
                if not self.author2doc.get(a):
                    raise ValueError('bound cannot be called with authors not seen during training.')
            if chunk_doc_idx:
                raise ValueError(
                    'Either author dictionaries or chunk_doc_idx must be provided, not both. '
                    'Consult documentation of bound method.'
                )
        else:
            raise ValueError(
                'Either both author2doc and doc2author should be provided, or neither. '
                'Consult documentation of bound method.'
            )
        Elogtheta = dirichlet_expectation(gamma)
        expElogtheta = np.exp(Elogtheta)
        # word_score: E[log p(docs)] term; theta_score: author-topic Dirichlet term.
        word_score = 0.0
        theta_score = 0.0
        for d, doc in enumerate(chunk):
            if chunk_doc_idx:
                doc_no = chunk_doc_idx[d]
            else:
                doc_no = d
            # Get all authors in current document, and convert the author names to integer IDs.
            # NOTE(review): this reads self.doc2author (the training mapping) even in the
            # held-out case where a separate doc2author argument was supplied, with doc_no = d
            # indexing into the chunk — confirm this is the intended lookup for use case 2.
            authors_d = [self.author2id[a] for a in self.doc2author[doc_no]]
            ids = np.array([id for id, _ in doc])  # Word IDs in doc.
            cts = np.array([cnt for _, cnt in doc])  # Word counts.
            if d % self.chunksize == 0:
                logger.debug("bound: at document #%i in chunk", d)
            # Computing the bound requires summing over expElogtheta[a, k] * expElogbeta[k, v], which
            # is the same computation as in normalizing phi.
            phinorm = self.compute_phinorm(expElogtheta[authors_d, :], expElogbeta[:, ids])
            word_score += np.log(1.0 / len(authors_d)) * sum(cts) + cts.dot(np.log(phinorm))
        # Compensate likelihood for when `chunk` above is only a sample of the whole corpus. This ensures
        # that the likelihood is always roughly on the same scale.
        word_score *= subsample_ratio
        # E[log p(theta | alpha) - log q(theta | gamma)]
        for a in author2doc.keys():
            a = self.author2id[a]
            theta_score += np.sum((self.alpha - gamma[a, :]) * Elogtheta[a, :])
            theta_score += np.sum(gammaln(gamma[a, :]) - gammaln(self.alpha))
            theta_score += gammaln(np.sum(self.alpha)) - gammaln(np.sum(gamma[a, :]))
        # theta_score is rescaled in a similar fashion.
        # TODO: treat this in a more general way, similar to how it is done with word_score.
        theta_score *= self.num_authors / len(author2doc)
        # E[log p(beta | eta) - log q (beta | lambda)]
        beta_score = 0.0
        beta_score += np.sum((self.eta - _lambda) * Elogbeta)
        beta_score += np.sum(gammaln(_lambda) - gammaln(self.eta))
        sum_eta = np.sum(self.eta)
        beta_score += np.sum(gammaln(sum_eta) - gammaln(np.sum(_lambda, 1)))
        total_score = word_score + theta_score + beta_score
        return total_score
def get_document_topics(self, word_id, minimum_probability=None):
"""
This method overwrites `LdaModel.get_document_topics` and simply raises an
exception. `get_document_topics` is not valid for the author-topic model,
use `get_author_topics` instead.
"""
raise NotImplementedError(
'Method "get_document_topics" is not valid for the author-topic model. '
'Use the "get_author_topics" method.'
)
def get_author_topics(self, author_name, minimum_probability=None):
"""
Return topic distribution the given author, as a list of
(topic_id, topic_probability) 2-tuples.
Ignore topics with very low probability (below `minimum_probability`).
Obtaining topic probabilities of each word, as in LDA (via `per_word_topics`),
is not supported.
"""
author_id = self.author2id[author_name]
if minimum_probability is None:
minimum_probability = self.minimum_probability
minimum_probability = max(minimum_probability, 1e-8) # never allow zero values in sparse output
topic_dist = self.state.gamma[author_id, :] / sum(self.state.gamma[author_id, :])
author_topics = [
(topicid, topicvalue) for topicid, topicvalue in enumerate(topic_dist)
if topicvalue >= minimum_probability
]
return author_topics
def __getitem__(self, author_names, eps=None):
"""
Return topic distribution for input author as a list of
(topic_id, topic_probabiity) 2-tuples.
Ingores topics with probaility less than `eps`.
Do not call this method directly, instead use `model[author_names]`.
"""
if isinstance(author_names, list):
items = []
for a in author_names:
items.append(self.get_author_topics(a, minimum_probability=eps))
else:
items = self.get_author_topics(author_names, minimum_probability=eps)
return items
| 43,800 | 44.34265 | 120 | py |
poincare_glove | poincare_glove-master/gensim/models/basemodel.py | class BaseTopicModel(object):
def print_topic(self, topicno, topn=10):
"""Get a single topic as a formatted string.
Parameters
----------
topicno : int
Topic id.
topn : int
Number of words from topic that will be used.
Returns
-------
str
String representation of topic, like '-0.340 * "category" + 0.298 * "$M$" + 0.183 * "algebra" + ... '.
"""
return ' + '.join(['%.3f*"%s"' % (v, k) for k, v in self.show_topic(topicno, topn)])
def print_topics(self, num_topics=20, num_words=10):
"""Get the most significant topics (alias for `show_topics()` method).
Parameters
----------
num_topics : int, optional
The number of topics to be selected, if -1 - all topics will be in result (ordered by significance).
num_words : int, optional
The number of words to be included per topics (ordered by significance).
Returns
-------
list of (int, list of (str, float))
Sequence with (topic_id, [(word, value), ... ]).
"""
return self.show_topics(num_topics=num_topics, num_words=num_words, log=True)
def get_topics(self):
"""Get words X topics matrix.
Returns
--------
numpy.ndarray:
The term topic matrix learned during inference, shape (`num_topics`, `vocabulary_size`).
Raises
------
NotImplementedError
"""
raise NotImplementedError
| 1,563 | 29.076923 | 114 | py |
poincare_glove | poincare_glove-master/gensim/models/hdpmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Jonathan Esterhazy <jonathan.esterhazy at gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
#
# HDP inference code is adapted from the onlinehdp.py script by
# Chong Wang (chongw at cs.princeton.edu).
# http://www.cs.princeton.edu/~chongw/software/onlinehdp.tar.gz
#
"""
This module encapsulates functionality for the online Hierarchical Dirichlet Process algorithm.
It allows both model estimation from a training corpus and inference of topic
distribution on new, unseen documents.
The core estimation code is directly adapted from the `onlinehdp.py` script
by C. Wang see
**Wang, Paisley, Blei: Online Variational Inference for the Hierarchical Dirichlet
Process, JMLR (2011).**
http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf
The algorithm:
* is **streamed**: training documents come in sequentially, no random access,
* runs in **constant memory** w.r.t. the number of documents: size of the
training corpus does not affect memory footprint
"""
from __future__ import with_statement
import logging
import time
import warnings
import numpy as np
from scipy.special import gammaln, psi # gamma function utils
from six.moves import xrange
from gensim import interfaces, utils, matutils
from gensim.matutils import dirichlet_expectation
from gensim.models import basemodel, ldamodel
logger = logging.getLogger(__name__)
meanchangethresh = 0.00001
rhot_bound = 0.0
def expect_log_sticks(sticks):
    """
    For stick-breaking hdp, return E[log(sticks)] given the variational
    Beta parameters `sticks` (a 2 x (T-1) array: first row the "a"
    parameters, second row the "b" parameters).
    """
    digamma_total = psi(np.sum(sticks, 0))
    e_log_w = psi(sticks[0]) - digamma_total
    e_log_1mw = psi(sticks[1]) - digamma_total
    size = len(sticks[0]) + 1
    expectation = np.zeros(size)
    expectation[:size - 1] = e_log_w
    # Each stick's log-expectation accumulates the log(1 - w) of all sticks before it.
    expectation[1:] += np.cumsum(e_log_1mw)
    return expectation
def lda_e_step(doc_word_ids, doc_word_counts, alpha, beta, max_iter=100):
    """
    Run the LDA variational E-step for a single document: iterate the
    gamma update until the mean absolute change drops below
    `meanchangethresh` (or `max_iter` iterations), then return the tuple
    (likelihood, gamma).
    """
    gamma = np.ones(len(alpha))
    expElogtheta = np.exp(dirichlet_expectation(gamma))
    betad = beta[:, doc_word_ids]
    phinorm = np.dot(expElogtheta, betad) + 1e-100
    counts = np.array(doc_word_counts)
    iteration = 0
    while iteration < max_iter:
        previous_gamma = gamma
        gamma = alpha + expElogtheta * np.dot(counts / phinorm, betad.T)
        Elogtheta = dirichlet_expectation(gamma)
        expElogtheta = np.exp(Elogtheta)
        phinorm = np.dot(expElogtheta, betad) + 1e-100
        if np.mean(abs(gamma - previous_gamma)) < meanchangethresh:
            break
        iteration += 1
    # Variational lower bound on the document log-likelihood.
    likelihood = np.sum(counts * np.log(phinorm))
    likelihood += np.sum((alpha - gamma) * Elogtheta)
    likelihood += np.sum(gammaln(gamma) - gammaln(alpha))
    likelihood += gammaln(np.sum(alpha)) - gammaln(np.sum(gamma))
    return (likelihood, gamma)
class SuffStats(object):
    """Per-chunk sufficient statistics accumulated during the online HDP updates."""

    def __init__(self, T, Wt, Dt):
        # T: top-level truncation, Wt: distinct words in the chunk, Dt: chunk size.
        self.m_chunksize = Dt
        self.m_var_sticks_ss = np.zeros((T,))
        self.m_var_beta_ss = np.zeros((T, Wt))

    def set_zero(self):
        """Zero out both accumulators in place, keeping their shapes."""
        self.m_var_sticks_ss[:] = 0.0
        self.m_var_beta_ss[:, :] = 0.0
class HdpModel(interfaces.TransformationABC, basemodel.BaseTopicModel):
    """
    The constructor estimates Hierarchical Dirichlet Process model parameters based
    on a training corpus:
    >>> hdp = HdpModel(corpus, id2word)
    You can infer topic distributions on new, unseen documents with
    >>> doc_hdp = hdp[doc_bow]
    Inference on new documents is based on the approximately LDA-equivalent topics.
    To print 20 topics with top 10 most probable words
    >>> hdp.print_topics(num_topics=20, num_words=10)
    Model persistency is achieved through its `load`/`save` methods.
    """
    def __init__(self, corpus, id2word, max_chunks=None, max_time=None,
                 chunksize=256, kappa=1.0, tau=64.0, K=15, T=150, alpha=1,
                 gamma=1, eta=0.01, scale=1.0, var_converge=0.0001,
                 outputdir=None, random_state=None):
        """
        `gamma`: first level concentration
        `alpha`: second level concentration
        `eta`: the topic Dirichlet
        `T`: top level truncation level
        `K`: second level truncation level
        `kappa`: learning rate
        `tau`: slow down parameter
        `max_time`: stop training after this many seconds
        `max_chunks`: stop after having processed this many chunks (wrap around
        corpus beginning in another corpus pass, if there are not enough chunks
        in the corpus)
        """
        self.corpus = corpus
        self.id2word = id2word
        self.chunksize = chunksize
        self.max_chunks = max_chunks
        self.max_time = max_time
        self.outputdir = outputdir
        self.random_state = utils.get_random_state(random_state)
        # LDA-equivalent parameters; filled in by update() once training finishes.
        self.lda_alpha = None
        self.lda_beta = None
        self.m_W = len(id2word)  # vocabulary size
        self.m_D = 0  # number of training documents
        if corpus:
            self.m_D = len(corpus)
        self.m_T = T
        self.m_K = K
        self.m_alpha = alpha
        self.m_gamma = gamma
        # Variational Beta parameters of the top-level sticks, shape (2, T - 1).
        self.m_var_sticks = np.zeros((2, T - 1))
        self.m_var_sticks[0] = 1.0
        self.m_var_sticks[1] = range(T - 1, 0, -1)
        self.m_varphi_ss = np.zeros(T)
        # Randomly initialized variational Dirichlet parameters of the topics.
        self.m_lambda = self.random_state.gamma(1.0, 1.0, (T, self.m_W)) * self.m_D * 100 / (T * self.m_W) - eta
        self.m_eta = eta
        self.m_Elogbeta = dirichlet_expectation(self.m_eta + self.m_lambda)
        self.m_tau = tau + 1
        self.m_kappa = kappa
        self.m_scale = scale
        self.m_updatect = 0
        self.m_status_up_to_date = True
        self.m_num_docs_processed = 0
        # Bookkeeping for the lazy per-word lambda updates
        # (see update_chunk / update_expectations).
        self.m_timestamp = np.zeros(self.m_W, dtype=int)
        self.m_r = [0]
        self.m_lambda_sum = np.sum(self.m_lambda, axis=1)
        self.m_var_converge = var_converge
        if self.outputdir:
            self.save_options()
        # if a training corpus was provided, start estimating the model right away
        if corpus is not None:
            self.update(corpus)
    def inference(self, chunk):
        """Infer per-document topic distributions (gamma) for `chunk` using the
        LDA-equivalent parameters; requires the model to have been trained first."""
        if self.lda_alpha is None or self.lda_beta is None:
            raise RuntimeError("model must be trained to perform inference")
        chunk = list(chunk)
        if len(chunk) > 1:
            logger.debug("performing inference on a chunk of %i documents", len(chunk))
        gamma = np.zeros((len(chunk), self.lda_beta.shape[0]))
        for d, doc in enumerate(chunk):
            if not doc:  # leave gamma at zero for empty documents
                continue
            ids, counts = zip(*doc)
            _, gammad = lda_e_step(ids, counts, self.lda_alpha, self.lda_beta)
            gamma[d, :] = gammad
        return gamma
    def __getitem__(self, bow, eps=0.01):
        """Return the topic distribution of document `bow` as a list of
        (topic_id, topic_probability) 2-tuples, dropping topics below `eps`;
        a whole corpus is transformed lazily via `self._apply`."""
        is_corpus, corpus = utils.is_corpus(bow)
        if is_corpus:
            return self._apply(corpus)
        gamma = self.inference([bow])[0]
        topic_dist = gamma / sum(gamma) if sum(gamma) != 0 else []
        return [(topicid, topicvalue) for topicid, topicvalue in enumerate(topic_dist) if topicvalue >= eps]
    def update(self, corpus):
        """Train the model on `corpus`, chunk by chunk, looping over the corpus
        until `update_finished` signals a stopping condition (chunk/time limits,
        or one full pass when no limits are set)."""
        save_freq = max(1, int(10000 / self.chunksize))  # save every 10k docs, roughly
        chunks_processed = 0
        # NOTE(review): time.clock() was removed in Python 3.8; this module still
        # targets older Pythons (six/xrange) — a port would use time.perf_counter().
        start_time = time.clock()
        while True:
            for chunk in utils.grouper(corpus, self.chunksize):
                self.update_chunk(chunk)
                self.m_num_docs_processed += len(chunk)
                chunks_processed += 1
                if self.update_finished(start_time, chunks_processed, self.m_num_docs_processed):
                    self.update_expectations()
                    alpha, beta = self.hdp_to_lda()
                    self.lda_alpha = alpha
                    self.lda_beta = beta
                    self.print_topics(20)
                    if self.outputdir:
                        self.save_topics()
                    return
                elif chunks_processed % save_freq == 0:
                    self.update_expectations()
                    # self.save_topics(self.m_num_docs_processed)
                    self.print_topics(20)
                    logger.info('PROGRESS: finished document %i of %i', self.m_num_docs_processed, self.m_D)
    def update_finished(self, start_time, chunks_processed, docs_processed):
        """Return True once any configured stopping condition is met."""
        return (
            # chunk limit reached
            (self.max_chunks and chunks_processed == self.max_chunks) or
            # time limit reached
            (self.max_time and time.clock() - start_time > self.max_time) or
            # no limits and whole corpus has been processed once
            (not self.max_chunks and not self.max_time and docs_processed >= self.m_D))
    def update_chunk(self, chunk, update=True, opt_o=True):
        """Run variational inference on one chunk of documents, accumulate the
        sufficient statistics, and (if `update`) apply the lambda update.
        Returns (score, count): summed variational score and word count."""
        # Find the unique words in this chunk...
        unique_words = dict()
        word_list = []
        for doc in chunk:
            for word_id, _ in doc:
                if word_id not in unique_words:
                    unique_words[word_id] = len(unique_words)
                    word_list.append(word_id)
        wt = len(word_list)  # length of words in these documents
        # ...and do the lazy updates on the necessary columns of lambda
        rw = np.array([self.m_r[t] for t in self.m_timestamp[word_list]])
        self.m_lambda[:, word_list] *= np.exp(self.m_r[-1] - rw)
        self.m_Elogbeta[:, word_list] = \
            psi(self.m_eta + self.m_lambda[:, word_list]) - \
            psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])
        ss = SuffStats(self.m_T, wt, len(chunk))
        Elogsticks_1st = expect_log_sticks(self.m_var_sticks)  # global sticks
        # run variational inference on some new docs
        score = 0.0
        count = 0
        for doc in chunk:
            if len(doc) > 0:
                doc_word_ids, doc_word_counts = zip(*doc)
                doc_score = self.doc_e_step(
                    ss, Elogsticks_1st,
                    unique_words, doc_word_ids,
                    doc_word_counts, self.m_var_converge
                )
                count += sum(doc_word_counts)
                score += doc_score
        if update:
            self.update_lambda(ss, word_list, opt_o)
        return score, count
    def doc_e_step(self, ss, Elogsticks_1st, unique_words, doc_word_ids, doc_word_counts, var_converge):
        """
        e step for a single doc
        """
        chunkids = [unique_words[id] for id in doc_word_ids]
        Elogbeta_doc = self.m_Elogbeta[:, doc_word_ids]
        # very similar to the hdp equations
        v = np.zeros((2, self.m_K - 1))
        v[0] = 1.0
        v[1] = self.m_alpha
        # back to the uniform
        phi = np.ones((len(doc_word_ids), self.m_K)) * 1.0 / self.m_K
        likelihood = 0.0
        old_likelihood = -1e200
        converge = 1.0
        iter = 0
        max_iter = 100
        # not yet support second level optimization yet, to be done in the future
        while iter < max_iter and (converge < 0.0 or converge > var_converge):
            # update variational parameters
            # var_phi
            if iter < 3:
                var_phi = np.dot(phi.T, (Elogbeta_doc * doc_word_counts).T)
                (log_var_phi, log_norm) = matutils.ret_log_normalize_vec(var_phi)
                var_phi = np.exp(log_var_phi)
            else:
                var_phi = np.dot(phi.T, (Elogbeta_doc * doc_word_counts).T) + Elogsticks_1st
                (log_var_phi, log_norm) = matutils.ret_log_normalize_vec(var_phi)
                var_phi = np.exp(log_var_phi)
            # phi
            if iter < 3:
                phi = np.dot(var_phi, Elogbeta_doc).T
                (log_phi, log_norm) = matutils.ret_log_normalize_vec(phi)
                phi = np.exp(log_phi)
            else:
                # Elogsticks_2nd is assigned at the end of iteration 0 (the iter < 3
                # branch runs first), so it is defined by the time this branch executes.
                phi = np.dot(var_phi, Elogbeta_doc).T + Elogsticks_2nd  # noqa:F821
                (log_phi, log_norm) = matutils.ret_log_normalize_vec(phi)
                phi = np.exp(log_phi)
            # v
            phi_all = phi * np.array(doc_word_counts)[:, np.newaxis]
            v[0] = 1.0 + np.sum(phi_all[:, :self.m_K - 1], 0)
            phi_cum = np.flipud(np.sum(phi_all[:, 1:], 0))
            v[1] = self.m_alpha + np.flipud(np.cumsum(phi_cum))
            Elogsticks_2nd = expect_log_sticks(v)
            likelihood = 0.0
            # compute likelihood
            # var_phi part/ C in john's notation
            likelihood += np.sum((Elogsticks_1st - log_var_phi) * var_phi)
            # v part/ v in john's notation, john's beta is alpha here
            log_alpha = np.log(self.m_alpha)
            likelihood += (self.m_K - 1) * log_alpha
            dig_sum = psi(np.sum(v, 0))
            likelihood += np.sum((np.array([1.0, self.m_alpha])[:, np.newaxis] - v) * (psi(v) - dig_sum))
            likelihood -= np.sum(gammaln(np.sum(v, 0))) - np.sum(gammaln(v))
            # Z part
            likelihood += np.sum((Elogsticks_2nd - log_phi) * phi)
            # X part, the data part
            likelihood += np.sum(phi.T * np.dot(var_phi, Elogbeta_doc * doc_word_counts))
            converge = (likelihood - old_likelihood) / abs(old_likelihood)
            old_likelihood = likelihood
            if converge < -0.000001:
                logger.warning('likelihood is decreasing!')
            iter += 1
        # update the suff_stat ss
        # this time it only contains information from one doc
        ss.m_var_sticks_ss += np.sum(var_phi, 0)
        ss.m_var_beta_ss[:, chunkids] += np.dot(var_phi.T, phi.T * doc_word_counts)
        return likelihood
    def update_lambda(self, sstats, word_list, opt_o):
        """Apply one online (natural-gradient) update to lambda, the lambda sums
        and the top-level sticks, using the chunk statistics in `sstats`."""
        self.m_status_up_to_date = False
        # rhot will be between 0 and 1, and says how much to weight
        # the information we got from this mini-chunk.
        rhot = self.m_scale * pow(self.m_tau + self.m_updatect, -self.m_kappa)
        if rhot < rhot_bound:
            rhot = rhot_bound
        self.m_rhot = rhot
        # Update appropriate columns of lambda based on documents.
        self.m_lambda[:, word_list] = \
            self.m_lambda[:, word_list] * (1 - rhot) + rhot * self.m_D * sstats.m_var_beta_ss / sstats.m_chunksize
        self.m_lambda_sum = (1 - rhot) * self.m_lambda_sum + \
            rhot * self.m_D * np.sum(sstats.m_var_beta_ss, axis=1) / sstats.m_chunksize
        self.m_updatect += 1
        self.m_timestamp[word_list] = self.m_updatect
        self.m_r.append(self.m_r[-1] + np.log(1 - rhot))
        self.m_varphi_ss = \
            (1.0 - rhot) * self.m_varphi_ss + rhot * sstats.m_var_sticks_ss * self.m_D / sstats.m_chunksize
        if opt_o:
            self.optimal_ordering()
        # update top level sticks
        self.m_var_sticks[0] = self.m_varphi_ss[:self.m_T - 1] + 1.0
        var_phi_sum = np.flipud(self.m_varphi_ss[1:])
        self.m_var_sticks[1] = np.flipud(np.cumsum(var_phi_sum)) + self.m_gamma
    def optimal_ordering(self):
        """
        ordering the topics
        """
        idx = matutils.argsort(self.m_lambda_sum, reverse=True)
        self.m_varphi_ss = self.m_varphi_ss[idx]
        self.m_lambda = self.m_lambda[idx, :]
        self.m_lambda_sum = self.m_lambda_sum[idx]
        self.m_Elogbeta = self.m_Elogbeta[idx, :]
    def update_expectations(self):
        """
        Since we're doing lazy updates on lambda, at any given moment
        the current state of lambda may not be accurate. This function
        updates all of the elements of lambda and Elogbeta
        so that if (for example) we want to print out the
        topics we've learned we'll get the correct behavior.
        """
        for w in xrange(self.m_W):
            self.m_lambda[:, w] *= np.exp(self.m_r[-1] - self.m_r[self.m_timestamp[w]])
        self.m_Elogbeta = \
            psi(self.m_eta + self.m_lambda) - psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])
        self.m_timestamp[:] = self.m_updatect
        self.m_status_up_to_date = True
    def show_topic(self, topic_id, topn=20, log=False, formatted=False, num_words=None):
        """
        Print the `num_words` most probable words for topic `topic_id`.
        Set `formatted=True` to return the topics as a list of strings, or
        `False` as lists of (weight, word) pairs.
        """
        if num_words is not None:  # deprecated num_words is used
            warnings.warn(
                "The parameter `num_words` is deprecated, will be removed in 4.0.0, please use `topn` instead."
            )
            topn = num_words
        if not self.m_status_up_to_date:
            self.update_expectations()
        betas = self.m_lambda + self.m_eta
        hdp_formatter = HdpTopicFormatter(self.id2word, betas)
        return hdp_formatter.show_topic(topic_id, topn, log, formatted)
    def get_topics(self):
        """
        Returns:
            np.ndarray: `num_topics` x `vocabulary_size` array of floats which represents
            the term topic matrix learned during inference.
        """
        topics = self.m_lambda + self.m_eta
        return topics / topics.sum(axis=1)[:, None]
    def show_topics(self, num_topics=20, num_words=20, log=False, formatted=True):
        """
        Print the `num_words` most probable words for `num_topics` number of topics.
        Set `num_topics=-1` to print all topics.
        Set `formatted=True` to return the topics as a list of strings, or
        `False` as lists of (weight, word) pairs.
        """
        if not self.m_status_up_to_date:
            self.update_expectations()
        betas = self.m_lambda + self.m_eta
        hdp_formatter = HdpTopicFormatter(self.id2word, betas)
        return hdp_formatter.show_topics(num_topics, num_words, log, formatted)
    def save_topics(self, doc_count=None):
        """legacy method; use `self.save()` instead"""
        if not self.outputdir:
            logger.error("cannot store topics without having specified an output directory")
            # NOTE(review): unlike save_options, there is no early return here, so the
            # save below proceeds with outputdir=None — confirm whether that is intended.
        if doc_count is None:
            fname = 'final'
        else:
            fname = 'doc-%i' % doc_count
        fname = '%s/%s.topics' % (self.outputdir, fname)
        logger.info("saving topics to %s", fname)
        betas = self.m_lambda + self.m_eta
        np.savetxt(fname, betas)
    def save_options(self):
        """legacy method; use `self.save()` instead"""
        if not self.outputdir:
            logger.error("cannot store options without having specified an output directory")
            return
        fname = '%s/options.dat' % self.outputdir
        # NOTE(review): file is opened in binary mode but written with str objects;
        # this works on Python 2 only — a Python 3 port would need mode 'w' or bytes.
        with utils.smart_open(fname, 'wb') as fout:
            fout.write('tau: %s\n' % str(self.m_tau - 1))
            fout.write('chunksize: %s\n' % str(self.chunksize))
            fout.write('var_converge: %s\n' % str(self.m_var_converge))
            fout.write('D: %s\n' % str(self.m_D))
            fout.write('K: %s\n' % str(self.m_K))
            fout.write('T: %s\n' % str(self.m_T))
            fout.write('W: %s\n' % str(self.m_W))
            fout.write('alpha: %s\n' % str(self.m_alpha))
            fout.write('kappa: %s\n' % str(self.m_kappa))
            fout.write('eta: %s\n' % str(self.m_eta))
            fout.write('gamma: %s\n' % str(self.m_gamma))
    def hdp_to_lda(self):
        """
        Compute the LDA almost equivalent HDP.
        Returns (alpha, beta): stick-breaking expectations as the LDA alpha,
        and the normalized topic-word distributions as the LDA beta.
        """
        # alpha
        sticks = self.m_var_sticks[0] / (self.m_var_sticks[0] + self.m_var_sticks[1])
        alpha = np.zeros(self.m_T)
        left = 1.0
        for i in xrange(0, self.m_T - 1):
            alpha[i] = sticks[i] * left
            left = left - alpha[i]
        alpha[self.m_T - 1] = left
        alpha *= self.m_alpha
        # beta
        beta = (self.m_lambda + self.m_eta) / (self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])
        return alpha, beta
    def suggested_lda_model(self):
        """
        Returns closest corresponding ldamodel object corresponding to current hdp model.
        The hdp_to_lda method only returns corresponding alpha, beta values, and this method returns a trained ldamodel.
        The num_topics is m_T (default is 150) so as to preserve the matrice shapes when we assign alpha and beta.
        """
        alpha, beta = self.hdp_to_lda()
        ldam = ldamodel.LdaModel(
            num_topics=self.m_T, alpha=alpha, id2word=self.id2word, random_state=self.random_state, dtype=np.float64
        )
        ldam.expElogbeta[:] = beta
        return ldam
    def evaluate_test_corpus(self, corpus):
        """Score a held-out `corpus` under the LDA-equivalent parameters and
        return the summed per-document likelihood (logs a per-word average)."""
        logger.info('TEST: evaluating test corpus')
        if self.lda_alpha is None or self.lda_beta is None:
            self.lda_alpha, self.lda_beta = self.hdp_to_lda()
        score = 0.0
        total_words = 0
        for i, doc in enumerate(corpus):
            if len(doc) > 0:
                doc_word_ids, doc_word_counts = zip(*doc)
                likelihood, gamma = lda_e_step(doc_word_ids, doc_word_counts, self.lda_alpha, self.lda_beta)
                theta = gamma / np.sum(gamma)
                lda_betad = self.lda_beta[:, doc_word_ids]
                log_predicts = np.log(np.dot(theta, lda_betad))
                doc_score = sum(log_predicts) / len(doc)
                logger.info('TEST: %6d %.5f', i, doc_score)
                score += likelihood
                total_words += sum(doc_word_counts)
        logger.info(
            "TEST: average score: %.5f, total score: %.5f, test docs: %d",
            score / total_words, score, len(corpus)
        )
        return score
class HdpTopicFormatter(object):
    """Helper that sorts topics by total mass and renders them, either from an
    in-memory topic matrix (`topic_data`) or from a file (`topic_file`)."""
    (STYLE_GENSIM, STYLE_PRETTY) = (1, 2)
    def __init__(self, dictionary=None, topic_data=None, topic_file=None, style=None):
        """`dictionary` maps word ids to words; exactly one of `topic_data`
        (an array) or `topic_file` (a path readable by np.loadtxt) must be given.
        `style` selects the output format (defaults to STYLE_GENSIM)."""
        if dictionary is None:
            raise ValueError('no dictionary!')
        if topic_data is not None:
            topics = topic_data
        elif topic_file is not None:
            topics = np.loadtxt('%s' % topic_file)
        else:
            raise ValueError('no topic data!')
        # sort topics
        topics_sums = np.sum(topics, axis=1)
        idx = matutils.argsort(topics_sums, reverse=True)
        self.data = topics[idx]
        self.dictionary = dictionary
        if style is None:
            style = self.STYLE_GENSIM
        self.style = style
    def print_topics(self, num_topics=10, num_words=10):
        """Alias for show_topics() with logging enabled."""
        return self.show_topics(num_topics, num_words, True)
    def show_topics(self, num_topics=10, num_words=10, log=False, formatted=True):
        """Return `num_topics` topics (-1 for all), each with its `num_words`
        heaviest words; formatted strings when `formatted`, else (id, terms) pairs."""
        shown = []
        if num_topics < 0:
            num_topics = len(self.data)
        num_topics = min(num_topics, len(self.data))
        for k in xrange(num_topics):
            lambdak = list(self.data[k, :])
            lambdak = lambdak / sum(lambdak)
            temp = zip(lambdak, xrange(len(lambdak)))
            temp = sorted(temp, key=lambda x: x[0], reverse=True)
            topic_terms = self.show_topic_terms(temp, num_words)
            if formatted:
                topic = self.format_topic(k, topic_terms)
                # assuming we only output formatted topics
                if log:
                    logger.info(topic)
            else:
                topic = (k, topic_terms)
            shown.append(topic)
        return shown
    def print_topic(self, topic_id, topn=None, num_words=None):
        # NOTE(review): topn defaults to None here (which slices [:None], i.e. all
        # words), unlike show_topic's default of 20 — confirm this is intended.
        if num_words is not None:  # deprecated num_words is used
            warnings.warn(
                "The parameter `num_words` is deprecated, will be removed in 4.0.0, please use `topn` instead."
            )
            topn = num_words
        return self.show_topic(topic_id, topn, formatted=True)
    def show_topic(self, topic_id, topn=20, log=False, formatted=False, num_words=None,):
        """Return the `topn` heaviest (word, weight) terms of `topic_id`
        (a formatted string instead when `formatted`)."""
        if num_words is not None:  # deprecated num_words is used
            warnings.warn(
                "The parameter `num_words` is deprecated, will be removed in 4.0.0, please use `topn` instead."
            )
            topn = num_words
        lambdak = list(self.data[topic_id, :])
        lambdak = lambdak / sum(lambdak)
        temp = zip(lambdak, xrange(len(lambdak)))
        temp = sorted(temp, key=lambda x: x[0], reverse=True)
        topic_terms = self.show_topic_terms(temp, topn)
        if formatted:
            topic = self.format_topic(topic_id, topic_terms)
            # assuming we only output formatted topics
            if log:
                logger.info(topic)
        else:
            topic = (topic_id, topic_terms)
        # we only return the topic_terms
        return topic[1]
    def show_topic_terms(self, topic_data, num_words):
        """Map the first `num_words` (weight, word_id) pairs to (word, weight)."""
        return [(self.dictionary[wid], weight) for (weight, wid) in topic_data[:num_words]]
    def format_topic(self, topic_id, topic_terms):
        """Render one topic either gensim-style ('w*word + ...') or pretty-printed."""
        if self.STYLE_GENSIM == self.style:
            fmt = ' + '.join(['%.3f*%s' % (weight, word) for (word, weight) in topic_terms])
        else:
            fmt = '\n'.join(['    %20s    %.8f' % (word, weight) for (word, weight) in topic_terms])
        fmt = (topic_id, fmt)
        return fmt
| 25,094 | 36.067947 | 120 | py |
poincare_glove | poincare_glove-master/gensim/models/fasttext.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Authors: Shiva Manne <manneshiva@gmail.com>, Chinmaya Pancholi <chinmayapancholi13@gmail.com>
# Copyright (C) 2018 RaRe Technologies s.r.o.
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Learn word representations via fasttext's "skip-gram and CBOW models", using either
hierarchical softmax or negative sampling [1]_.
Notes
-----
There are more ways to get word vectors in Gensim than just FastText.
See wrappers for VarEmbed and WordRank or Word2Vec
This module allows training a word embedding from a training corpus with the additional ability
to obtain word vectors for out-of-vocabulary words.
For a tutorial on gensim's native fasttext, refer to the notebook -- [2]_
**Make sure you have a C compiler before installing gensim, to use optimized (compiled) fasttext training**
.. [1] P. Bojanowski, E. Grave, A. Joulin, T. Mikolov
Enriching Word Vectors with Subword Information. In arXiv preprint arXiv:1607.04606.
https://arxiv.org/abs/1607.04606
.. [2] https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/FastText_Tutorial.ipynb
"""
import logging
import struct
import numpy as np
from numpy import ones, vstack, empty, float32 as REAL, sum as np_sum
from gensim.models.word2vec import Word2VecVocab, Word2VecTrainables, train_sg_pair, train_cbow_pair
from gensim.models.keyedvectors import Vocab, FastTextKeyedVectors
from gensim.models.base_any2vec import BaseWordEmbeddingsModel
from gensim.models.utils_any2vec import _compute_ngrams, _ft_hash
from gensim.utils import deprecated, call_on_class_only
from gensim import utils
logger = logging.getLogger(__name__)
# Try the optimized Cython training routines first; fall back to the pure
# Python implementations defined below when the compiled extension is missing.
try:
    from gensim.models.fasttext_inner import train_batch_sg, train_batch_cbow
    from gensim.models.fasttext_inner import FAST_VERSION, MAX_WORDS_IN_BATCH
except ImportError:
    # failed... fall back to plain numpy (20-80x slower training than the above)
    FAST_VERSION = -1  # sentinel value: compiled extension unavailable
    MAX_WORDS_IN_BATCH = 10000  # default batch size for the pure Python code path
def train_batch_cbow(model, sentences, alpha, work=None, neu1=None):
    """Update CBOW model by training on a sequence of sentences.

    Each sentence is a list of string tokens, which are looked up in the model's
    vocab dictionary. Called internally from :meth:`gensim.models.fasttext.FastText.train()`.

    This is the non-optimized, Python version. If you have cython installed, gensim
    will use the optimized version from fasttext_inner instead.

    Parameters
    ----------
    model : :class:`~gensim.models.fasttext.FastText`
        `FastText` instance.
    sentences : iterable of iterables
        Iterable of the sentences directly from disk/network.
    alpha : float
        Learning rate.
    work : :class:`numpy.ndarray`
        Private working memory for each worker.
    neu1 : :class:`numpy.ndarray`
        Private working memory for each worker.

    Returns
    -------
    int
        Effective number of words trained.
    """
    result = 0
    for sentence in sentences:
        # keep only in-vocab words that survive frequency downsampling
        # (sample_int encodes the keep-probability scaled to the 32-bit range)
        word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab and
                       model.wv.vocab[w].sample_int > model.random.rand() * 2 ** 32]
        for pos, word in enumerate(word_vocabs):
            # randomly shrink the context window, as in the original word2vec
            reduced_window = model.random.randint(model.window)
            start = max(0, pos - model.window + reduced_window)
            window_pos = enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start)
            word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
            # the context is represented by the whole-word rows plus each word's ngram bucket rows
            vocab_subwords_indices = []
            ngrams_subwords_indices = []
            for index in word2_indices:
                vocab_subwords_indices += [index]
                ngrams_subwords_indices.extend(model.wv.buckets_word[index])
            l1_vocab = np_sum(model.wv.syn0_vocab[vocab_subwords_indices], axis=0)  # 1 x vector_size
            l1_ngrams = np_sum(model.wv.syn0_ngrams[ngrams_subwords_indices], axis=0)  # 1 x vector_size
            l1 = np_sum([l1_vocab, l1_ngrams], axis=0)
            subwords_indices = [vocab_subwords_indices] + [ngrams_subwords_indices]
            if (subwords_indices[0] or subwords_indices[1]) and model.cbow_mean:
                # average (rather than sum) the context projection when cbow_mean is set
                l1 /= (len(subwords_indices[0]) + len(subwords_indices[1]))
            # train on the sliding window for target word
            train_cbow_pair(model, word, subwords_indices, l1, alpha, is_ft=True)
        result += len(word_vocabs)
    return result
def train_batch_sg(model, sentences, alpha, work=None, neu1=None):
    """Update skip-gram model by training on a sequence of sentences.

    Each sentence is a list of string tokens, which are looked up in the model's
    vocab dictionary. Called internally from :meth:`gensim.models.fasttext.FastText.train()`.

    This is the non-optimized, Python version. If you have cython installed, gensim
    will use the optimized version from fasttext_inner instead.

    Parameters
    ----------
    model : :class:`~gensim.models.fasttext.FastText`
        `FastText` instance.
    sentences : iterable of iterables
        Iterable of the sentences directly from disk/network.
    alpha : float
        Learning rate.
    work : :class:`numpy.ndarray`
        Private working memory for each worker.
    neu1 : :class:`numpy.ndarray`
        Private working memory for each worker.

    Returns
    -------
    int
        Effective number of words trained.
    """
    result = 0
    for sentence in sentences:
        # keep only in-vocab words that survive frequency downsampling
        word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab and
                       model.wv.vocab[w].sample_int > model.random.rand() * 2 ** 32]
        for pos, word in enumerate(word_vocabs):
            reduced_window = model.random.randint(model.window)  # `b` in the original word2vec code
            # now go over all words from the (reduced) window, predicting each one in turn
            start = max(0, pos - model.window + reduced_window)
            # the center word is represented by its own row plus all of its ngram bucket rows
            subwords_indices = (word.index,)
            subwords_indices += model.wv.buckets_word[word.index]
            for pos2, word2 in enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start):
                if pos2 != pos:  # don't train on the `word` itself
                    train_sg_pair(model, model.wv.index2word[word2.index], subwords_indices, alpha, is_ft=True)
        result += len(word_vocabs)
    return result
# Magic number that identifies the *newer* fastText binary model format;
# older files start directly with the `dim` field instead.
FASTTEXT_FILEFORMAT_MAGIC = 793712314
class FastText(BaseWordEmbeddingsModel):
    """Class for training, using and evaluating word representations learned using method
    described in [1]_ aka Fasttext.

    The model can be stored/loaded via its :meth:`~gensim.models.fasttext.FastText.save()` and
    :meth:`~gensim.models.fasttext.FastText.load()` methods, or loaded in a format compatible with the original
    fasttext implementation via :meth:`~gensim.models.fasttext.FastText.load_fasttext_format()`.
    """

    def __init__(self, sentences=None, sg=0, hs=0, size=100, alpha=0.025, window=5, min_count=5,
                 max_vocab_size=None, word_ngrams=1, sample=1e-3, seed=1, workers=3, min_alpha=0.0001,
                 negative=5, cbow_mean=1, hashfxn=hash, iter=5, null_word=0, min_n=3, max_n=6, sorted_vocab=1,
                 bucket=2000000, trim_rule=None, batch_words=MAX_WORDS_IN_BATCH, callbacks=()):
        """Initialize the model from an iterable of `sentences` (each a list of unicode tokens).

        Parameters
        ----------
        sentences : iterable of iterables
            Tokenized sentences; may be a streaming iterable for large corpora
            (e.g. :class:`~gensim.models.word2vec.LineSentence`). If omitted, the
            model is left uninitialized.
        sg : int {1, 0}
            Training algorithm: 1 for skip-gram, otherwise CBOW.
        hs : int {1, 0}
            If 1, hierarchical softmax is used; if 0 and `negative` is non-zero,
            negative sampling is used.
        size : int
            Dimensionality of the feature vectors.
        alpha : float
            Initial learning rate; drops linearly to `min_alpha` over training.
        window : int
            Maximum distance between the current and predicted word within a sentence.
        min_count : int
            Ignore all words with total frequency lower than this.
        max_vocab_size : int
            RAM cap during vocabulary building (roughly 1GB per 10 million word
            types); `None` for no limit.
        word_ngrams : int {1, 0}
            If 1, enrich word vectors with subword (ngram) information;
            0 is equivalent to word2vec.
        sample : float
            Downsampling threshold for high-frequency words; useful range (0, 1e-5).
        seed : int
            Random seed. Full run-to-run determinism additionally requires
            `workers=1` and, on Python 3, a fixed `PYTHONHASHSEED`.
        workers : int
            Number of worker threads.
        min_alpha : float
            Final learning rate of the linear decay.
        negative : int
            Number of "noise words" drawn for negative sampling (usually 5-20);
            0 disables negative sampling.
        cbow_mean : int {1, 0}
            In CBOW, use the sum (0) or the mean (1) of the context word vectors.
        hashfxn : function
            Hash function used for reproducible weight initialization.
        iter : int
            Number of iterations (epochs) over the corpus.
        null_word : int
            Whether to create a null pseudo-word (used for padding).
        min_n : int
            Minimum char-ngram length used for word representations.
        max_n : int
            Maximum char-ngram length; set `max_n < min_n` to disable char ngrams.
        sorted_vocab : int {1, 0}
            If 1, sort the vocabulary by descending frequency before assigning indexes.
        bucket : int
            Number of hash buckets the char ngrams are mapped into (bounds memory use).
        trim_rule : function
            Optional vocabulary trimming rule `(word, count, min_count)` returning
            one of gensim.utils.RULE_DISCARD / RULE_KEEP / RULE_DEFAULT; only used
            during build_vocab() and not stored with the model.
        batch_words : int
            Target size (in words) for batches passed to worker threads.
        callbacks : :obj: `list` of :obj: `~gensim.models.callbacks.CallbackAny2Vec`
            Callbacks executed at specific stages during training.
        """
        # `load`/`load_fasttext_format` are classmethod-style entry points; forbid
        # calling them on an instance.
        self.load = call_on_class_only
        self.load_fasttext_format = call_on_class_only
        self.callbacks = callbacks
        self.word_ngrams = int(word_ngrams)
        if self.word_ngrams <= 1 and max_n == 0:
            # No char ngrams will ever be produced, so no hash buckets are needed.
            bucket = 0
        self.wv = FastTextKeyedVectors(size, min_n, max_n)
        self.vocabulary = FastTextVocab(
            max_vocab_size=max_vocab_size, min_count=min_count, sample=sample,
            sorted_vocab=bool(sorted_vocab), null_word=null_word)
        self.trainables = FastTextTrainables(
            vector_size=size, seed=seed, bucket=bucket, hashfxn=hashfxn)
        # Fix: read the bucket count from `self.trainables` directly. The original
        # `self.wv.bucket = self.bucket` went through the deprecated `bucket`
        # property alias, emitting a spurious deprecation warning on every
        # construction; the assigned value is identical.
        self.wv.bucket = self.trainables.bucket
        super(FastText, self).__init__(
            sentences=sentences, workers=workers, vector_size=size, epochs=iter, callbacks=callbacks,
            batch_words=batch_words, trim_rule=trim_rule, sg=sg, alpha=alpha, window=window, seed=seed,
            hs=hs, negative=negative, cbow_mean=cbow_mean, min_alpha=min_alpha, fast_version=FAST_VERSION)
# --- Deprecated attribute aliases ------------------------------------------
# Kept for backward compatibility with models saved by older gensim versions;
# each one simply forwards to the attribute's new home on `wv` or `trainables`.
@property
@deprecated("Attribute will be removed in 4.0.0, use wv.min_n instead")
def min_n(self):
    # minimum char-ngram length, now stored on the keyed vectors
    return self.wv.min_n

@property
@deprecated("Attribute will be removed in 4.0.0, use wv.max_n instead")
def max_n(self):
    # maximum char-ngram length, now stored on the keyed vectors
    return self.wv.max_n

@property
@deprecated("Attribute will be removed in 4.0.0, use trainables.bucket instead")
def bucket(self):
    # number of ngram hash buckets, now stored on the trainables object
    return self.trainables.bucket

@property
@deprecated("Attribute will be removed in 4.0.0, use self.trainables.vectors_vocab_lockf instead")
def syn0_vocab_lockf(self):
    return self.trainables.vectors_vocab_lockf

@syn0_vocab_lockf.setter
@deprecated("Attribute will be removed in 4.0.0, use self.trainables.vectors_vocab_lockf instead")
def syn0_vocab_lockf(self, value):
    self.trainables.vectors_vocab_lockf = value

@syn0_vocab_lockf.deleter
@deprecated("Attribute will be removed in 4.0.0, use self.trainables.vectors_vocab_lockf instead")
def syn0_vocab_lockf(self):
    del self.trainables.vectors_vocab_lockf

@property
@deprecated("Attribute will be removed in 4.0.0, use self.trainables.vectors_ngrams_lockf instead")
def syn0_ngrams_lockf(self):
    return self.trainables.vectors_ngrams_lockf

@syn0_ngrams_lockf.setter
@deprecated("Attribute will be removed in 4.0.0, use self.trainables.vectors_ngrams_lockf instead")
def syn0_ngrams_lockf(self, value):
    self.trainables.vectors_ngrams_lockf = value

@syn0_ngrams_lockf.deleter
@deprecated("Attribute will be removed in 4.0.0, use self.trainables.vectors_ngrams_lockf instead")
def syn0_ngrams_lockf(self):
    del self.trainables.vectors_ngrams_lockf

@property
@deprecated("Attribute will be removed in 4.0.0, use self.wv.num_ngram_vectors instead")
def num_ngram_vectors(self):
    return self.wv.num_ngram_vectors
def build_vocab(self, sentences, update=False, progress_per=10000, keep_raw_vocab=False, trim_rule=None, **kwargs):
    """Build the vocabulary (and ngram bookkeeping) from a sequence of sentences.

    Each sentence must be a list of unicode strings; a once-only generator
    stream is fine.

    Parameters
    ----------
    sentences : iterable of iterables
        Tokenized sentences; may be a streaming iterable for large corpora
        (e.g. :class:`~gensim.models.word2vec.LineSentence`).
    update : bool
        When True, add the new words in `sentences` to the existing vocabulary
        instead of rebuilding it from scratch.
    progress_per : int
        How many words to process between progress updates.
    keep_raw_vocab : bool
        When False, delete the raw vocabulary after scaling to free up RAM.
    trim_rule : function
        Optional vocabulary trimming rule `(word, count, min_count)` returning
        one of gensim.utils.RULE_DISCARD / RULE_KEEP / RULE_DEFAULT; only used
        during this call and not stored with the model.
    """
    if update:
        if not self.wv.vocab:
            raise RuntimeError(
                "You cannot do an online vocabulary-update of a model which has no prior vocabulary. "
                "First build the vocabulary of your model with a corpus "
                "before doing an online update.")
        # Remember pre-update sizes so the trainables can append rows for the
        # newly added words/ngrams afterwards.
        self.vocabulary.old_vocab_len = len(self.wv.vocab)
        self.trainables.old_hash2index_len = len(self.wv.hash2index)
    return super(FastText, self).build_vocab(
        sentences, update=update, progress_per=progress_per,
        keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule, **kwargs)
def _set_train_params(self, **kwargs):
    """Hook invoked by the base class before training; FastText needs no extra setup."""
    pass

def _clear_post_train(self):
    """Drop cached normalized vectors and ngram buckets; they must be recomputed
    after training or vocabulary changes."""
    self.wv.vectors_norm = None
    self.wv.vectors_vocab_norm = None
    self.wv.vectors_ngrams_norm = None
    self.wv.buckets_word = None
def estimate_memory(self, vocab_size=None, report=None):
    """Estimate the memory (in bytes) required for the model's structures.

    Parameters
    ----------
    vocab_size : int, optional
        Vocabulary size override; defaults to the current vocab length.
        NOTE(review): only the default is used by the computations below,
        which all read ``len(self.wv.vocab)`` directly -- confirm whether
        the override is intended to take effect.
    report : dict, optional
        Existing report dict to extend.

    Returns
    -------
    dict
        Per-structure byte estimates plus a ``'total'`` entry.
    """
    vocab_size = vocab_size or len(self.wv.vocab)
    vec_size = self.vector_size * np.dtype(np.float32).itemsize
    l1_size = self.layer1_size * np.dtype(np.float32).itemsize
    report = report or {}
    report['vocab'] = len(self.wv.vocab) * (700 if self.hs else 500)
    report['syn0_vocab'] = len(self.wv.vocab) * vec_size
    # Fix: read the bucket count from trainables directly instead of going
    # through the deprecated `self.bucket` property alias (same value, no
    # spurious deprecation warning).
    num_buckets = self.trainables.bucket
    if self.hs:
        report['syn1'] = len(self.wv.vocab) * l1_size
    if self.negative:
        report['syn1neg'] = len(self.wv.vocab) * l1_size
    if self.word_ngrams > 0 and self.wv.vocab:
        buckets = set()
        num_ngrams = 0
        for word in self.wv.vocab:
            # Fix: read ngram bounds from `wv` instead of the deprecated
            # `self.min_n`/`self.max_n` property aliases.
            ngrams = _compute_ngrams(word, self.wv.min_n, self.wv.max_n)
            num_ngrams += len(ngrams)
            buckets.update(_ft_hash(ng) % self.trainables.bucket for ng in ngrams)
        num_buckets = len(buckets)
        report['syn0_ngrams'] = len(buckets) * vec_size
        # A tuple (48 bytes) with num_ngrams_word ints (8 bytes) for each word
        # Only used during training, not stored with the model
        report['buckets_word'] = 48 * len(self.wv.vocab) + 8 * num_ngrams
    elif self.word_ngrams > 0:
        # Fix: `logger.warn` is deprecated in favor of `logger.warning`.
        logger.warning(
            'subword information is enabled, but no vocabulary could be found, estimated required memory might be '
            'inaccurate!'
        )
    report['total'] = sum(report.values())
    logger.info(
        "estimated required memory for %i words, %i buckets and %i dimensions: %i bytes",
        len(self.wv.vocab), num_buckets, self.vector_size, report['total']
    )
    return report
def _do_train_job(self, sentences, alpha, inits):
    """Train on a single batch of sentences.

    Parameters
    ----------
    sentences : iterable of iterables
        Batch of tokenized sentences.
    alpha : float
        Current learning rate.
    inits : (:class:`numpy.ndarray`, :class:`numpy.ndarray`)
        This worker's private work memory buffers.

    Returns
    -------
    (int, int)
        (effective word count after unknown-word filtering and sentence-length
        trimming, total raw word count)
    """
    work, neu1 = inits
    # pick the trainer matching the configured algorithm
    batch_trainer = train_batch_sg if self.sg else train_batch_cbow
    effective_words = batch_trainer(self, sentences, alpha, work, neu1)
    return effective_words, self._raw_word_count(sentences)
def train(self, sentences, total_examples=None, total_words=None,
          epochs=None, start_alpha=None, end_alpha=None,
          word_count=0, queue_factor=2, report_delay=1.0, callbacks=(), **kwargs):
    """Update the model's neural weights from a sequence of sentences.

    Each sentence must be a list of unicode strings; a once-only generator
    stream is fine. For accurate learning-rate decay and progress logging,
    either `total_examples` (count of sentences) or `total_words` (count of
    raw words) MUST be provided -- when training on the corpus given to
    :meth:`build_vocab`, pass the model's cached :attr:`corpus_count`. An
    explicit `epochs` MUST also be provided; in the common single-call case
    pass the model's cached `iter` value.

    Parameters
    ----------
    sentences : iterable of iterables
        Tokenized sentences; may be a streaming iterable for large corpora.
    total_examples : int
        Count of sentences.
    total_words : int
        Count of raw words in sentences.
    epochs : int
        Number of iterations (epochs) over the corpus.
    start_alpha : float
        Initial learning rate.
    end_alpha : float
        Final learning rate; drops linearly from `start_alpha`.
    word_count : int
        Count of words already trained (usually 0).
    queue_factor : int
        Multiplier for queue size (number of workers * queue_factor).
    report_delay : float
        Seconds to wait between progress reports.
    callbacks : :obj: `list` of :obj: `~gensim.models.callbacks.CallbackAny2Vec`
        Callbacks executed at specific stages during training.
    """
    train_kwargs = dict(
        total_examples=total_examples, total_words=total_words, epochs=epochs,
        start_alpha=start_alpha, end_alpha=end_alpha, word_count=word_count,
        queue_factor=queue_factor, report_delay=report_delay, callbacks=callbacks,
    )
    super(FastText, self).train(sentences, **train_kwargs)
    # Refresh the composed per-word vectors so lookups reflect the newly
    # trained vocab and ngram weights.
    self.trainables.get_vocab_word_vecs(self.wv)
def init_sims(self, replace=False):
    """
    init_sims() resides in KeyedVectors because it deals with syn0 mainly, but because syn1 is not an attribute
    of KeyedVectors, it has to be deleted in this class, and the normalizing of syn0 happens inside of KeyedVectors
    """
    if replace and hasattr(self.trainables, 'syn1'):
        # when normalizing destructively, also free the hidden-layer weights
        del self.trainables.syn1
    return self.wv.init_sims(replace)

def clear_sims(self):
    """
    Removes all L2-normalized vectors for words from the model.
    You will have to recompute them using init_sims method.
    """
    self._clear_post_train()

@deprecated("Method will be removed in 4.0.0, use self.wv.__getitem__() instead")
def __getitem__(self, words):
    """
    Deprecated. Use self.wv.__getitem__() instead.
    Refer to the documentation for `gensim.models.KeyedVectors.__getitem__`
    """
    return self.wv.__getitem__(words)

@deprecated("Method will be removed in 4.0.0, use self.wv.__contains__() instead")
def __contains__(self, word):
    """
    Deprecated. Use self.wv.__contains__() instead.
    Refer to the documentation for `gensim.models.KeyedVectors.__contains__`
    """
    return self.wv.__contains__(word)
@classmethod
def load_fasttext_format(cls, model_file, encoding='utf8'):
    """Load the input-hidden weight matrix from the original fastText output files.

    Due to limitations in the fastText API, a model loaded this way cannot be
    trained further, though it fully supports querying (similarity, vectors
    for out-of-vocabulary words, ...).

    Parameters
    ----------
    model_file : str
        Path to the fastText output. fastText writes `/path/to/model.vec` and
        `/path/to/model.bin`; pass either `/path/to/model` or
        `/path/to/model.bin` -- only the `.bin` file is read.
    encoding : str
        Byte encoding of the stored vocabulary words.

    Returns
    -------
    :obj: `~gensim.models.fasttext.FastText`
        The loaded model.
    """
    binary_path = model_file if model_file.endswith('.bin') else model_file + '.bin'
    model = cls()
    model.file_name = binary_path
    model.load_binary_data(encoding=encoding)
    return model
def load_binary_data(self, encoding='utf8'):
    """Populate the model from the fastText `.bin` file named by `self.file_name`,
    reading hyperparameters, the vocabulary and the vectors, in that order."""
    with utils.smart_open(self.file_name, 'rb') as fin:
        self._load_model_params(fin)
        self._load_dict(fin, encoding=encoding)
        self._load_vectors(fin)
def _load_model_params(self, file_handle):
    """Read the global hyperparameters from an open fastText `.bin` file and
    mirror them onto this model and its sub-objects."""
    magic, version = self.struct_unpack(file_handle, '@2i')
    if magic == FASTTEXT_FILEFORMAT_MAGIC:  # newer format
        self.new_format = True
        dim, ws, epoch, min_count, neg, _, loss, model, bucket, minn, maxn, _, t = \
            self.struct_unpack(file_handle, '@12i1d')
    else:  # older format
        self.new_format = False
        # the old header has no magic/version fields: the two ints already
        # read are actually `dim` and `ws`
        dim = magic
        ws = version
        epoch, min_count, neg, _, loss, model, bucket, minn, maxn, _, t = self.struct_unpack(file_handle, '@10i1d')
    # Parameters stored by [Args::save](https://github.com/facebookresearch/fastText/blob/master/src/args.cc)
    self.wv.vector_size = dim
    self.vector_size = dim
    self.window = ws
    self.epochs = epoch
    self.vocabulary.min_count = min_count
    self.negative = neg
    self.hs = loss == 1  # loss enum: 1 -> hierarchical softmax
    self.sg = model == 2  # model enum: 2 -> skip-gram
    self.trainables.bucket = bucket
    self.wv.bucket = bucket
    self.wv.min_n = minn
    self.wv.max_n = maxn
    self.vocabulary.sample = t
def _load_dict(self, file_handle, encoding='utf8'):
    """Read the vocabulary section of an open fastText `.bin` file into `self.wv`.

    Raises
    ------
    NotImplementedError
        If the file contains labels, i.e. was produced by supervised training.
    """
    vocab_size, nwords, nlabels = self.struct_unpack(file_handle, '@3i')
    # Vocab stored by [Dictionary::save](https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc)
    if nlabels > 0:
        raise NotImplementedError("Supervised fastText models are not supported")
    logger.info("loading %s words for fastText model from %s", vocab_size, self.file_name)
    self.struct_unpack(file_handle, '@1q')  # number of tokens
    if self.new_format:
        pruneidx_size, = self.struct_unpack(file_handle, '@q')
    for i in range(vocab_size):
        word_bytes = b''
        char_byte = file_handle.read(1)
        # Read vocab word: entries are stored as NUL-terminated byte strings.
        while char_byte != b'\x00':
            word_bytes += char_byte
            char_byte = file_handle.read(1)
        word = word_bytes.decode(encoding)
        count, _ = self.struct_unpack(file_handle, '@qb')  # frequency + entry-type byte
        self.wv.vocab[word] = Vocab(index=i, count=count)
        self.wv.index2word.append(word)
    assert len(self.wv.vocab) == nwords, (
        'mismatch between final vocab size ({} words), '
        'and expected number of words ({} words)'.format(len(self.wv.vocab), nwords))
    if len(self.wv.vocab) != vocab_size:
        # expecting to log this warning only for pretrained french vector, wiki.fr
        logger.warning(
            "mismatch between final vocab size (%s words), and expected vocab size (%s words)",
            len(self.wv.vocab), vocab_size
        )
    if self.new_format:
        for j in range(pruneidx_size):
            # skip the pruned-index pairs; they are not used by gensim
            self.struct_unpack(file_handle, '@2i')
def _load_vectors(self, file_handle):
    """Read the ngram weight matrix from an open fastText `.bin` file into
    `wv.vectors_ngrams`, then finalize the per-word vectors."""
    if self.new_format:
        self.struct_unpack(file_handle, '@?')  # bool quant_input in fasttext.cc
    num_vectors, dim = self.struct_unpack(file_handle, '@2q')
    # Vectors stored by [Matrix::save](https://github.com/facebookresearch/fastText/blob/master/src/matrix.cc)
    assert self.wv.vector_size == dim, (
        'mismatch between vector size in model params ({}) and model vectors ({})'
        .format(self.wv.vector_size, dim)
    )
    # match the on-disk float width to a numpy dtype
    float_size = struct.calcsize('@f')
    if float_size == 4:
        dtype = np.dtype(np.float32)
    elif float_size == 8:
        dtype = np.dtype(np.float64)
    # NOTE(review): if the platform's C float is neither 4 nor 8 bytes, `dtype`
    # is never bound and the np.fromfile call below raises NameError -- confirm
    # whether an explicit error would be preferable.
    self.num_original_vectors = num_vectors
    self.wv.vectors_ngrams = np.fromfile(file_handle, dtype=dtype, count=num_vectors * dim)
    self.wv.vectors_ngrams = self.wv.vectors_ngrams.reshape((num_vectors, dim))
    assert self.wv.vectors_ngrams.shape == (
        self.trainables.bucket + len(self.wv.vocab), self.wv.vector_size), \
        'mismatch between actual weight matrix shape {} and expected shape {}'\
        .format(
            self.wv.vectors_ngrams.shape, (self.trainables.bucket + len(self.wv.vocab), self.wv.vector_size)
        )
    self.trainables.init_ngrams_post_load(self.file_name, self.wv)
    self._clear_post_train()
def struct_unpack(self, file_handle, fmt):
    """Read exactly the bytes described by `fmt` from `file_handle` and unpack them.

    Returns the tuple produced by :func:`struct.unpack` for that format.
    """
    payload = file_handle.read(struct.calcsize(fmt))
    return struct.unpack(fmt, payload)
def save(self, *args, **kwargs):
    """Save the model to a file.

    A model saved this way can be loaded again with
    :func:`~gensim.models.fasttext.FastText.load`, which supports online
    training and out-of-vocabulary word vectors.

    Parameters
    ----------
    fname : str
        Path to the file.
    """
    # Never persist the derived/normalized caches -- they are recomputed on demand.
    kwargs.setdefault(
        'ignore', ['vectors_norm', 'vectors_vocab_norm', 'vectors_ngrams_norm', 'buckets_word'])
    super(FastText, self).save(*args, **kwargs)
@classmethod
def load(cls, *args, **kwargs):
    """Load a previously saved `FastText` model. Also see `save()`.

    Models saved by older gensim versions are re-loaded through the
    backward-compatibility loader.

    Parameters
    ----------
    fname : str
        Path to the saved file.

    Returns
    -------
    :obj: `~gensim.models.fasttext.FastText`
        The loaded model.
    """
    try:
        model = super(FastText, cls).load(*args, **kwargs)
        # Older saves may lack the lock factors; recreate them to match the
        # arrays they gate.
        # Fix: the original read `len(model.trainables.vectors)`, an attribute
        # that does not exist, so the resulting AttributeError was swallowed by
        # the handler below and valid new-format models were wrongly re-routed
        # to the old-format loader.
        if not hasattr(model.trainables, 'vectors_vocab_lockf') and hasattr(model.wv, 'vectors_vocab'):
            model.trainables.vectors_vocab_lockf = ones(len(model.wv.vectors_vocab), dtype=REAL)
        if not hasattr(model.trainables, 'vectors_ngrams_lockf') and hasattr(model.wv, 'vectors_ngrams'):
            model.trainables.vectors_ngrams_lockf = ones(len(model.wv.vectors_ngrams), dtype=REAL)
        return model
    except AttributeError:
        logger.info('Model saved using code from earlier Gensim Version. Re-loading old model in a compatible way.')
        from gensim.models.deprecated.fasttext import load_old_fasttext
        return load_old_fasttext(*args, **kwargs)
@deprecated("Method will be removed in 4.0.0, use self.wv.accuracy() instead")
def accuracy(self, questions, restrict_vocab=30000, most_similar=None, case_insensitive=True):
    """Deprecated. Delegate word-analogy accuracy evaluation to `self.wv`,
    defaulting `most_similar` to the FastText keyed-vectors implementation."""
    most_similar = most_similar or FastTextKeyedVectors.most_similar
    return self.wv.accuracy(questions, restrict_vocab, most_similar, case_insensitive)
class FastTextVocab(Word2VecVocab):
    """Vocabulary builder for FastText; currently a thin specialization of
    :class:`~gensim.models.word2vec.Word2VecVocab`."""

    def __init__(self, max_vocab_size=None, min_count=5, sample=1e-3, sorted_vocab=True, null_word=0):
        super(FastTextVocab, self).__init__(
            max_vocab_size=max_vocab_size, min_count=min_count, sample=sample,
            sorted_vocab=sorted_vocab, null_word=null_word)

    def prepare_vocab(self, hs, negative, wv, update=False, keep_raw_vocab=False, trim_rule=None,
                      min_count=None, sample=None, dry_run=False):
        # Delegate entirely to the word2vec implementation and hand back its report.
        return super(FastTextVocab, self).prepare_vocab(
            hs, negative, wv, update=update, keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule,
            min_count=min_count, sample=sample, dry_run=dry_run)
class FastTextTrainables(Word2VecTrainables):
def __init__(self, vector_size=100, seed=1, hashfxn=hash, bucket=2000000):
    """Set up the trainable-weights holder; `bucket` is the number of hash
    buckets the char ngrams are mapped into."""
    super(FastTextTrainables, self).__init__(
        vector_size=vector_size, seed=seed, hashfxn=hashfxn)
    self.bucket = int(bucket)  # coerce to int in case a float/str was passed

def prepare_weights(self, hs, negative, wv, update=False, vocabulary=None):
    """Initialize word2vec-style weights via the base class, then the
    FastText-specific ngram weight matrices."""
    super(FastTextTrainables, self).prepare_weights(hs, negative, wv, update=update, vocabulary=vocabulary)
    self.init_ngrams_weights(wv, update=update, vocabulary=vocabulary)
def init_ngrams_weights(self, wv, update=False, vocabulary=None):
    """Compute ngrams of all words present in vocabulary and stores vectors for only those ngrams.
    Vectors for other ngrams are initialized with a random uniform distribution in FastText.

    Parameters
    ----------
    wv : keyed-vectors object holding the vocab and the vector matrices.
    update : bool
        If True, the new vocab words and their new ngrams word vectors are initialized
        with random uniform distribution and updated/added to the existing vocab word and ngram vectors.
    vocabulary : vocabulary object; in update mode its `old_vocab_len` (set by
        build_vocab) marks where the new words start.
    """
    if not update:
        # fresh start: allocate full-size vocab and bucket matrices, plus
        # matching per-row learning-rate lock factors
        wv.vectors_vocab = empty((len(wv.vocab), wv.vector_size), dtype=REAL)
        self.vectors_vocab_lockf = ones((len(wv.vocab), wv.vector_size), dtype=REAL)
        wv.vectors_ngrams = empty((self.bucket, wv.vector_size), dtype=REAL)
        self.vectors_ngrams_lockf = ones((self.bucket, wv.vector_size), dtype=REAL)
        wv.hash2index = {}   # maps used bucket hash -> compact row index
        wv.buckets_word = {} # maps word index -> tuple of its ngram row indices
        ngram_indices = []
        for word, vocab in wv.vocab.items():
            buckets = []
            for ngram in _compute_ngrams(word, wv.min_n, wv.max_n):
                ngram_hash = _ft_hash(ngram) % self.bucket
                if ngram_hash not in wv.hash2index:
                    wv.hash2index[ngram_hash] = len(ngram_indices)
                    ngram_indices.append(ngram_hash)
                buckets.append(wv.hash2index[ngram_hash])
            wv.buckets_word[vocab.index] = tuple(buckets)
        wv.num_ngram_vectors = len(ngram_indices)
        logger.info("Total number of ngrams is %d", wv.num_ngram_vectors)
        # shrink the matrices to just the buckets actually used
        wv.vectors_ngrams = wv.vectors_ngrams.take(ngram_indices, axis=0)
        self.vectors_ngrams_lockf = self.vectors_ngrams_lockf.take(ngram_indices, axis=0)
        self.reset_ngrams_weights(wv)
    else:
        # online update: recompute bucket membership and append rows for the
        # ngrams/words that were not seen before.
        # NOTE(review): relies on `self.old_hash2index_len` having been set by
        # FastText.build_vocab(update=True) -- confirm no other caller reaches
        # this path without it.
        wv.buckets_word = {}
        num_new_ngrams = 0
        for word, vocab in wv.vocab.items():
            buckets = []
            for ngram in _compute_ngrams(word, wv.min_n, wv.max_n):
                ngram_hash = _ft_hash(ngram) % self.bucket
                if ngram_hash not in wv.hash2index:
                    wv.hash2index[ngram_hash] = num_new_ngrams + self.old_hash2index_len
                    num_new_ngrams += 1
                buckets.append(wv.hash2index[ngram_hash])
            wv.buckets_word[vocab.index] = tuple(buckets)
        wv.num_ngram_vectors += num_new_ngrams
        logger.info("Number of new ngrams is %d", num_new_ngrams)
        rand_obj = np.random
        rand_obj.seed(self.seed)
        # rows for the newly added vocab words
        new_vocab_rows = rand_obj.uniform(
            -1.0 / wv.vector_size, 1.0 / wv.vector_size,
            (len(wv.vocab) - vocabulary.old_vocab_len, wv.vector_size)
        ).astype(REAL)
        new_vocab_lockf_rows = ones(
            (len(wv.vocab) - vocabulary.old_vocab_len, wv.vector_size), dtype=REAL)
        # rows for the newly added ngram buckets
        new_ngram_rows = rand_obj.uniform(
            -1.0 / wv.vector_size, 1.0 / wv.vector_size,
            (len(wv.hash2index) - self.old_hash2index_len, wv.vector_size)
        ).astype(REAL)
        new_ngram_lockf_rows = ones(
            (len(wv.hash2index) - self.old_hash2index_len, wv.vector_size), dtype=REAL)
        wv.vectors_vocab = vstack([wv.vectors_vocab, new_vocab_rows])
        self.vectors_vocab_lockf = vstack([self.vectors_vocab_lockf, new_vocab_lockf_rows])
        wv.vectors_ngrams = vstack([wv.vectors_ngrams, new_ngram_rows])
        self.vectors_ngrams_lockf = vstack([self.vectors_ngrams_lockf, new_ngram_lockf_rows])
def reset_ngrams_weights(self, wv):
"""Reset all projection weights to an initial (untrained) state,
but keep the existing vocabulary and their ngrams.
"""
rand_obj = np.random
rand_obj.seed(self.seed)
for index in range(len(wv.vocab)):
wv.vectors_vocab[index] = rand_obj.uniform(
-1.0 / wv.vector_size, 1.0 / wv.vector_size, wv.vector_size
).astype(REAL)
for index in range(len(wv.hash2index)):
wv.vectors_ngrams[index] = rand_obj.uniform(
-1.0 / wv.vector_size, 1.0 / wv.vector_size, wv.vector_size
).astype(REAL)
def get_vocab_word_vecs(self, wv):
"""Calculate vectors for words in vocabulary and stores them in `vectors`."""
for w, v in wv.vocab.items():
word_vec = np.copy(wv.vectors_vocab[v.index])
ngrams = _compute_ngrams(w, wv.min_n, wv.max_n)
ngram_weights = wv.vectors_ngrams
for ngram in ngrams:
word_vec += ngram_weights[wv.hash2index[_ft_hash(ngram) % self.bucket]]
word_vec /= (len(ngrams) + 1)
wv.vectors[v.index] = word_vec
    def init_ngrams_post_load(self, file_name, wv):
        """
        Computes ngrams of all words present in vocabulary and stores vectors for only those ngrams.
        Vectors for other ngrams are initialized with a random uniform distribution in FastText. These
        vectors are discarded here to save space.
        """
        # Start each word's vector from its own row of the loaded matrix.
        wv.vectors = np.zeros((len(wv.vocab), wv.vector_size), dtype=REAL)
        for w, vocab in wv.vocab.items():
            wv.vectors[vocab.index] += np.array(wv.vectors_ngrams[vocab.index])
        # Collect only the ngram buckets actually referenced by vocabulary words,
        # assigning each a compact new index via hash2index.
        ngram_indices = []
        wv.num_ngram_vectors = 0
        for word in wv.vocab.keys():
            for ngram in _compute_ngrams(word, wv.min_n, wv.max_n):
                ngram_hash = _ft_hash(ngram) % self.bucket
                if ngram_hash in wv.hash2index:
                    continue
                wv.hash2index[ngram_hash] = len(ngram_indices)
                # Offset by len(wv.vocab): presumably the loaded matrix stores the
                # vocab rows before the ngram buckets -- TODO confirm against loader.
                ngram_indices.append(len(wv.vocab) + ngram_hash)
        wv.num_ngram_vectors = len(ngram_indices)
        # Drop all unreferenced ngram rows to save memory.
        wv.vectors_ngrams = wv.vectors_ngrams.take(ngram_indices, axis=0)
        ngram_weights = wv.vectors_ngrams
        logger.info(
            "loading weights for %s words for fastText model from %s",
            len(wv.vocab), file_name
        )
        # Add every ngram vector to its word and average (word row + ngram rows).
        for w, vocab in wv.vocab.items():
            word_ngrams = _compute_ngrams(w, wv.min_n, wv.max_n)
            for word_ngram in word_ngrams:
                vec_idx = wv.hash2index[_ft_hash(word_ngram) % self.bucket]
                wv.vectors[vocab.index] += np.array(ngram_weights[vec_idx])
            wv.vectors[vocab.index] /= (len(word_ngrams) + 1)
        logger.info(
            "loaded %s weight matrix for fastText model from %s",
            wv.vectors.shape, file_name
        )
| 43,325 | 47.193548 | 120 | py |
poincare_glove | poincare_glove-master/gensim/models/ldamulticore.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Jan Zikes, Radim Rehurek
# Copyright (C) 2014 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Latent Dirichlet Allocation (LDA) in Python, using all CPU cores to parallelize and
speed up model training.
The parallelization uses multiprocessing; in case this doesn't work for you for
some reason, try the :class:`gensim.models.ldamodel.LdaModel` class which is an
equivalent, but more straightforward and single-core implementation.
The training algorithm:
* is **streamed**: training documents may come in sequentially, no random access required,
* runs in **constant memory** w.r.t. the number of documents: size of the
training corpus does not affect memory footprint, can process corpora larger than RAM
Wall-clock `performance on the English Wikipedia <http://radimrehurek.com/gensim/wiki.html>`_
(2G corpus positions, 3.5M documents, 100K features, 0.54G non-zero entries in the final
bag-of-words matrix), requesting 100 topics:
====================================================== ==============
algorithm training time
====================================================== ==============
LdaMulticore(workers=1) 2h30m
LdaMulticore(workers=2) 1h24m
LdaMulticore(workers=3) 1h6m
old LdaModel() 3h44m
simply iterating over input corpus = I/O overhead 20m
====================================================== ==============
(Measured on `this i7 server <http://www.hetzner.de/en/hosting/produkte_rootserver/ex40ssd>`_
with 4 physical cores, so that optimal `workers=3`, one less than the number of cores.)
This module allows both LDA model estimation from a training corpus and inference of topic
distribution on new, unseen documents. The model can also be updated with new documents
for online training.
The core estimation code is based on the `onlineldavb.py` script by M. Hoffman [1]_, see
**Hoffman, Blei, Bach: Online Learning for Latent Dirichlet Allocation, NIPS 2010.**
.. [1] http://www.cs.princeton.edu/~mdhoffma
"""
import logging
import numpy as np
from gensim import utils
from gensim.models.ldamodel import LdaModel, LdaState
import six
from six.moves import queue, xrange
from multiprocessing import Pool, Queue, cpu_count
logger = logging.getLogger(__name__)
class LdaMulticore(LdaModel):
    """
    The constructor estimates Latent Dirichlet Allocation model parameters based
    on a training corpus:
    >>> lda = LdaMulticore(corpus, num_topics=10)
    You can then infer topic distributions on new, unseen documents, with
    >>> doc_lda = lda[doc_bow]
    The model can be updated (trained) with new documents via
    >>> lda.update(other_corpus)
    Model persistency is achieved through its `load`/`save` methods.
    """
    def __init__(self, corpus=None, num_topics=100, id2word=None, workers=None,
                 chunksize=2000, passes=1, batch=False, alpha='symmetric',
                 eta=None, decay=0.5, offset=1.0, eval_every=10, iterations=50,
                 gamma_threshold=0.001, random_state=None, minimum_probability=0.01,
                 minimum_phi_value=0.01, per_word_topics=False, dtype=np.float32):
        """
        If given, start training from the iterable `corpus` straight away. If not given,
        the model is left untrained (presumably because you want to call `update()` manually).
        `num_topics` is the number of requested latent topics to be extracted from
        the training corpus.
        `id2word` is a mapping from word ids (integers) to words (strings). It is
        used to determine the vocabulary size, as well as for debugging and topic
        printing.
        `workers` is the number of extra processes to use for parallelization. Uses
        all available cores by default: `workers=cpu_count()-1`. **Note**: for
        hyper-threaded CPUs, `cpu_count()` returns a useless number -- set `workers`
        directly to the number of your **real** cores (not hyperthreads) minus one,
        for optimal performance.
        If `batch` is not set, perform online training by updating the model once
        every `workers * chunksize` documents (online training). Otherwise,
        run batch LDA, updating model only once at the end of each full corpus pass.
        `alpha` and `eta` are hyperparameters that affect sparsity of the document-topic
        (theta) and topic-word (lambda) distributions. Both default to a symmetric
        1.0/num_topics prior.
        `alpha` can be set to an explicit array = prior of your choice. It also
        support special values of 'asymmetric' and 'auto': the former uses a fixed
        normalized asymmetric 1.0/topicno prior, the latter learns an asymmetric
        prior directly from your data.
        `eta` can be a scalar for a symmetric prior over topic/word
        distributions, or a matrix of shape num_topics x num_words,
        which can be used to impose asymmetric priors over the word
        distribution on a per-topic basis. This may be useful if you
        want to seed certain topics with particular words by boosting
        the priors for those words.
        Calculate and log perplexity estimate from the latest mini-batch once every
        `eval_every` documents. Set to `None` to disable perplexity estimation (faster),
        or to `0` to only evaluate perplexity once, at the end of each corpus pass.
        `decay` and `offset` parameters are the same as Kappa and Tau_0 in
        Hoffman et al, respectively.
        `random_state` can be a numpy.random.RandomState object or the seed for one
        Example:
        >>> lda = LdaMulticore(corpus, id2word=id2word, num_topics=100)  # train model
        >>> print(lda[doc_bow])  # get topic probability distribution for a document
        >>> lda.update(corpus2)  # update the LDA model with additional documents
        >>> print(lda[doc_bow])
        """
        self.workers = max(1, cpu_count() - 1) if workers is None else workers
        self.batch = batch
        # 'auto' alpha is learned during the (serial) M-step, which this
        # multicore implementation does not support.
        if isinstance(alpha, six.string_types) and alpha == 'auto':
            raise NotImplementedError("auto-tuning alpha not implemented in multicore LDA; use plain LdaModel.")
        super(LdaMulticore, self).__init__(
            corpus=corpus, num_topics=num_topics,
            id2word=id2word, chunksize=chunksize, passes=passes, alpha=alpha, eta=eta,
            decay=decay, offset=offset, eval_every=eval_every, iterations=iterations,
            gamma_threshold=gamma_threshold, random_state=random_state, minimum_probability=minimum_probability,
            minimum_phi_value=minimum_phi_value, per_word_topics=per_word_topics, dtype=dtype
        )
    def update(self, corpus, chunks_as_numpy=False):
        """
        Train the model with new documents, by EM-iterating over `corpus` until
        the topics converge (or until the maximum number of allowed iterations
        is reached). `corpus` must be an iterable (repeatable stream of documents),
        The E-step is distributed into the several processes.
        This update also supports updating an already trained model (`self`)
        with new documents from `corpus`; the two models are then merged in
        proportion to the number of old vs. new documents. This feature is still
        experimental for non-stationary input streams.
        For stationary input (no topic drift in new documents), on the other hand,
        this equals the online update of Hoffman et al. and is guaranteed to
        converge for any `decay` in (0.5, 1.0>.
        """
        try:
            lencorpus = len(corpus)
        except TypeError:
            logger.warning("input corpus stream has no len(); counting documents")
            lencorpus = sum(1 for _ in corpus)
        if lencorpus == 0:
            logger.warning("LdaMulticore.update() called with an empty corpus")
            return
        self.state.numdocs += lencorpus
        if not self.batch:
            updatetype = "online"
            # number of documents to accumulate before each M-step
            updateafter = self.chunksize * self.workers
        else:
            updatetype = "batch"
            updateafter = lencorpus
        evalafter = min(lencorpus, (self.eval_every or 0) * updateafter)
        updates_per_pass = max(1, lencorpus / updateafter)
        logger.info(
            "running %s LDA training, %s topics, %i passes over the supplied corpus of %i documents, "
            "updating every %i documents, evaluating every ~%i documents, "
            "iterating %ix with a convergence threshold of %f",
            updatetype, self.num_topics, self.passes, lencorpus, updateafter,
            evalafter, self.iterations, self.gamma_threshold
        )
        if updates_per_pass * self.passes < 10:
            logger.warning(
                "too few updates, training might not converge; "
                "consider increasing the number of passes or iterations to improve accuracy"
            )
        # bounded job queue: never hold more than two chunks per worker in flight
        job_queue = Queue(maxsize=2 * self.workers)
        result_queue = Queue()
        # rho is the "speed" of updating; TODO try other fncs
        # pass_ + num_updates handles increasing the starting t for each pass,
        # while allowing it to "reset" on the first pass of each update
        def rho():
            return pow(self.offset + pass_ + (self.num_updates / self.chunksize), -self.decay)
        logger.info("training LDA model using %i processes", self.workers)
        pool = Pool(self.workers, worker_e_step, (job_queue, result_queue,))
        for pass_ in xrange(self.passes):
            # queue_size is a one-element list so the nested closure below can mutate it
            queue_size, reallen = [0], 0
            other = LdaState(self.eta, self.state.sstats.shape)
            def process_result_queue(force=False):
                """
                Clear the result queue, merging all intermediate results, and update the
                LDA model if necessary.
                """
                merged_new = False
                while not result_queue.empty():
                    other.merge(result_queue.get())
                    queue_size[0] -= 1
                    merged_new = True
                if (force and merged_new and queue_size[0] == 0) or (not self.batch and (other.numdocs >= updateafter)):
                    self.do_mstep(rho(), other, pass_ > 0)
                    other.reset()
                    if self.eval_every is not None and \
                            ((force and queue_size[0] == 0) or
                             (self.eval_every != 0 and (self.num_updates / updateafter) % self.eval_every == 0)):
                        self.log_perplexity(chunk, total_docs=lencorpus)
            chunk_stream = utils.grouper(corpus, self.chunksize, as_numpy=chunks_as_numpy)
            for chunk_no, chunk in enumerate(chunk_stream):
                reallen += len(chunk)  # keep track of how many documents we've processed so far
                # put the chunk into the workers' input job queue
                chunk_put = False
                while not chunk_put:
                    try:
                        job_queue.put((chunk_no, chunk, self), block=False, timeout=0.1)
                        chunk_put = True
                        queue_size[0] += 1
                        logger.info(
                            "PROGRESS: pass %i, dispatched chunk #%i = documents up to #%i/%i, "
                            "outstanding queue size %i",
                            pass_, chunk_no, chunk_no * self.chunksize + len(chunk), lencorpus, queue_size[0]
                        )
                    except queue.Full:
                        # in case the input job queue is full, keep clearing the
                        # result queue, to make sure we don't deadlock
                        process_result_queue()
                process_result_queue()
            # endfor single corpus pass
            # wait for all outstanding jobs to finish
            while queue_size[0] > 0:
                process_result_queue(force=True)
            if reallen != lencorpus:
                raise RuntimeError("input corpus size changed during training (don't use generators as input)")
        # endfor entire update
        pool.terminate()
def worker_e_step(input_queue, result_queue):
    """
    Worker loop: repeatedly pull a (chunk_no, chunk, model) 3-tuple off
    `input_queue`, run the LDA E-step on the chunk, and push the resulting
    sufficient-statistics state onto `result_queue`.

    Loops forever; the surrounding Pool is shut down externally.
    """
    logger.debug("worker process entering E-step loop")
    while True:
        logger.debug("getting a new job")
        job_no, documents, lda_model = input_queue.get()
        logger.debug("processing chunk #%i of %i documents", job_no, len(documents))
        lda_model.state.reset()
        lda_model.do_estep(documents)  # TODO: auto-tune alpha?
        del documents
        logger.debug("processed chunk, queuing the result")
        result_queue.put(lda_model.state)
        del lda_model  # free up some memory
        logger.debug("result put")
| 13,164 | 43.627119 | 120 | py |
poincare_glove | poincare_glove-master/gensim/models/utils_any2vec.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Shiva Manne <s.manne@rare-technologies.com>
# Copyright (C) 2018 RaRe Technologies s.r.o.
"""General functions used for any2vec models."""
import logging
import numpy as np
from gensim import utils
from numpy import zeros, dtype, float32 as REAL, ascontiguousarray, fromstring
from six.moves import xrange
from six import iteritems
logger = logging.getLogger(__name__)
try:
from gensim.models._utils_any2vec import ft_hash as _ft_hash, compute_ngrams as _compute_ngrams
except ImportError:
FAST_VERSION = -1
# failed... fall back to plain python
def _ft_hash(string):
"""Calculate hash based on `string`.
Reproduce `hash method from Facebook fastText implementation
<https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc>`_.
Parameters
----------
string : str
The string whose hash needs to be calculated.
Returns
-------
int
The hash of the string.
"""
# Runtime warnings for integer overflow are raised, this is expected behaviour. These warnings are suppressed.
old_settings = np.seterr(all='ignore')
h = np.uint32(2166136261)
for c in string:
h = h ^ np.uint32(ord(c))
h = h * np.uint32(16777619)
np.seterr(**old_settings)
return h
def _compute_ngrams(word, min_n, max_n):
"""Get the list of all possible ngrams for a given word.
Parameters
----------
word : str
The word whose ngrams need to be computed.
min_n : int
Minimum character length of the ngrams.
max_n : int
Maximum character length of the ngrams.
Returns
-------
list of str
Sequence of character ngrams.
"""
BOW, EOW = ('<', '>') # Used by FastText to attach to all words as prefix and suffix
extended_word = BOW + word + EOW
ngrams = []
for ngram_length in range(min_n, min(len(extended_word), max_n) + 1):
for i in range(0, len(extended_word) - ngram_length + 1):
ngrams.append(extended_word[i:i + ngram_length])
return ngrams
def _save_word2vec_format(fname, vocab, vectors, fvocab=None, binary=False, total_vec=None):
    """Store the input-hidden weight matrix in the same format used by the original
    C word2vec-tool, for compatibility.

    Parameters
    ----------
    fname : str
        The file path used to save the vectors in
    vocab : dict
        The vocabulary of words
    vectors : numpy.array
        The vectors to be stored
    fvocab : str
        Optional file path used to save the vocabulary
    binary : bool
        If True, the data will be saved in binary word2vec format, else it will be saved in plain text.
    total_vec : int
        Optional parameter to explicitly specify total no. of vectors
        (in case word vectors are appended with document vectors afterwards)

    Raises
    ------
    RuntimeError
        If neither a vocabulary nor a vector matrix is supplied.
    """
    # BUGFIX: `vectors` is a numpy array, so the old `not (vocab or vectors)` check
    # evaluated the array in a boolean context whenever `vocab` was falsy, raising
    # "The truth value of an array with more than one element is ambiguous".
    # Compare against None explicitly instead.
    if not vocab and vectors is None:
        raise RuntimeError("no input")
    if total_vec is None:
        total_vec = len(vocab)
    vector_size = vectors.shape[1]
    if fvocab is not None:
        logger.info("storing vocabulary in %s", fvocab)
        with utils.smart_open(fvocab, 'wb') as vout:
            # vocabulary file mirrors the vector file's most-frequent-first order
            for word, vocab_ in sorted(iteritems(vocab), key=lambda item: -item[1].count):
                vout.write(utils.to_utf8("%s %s\n" % (word, vocab_.count)))
    logger.info("storing %sx%s projection weights into %s", total_vec, vector_size, fname)
    assert (len(vocab), vector_size) == vectors.shape
    with utils.smart_open(fname, 'wb') as fout:
        fout.write(utils.to_utf8("%s %s\n" % (total_vec, vector_size)))
        # store in sorted order: most frequent words at the top
        for word, vocab_ in sorted(iteritems(vocab), key=lambda item: -item[1].count):
            row = vectors[vocab_.index]
            if binary:
                # binary format: utf8 word, a space, then the raw little-endian float32 bytes
                row = row.astype(REAL)
                fout.write(utils.to_utf8(word) + b" " + row.tostring())
            else:
                fout.write(utils.to_utf8("%s %s\n" % (word, ' '.join(repr(val) for val in row))))
def _load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
                          limit=None, datatype=REAL):
    """Load the input-hidden weight matrix from the original C word2vec-tool format.
    Note that the information stored in the file is incomplete (the binary tree is missing),
    so while you can query for word similarity etc., you cannot continue training
    with a model loaded this way.
    Parameters
    ----------
    fname : str
        The file path to the saved word2vec-format file.
    fvocab : str
        Optional file path to the vocabulary. Word counts are read from `fvocab` filename,
        if set (this is the file generated by `-save-vocab` flag of the original C tool).
    binary : bool
        If True, indicates whether the data is in binary word2vec format.
    encoding : str
        If you trained the C model using non-utf8 encoding for words, specify that
        encoding in `encoding`.
    unicode_errors : str
        default 'strict', is a string suitable to be passed as the `errors`
        argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source
        file may include word tokens truncated in the middle of a multibyte unicode character
        (as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.
    limit : int
        Sets a maximum number of word-vectors to read from the file. The default,
        None, means read all.
    datatype : :class: `numpy.float*`
        (Experimental) Can coerce dimensions to a non-default float type (such
        as np.float16) to save memory. (Such types may result in much slower bulk operations
        or incompatibility with optimized routines.)
    Returns
    -------
    :obj: `cls`
        Returns the loaded model as an instance of :class: `cls`.
    """
    # local import to avoid a circular dependency with keyedvectors
    from gensim.models.keyedvectors import Vocab
    counts = None
    if fvocab is not None:
        logger.info("loading word counts from %s", fvocab)
        counts = {}
        with utils.smart_open(fvocab) as fin:
            for line in fin:
                word, count = utils.to_unicode(line).strip().split()
                counts[word] = int(count)
    logger.info("loading projection weights from %s", fname)
    with utils.smart_open(fname) as fin:
        # first line of the file is "<vocab_size> <vector_size>"
        header = utils.to_unicode(fin.readline(), encoding=encoding)
        vocab_size, vector_size = (int(x) for x in header.split())  # throws for invalid file format
        if limit:
            vocab_size = min(vocab_size, limit)
        result = cls(vector_size)
        result.vector_size = vector_size
        result.vectors = zeros((vocab_size, vector_size), dtype=datatype)
        def add_word(word, weights):
            # register `word` under the next free index and store its vector row
            word_id = len(result.vocab)
            if word in result.vocab:
                logger.warning("duplicate word '%s' in %s, ignoring all but first", word, fname)
                return
            if counts is None:
                # most common scenario: no vocab file given. just make up some bogus counts, in descending order
                result.vocab[word] = Vocab(index=word_id, count=vocab_size - word_id)
            elif word in counts:
                # use count from the vocab file
                result.vocab[word] = Vocab(index=word_id, count=counts[word])
            else:
                # vocab file given, but word is missing -- set count to None (TODO: or raise?)
                logger.warning("vocabulary file is incomplete: '%s' is missing", word)
                result.vocab[word] = Vocab(index=word_id, count=None)
            result.vectors[word_id] = weights
            result.index2word.append(word)
        if binary:
            binary_len = dtype(REAL).itemsize * vector_size
            for _ in xrange(vocab_size):
                # mixed text and binary: read text first, then binary
                word = []
                while True:
                    ch = fin.read(1)
                    if ch == b' ':
                        break
                    if ch == b'':
                        raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
                    if ch != b'\n':  # ignore newlines in front of words (some binary files have)
                        word.append(ch)
                word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)
                weights = fromstring(fin.read(binary_len), dtype=REAL).astype(datatype)
                add_word(word, weights)
        else:
            for line_no in xrange(vocab_size):
                line = fin.readline()
                if line == b'':
                    raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
                parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
                if len(parts) != vector_size + 1:
                    raise ValueError("invalid vector on line %s (is this really the text format?)" % line_no)
                word, weights = parts[0], [datatype(x) for x in parts[1:]]
                add_word(word, weights)
    if result.vectors.shape[0] != len(result.vocab):
        # duplicates were skipped by add_word, so fewer rows were filled than allocated
        logger.info(
            "duplicate words detected, shrinking matrix size from %i to %i",
            result.vectors.shape[0], len(result.vocab)
        )
        result.vectors = ascontiguousarray(result.vectors[: len(result.vocab)])
    assert (len(result.vocab), vector_size) == result.vectors.shape
    logger.info("loaded %s matrix from %s", result.vectors.shape, fname)
    return result
| 10,055 | 41.610169 | 118 | py |
poincare_glove | poincare_glove-master/gensim/models/lsi_worker.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
""":class:`~gensim.models.lsi_worker.Worker` ("slave") process used in computing
distributed :class:`~gensim.models.lsimodel.LsiModel`.
Run this script on every node in your cluster. If you wish, you may even run it multiple times on a single machine,
to make better use of multiple cores (just beware that memory footprint increases accordingly).
Warnings
--------
Requires installed `Pyro4 <https://pythonhosted.org/Pyro4/>`_.
Distributed version works only in local network.
How to use distributed :class:`~gensim.models.lsimodel.LsiModel`
----------------------------------------------------------------
#. Install needed dependencies (Pyro4) ::
pip install gensim[distributed]
#. Setup serialization (on each machine) ::
export PYRO_SERIALIZERS_ACCEPTED=pickle
export PYRO_SERIALIZER=pickle
#. Run nameserver ::
python -m Pyro4.naming -n 0.0.0.0 &
#. Run workers (on each machine) ::
python -m gensim.models.lsi_worker &
#. Run dispatcher ::
python -m gensim.models.lsi_dispatcher &
#. Run :class:`~gensim.models.lsimodel.LsiModel` in distributed mode ::
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.models import LsiModel
>>>
>>> model = LsiModel(common_corpus, id2word=common_dictionary, distributed=True)
Command line arguments
----------------------
.. program-output:: python -m gensim.models.lsi_worker --help
:ellipsis: 0, -3
"""
from __future__ import with_statement
import os
import sys
import logging
import argparse
import threading
import tempfile
try:
import Queue
except ImportError:
import queue as Queue
import Pyro4
from gensim.models import lsimodel
from gensim import utils
logger = logging.getLogger(__name__)
SAVE_DEBUG = 0 # save intermediate models after every SAVE_DEBUG updates (0 for never)
class Worker(object):
    """Pyro-exposed worker that wraps a single :class:`~gensim.models.lsimodel.LsiModel`
    and processes document chunks handed out by the LSI dispatcher.
    """
    def __init__(self):
        """Partly initializes the model.
        A full initialization requires a call to :meth:`~gensim.models.lsi_worker.Worker.initialize`.
        """
        self.model = None
    @Pyro4.expose
    def initialize(self, myid, dispatcher, **model_params):
        """Fully initializes the worker.
        Parameters
        ----------
        myid : int
            An ID number used to identify this worker in the dispatcher object.
        dispatcher : :class:`~gensim.models.lsi_dispatcher.Dispatcher`
            The dispatcher responsible for scheduling this worker.
        **model_params
            Keyword parameters to initialize the inner LSI model, see :class:`~gensim.models.lsimodel.LsiModel`.
        """
        self.lock_update = threading.Lock()
        self.jobsdone = 0  # how many jobs has this worker completed?
        # id of this worker in the dispatcher; just a convenience var for easy access/logging TODO remove?
        self.myid = myid
        self.dispatcher = dispatcher
        self.finished = False
        logger.info("initializing worker #%s", myid)
        self.model = lsimodel.LsiModel(**model_params)
    @Pyro4.expose
    @Pyro4.oneway
    def requestjob(self):
        """Request jobs from the dispatcher, in a perpetual loop until
        :meth:`~gensim.models.lsi_worker.Worker.getstate()` is called.
        """
        if self.model is None:
            raise RuntimeError("worker must be initialized before receiving jobs")
        job = None
        while job is None and not self.finished:
            try:
                job = self.dispatcher.getjob(self.myid)
            except Queue.Empty:
                # no new job: try again, unless we're finished with all work
                continue
        if job is not None:
            logger.info("worker #%s received job #%i", self.myid, self.jobsdone)
            self.processjob(job)
            self.dispatcher.jobdone(self.myid)
        else:
            logger.info("worker #%i stopping asking for jobs", self.myid)
    @utils.synchronous('lock_update')
    def processjob(self, job):
        """Incrementally processes the job and potentially logs progress.
        Parameters
        ----------
        job : iterable of list of (int, float)
            Corpus in BoW format.
        """
        self.model.add_documents(job)
        self.jobsdone += 1
        if SAVE_DEBUG and self.jobsdone % SAVE_DEBUG == 0:
            # periodic debugging snapshot of the in-progress model
            fname = os.path.join(tempfile.gettempdir(), 'lsi_worker.pkl')
            self.model.save(fname)
    @Pyro4.expose
    @utils.synchronous('lock_update')
    def getstate(self):
        """Log and get the LSI model's current projection.
        Returns
        -------
        :class:`~gensim.models.lsimodel.Projection`
            The current projection.
        """
        logger.info("worker #%i returning its state after %s jobs", self.myid, self.jobsdone)
        assert isinstance(self.model.projection, lsimodel.Projection)
        # signal requestjob() to stop pulling new work
        self.finished = True
        return self.model.projection
    @Pyro4.expose
    @utils.synchronous('lock_update')
    def reset(self):
        """Resets the worker by deleting its current projection."""
        logger.info("resetting worker #%i", self.myid)
        self.model.projection = self.model.projection.empty_like()
        self.finished = False
    @Pyro4.oneway
    def exit(self):
        """Terminates the worker."""
        logger.info("terminating worker #%i", self.myid)
        os._exit(0)
if __name__ == '__main__':
    """The main script. """
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
    # __doc__[:-135] trims the trailing "Command line arguments" sphinx directive
    # from the module docstring -- presumably so raw markup isn't shown in --help;
    # TODO(review): confirm the 135-character count still matches the docstring tail.
    parser = argparse.ArgumentParser(description=__doc__[:-135], formatter_class=argparse.RawTextHelpFormatter)
    _ = parser.parse_args()
    logger.info("running %s", " ".join(sys.argv))
    # register this worker with Pyro; the random suffix lets several workers
    # coexist on one machine
    utils.pyro_daemon('gensim.lsi_worker', Worker(), random_suffix=True)
    logger.info("finished running %s", parser.prog)
| 6,037 | 30.284974 | 115 | py |
poincare_glove | poincare_glove-master/gensim/models/coherencemodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Module for calculating topic coherence in python. This is the implementation of
the four stage topic coherence pipeline from the paper [1]_.
The four stage pipeline is basically:
Segmentation -> Probability Estimation -> Confirmation Measure -> Aggregation.
Implementation of this pipeline allows for the user to in essence "make" a
coherence measure of his/her choice by choosing a method in each of the pipelines.
.. [1] Michael Roeder, Andreas Both and Alexander Hinneburg. Exploring the space of topic
coherence measures. http://svn.aksw.org/papers/2015/WSDM_Topic_Evaluation/public.pdf.
"""
import logging
import multiprocessing as mp
from collections import namedtuple
import numpy as np
from gensim import interfaces, matutils
from gensim import utils
from gensim.topic_coherence import (segmentation, probability_estimation,
direct_confirmation_measure, indirect_confirmation_measure,
aggregation)
from gensim.topic_coherence.probability_estimation import unique_ids_from_segments
logger = logging.getLogger(__name__)
# Measures grouped by their probability-estimation strategy: boolean document
# co-occurrence vs. boolean sliding window over the texts.
BOOLEAN_DOCUMENT_BASED = {'u_mass'}
SLIDING_WINDOW_BASED = {'c_v', 'c_uci', 'c_npmi', 'c_w2v'}
# A coherence pipeline has four stages (cf. module docstring):
# segmentation -> probability estimation -> confirmation measure -> aggregation.
_make_pipeline = namedtuple('Coherence_Measure', 'seg, prob, conf, aggr')
COHERENCE_MEASURES = {
    'u_mass': _make_pipeline(
        segmentation.s_one_pre,
        probability_estimation.p_boolean_document,
        direct_confirmation_measure.log_conditional_probability,
        aggregation.arithmetic_mean
    ),
    'c_v': _make_pipeline(
        segmentation.s_one_set,
        probability_estimation.p_boolean_sliding_window,
        indirect_confirmation_measure.cosine_similarity,
        aggregation.arithmetic_mean
    ),
    'c_w2v': _make_pipeline(
        segmentation.s_one_set,
        probability_estimation.p_word2vec,
        indirect_confirmation_measure.word2vec_similarity,
        aggregation.arithmetic_mean
    ),
    'c_uci': _make_pipeline(
        segmentation.s_one_one,
        probability_estimation.p_boolean_sliding_window,
        direct_confirmation_measure.log_ratio_measure,
        aggregation.arithmetic_mean
    ),
    'c_npmi': _make_pipeline(
        segmentation.s_one_one,
        probability_estimation.p_boolean_sliding_window,
        direct_confirmation_measure.log_ratio_measure,
        aggregation.arithmetic_mean
    ),
}
# Default sliding-window width per measure; 'u_mass' is document-based and uses no window.
SLIDING_WINDOW_SIZES = {
    'c_v': 110,
    'c_w2v': 5,
    'c_uci': 10,
    'c_npmi': 10,
    'u_mass': None
}
class CoherenceModel(interfaces.TransformationABC):
    """Objects of this class allow for building and maintaining a model for topic coherence.
    The main methods are:
    1. constructor, which initializes the four stage pipeline by accepting a coherence measure,
    2. the ``get_coherence()`` method, which returns the topic coherence.
    Pipeline phases can also be executed individually. Methods for doing this are:
    1. `segment_topics()`, which performs segmentation of the given topics into their comparison sets.
    2. `estimate_probabilities()`, which accumulates word occurrence stats from the given corpus or texts.
    The output of this is also cached on the `CoherenceModel`, so calling this method can be used as
    a precomputation step for the next phase.
    3. `get_coherence_per_topic()`, which uses the segmented topics and estimated probabilities to compute
    the coherence of each topic. This output can be used to rank topics in order of most coherent to
    least. Such a ranking is useful if the intended use case of a topic model is document exploration
    by a human. It is also useful for filtering out incoherent topics (keep top-n from ranked list).
    4. `aggregate_measures(topic_coherences)`, which uses the pipeline's aggregation method to compute
    the overall coherence from the topic coherences.
    One way of using this feature is through providing a trained topic model. A dictionary has to be explicitly
    provided if the model does not contain a dictionary already::
        cm = CoherenceModel(model=tm, corpus=corpus, coherence='u_mass')  # tm is the trained topic model
        cm.get_coherence()
    Another way of using this feature is through providing tokenized topics such as::
        topics = [['human', 'computer', 'system', 'interface'],
                  ['graph', 'minors', 'trees', 'eps']]
        # note that a dictionary has to be provided.
        cm = CoherenceModel(topics=topics, corpus=corpus, dictionary=dictionary, coherence='u_mass')
        cm.get_coherence()
    Model persistency is achieved via its load/save methods.
    """
    def __init__(self, model=None, topics=None, texts=None, corpus=None, dictionary=None,
                 window_size=None, keyed_vectors=None, coherence='c_v', topn=20, processes=-1):
        """
        Args:
            model : Pre-trained topic model. Should be provided if topics is not provided.
                Currently supports LdaModel, LdaMallet wrapper and LdaVowpalWabbit wrapper. Use 'topics'
                parameter to plug in an as yet unsupported model.
            topics : List of tokenized topics. If this is preferred over model, dictionary should be provided.
                eg::
                    topics = [['human', 'machine', 'computer', 'interface'],
                              ['graph', 'trees', 'binary', 'widths']]
            texts : Tokenized texts. Needed for coherence models that use sliding window based probability estimator,
                eg::
                    texts = [['system', 'human', 'system', 'eps'],
                             ['user', 'response', 'time'],
                             ['trees'],
                             ['graph', 'trees'],
                             ['graph', 'minors', 'trees'],
                             ['graph', 'minors', 'survey']]
            corpus : Gensim document corpus.
            dictionary : Gensim dictionary mapping of id word to create corpus. If model.id2word is present,
                this is not needed. If both are provided, dictionary will be used.
            window_size : Is the size of the window to be used for coherence measures using boolean sliding window
                as their probability estimator. For 'u_mass' this doesn't matter.
                If left 'None' the default window sizes are used which are:
                'c_v' : 110
                'c_uci' : 10
                'c_npmi' : 10
            coherence : Coherence measure to be used. Supported values are:
                'u_mass'
                'c_v'
                'c_uci' also popularly known as c_pmi
                'c_npmi'
                For 'u_mass' corpus should be provided. If texts is provided, it will be converted
                to corpus using the dictionary. For 'c_v', 'c_uci' and 'c_npmi' texts should be provided.
                Corpus is not needed.
            topn : Integer corresponding to the number of top words to be extracted from each topic.
            processes : number of processes to use for probability estimation phase; any value
                less than 1 will be interpreted to mean num_cpus - 1; default is -1.
        """
        # Exactly one of `model` / `topics` has to supply the topics to evaluate.
        if model is None and topics is None:
            raise ValueError("One of model or topics has to be provided.")
        elif topics is not None and dictionary is None:
            raise ValueError("dictionary has to be provided if topics are to be used.")
        self.keyed_vectors = keyed_vectors
        if keyed_vectors is None and texts is None and corpus is None:
            raise ValueError("One of texts or corpus has to be provided.")
        # Check if associated dictionary is provided.
        if dictionary is None:
            if isinstance(model.id2word, utils.FakeDict):
                raise ValueError(
                    "The associated dictionary should be provided with the corpus or 'id2word'"
                    " for topic model should be set as the associated dictionary.")
            else:
                self.dictionary = model.id2word
        else:
            self.dictionary = dictionary
        # Check for correct inputs for u_mass coherence measure.
        self.coherence = coherence
        self.window_size = window_size
        if self.window_size is None:
            self.window_size = SLIDING_WINDOW_SIZES[self.coherence]
        self.texts = texts
        self.corpus = corpus
        if coherence in BOOLEAN_DOCUMENT_BASED:
            if utils.is_corpus(corpus)[0]:
                self.corpus = corpus
            elif self.texts is not None:
                # Fall back to building a BoW corpus from the raw texts.
                self.corpus = [self.dictionary.doc2bow(text) for text in self.texts]
            else:
                # NOTE(review): the "%s" placeholder below (and in the two raises after it)
                # is passed as a second ValueError argument and is never interpolated
                # into the message.
                raise ValueError(
                    "Either 'corpus' with 'dictionary' or 'texts' should "
                    "be provided for %s coherence.", coherence)
        # Check for correct inputs for sliding window coherence measure.
        elif coherence == 'c_w2v' and keyed_vectors is not None:
            pass
        elif coherence in SLIDING_WINDOW_BASED:
            if self.texts is None:
                raise ValueError("'texts' should be provided for %s coherence.", coherence)
        else:
            raise ValueError("%s coherence is not currently supported.", coherence)
        self._topn = topn
        self._model = model
        self._accumulator = None
        self._topics = None
        # Property setter: converts token topics to id arrays and may wipe the accumulator.
        self.topics = topics
        # Any value < 1 means "use all CPUs but one" (but at least one process).
        self.processes = processes if processes >= 1 else max(1, mp.cpu_count() - 1)
    @classmethod
    def for_models(cls, models, dictionary, topn=20, **kwargs):
        """Initialize a CoherenceModel with estimated probabilities for all of the given models.
        Args:
            models (list): List of models to evaluate coherence of; the only requirement is
                that each has a `get_topics` method.
        """
        # One list of top-N word lists per model; all models share `dictionary`.
        topics = [cls.top_topics_as_word_lists(model, dictionary, topn) for model in models]
        kwargs['dictionary'] = dictionary
        kwargs['topn'] = topn
        return cls.for_topics(topics, **kwargs)
    @staticmethod
    def top_topics_as_word_lists(model, dictionary, topn=20):
        """Return `model`'s topics as lists of the `topn` top words (as strings)."""
        # Lazily build the reverse id -> token mapping if the dictionary lacks it.
        if not dictionary.id2token:
            dictionary.id2token = {v: k for k, v in dictionary.token2id.items()}
        str_topics = []
        for topic in model.get_topics():
            bestn = matutils.argsort(topic, topn=topn, reverse=True)
            beststr = [dictionary.id2token[_id] for _id in bestn]
            str_topics.append(beststr)
        return str_topics
    @classmethod
    def for_topics(cls, topics_as_topn_terms, **kwargs):
        """Initialize a CoherenceModel with estimated probabilities for all of the given topics.
        Args:
            topics_as_topn_terms (list of lists): Each element in the top-level list should be
                the list of topics for a model. The topics for the model should be a list of
                top-N words, one per topic.
        """
        if not topics_as_topn_terms:
            raise ValueError("len(topics) must be > 0.")
        if any(len(topic_lists) == 0 for topic_lists in topics_as_topn_terms):
            raise ValueError("found empty topic listing in `topics`")
        # Use the longest topic length as the upper bound for topn,
        # capped by an explicitly passed `topn` kwarg if any.
        topn = 0
        for topic_list in topics_as_topn_terms:
            for topic in topic_list:
                topn = max(topn, len(topic))
        topn = min(kwargs.pop('topn', topn), topn)
        # Estimate probabilities once over the union of all models' terms,
        # then reuse the accumulator for every evaluation.
        super_topic = utils.flatten(topics_as_topn_terms)
        logging.info(
            "Number of relevant terms for all %d models: %d",
            len(topics_as_topn_terms), len(super_topic))
        cm = CoherenceModel(topics=[super_topic], topn=len(super_topic), **kwargs)
        cm.estimate_probabilities()
        cm.topn = topn
        return cm
    def __str__(self):
        """String representation of the coherence pipeline (seg/prob/conf/aggr)."""
        return str(self.measure)
    @property
    def model(self):
        """The underlying topic model, if one was provided."""
        return self._model
    @model.setter
    def model(self, model):
        self._model = model
        if model is not None:
            # Re-extract topics from the new model and invalidate the cached
            # accumulator if the relevant term ids changed.
            new_topics = self._get_topics()
            self._update_accumulator(new_topics)
            self._topics = new_topics
    @property
    def topn(self):
        """Number of top words extracted from each topic."""
        return self._topn
    @topn.setter
    def topn(self, topn):
        current_topic_length = len(self._topics[0])
        requires_expansion = current_topic_length < topn
        if self.model is not None:
            self._topn = topn
            if requires_expansion:
                self.model = self._model  # trigger topic expansion from model
        else:
            if requires_expansion:
                raise ValueError("Model unavailable and topic sizes are less than topn=%d" % topn)
            self._topn = topn  # topics will be truncated in getter
    @property
    def measure(self):
        """The (seg, prob, conf, aggr) pipeline for the configured coherence measure."""
        return COHERENCE_MEASURES[self.coherence]
    @property
    def topics(self):
        """Current topics, truncated to `topn` terms each on the way out."""
        if len(self._topics[0]) > self._topn:
            return [topic[:self._topn] for topic in self._topics]
        else:
            return self._topics
    @topics.setter
    def topics(self, topics):
        if topics is not None:
            new_topics = []
            for topic in topics:
                topic_token_ids = self._ensure_elements_are_ids(topic)
                new_topics.append(topic_token_ids)
            if self.model is not None:
                # Explicit topics take precedence over the model's own topics.
                logger.warning(
                    "The currently set model '%s' may be inconsistent with the newly set topics",
                    self.model)
        elif self.model is not None:
            new_topics = self._get_topics()
            logger.debug("Setting topics to those of the model: %s", self.model)
        else:
            new_topics = None
        self._update_accumulator(new_topics)
        self._topics = new_topics
    def _ensure_elements_are_ids(self, topic):
        """Convert a topic given as tokens (or already as ids) into an array of token ids."""
        try:
            return np.array([self.dictionary.token2id[token] for token in topic])
        except KeyError:  # might be a list of token ids already, but let's verify all in dict
            topic = [self.dictionary.id2token[_id] for _id in topic]
            return np.array([self.dictionary.token2id[token] for token in topic])
    def _update_accumulator(self, new_topics):
        # Drop the cached accumulator when it would not cover all ids of the new topics.
        if self._relevant_ids_will_differ(new_topics):
            logger.debug("Wiping cached accumulator since it does not contain all relevant ids.")
            self._accumulator = None
    def _relevant_ids_will_differ(self, new_topics):
        """Return True iff the cached accumulator does not cover `new_topics`' term ids."""
        if self._accumulator is None or not self._topics_differ(new_topics):
            return False
        new_set = unique_ids_from_segments(self.measure.seg(new_topics))
        return not self._accumulator.relevant_ids.issuperset(new_set)
    def _topics_differ(self, new_topics):
        """Return True iff both topic sets are present and not element-wise equal."""
        return (new_topics is not None and
                self._topics is not None and
                not np.array_equal(new_topics, self._topics))
    def _get_topics(self):
        """Internal helper function to return topics from a trained topic model."""
        return self._get_topics_from_model(self.model, self.topn)
    @staticmethod
    def _get_topics_from_model(model, topn):
        """Internal helper function to return topics from a trained topic model."""
        try:
            return [
                matutils.argsort(topic, topn=topn, reverse=True) for topic in
                model.get_topics()
            ]
        except AttributeError:
            raise ValueError(
                "This topic model is not currently supported. Supported topic models"
                " should implement the `get_topics` method.")
    def segment_topics(self):
        """Segment the current topics using the measure's segmentation function."""
        return self.measure.seg(self.topics)
    def estimate_probabilities(self, segmented_topics=None):
        """Accumulate word occurrences and co-occurrences from texts or corpus using
        the optimal method for the chosen coherence metric. This operation may take
        quite some time for the sliding window based coherence methods.
        """
        if segmented_topics is None:
            segmented_topics = self.segment_topics()
        if self.coherence in BOOLEAN_DOCUMENT_BASED:
            self._accumulator = self.measure.prob(self.corpus, segmented_topics)
        else:
            kwargs = dict(
                texts=self.texts, segmented_topics=segmented_topics,
                dictionary=self.dictionary, window_size=self.window_size,
                processes=self.processes)
            if self.coherence == 'c_w2v':
                kwargs['model'] = self.keyed_vectors
            self._accumulator = self.measure.prob(**kwargs)
        return self._accumulator
    def get_coherence_per_topic(self, segmented_topics=None, with_std=False, with_support=False):
        """Return list of coherence values for each topic based on pipeline parameters."""
        measure = self.measure
        if segmented_topics is None:
            segmented_topics = measure.seg(self.topics)
        if self._accumulator is None:
            # Estimate (and cache) probabilities on demand.
            self.estimate_probabilities(segmented_topics)
        kwargs = dict(with_std=with_std, with_support=with_support)
        if self.coherence in BOOLEAN_DOCUMENT_BASED or self.coherence == 'c_w2v':
            pass
        elif self.coherence == 'c_v':
            kwargs['topics'] = self.topics
            kwargs['measure'] = 'nlr'
            kwargs['gamma'] = 1
        else:
            # c_uci / c_npmi: PMI-based confirmation; normalize only for c_npmi.
            kwargs['normalize'] = (self.coherence == 'c_npmi')
        return measure.conf(segmented_topics, self._accumulator, **kwargs)
    def aggregate_measures(self, topic_coherences):
        """Aggregate the individual topic coherence measures using
        the pipeline's aggregation function.
        """
        return self.measure.aggr(topic_coherences)
    def get_coherence(self):
        """Return coherence value based on pipeline parameters."""
        confirmed_measures = self.get_coherence_per_topic()
        return self.aggregate_measures(confirmed_measures)
    def compare_models(self, models):
        """Evaluate coherence for each of `models`; see `compare_model_topics`."""
        model_topics = [self._get_topics_from_model(model, self.topn) for model in models]
        return self.compare_model_topics(model_topics)
    def compare_model_topics(self, model_topics):
        """Perform the coherence evaluation for each of the models.
        This first precomputes the probabilities once, then evaluates coherence for
        each model.
        Since we have already precomputed the probabilities, this simply
        involves using the accumulated stats in the `CoherenceModel` to
        perform the evaluations, which should be pretty quick.
        Args:
            model_topics (list): of lists of top-N words for the model trained with that
                number of topics.
        Returns:
            list: of `(avg_topic_coherences, avg_coherence)`.
            These are the coherence values per topic and the overall model coherence.
        """
        orig_topics = self._topics
        orig_topn = self.topn
        try:
            coherences = self._compare_model_topics(model_topics)
        finally:
            # Restore the original topics/topn even if evaluation fails.
            self.topics = orig_topics
            self.topn = orig_topn
        return coherences
    def _compare_model_topics(self, model_topics):
        """Evaluate each model's topics at several values of topn and average the results."""
        coherences = []
        # Grid: topn, topn-5, topn-10, ... down to (but not below) 5.
        last_topn_value = min(self.topn - 1, 4)
        topn_grid = list(range(self.topn, last_topn_value, -5))
        for model_num, topics in enumerate(model_topics):
            self.topics = topics
            # We evaluate at various values of N and average them. This is a more robust,
            # according to: http://people.eng.unimelb.edu.au/tbaldwin/pubs/naacl2016.pdf
            coherence_at_n = {}
            for n in topn_grid:
                self.topn = n
                topic_coherences = self.get_coherence_per_topic()
                # Let's record the coherences for each topic, as well as the aggregated
                # coherence across all of the topics.
                # Some of them may be nan (if all words were OOV), so do mean value imputation.
                filled_coherences = np.array(topic_coherences)
                filled_coherences[np.isnan(filled_coherences)] = np.nanmean(filled_coherences)
                coherence_at_n[n] = (topic_coherences, self.aggregate_measures(filled_coherences))
            topic_coherences, avg_coherences = zip(*coherence_at_n.values())
            avg_topic_coherences = np.vstack(topic_coherences).mean(0)
            model_coherence = np.mean(avg_coherences)
            logging.info("Avg coherence for model %d: %.5f" % (model_num, model_coherence))
            coherences.append((avg_topic_coherences, model_coherence))
        return coherences
| 20,901 | 40.804 | 117 | py |
poincare_glove | poincare_glove-master/gensim/models/__init__.py | """
This package contains algorithms for extracting document representations from their raw
bag-of-word counts.
"""
# bring model classes directly into package namespace, to save some typing
from .coherencemodel import CoherenceModel # noqa:F401
from .hdpmodel import HdpModel # noqa:F401
from .ldamodel import LdaModel # noqa:F401
from .lsimodel import LsiModel # noqa:F401
from .tfidfmodel import TfidfModel # noqa:F401
from .rpmodel import RpModel # noqa:F401
from .logentropy_model import LogEntropyModel # noqa:F401
from .word2vec import Word2Vec # noqa:F401
from .doc2vec import Doc2Vec # noqa:F401
from .keyedvectors import KeyedVectors # noqa:F401
from .ldamulticore import LdaMulticore # noqa:F401
from .phrases import Phrases # noqa:F401
from .normmodel import NormModel # noqa:F401
from .atmodel import AuthorTopicModel # noqa:F401
from .ldaseqmodel import LdaSeqModel # noqa:F401
from .fasttext import FastText # noqa:F401
from .translation_matrix import TranslationMatrix, BackMappingTranslationMatrix # noqa:F401
from . import wrappers # noqa:F401
from . import deprecated # noqa:F401
from gensim import interfaces, utils
class VocabTransform(interfaces.TransformationABC):
    """
    Remap feature ids to new values.
    Given a mapping between old ids and new ids (some old ids may be missing = these
    features are to be discarded), this will wrap a corpus so that iterating over
    `VocabTransform[corpus]` returns the same vectors but with the new ids.
    Old features that have no counterpart in the new ids are discarded. This
    can be used to filter vocabulary of a corpus "online"::
        >>> old2new = {oldid: newid for newid, oldid in enumerate(ids_you_want_to_keep)}
        >>> vt = VocabTransform(old2new)
        >>> for vec_with_new_ids in vt[corpus_with_old_ids]:
        >>>     ...
    """
    def __init__(self, old2new, id2token=None):
        # Mapping from old feature ids to their replacements; absent keys are dropped.
        self.old2new = old2new
        self.id2token = id2token
    def __getitem__(self, bow):
        """
        Return representation with the ids transformed.
        """
        # A whole corpus was passed in => wrap it lazily, transforming per document.
        is_corpus, bow = utils.is_corpus(bow)
        if is_corpus:
            return self._apply(bow)
        mapping = self.old2new
        remapped = [(mapping[old_id], weight) for old_id, weight in bow if old_id in mapping]
        remapped.sort()
        return remapped
| 2,369 | 36.619048 | 102 | py |
poincare_glove | poincare_glove-master/gensim/models/lda_dispatcher.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s SIZE_OF_JOBS_QUEUE
Dispatcher process which orchestrates distributed LDA computations. Run this \
script only once, on any node in your cluster.
Example: python -m gensim.models.lda_dispatcher
"""
from __future__ import with_statement
import argparse
import os
import sys
import logging
import threading
import time
from six import iteritems, itervalues
try:
from Queue import Queue
except ImportError:
from queue import Queue
import Pyro4
from gensim import utils
from gensim.models.lda_worker import LDA_WORKER_PREFIX
logger = logging.getLogger("gensim.models.lda_dispatcher")
# How many jobs (=chunks of N documents) to keep "pre-fetched" in a queue?
# A small number is usually enough, unless iteration over the corpus is very very
# slow (slower than the actual computation of LDA), in which case you can override
# this value from command line. ie. run "python ./lda_dispatcher.py 100"
MAX_JOBS_QUEUE = 10
# timeout for the Queue object put/get blocking methods.
# it should theoretically be infinity, but then keyboard interrupts don't work.
# so this is really just a hack, see http://bugs.python.org/issue1360
HUGE_TIMEOUT = 365 * 24 * 60 * 60 # one year
LDA_DISPATCHER_PREFIX = 'gensim.lda_dispatcher'
class Dispatcher(object):
    """
    Dispatcher object that communicates and coordinates individual workers.
    There should never be more than one dispatcher running at any one time.
    """
    def __init__(self, maxsize=MAX_JOBS_QUEUE, ns_conf=None):
        """
        Note that the constructor does not fully initialize the dispatcher;
        use the `initialize()` function to populate it with workers etc.
        """
        self.maxsize = maxsize
        self.callback = None  # a pyro proxy to this object (unknown at init time, but will be set later)
        self.ns_conf = ns_conf if ns_conf is not None else {}
    @Pyro4.expose
    def initialize(self, **model_params):
        """
        `model_params` are parameters used to initialize individual workers (gets
        handed all the way down to `worker.initialize()`).
        """
        self.jobs = Queue(maxsize=self.maxsize)
        self.lock_update = threading.Lock()
        self._jobsdone = 0
        self._jobsreceived = 0
        # locate all available workers and store their proxies, for subsequent RMI calls
        self.workers = {}
        with utils.getNS(**self.ns_conf) as ns:
            # proxy back to this dispatcher, looked up on the Pyro name server
            self.callback = Pyro4.Proxy(ns.list(prefix=LDA_DISPATCHER_PREFIX)[LDA_DISPATCHER_PREFIX])
            for name, uri in iteritems(ns.list(prefix=LDA_WORKER_PREFIX)):
                try:
                    worker = Pyro4.Proxy(uri)
                    workerid = len(self.workers)
                    # make time consuming methods work asynchronously
                    logger.info("registering worker #%i at %s", workerid, uri)
                    worker.initialize(workerid, dispatcher=self.callback, **model_params)
                    self.workers[workerid] = worker
                except Pyro4.errors.PyroError:
                    # dead/unreachable worker: deregister it so it is not retried
                    logger.warning("unresponsive worker at %s, deleting it from the name server", uri)
                    ns.remove(name)
        if not self.workers:
            raise RuntimeError('no workers found; run some lda_worker scripts on your machines first!')
    @Pyro4.expose
    def getworkers(self):
        """
        Return pyro URIs of all registered workers.
        """
        return [worker._pyroUri for worker in itervalues(self.workers)]
    @Pyro4.expose
    def getjob(self, worker_id):
        """Hand the next queued job to worker `worker_id`.
        NOTE(review): `timeout=1` means this raises `queue.Empty` if no job
        arrives within one second — callers are expected to handle/retry that.
        """
        logger.info("worker #%i requesting a new job", worker_id)
        job = self.jobs.get(block=True, timeout=1)
        logger.info("worker #%i got a new job (%i left)", worker_id, self.jobs.qsize())
        return job
    @Pyro4.expose
    def putjob(self, job):
        """Add a new job to the queue; blocks while the queue is full."""
        self._jobsreceived += 1
        self.jobs.put(job, block=True, timeout=HUGE_TIMEOUT)
        logger.info("added a new job (len(queue)=%i items)", self.jobs.qsize())
    @Pyro4.expose
    def getstate(self):
        """
        Merge states from across all workers and return the result.
        """
        logger.info("end of input, assigning all remaining jobs")
        logger.debug("jobs done: %s, jobs received: %s", self._jobsdone, self._jobsreceived)
        i = 0
        count = 10
        # Busy-wait until all dispatched jobs are reported done; every `count`
        # iterations (~5 seconds) ping each worker so a dead one surfaces as an error.
        while self._jobsdone < self._jobsreceived:
            time.sleep(0.5)  # check every half a second
            i += 1
            if i > count:
                i = 0
                for workerid, worker in iteritems(self.workers):
                    logger.info("checking aliveness for worker %s", workerid)
                    worker.ping()
        logger.info("merging states from %i workers", len(self.workers))
        workers = list(self.workers.values())
        result = workers[0].getstate()
        for worker in workers[1:]:
            result.merge(worker.getstate())
        logger.info("sending out merged state")
        return result
    @Pyro4.expose
    def reset(self, state):
        """
        Initialize all workers for a new EM iteration.
        """
        for workerid, worker in iteritems(self.workers):
            logger.info("resetting worker %s", workerid)
            worker.reset(state)
            worker.requestjob()
        self._jobsdone = 0
        self._jobsreceived = 0
    @Pyro4.expose
    @Pyro4.oneway
    @utils.synchronous('lock_update')
    def jobdone(self, workerid):
        """
        A worker has finished its job. Log this event and then asynchronously
        transfer control back to the worker.
        In this way, control flow basically oscillates between `dispatcher.jobdone()`
        and `worker.requestjob()`.
        """
        self._jobsdone += 1
        logger.info("worker #%s finished job #%i", workerid, self._jobsdone)
        self.workers[workerid].requestjob()  # tell the worker to ask for another job, asynchronously (one-way)
    def jobsdone(self):
        """Wrap self._jobsdone, needed for remote access through Pyro proxies"""
        return self._jobsdone
    @Pyro4.oneway
    def exit(self):
        """
        Terminate all registered workers and then the dispatcher.
        """
        for workerid, worker in iteritems(self.workers):
            logger.info("terminating worker %s", workerid)
            worker.exit()
        logger.info("terminating dispatcher")
        os._exit(0)  # exit the whole process (not just this thread ala sys.exit())
# endclass Dispatcher
def main():
    """Command-line entry point: parse options, set up logging, and run the
    LDA dispatcher as a Pyro daemon registered on the name server."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--maxsize",
        help="How many jobs (=chunks of N documents) to keep 'pre-fetched' in a queue (default: %(default)s)",
        type=int, default=MAX_JOBS_QUEUE
    )
    parser.add_argument("--host", help="Nameserver hostname (default: %(default)s)", default=None)
    parser.add_argument("--port", help="Nameserver port (default: %(default)s)", default=None, type=int)
    parser.add_argument("--no-broadcast", help="Disable broadcast (default: %(default)s)",
                        action='store_const', default=True, const=False)
    parser.add_argument("--hmac", help="Nameserver hmac key (default: %(default)s)", default=None)
    parser.add_argument(
        '-v', '--verbose',
        help='Verbose flag',
        action='store_const', dest="loglevel", const=logging.INFO, default=logging.WARNING
    )
    opts = parser.parse_args()
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=opts.loglevel)
    logger.info("running %s", " ".join(sys.argv))
    # Name-server connection settings, shared by the dispatcher and the daemon.
    nameserver_config = dict(
        broadcast=opts.no_broadcast,
        host=opts.host,
        port=opts.port,
        hmac_key=opts.hmac,
    )
    dispatcher = Dispatcher(maxsize=opts.maxsize, ns_conf=nameserver_config)
    utils.pyro_daemon(LDA_DISPATCHER_PREFIX, dispatcher, ns_conf=nameserver_config)
    logger.info("finished running %s", " ".join(sys.argv))
if __name__ == '__main__':
    main()
| 8,170 | 35.641256 | 112 | py |
poincare_glove | poincare_glove-master/gensim/models/base_any2vec.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Shiva Manne <manneshiva@gmail.com>
# Copyright (C) 2018 RaRe Technologies s.r.o.
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Contains base classes required for implementing any2vec algorithms."""
from gensim import utils
from gensim.models.keyedvectors import VanillaWordEmbeddingsKeyedVectors
import logging
from timeit import default_timer
import threading
from six.moves import xrange
from six import itervalues
from gensim import matutils
from numpy import float32 as REAL, ones, random, dtype, zeros, array, mean, std
from numpy.linalg import norm
from types import GeneratorType
from gensim.utils import deprecated
import sys
import warnings
try:
from queue import Queue
except ImportError:
from Queue import Queue
logger = logging.getLogger(__name__)
class BaseAny2VecModel(utils.SaveLoad):
"""Base class for training, using and evaluating any2vec model.
Contains implementation for multi-threaded training.
"""
def __init__(self, workers=3, vector_size=100, vector_dtype=REAL, epochs=5, callbacks=(), batch_words=10000):
"""Initialize model parameters.
A subclass should initialize the following attributes:
- self.kv (instance of concrete implementation of `BaseKeyedVectors` interface)
- self.vocabulary (instance of concrete implementation of `BaseVocabBuilder` abstract class)
- self.trainables (instance of concrete implementation of `BaseTrainables` abstract class)
"""
self.vector_size = int(vector_size)
self.vector_dtype = vector_dtype
self.workers = int(workers)
self.epochs = epochs
self.train_count = 0
self.total_train_time = 0
self.batch_words = batch_words
self.model_trimmed_post_training = False
self.callbacks = callbacks
def _get_job_params(self, cur_epoch):
"""Get job parameters required for each batch."""
raise NotImplementedError()
def _set_train_params(self, **kwargs):
"""Set model parameters required for training"""
raise NotImplementedError()
def _update_job_params(self, job_params, epoch_progress, cur_epoch):
"""Get updated job parameters based on the epoch_progress and cur_epoch."""
raise NotImplementedError()
def _get_thread_working_mem(self):
"""Get private working memory per thread."""
raise NotImplementedError()
def _raw_word_count(self, job):
"""Get the number of words in a given job."""
raise NotImplementedError()
def _clear_post_train(self):
"""Resets certain properties of the model post training. eg. `keyedvectors.vectors_norm`."""
raise NotImplementedError()
def _do_train_job(self, data_iterable, job_parameters, thread_private_mem, thread_id=-1):
"""Train a single batch. Return 2-tuple `(effective word count, total word count)`."""
raise NotImplementedError()
def _check_training_sanity(self, epochs=None, total_examples=None, total_words=None, **kwargs):
"""Check that the training parameters provided make sense. e.g. raise error if `epochs` not provided."""
raise NotImplementedError()
def _worker_loop(self, job_queue, progress_queue, thread_id):
"""Train the model, lifting lists of data from the job_queue."""
thread_private_mem = self._get_thread_working_mem()
jobs_processed = 0
while True:
job = job_queue.get()
if job is None:
progress_queue.put(None)
break # no more jobs => quit this worker
data_iterable, job_parameters = job
for callback in self.callbacks:
callback.on_batch_begin(self)
tally, raw_tally = self._do_train_job(data_iterable, job_parameters, thread_private_mem, thread_id)
for callback in self.callbacks:
callback.on_batch_end(self)
progress_queue.put((len(data_iterable), tally, raw_tally)) # report back progress
jobs_processed += 1
logger.debug("worker exiting, processed %i jobs", jobs_processed)
def _job_producer(self, data_iterator, job_queue, cur_epoch=0, total_examples=None, total_words=None):
"""Fill jobs queue using the input `data_iterator`."""
job_batch, batch_size = [], 0
pushed_words, pushed_examples = 0, 0
next_job_params = self._get_job_params(cur_epoch)
job_no = 0
for data_idx, data in enumerate(data_iterator):
data_length = self._raw_word_count([data])
# can we fit this sentence into the existing job batch?
if batch_size + data_length <= self.batch_words:
# yes => add it to the current job
job_batch.append(data)
batch_size += data_length
else:
job_no += 1
job_queue.put((job_batch, next_job_params))
# update the learning rate for the next job
if total_examples:
# examples-based decay
pushed_examples += len(job_batch)
epoch_progress = 1.0 * pushed_examples / total_examples
else:
# words-based decay
pushed_words += self._raw_word_count(job_batch)
epoch_progress = 1.0 * pushed_words / total_words
next_job_params = self._update_job_params(next_job_params, epoch_progress, cur_epoch)
# add the sentence that didn't fit as the first item of a new job
job_batch, batch_size = [data], data_length
# add the last job too (may be significantly smaller than batch_words)
if job_batch:
job_no += 1
job_queue.put((job_batch, next_job_params))
if job_no == 0 and self.train_count == 0:
logger.warning(
"train() called with an empty iterator (if not intended, "
"be sure to provide a corpus that offers restartable iteration = an iterable)."
)
# give the workers heads up that they can finish -- no more work!
for _ in xrange(self.workers):
job_queue.put(None)
logger.debug("job loop exiting, total %i jobs", job_no)
def _log_progress(self, job_queue, progress_queue, cur_epoch, example_count, total_examples,
raw_word_count, total_words, trained_word_count, elapsed):
raise NotImplementedError()
def _log_epoch_end(self, cur_epoch, example_count, total_examples, raw_word_count, total_words,
trained_word_count, elapsed):
raise NotImplementedError()
def _log_train_end(self, raw_word_count, trained_word_count, total_elapsed, job_tally):
raise NotImplementedError()
def _log_epoch_progress(self, progress_queue, job_queue, cur_epoch=0, total_examples=None, total_words=None,
report_delay=1.0):
example_count, trained_word_count, raw_word_count = 0, 0, 0
start, next_report = default_timer() - 0.00001, 1.0
job_tally = 0
ckpt_delay, next_ckpt = 1.0, 1.0
eval_delay, next_eval = 10.0, 1.0
unfinished_worker_count = self.workers
while unfinished_worker_count > 0:
report = progress_queue.get() # blocks if workers too slow
if report is None: # a thread reporting that it finished
unfinished_worker_count -= 1
logger.info("worker thread finished; awaiting finish of %i more threads", unfinished_worker_count)
continue
examples, trained_words, raw_words = report
job_tally += 1
# update progress stats
example_count += examples
trained_word_count += trained_words # only words in vocab & sampled
raw_word_count += raw_words
# log progress once every report_delay seconds
elapsed = default_timer() - start
if elapsed >= next_report:
self._log_progress(
job_queue, progress_queue, cur_epoch, example_count, total_examples,
raw_word_count, total_words, trained_word_count, elapsed)
next_report = elapsed + report_delay
if hasattr(self, "word_checkpoints") and elapsed >= next_ckpt:
self.word_checkpoints.add_checkpoints()
next_ckpt = elapsed + ckpt_delay
# run word similarity tasks and analogy task
if elapsed >= next_eval:
_, rw_spearman_corr, _ = self.wv.evaluate_word_pairs('../data/Stanford Rare Word/rw_validation.txt')
_, wordsim_spearman, _ = self.wv.evaluate_word_pairs('../msc_tifreaa/gensim/test/test_data/wordsim353_validation.tsv')
_, simlex_spearman, _ = self.wv.evaluate_word_pairs('../msc_tifreaa/gensim/test/test_data/simlex999_validation.txt')
# Compute embedding norms for top 10 most frequent words and least frequent words (words outside top 1000)
num_words = len(self.wv.vocab)
target_norms = array([norm(self.wv.word_vec(w)) for w in self.wv.index2entity[:num_words]])
context_norms = array([norm(self.trainables.syn1neg[idx]) for idx in range(num_words)])
top10_avg_norm_target, top10_avg_norm_context = mean(target_norms[:10]), mean(context_norms[:10])
not_top1000_avg_norm_target, not_top1000_avg_norm_context = mean(target_norms[1000:]), mean(context_norms[1000:])
# TODO: commented out for now, it's a bit slow I think
# # XXX: we currently compute analogy using 3COSADD because it is a lot faster than using Moebius
# # parallel transport
# analogy_eval = self.wv.accuracy(
# '../data/MSR-analogy-test-set/MSR_subset_3000',
# restrict_vocab=200000,
# most_similar=VanillaWordEmbeddingsKeyedVectors.batch_most_similar_analogy)
print(
# "EPOCH {0:d} - EVAL: at {1:.2f}% examples - rareword {2:.4f}, wordsim {3:.4f}, simlex {4:.4f}, msr_subset {5:.4f} (3COSADD, out of {6:d} instances)".format(
"EPOCH {0:d} - EVAL: at {1:.2f}% examples - rareword {2:.4f}, wordsim {3:.4f}, simlex {4:.4f}, top10_avg_norm {5:.4f} / {6:.4f}, last_avg_norm {7:.4f} / {8:.4f}, norm_stddev {9:.4f}/{10:.4f}".format(
cur_epoch+1, 100.0 * example_count / total_examples,
rw_spearman_corr[0], wordsim_spearman[0], simlex_spearman[0],
top10_avg_norm_target, top10_avg_norm_context,
not_top1000_avg_norm_target, not_top1000_avg_norm_context,
std(target_norms), std(context_norms)))
# len(analogy_eval[-1]['correct']) / 3000,
# len(analogy_eval[-1]['correct']) + len(analogy_eval[-1]['incorrect'])))
sys.stdout.flush()
next_eval = elapsed + eval_delay
# all done; report the final stats
elapsed = default_timer() - start
self._log_epoch_end(
cur_epoch, example_count, total_examples, raw_word_count, total_words,
trained_word_count, elapsed)
self.total_train_time += elapsed
return trained_word_count, raw_word_count, job_tally
def _train_epoch(self, data_iterable, cur_epoch=0, total_examples=None,
total_words=None, queue_factor=2, report_delay=1.0):
"""Train one epoch."""
job_queue = Queue(maxsize=queue_factor * self.workers)
progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)
workers = [
threading.Thread(
target=self._worker_loop,
args=(job_queue, progress_queue, i))
for i in xrange(self.workers)
]
workers.append(threading.Thread(
target=self._job_producer,
args=(data_iterable, job_queue),
kwargs={'cur_epoch': cur_epoch, 'total_examples': total_examples, 'total_words': total_words}))
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
trained_word_count, raw_word_count, job_tally = self._log_epoch_progress(
progress_queue, job_queue, cur_epoch=cur_epoch, total_examples=total_examples, total_words=total_words,
report_delay=report_delay)
return trained_word_count, raw_word_count, job_tally
def train(self, data_iterable, epochs=None, total_examples=None,
          total_words=None, queue_factor=2, report_delay=1.0, callbacks=(), **kwargs):
    """Drive multi-epoch, multi-worker training over `data_iterable`.

    Validates the training setup, fires the configured callbacks around the
    whole run and around each epoch, and accumulates per-epoch statistics.

    Returns
    -------
    (int, int)
        Total effective word count and total raw word count across all epochs.
    """
    self._set_train_params(**kwargs)
    if callbacks:
        self.callbacks = callbacks
    self.epochs = epochs
    self._check_training_sanity(
        epochs=epochs,
        total_examples=total_examples,
        total_words=total_words, **kwargs)

    for callback in self.callbacks:
        callback.on_train_begin(self)

    # Small offset keeps elapsed time strictly positive for rate computations.
    start = default_timer() - 0.00001
    trained_word_count = 0
    raw_word_count = 0
    job_tally = 0

    for cur_epoch in range(self.epochs):
        for callback in self.callbacks:
            callback.on_epoch_begin(self)

        epoch_trained, epoch_raw, epoch_jobs = self._train_epoch(
            data_iterable, cur_epoch=cur_epoch, total_examples=total_examples,
            total_words=total_words, queue_factor=queue_factor, report_delay=report_delay)
        trained_word_count += epoch_trained
        raw_word_count += epoch_raw
        job_tally += epoch_jobs

        for callback in self.callbacks:
            callback.on_epoch_end(self)

    # Log overall time
    total_elapsed = default_timer() - start
    self._log_train_end(raw_word_count, trained_word_count, total_elapsed, job_tally)

    self.train_count += 1  # number of times train() has been called
    self._clear_post_train()

    for callback in self.callbacks:
        callback.on_train_end(self)
    return trained_word_count, raw_word_count
@classmethod
def load(cls, fname_or_handle, **kwargs):
    """Load a previously saved model from `fname_or_handle`; delegates to the parent loader."""
    return super(BaseAny2VecModel, cls).load(fname_or_handle, **kwargs)
def save(self, fname_or_handle, **kwargs):
    """Persist the model to `fname_or_handle`; delegates to the parent saver."""
    super(BaseAny2VecModel, self).save(fname_or_handle, **kwargs)
class BaseWordEmbeddingsModel(BaseAny2VecModel):
    """
    Base class containing common methods for training, using & evaluating word embeddings learning models.
    For example - `Word2Vec`, `FastText`, etc.
    """

    def _clear_post_train(self):
        """Subclass hook: invalidate caches that become stale after training."""
        raise NotImplementedError()

    def _do_train_job(self, data_iterable, job_parameters, thread_private_mem):
        """Subclass hook: train on one batch of data with the given parameters and scratch memory."""
        raise NotImplementedError()

    def _set_train_params(self, **kwargs):
        """Subclass hook: apply training-time keyword overrides before a train() run."""
        raise NotImplementedError()

    def __init__(self, sentences=None, workers=3, vector_size=100, vector_dtype=REAL, epochs=5, callbacks=(), batch_words=10000,
                 trim_rule=None, sg=0, alpha=0.025, window=5, seed=1, hs=0, negative=5, normalized=False,
                 euclid=0, poincare=0, torus=0, burnin_epochs=0, cbow_mean=1, min_alpha=0.0001, compute_loss=False,
                 fast_version=0, with_bias=False, **kwargs):
        """Store hyperparameters; if `sentences` is given, also build the vocab and train immediately.

        `euclid`/`poincare`/`torus` select the embedding geometry; `burnin_epochs` > 0
        runs an initial low-learning-rate training pass before the main one.
        """
        self.sg = int(sg)
        if vector_size % 4 != 0:
            logger.warning("consider setting layer size to a multiple of 4 for greater performance")
        # `alpha`/`min_alpha` decay during training; `_alpha`/`_min_alpha` keep the originals.
        self.alpha, self._alpha = float(alpha), float(alpha)
        self.window = int(window)
        self.random = random.RandomState(seed)
        self.min_alpha, self._min_alpha = float(min_alpha), float(min_alpha)
        self.hs = int(hs)
        self.negative = int(negative)
        self.normalized = bool(normalized)
        self.euclid = int(euclid)
        self.poincare = int(poincare)
        self.torus = int(torus)
        self.cbow_mean = int(cbow_mean)
        self.compute_loss = bool(compute_loss)
        self.batch_training_loss = 0
        self.running_training_loss = 0
        self.min_alpha_yet_reached = float(alpha)
        self.with_bias = with_bias
        self.corpus_count = 0
        self._epochs = epochs
        super(BaseWordEmbeddingsModel, self).__init__(
            workers=workers, vector_size=vector_size, vector_dtype=vector_dtype, epochs=epochs, callbacks=callbacks,
            batch_words=batch_words)
        if fast_version < 0:
            warnings.warn(
                "C extension not loaded, training will be slow. "
                "Install a C compiler and reinstall gensim for fast training."
            )
        self.neg_labels = []
        if self.negative > 0:
            # precompute negative labels optimization for pure-python training
            self.neg_labels = zeros(self.negative + 1)
            self.neg_labels[0] = 1.
        if sentences is not None:
            if isinstance(sentences, GeneratorType):
                raise TypeError("You can't pass a generator as the sentences argument. Try an iterator.")
            self.build_vocab(sentences, trim_rule=trim_rule)
            # Perform burn-in step before training.
            if burnin_epochs > 0:
                # Burn-in uses 1% of the configured learning rate, held constant.
                self.train(
                    sentences, total_examples=self.corpus_count, epochs=burnin_epochs, start_alpha=self._alpha / 100,
                    end_alpha=self._alpha / 100, compute_loss=compute_loss)
            self.train(
                sentences, total_examples=self.corpus_count, epochs=self._epochs, start_alpha=self._alpha,
                end_alpha=self._min_alpha, compute_loss=compute_loss)
        else:
            if trim_rule is not None:
                logger.warning(
                    "The rule, if given, is only used to prune vocabulary during build_vocab() "
                    "and is not stored as part of the model. Model initialized without sentences. "
                    "trim_rule provided, if any, will be ignored.")

    # for backward compatibility (aliases pointing to corresponding variables in trainables, vocabulary)
    @property
    @deprecated("Attribute will be removed in 4.0.0, use self.epochs instead")
    def iter(self):
        return self.epochs

    @iter.setter
    @deprecated("Attribute will be removed in 4.0.0, use self.epochs instead")
    def iter(self, value):
        self.epochs = value

    @property
    @deprecated("Attribute will be removed in 4.0.0, use self.trainables.syn1 instead")
    def syn1(self):
        return self.trainables.syn1

    @syn1.setter
    @deprecated("Attribute will be removed in 4.0.0, use self.trainables.syn1 instead")
    def syn1(self, value):
        self.trainables.syn1 = value

    @syn1.deleter
    @deprecated("Attribute will be removed in 4.0.0, use self.trainables.syn1 instead")
    def syn1(self):
        del self.trainables.syn1

    @property
    @deprecated("Attribute will be removed in 4.0.0, use self.trainables.syn1neg instead")
    def syn1neg(self):
        return self.trainables.syn1neg

    @syn1neg.setter
    @deprecated("Attribute will be removed in 4.0.0, use self.trainables.syn1neg instead")
    def syn1neg(self, value):
        self.trainables.syn1neg = value

    @syn1neg.deleter
    @deprecated("Attribute will be removed in 4.0.0, use self.trainables.syn1neg instead")
    def syn1neg(self):
        del self.trainables.syn1neg

    @property
    @deprecated("Attribute will be removed in 4.0.0, use self.trainables.vectors_lockf instead")
    def syn0_lockf(self):
        return self.trainables.vectors_lockf

    @syn0_lockf.setter
    @deprecated("Attribute will be removed in 4.0.0, use self.trainables.vectors_lockf instead")
    def syn0_lockf(self, value):
        self.trainables.vectors_lockf = value

    @syn0_lockf.deleter
    @deprecated("Attribute will be removed in 4.0.0, use self.trainables.vectors_lockf instead")
    def syn0_lockf(self):
        del self.trainables.vectors_lockf

    @property
    @deprecated("Attribute will be removed in 4.0.0, use self.trainables.layer1_size instead")
    def layer1_size(self):
        return self.trainables.layer1_size

    @layer1_size.setter
    @deprecated("Attribute will be removed in 4.0.0, use self.trainables.layer1_size instead")
    def layer1_size(self, value):
        self.trainables.layer1_size = value

    @property
    @deprecated("Attribute will be removed in 4.0.0, use self.trainables.hashfxn instead")
    def hashfxn(self):
        return self.trainables.hashfxn

    @hashfxn.setter
    @deprecated("Attribute will be removed in 4.0.0, use self.trainables.hashfxn instead")
    def hashfxn(self, value):
        self.trainables.hashfxn = value

    @property
    @deprecated("Attribute will be removed in 4.0.0, use self.vocabulary.sample instead")
    def sample(self):
        return self.vocabulary.sample

    @sample.setter
    @deprecated("Attribute will be removed in 4.0.0, use self.vocabulary.sample instead")
    def sample(self, value):
        self.vocabulary.sample = value

    @property
    @deprecated("Attribute will be removed in 4.0.0, use self.vocabulary.min_count instead")
    def min_count(self):
        return self.vocabulary.min_count

    @min_count.setter
    @deprecated("Attribute will be removed in 4.0.0, use self.vocabulary.min_count instead")
    def min_count(self, value):
        self.vocabulary.min_count = value

    @property
    @deprecated("Attribute will be removed in 4.0.0, use self.vocabulary.cum_table instead")
    def cum_table(self):
        return self.vocabulary.cum_table

    @cum_table.setter
    @deprecated("Attribute will be removed in 4.0.0, use self.vocabulary.cum_table instead")
    def cum_table(self, value):
        self.vocabulary.cum_table = value

    @cum_table.deleter
    @deprecated("Attribute will be removed in 4.0.0, use self.vocabulary.cum_table instead")
    def cum_table(self):
        del self.vocabulary.cum_table

    def __str__(self):
        """Human-readable summary: class name, vocab size, vector size and current alpha."""
        return "%s(vocab=%s, size=%s, alpha=%s)" % (
            self.__class__.__name__, len(self.wv.index2word), self.vector_size, self.alpha
        )

    def build_vocab(self, sentences, update=False, progress_per=10000, keep_raw_vocab=False, trim_rule=None, **kwargs):
        """Build vocabulary from a sequence of sentences (can be a once-only generator stream).
        Each sentence is a iterable of iterables (can simply be a list of unicode strings too).
        Parameters
        ----------
        sentences : iterable of iterables
            The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
            consider an iterable that streams the sentences directly from disk/network.
            See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
            or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
        update : bool
            If true, the new words in `sentences` will be added to model's vocab.
        progress_per : int
            Indicates how many words to process before showing/updating the progress.
        """
        total_words, corpus_count = self.vocabulary.scan_vocab(
            sentences, progress_per=progress_per, trim_rule=trim_rule)
        self.corpus_count = corpus_count
        report_values = self.vocabulary.prepare_vocab(
            self.hs, self.negative, self.wv, update=update, keep_raw_vocab=keep_raw_vocab,
            trim_rule=trim_rule, **kwargs)
        # estimate_memory logs the projected memory footprint as a side effect.
        report_values['memory'] = self.estimate_memory(vocab_size=report_values['num_retained_words'])
        self.trainables.prepare_weights(self.hs, self.negative, self.wv, update=update, vocabulary=self.vocabulary,
                                        with_bias=self.with_bias, torus=self.torus)

    def build_vocab_from_freq(self, word_freq, keep_raw_vocab=False, corpus_count=None, trim_rule=None, update=False):
        """Build vocabulary from a dictionary of word frequencies.
        Build model vocabulary from a passed dictionary that contains (word,word count).
        Words must be of type unicode strings.
        Parameters
        ----------
        word_freq : dict
            Word,Word_Count dictionary.
        keep_raw_vocab : bool
            If not true, delete the raw vocabulary after the scaling is done and free up RAM.
        corpus_count : int
            Even if no corpus is provided, this argument can set corpus_count explicitly.
        trim_rule : function
            Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
            be trimmed away, or handled using the default (discard if word count < min_count).
            Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
            or a callable that accepts parameters (word, count, min_count) and returns either
            :attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
            Note: The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as part
            of the model.
        update : bool
            If true, the new provided words in `word_freq` dict will be added to model's vocab.
        Examples
        --------
        >>> from gensim.models import Word2Vec
        >>>
        >>> model= Word2Vec()
        >>> model.build_vocab_from_freq({"Word1": 15, "Word2": 20})
        """
        logger.info("Processing provided word frequencies")
        # Instead of scanning text, this will assign provided word frequencies dictionary(word_freq)
        # to be directly the raw vocab
        raw_vocab = word_freq
        logger.info(
            "collected %i different raw word, with total frequency of %i",
            len(raw_vocab), sum(itervalues(raw_vocab))
        )
        # Since no sentences are provided, this is to control the corpus_count
        self.corpus_count = corpus_count or 0
        self.vocabulary.raw_vocab = raw_vocab
        # trim by min_count & precalculate downsampling
        report_values = self.vocabulary.prepare_vocab(
            self.hs, self.negative, self.wv, keep_raw_vocab=keep_raw_vocab,
            trim_rule=trim_rule, update=update)
        report_values['memory'] = self.estimate_memory(vocab_size=report_values['num_retained_words'])
        self.trainables.prepare_weights(
            self.hs, self.negative, self.wv, update=update, vocabulary=self.vocabulary,
            with_bias=self.with_bias, torus=self.torus)  # build tables & arrays

    def estimate_memory(self, vocab_size=None, report=None):
        """Estimate required memory for a model using current settings and provided vocabulary size."""
        vocab_size = vocab_size or len(self.wv.vocab)
        report = report or {}
        # Rough per-word bookkeeping cost; HS needs extra space for the Huffman tree.
        report['vocab'] = vocab_size * (700 if self.hs else 500)
        report['vectors'] = vocab_size * self.vector_size * dtype(self.vector_dtype).itemsize
        if self.hs:
            report['syn1'] = vocab_size * self.trainables.layer1_size * dtype(self.vector_dtype).itemsize
        if self.negative:
            report['syn1neg'] = vocab_size * self.trainables.layer1_size * dtype(self.vector_dtype).itemsize
        if self.with_bias:
            report['biases'] = vocab_size * 2 * dtype(self.vector_dtype).itemsize
        report['total'] = sum(report.values())
        logger.info(
            "estimated required memory for %i words and %i dimensions: %i bytes",
            vocab_size, self.vector_size, report['total']
        )
        return report

    def train(self, sentences, total_examples=None, total_words=None,
              epochs=None, start_alpha=None, end_alpha=None, word_count=0,
              queue_factor=2, report_delay=1.0, compute_loss=False, callbacks=()):
        """Train the model on `sentences`, resetting loss accumulators and alpha bounds first."""
        # NOTE(review): `start_alpha or self.alpha` treats an explicit 0 as "not given".
        self.alpha = start_alpha or self.alpha
        self.min_alpha = end_alpha or self.min_alpha
        self.compute_loss = compute_loss
        self.batch_training_loss = 0.0
        self.running_training_loss = 0.0
        return super(BaseWordEmbeddingsModel, self).train(
            sentences, total_examples=total_examples, total_words=total_words,
            epochs=epochs, start_alpha=start_alpha, end_alpha=end_alpha, word_count=word_count,
            queue_factor=queue_factor, report_delay=report_delay, compute_loss=compute_loss, callbacks=callbacks)

    def get_attr(self, attr_name, default=0):
        """Return attribute `attr_name` if present on the model, else `default`."""
        return getattr(self, attr_name) if hasattr(self, attr_name) else default

    def _get_job_params(self, cur_epoch):
        """Get the parameter required for each batch."""
        # Linear decay of the learning rate across epochs.
        alpha = self.alpha - ((self.alpha - self.min_alpha) * float(cur_epoch) / self.epochs)
        return alpha

    def _update_job_params(self, job_params, epoch_progress, cur_epoch):
        """Advance the linearly decayed alpha according to overall training progress."""
        start_alpha = self.alpha
        end_alpha = self.min_alpha
        progress = (cur_epoch + epoch_progress) / self.epochs
        next_alpha = start_alpha - (start_alpha - end_alpha) * progress
        next_alpha = max(end_alpha, next_alpha)  # never decay below the floor
        self.min_alpha_yet_reached = next_alpha
        return next_alpha

    def _get_thread_working_mem(self):
        """Allocate per-thread scratch buffers sized to one hidden layer."""
        work = matutils.zeros_aligned(self.trainables.layer1_size, dtype=self.vector_dtype)  # per-thread private work memory
        neu1 = matutils.zeros_aligned(self.trainables.layer1_size, dtype=self.vector_dtype)
        return work, neu1

    def _raw_word_count(self, job):
        """Get the number of words in a given job."""
        return sum(len(sentence) for sentence in job)

    def _check_training_sanity(self, epochs=None, total_examples=None, total_words=None, **kwargs):
        """Validate that the model is ready to train; raise on missing vocab/vectors/params."""
        if self.alpha > self.min_alpha_yet_reached:
            logger.warning(
                "Effective 'alpha' higher than previous training cycles; previous alpha: {}, current alpha {}".format(
                    self.min_alpha_yet_reached, self.alpha
                ))
        if self.model_trimmed_post_training:
            raise RuntimeError("Parameters for training were discarded using model_trimmed_post_training method")
        if not self.wv.vocab:  # should be set by `build_vocab`
            raise RuntimeError("you must first build vocabulary before training the model")
        if not len(self.wv.vectors):
            raise RuntimeError("you must initialize vectors before training the model")
        if not hasattr(self, 'corpus_count'):
            raise ValueError(
                "The number of examples in the training corpus is missing. "
                "Please make sure this is set inside `build_vocab` function."
                "Call the `build_vocab` function before calling `train`."
            )
        if total_words is None and total_examples is None:
            raise ValueError(
                "You must specify either total_examples or total_words, for proper job parameters updation"
                "and progress calculations. "
                "The usual value is total_examples=model.corpus_count."
            )
        if epochs is None:
            raise ValueError("You must specify an explict epochs count. The usual value is epochs=model.epochs.")
        logger.info(
            "training model with %i workers on %i vocabulary and %i features, "
            "using sg=%s hs=%s sample=%s negative=%s euclid=%s poincare=%s torus=%s window=%s",
            self.workers, len(self.wv.vocab), self.trainables.layer1_size, self.sg,
            self.hs, self.vocabulary.sample, self.negative, self.get_attr('euclid', 0),
            self.get_attr('poincare', 0), self.get_attr('torus', 0), self.window
        )

    @classmethod
    def load(cls, *args, **kwargs):
        """Load a saved model, backfilling attributes missing from older model files."""
        model = super(BaseWordEmbeddingsModel, cls).load(*args, **kwargs)
        if model.negative and hasattr(model.wv, 'index2word'):
            model.vocabulary.make_cum_table(model.wv)  # rebuild cum_table from vocabulary
        if not hasattr(model, 'corpus_count'):
            model.corpus_count = None
        if not hasattr(model.trainables, 'vectors_lockf') and hasattr(model.wv, 'vectors'):
            model.trainables.vectors_lockf = ones(len(model.wv.vectors), dtype=model.wv.vectors.dtype)
        if not hasattr(model, 'random'):
            model.random = random.RandomState(model.trainables.seed)
        if not hasattr(model, 'train_count'):
            model.train_count = 0
            model.total_train_time = 0
        return model

    def _log_progress(self, job_queue, progress_queue, cur_epoch, example_count, total_examples,
                      raw_word_count, total_words, trained_word_count, elapsed):
        """Log within-epoch progress, as a percentage of examples or of words."""
        if total_examples:
            # examples-based progress %
            logger.info(
                "EPOCH %i - PROGRESS: at %.2f%% examples, %.0f words/s, in_qsize %i, out_qsize %i",
                cur_epoch + 1, 100.0 * example_count / total_examples, trained_word_count / elapsed,
                utils.qsize(job_queue), utils.qsize(progress_queue)
            )
        else:
            # words-based progress %
            logger.info(
                "EPOCH %i - PROGRESS: at %.2f%% words, %.0f words/s, in_qsize %i, out_qsize %i",
                cur_epoch + 1, 100.0 * raw_word_count / total_words, trained_word_count / elapsed,
                utils.qsize(job_queue), utils.qsize(progress_queue)
            )

    def _log_epoch_end(self, cur_epoch, example_count, total_examples, raw_word_count, total_words,
                       trained_word_count, elapsed):
        """Log epoch totals and warn if the corpus size differs from what was expected."""
        logger.info(
            "EPOCH - %i : training on %i raw words (%i effective words) took %.1fs, %.0f effective words/s",
            cur_epoch + 1, raw_word_count, trained_word_count, elapsed, trained_word_count / elapsed
        )
        # check that the input corpus hasn't changed during iteration
        if total_examples and total_examples != example_count:
            logger.warning(
                "EPOCH - %i : supplied example count (%i) did not equal expected count (%i)", cur_epoch + 1,
                example_count, total_examples
            )
        if total_words and total_words != raw_word_count:
            logger.warning(
                "EPOCH - %i : supplied raw word count (%i) did not equal expected count (%i)", cur_epoch + 1,
                raw_word_count, total_words
            )

    def _log_train_end(self, raw_word_count, trained_word_count, total_elapsed, job_tally):
        """Log whole-run totals; warn when jobs are too coarse for smooth alpha decay."""
        logger.info(
            "training on a %i raw words (%i effective words) took %.1fs, %.0f effective words/s",
            raw_word_count, trained_word_count, total_elapsed, trained_word_count / total_elapsed
        )
        if job_tally < 10 * self.workers:
            logger.warning(
                "under 10 jobs per worker: consider setting a smaller `batch_words' for smoother alpha decay"
            )

    # for backward compatibility
    @deprecated("Method will be removed in 4.0.0, use self.wv.most_similar() instead")
    def most_similar(self, positive=None, negative=None, topn=10, restrict_vocab=None, indexer=None):
        """
        Deprecated. Use self.wv.most_similar() instead.
        Refer to the documentation for `gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.most_similar`
        """
        return self.wv.most_similar(positive, negative, topn, restrict_vocab, indexer)

    @deprecated("Method will be removed in 4.0.0, use self.wv.wmdistance() instead")
    def wmdistance(self, document1, document2):
        """
        Deprecated. Use self.wv.wmdistance() instead.
        Refer to the documentation for `gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.wmdistance`
        """
        return self.wv.wmdistance(document1, document2)

    @deprecated("Method will be removed in 4.0.0, use self.wv.most_similar_cosmul() instead")
    def most_similar_cosmul(self, positive=None, negative=None, topn=10):
        """
        Deprecated. Use self.wv.most_similar_cosmul() instead.
        Refer to the documentation for `gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.most_similar_cosmul`
        """
        return self.wv.most_similar_cosmul(positive, negative, topn)

    @deprecated("Method will be removed in 4.0.0, use self.wv.similar_by_word() instead")
    def similar_by_word(self, word, topn=10, restrict_vocab=None):
        """
        Deprecated. Use self.wv.similar_by_word() instead.
        Refer to the documentation for `gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.similar_by_word`
        """
        return self.wv.similar_by_word(word, topn, restrict_vocab)

    @deprecated("Method will be removed in 4.0.0, use self.wv.similar_by_vector() instead")
    def similar_by_vector(self, vector, topn=10, restrict_vocab=None):
        """
        Deprecated. Use self.wv.similar_by_vector() instead.
        Refer to the documentation for `gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.similar_by_vector`
        """
        return self.wv.similar_by_vector(vector, topn, restrict_vocab)

    @deprecated("Method will be removed in 4.0.0, use self.wv.doesnt_match() instead")
    def doesnt_match(self, words):
        """
        Deprecated. Use self.wv.doesnt_match() instead.
        Refer to the documentation for `gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.doesnt_match`
        """
        return self.wv.doesnt_match(words)

    @deprecated("Method will be removed in 4.0.0, use self.wv.similarity() instead")
    def similarity(self, w1, w2):
        """
        Deprecated. Use self.wv.similarity() instead.
        Refer to the documentation for `gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.similarity`
        """
        return self.wv.similarity(w1, w2)

    @deprecated("Method will be removed in 4.0.0, use self.wv.n_similarity() instead")
    def n_similarity(self, ws1, ws2):
        """
        Deprecated. Use self.wv.n_similarity() instead.
        Refer to the documentation for `gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.n_similarity`
        """
        return self.wv.n_similarity(ws1, ws2)

    @deprecated("Method will be removed in 4.0.0, use self.wv.evaluate_word_pairs() instead")
    def evaluate_word_pairs(self, pairs, delimiter='\t', restrict_vocab=300000,
                            case_insensitive=True, dummy4unknown=False):
        """
        Deprecated. Use self.wv.evaluate_word_pairs() instead.
        Refer to the documentation for `gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.evaluate_word_pairs`
        """
        return self.wv.evaluate_word_pairs(pairs, delimiter, restrict_vocab, case_insensitive, dummy4unknown)
| 39,076 | 46.024067 | 219 | py |
poincare_glove | poincare_glove-master/gensim/models/translation_matrix.py | #!/usr/bin/env python
# encoding: utf-8
"""Produce translation matrix to translate the word from one language to another language, using either
standard nearest neighbour method or globally corrected neighbour retrieval method [1]_.
This method can be used to augment the existing phrase tables with more candidate translations, or
filter out errors from the translation tables and known dictionaries [2]_. What's more, it also works
for any two sets of named-vectors where there are some paired-guideposts to learn the transformation.
Examples
--------
**How to make translation between two set of word-vectors**
Initialize a word-vector models
>>> from gensim.models import KeyedVectors
>>> from gensim.test.utils import datapath, temporary_file
>>> from gensim.models import TranslationMatrix
>>>
>>> model_en = KeyedVectors.load_word2vec_format(datapath("EN.1-10.cbow1_wind5_hs0_neg10_size300_smpl1e-05.txt"))
>>> model_it = KeyedVectors.load_word2vec_format(datapath("IT.1-10.cbow1_wind5_hs0_neg10_size300_smpl1e-05.txt"))
Define word pairs (that will be used for construction of the translation matrix)
>>> word_pairs = [
... ("one", "uno"), ("two", "due"), ("three", "tre"), ("four", "quattro"), ("five", "cinque"),
... ("seven", "sette"), ("eight", "otto"),
... ("dog", "cane"), ("pig", "maiale"), ("fish", "cavallo"), ("birds", "uccelli"),
... ("apple", "mela"), ("orange", "arancione"), ("grape", "acino"), ("banana", "banana")
... ]
Fit :class:`~gensim.models.translation_matrix.TranslationMatrix`
>>> trans_model = TranslationMatrix(model_en, model_it, word_pairs=word_pairs)
Apply model (translate words "dog" and "one")
>>> trans_model.translate(["dog", "one"], topn=3)
OrderedDict([('dog', [u'cane', u'gatto', u'cavallo']), ('one', [u'uno', u'due', u'tre'])])
Save / load model
>>> with temporary_file("model_file") as fname:
... trans_model.save(fname) # save model to file
... loaded_trans_model = TranslationMatrix.load(fname) # load model
**How to make translation between two :class:`~gensim.models.doc2vec.Doc2Vec` models**
Prepare data and models
>>> from gensim.test.utils import datapath
>>> from gensim.test.test_translation_matrix import read_sentiment_docs
>>> from gensim.models import Doc2Vec, BackMappingTranslationMatrix
>>>
>>> data = read_sentiment_docs(datapath("alldata-id-10.txt"))[:5]
>>> src_model = Doc2Vec.load(datapath("small_tag_doc_5_iter50"))
>>> dst_model = Doc2Vec.load(datapath("large_tag_doc_10_iter50"))
Train backward translation
>>> model_trans = BackMappingTranslationMatrix(data, src_model, dst_model)
>>> trans_matrix = model_trans.train(data)
Apply model
>>> result = model_trans.infer_vector(dst_model.docvecs[data[3].tags])
References
----------
.. [1] Dinu, Georgiana, Angeliki Lazaridou, and Marco Baroni. "Improving zero-shot learning by mitigating the
hubness problem", https://arxiv.org/abs/1412.6568
.. [2] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean.
"Distributed Representations of Words and Phrases and their Compositionality", https://arxiv.org/abs/1310.4546
"""
import warnings
import numpy as np
from collections import OrderedDict
from gensim import utils
from six import string_types
class Space(object):
    """A lightweight container pairing a word-vector matrix with its vocabulary."""

    def __init__(self, matrix, index2word):
        """
        Parameters
        ----------
        matrix : iterable of numpy.ndarray
            Word-vector rows.
        index2word : list of str
            Word corresponding to each row of `matrix`.
        """
        self.mat = matrix
        self.index2word = index2word
        # Reverse mapping: word -> row index in `mat`.
        self.word2index = {word: idx for idx, word in enumerate(self.index2word)}

    @classmethod
    def build(cls, lang_vec, lexicon=None):
        """Create a Space from a word-vector model, optionally restricted to a lexicon.

        Parameters
        ----------
        lang_vec : :class:`~gensim.models.keyedvectors.KeyedVectors`
            Model from which the vectors will be extracted.
        lexicon : list of str, optional
            Subset of words to include; when None, every word in `lang_vec` is used.

        Returns
        -------
        :class:`~gensim.models.translation_matrix.Space`
            Object holding the selected words and their vectors.
        """
        # When no lexicon is given, fall back to the model's full vocabulary.
        selected = lexicon if lexicon is not None else lang_vec.vocab.keys()
        words = []
        mat = []
        for word in selected:
            words.append(word)
            mat.append(lang_vec.syn0[lang_vec.vocab[word].index])
        return Space(mat, words)

    def normalize(self):
        """Scale every row of `mat` to unit (L2) length, in place."""
        row_norms = np.sqrt(np.sum(np.multiply(self.mat, self.mat), axis=1, keepdims=True))
        self.mat = self.mat / row_norms
class TranslationMatrix(utils.SaveLoad):
"""Objects of this class realize the translation matrix which map the source language to the target language.
The main method is `translate`: a source-language word vector x is mapped into the
target language space by computing z = Wx, and the words whose representations are
closest to z are returned.
The details can be seen in the notebook [3]_
Examples
--------
>>> from gensim.models import KeyedVectors
>>> from gensim.test.utils import datapath, temporary_file
>>>
>>> model_en = KeyedVectors.load_word2vec_format(datapath("EN.1-10.cbow1_wind5_hs0_neg10_size300_smpl1e-05.txt"))
>>> model_it = KeyedVectors.load_word2vec_format(datapath("IT.1-10.cbow1_wind5_hs0_neg10_size300_smpl1e-05.txt"))
>>>
>>> word_pairs = [
... ("one", "uno"), ("two", "due"), ("three", "tre"), ("four", "quattro"), ("five", "cinque"),
... ("seven", "sette"), ("eight", "otto"),
... ("dog", "cane"), ("pig", "maiale"), ("fish", "cavallo"), ("birds", "uccelli"),
... ("apple", "mela"), ("orange", "arancione"), ("grape", "acino"), ("banana", "banana")
... ]
>>>
>>> trans_model = TranslationMatrix(model_en, model_it)
>>> trans_model.train(word_pairs)
>>> trans_model.translate(["dog", "one"], topn=3)
OrderedDict([('dog', [u'cane', u'gatto', u'cavallo']), ('one', [u'uno', u'due', u'tre'])])
References
----------
.. [3] https://github.com/RaRe-Technologies/gensim/blob/3.2.0/docs/notebooks/translation_matrix.ipynb
"""
def __init__(self, source_lang_vec, target_lang_vec, word_pairs=None, random_state=None):
    """
    Parameters
    ----------
    source_lang_vec : :class:`~gensim.models.keyedvectors.KeyedVectors`
        Word vectors for the source language.
    target_lang_vec : :class:`~gensim.models.keyedvectors.KeyedVectors`
        Word vectors for the target language.
    word_pairs : list of (str, str), optional
        Training pairs; when given, `train` is invoked immediately.
    random_state : {None, int, array_like}, optional
        Seed for random state.
    """
    # Vectors for the two languages involved in the mapping.
    self.source_lang_vec = source_lang_vec
    self.target_lang_vec = target_lang_vec
    self.random_state = utils.get_random_state(random_state)
    # These are populated by train().
    self.source_word = None
    self.target_word = None
    self.source_space = None
    self.target_space = None
    self.translation_matrix = None
    if word_pairs is not None:
        if len(word_pairs[0]) != 2:
            raise ValueError("Each training data item must contain two different language words.")
        self.train(word_pairs)
def train(self, word_pairs):
    """Fit the least-squares translation matrix mapping source space to target space.

    Parameters
    ----------
    word_pairs : list of (str, str)
        Pairs of (source word, target word) used as training anchors.
    """
    source_words, target_words = zip(*word_pairs)
    self.source_word = source_words
    self.target_word = target_words

    # Build one Space per language, restricted to the training words, and L2-normalize.
    self.source_space = Space.build(self.source_lang_vec, set(source_words))
    self.target_space = Space.build(self.target_lang_vec, set(target_words))
    self.source_space.normalize()
    self.target_space.normalize()

    # Align rows so that row i of `src_mat` pairs with row i of `tgt_mat`.
    src_rows = [self.source_space.word2index[word] for word in source_words]
    tgt_rows = [self.target_space.word2index[word] for word in target_words]
    src_mat = self.source_space.mat[src_rows, :]
    tgt_mat = self.target_space.mat[tgt_rows, :]
    self.translation_matrix = np.linalg.lstsq(src_mat, tgt_mat, -1)[0]
def save(self, *args, **kwargs):
    """Save the model to file; `source_space`/`target_space` are skipped (rebuilt by `train`)."""
    kwargs.setdefault('ignore', ['source_space', 'target_space'])
    super(TranslationMatrix, self).save(*args, **kwargs)
def apply_transmat(self, words_space):
    """Project a source-space `Space` into the target space via the learned matrix.

    Parameters
    ----------
    words_space : :class:`~gensim.models.translation_matrix.Space`
        Space holding the word vectors to be translated.

    Returns
    -------
    :class:`~gensim.models.translation_matrix.Space`
        New Space with the same vocabulary and the mapped vectors.
    """
    projected = np.dot(words_space.mat, self.translation_matrix)
    return Space(projected, words_space.index2word)
def translate(self, source_words, topn=5, gc=0, sample_num=None, source_lang_vec=None, target_lang_vec=None):
    """Translate the word from the source language to the target language.
    Parameters
    ----------
    source_words : {str, list of str}
        Single word or a list of words to be translated
    topn : int, optional
        Number of words than will be returned as translation for each `source_words`
    gc : int, optional
        Define translation algorithm, if `gc == 0` - use standard NN retrieval,
        otherwise, use globally corrected neighbour retrieval method (as described in [1]_).
    sample_num : int, optional
        Number of word to sample from the source lexicon, if `gc == 1`, then `sample_num` **must** be provided.
    source_lang_vec : :class:`~gensim.models.keyedvectors.KeyedVectors`, optional
        New source language vectors for translation, by default, used the model's source language vector.
    target_lang_vec : :class:`~gensim.models.keyedvectors.KeyedVectors`, optional
        New target language vectors for translation, by default, used the model's target language vector.
    Returns
    -------
    :class:`collections.OrderedDict`
        Ordered dict where each item is `word`: [`translated_word_1`, `translated_word_2`, ...]
    """
    if isinstance(source_words, string_types):
        # pass only one word to translate
        source_words = [source_words]
    # If the language word vector not provided by user, use the model's
    # language word vector as default
    if source_lang_vec is None:
        warnings.warn(
            "The parameter source_lang_vec isn't specified, "
            "use the model's source language word vector as default."
        )
        source_lang_vec = self.source_lang_vec
    if target_lang_vec is None:
        warnings.warn(
            "The parameter target_lang_vec isn't specified, "
            "use the model's target language word vector as default."
        )
        target_lang_vec = self.target_lang_vec
    # If additional is provided, bootstrapping vocabulary from the source language word vector model.
    if gc:
        if sample_num is None:
            raise RuntimeError(
                "When using the globally corrected neighbour retrieval method, "
                "the `sample_num` parameter(i.e. the number of words sampled from source space) must be provided."
            )
        # Pad the source space with extra sampled words so ranks are computed
        # against a larger candidate pool.
        lexicon = set(source_lang_vec.index2word)
        addition = min(sample_num, len(lexicon) - len(source_words))
        lexicon = self.random_state.choice(list(lexicon.difference(source_words)), addition)
        source_space = Space.build(source_lang_vec, set(source_words).union(set(lexicon)))
    else:
        source_space = Space.build(source_lang_vec, source_words)
    # No lexicon argument here: the entire target vocabulary serves as candidate translations.
    target_space = Space.build(target_lang_vec, )
    # Normalize the source vector and target vector
    source_space.normalize()
    target_space.normalize()
    # Map the source language to the target language
    mapped_source_space = self.apply_transmat(source_space)
    # Use the cosine similarity metric; negated so np.argsort (ascending)
    # ranks the most similar candidates first.
    sim_matrix = -np.dot(target_space.mat, mapped_source_space.mat.T)
    # If `gc=1`, using corrected retrieval method
    if gc:
        # Rank-transform each row, then re-sort by rank + similarity (globally corrected retrieval).
        srtd_idx = np.argsort(np.argsort(sim_matrix, axis=1), axis=1)
        sim_matrix_idx = np.argsort(srtd_idx + sim_matrix, axis=0)
    else:
        sim_matrix_idx = np.argsort(sim_matrix, axis=0)
    # Translate the words and for each word return the `topn` similar words
    translated_word = OrderedDict()
    for idx, word in enumerate(source_words):
        translated_target_word = []
        # Search the most `topn` similar words
        for j in range(topn):
            map_space_id = sim_matrix_idx[j, source_space.word2index[word]]
            translated_target_word.append(target_space.index2word[map_space_id])
        translated_word[word] = translated_target_word
    return translated_word
class BackMappingTranslationMatrix(utils.SaveLoad):
    """Linear back-mapping from a target Doc2Vec model's document vectors into a
    source Doc2Vec model's vector space.

    The target model is trained on a superset corpus of the source model's
    corpus; learning this mapping lets document vectors that exist only in the
    newer (target) model be expressed in the older (source) model's space,
    effectively growing the old model incrementally. See the notebook [3]_
    for details.

    Examples
    --------
    >>> from gensim.test.utils import datapath
    >>> from gensim.test.test_translation_matrix import read_sentiment_docs
    >>> from gensim.models import Doc2Vec, BackMappingTranslationMatrix
    >>>
    >>> data = read_sentiment_docs(datapath("alldata-id-10.txt"))[:5]
    >>> src_model = Doc2Vec.load(datapath("small_tag_doc_5_iter50"))
    >>> dst_model = Doc2Vec.load(datapath("large_tag_doc_10_iter50"))
    >>>
    >>> model_trans = BackMappingTranslationMatrix(src_model, dst_model)
    >>> trans_matrix = model_trans.train(data)
    >>>
    >>> result = model_trans.infer_vector(dst_model.docvecs[data[3].tags])
    """

    def __init__(self, source_lang_vec, target_lang_vec, tagged_docs=None, random_state=None):
        """
        Parameters
        ----------
        source_lang_vec : :class:`~gensim.models.doc2vec.Doc2Vec`
            Source Doc2Vec model.
        target_lang_vec : :class:`~gensim.models.doc2vec.Doc2Vec`
            Target Doc2Vec model, trained on the superset corpus.
        tagged_docs : list of :class:`~gensim.models.doc2vec.TaggedDocument`, optional
            Training documents; when given, the matrix is fitted immediately.
        random_state : {None, int, array_like}, optional
            Seed for random state.
        """
        self.source_lang_vec = source_lang_vec
        self.target_lang_vec = target_lang_vec
        self.tagged_docs = tagged_docs
        self.random_state = utils.get_random_state(random_state)
        self.translation_matrix = None
        if tagged_docs is not None:
            self.train(tagged_docs)

    def train(self, tagged_docs):
        """Fit the matrix mapping target-model document vectors onto source-model ones.

        Parameters
        ----------
        tagged_docs : list of :class:`~gensim.models.doc2vec.TaggedDocument`
            Documents whose vectors exist in both models.

        Returns
        -------
        numpy.ndarray
            The fitted translation matrix.
        """
        source_rows = [self.source_lang_vec.docvecs[doc.tags].flatten() for doc in tagged_docs]
        target_rows = [self.target_lang_vec.docvecs[doc.tags].flatten() for doc in tagged_docs]
        # Least-squares solution of target_rows @ W = source_rows
        # (rcond=-1 keeps the legacy numpy behaviour).
        self.translation_matrix = np.linalg.lstsq(target_rows, source_rows, -1)[0]
        return self.translation_matrix

    def infer_vector(self, target_doc_vec):
        """Map a target-model document vector into the source model's space.

        Parameters
        ----------
        target_doc_vec : numpy.ndarray
            Document vector from the target model, whose document is absent
            from the source model.

        Returns
        -------
        numpy.ndarray
            The equivalent vector in the source model's space.
        """
        return np.dot(target_doc_vec, self.translation_matrix)
| 17,572 | 39.121005 | 118 | py |
poincare_glove | poincare_glove-master/gensim/models/doc2vec.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Shiva Manne <manneshiva@gmail.com>
# Copyright (C) 2018 RaRe Technologies s.r.o.
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Deep learning via the distributed memory and distributed bag of words models from
[1]_, using either hierarchical softmax or negative sampling [2]_ [3]_. See [#tutorial]_ for an introductory tutorial notebook.
**Make sure you have a C compiler before installing gensim, to use optimized (compiled)
doc2vec training** (70x speedup [blog]_).
Initialize a model with e.g.::
>>> model = Doc2Vec(documents, size=100, window=8, min_count=5, workers=4)
Persist a model to disk with::
>>> model.save(fname)
>>> model = Doc2Vec.load(fname) # you can continue training with the loaded model!
If you're finished training a model (=no more updates, only querying), you can do
>>> model.delete_temporary_training_data(keep_doctags_vectors=True, keep_inference=True):
to trim unneeded model memory = use (much) less RAM.
.. [1] Quoc Le and Tomas Mikolov. Distributed Representations of Sentences and Documents.
http://arxiv.org/pdf/1405.4053v2.pdf
.. [2] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean.
Efficient Estimation of Word Representations in Vector Space. In Proceedings of Workshop at ICLR, 2013.
.. [3] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean.
Distributed Representations of Words and Phrases and their Compositionality. In Proceedings of NIPS, 2013.
.. [blog] Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/
.. [#tutorial] Doc2vec in gensim tutorial,
https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/doc2vec-lee.ipynb
"""
import logging
import os
import warnings
try:
from queue import Queue
except ImportError:
from Queue import Queue # noqa:F401
from collections import namedtuple, defaultdict
from timeit import default_timer
from numpy import zeros, float32 as REAL, empty, ones, \
memmap as np_memmap, vstack, integer, dtype, sum as np_sum, add as np_add, repeat as np_repeat, concatenate
from gensim.utils import call_on_class_only
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from gensim.models.word2vec import Word2VecKeyedVectors, Word2VecVocab, Word2VecTrainables, train_cbow_pair,\
train_sg_pair, train_batch_sg
from six.moves import xrange
from six import string_types, integer_types, itervalues
from gensim.models.base_any2vec import BaseWordEmbeddingsModel
from gensim.models.keyedvectors import Doc2VecKeyedVectors
from types import GeneratorType
from gensim.utils import deprecated
logger = logging.getLogger(__name__)
try:
from gensim.models.doc2vec_inner import train_document_dbow, train_document_dm, train_document_dm_concat
from gensim.models.word2vec_inner import FAST_VERSION # blas-adaptation shared from word2vec
except ImportError:
# failed... fall back to plain numpy (20-80x slower training than the above)
FAST_VERSION = -1
def train_document_dbow(model, doc_words, doctag_indexes, alpha, work=None,
                        train_words=False, learn_doctags=True, learn_words=True, learn_hidden=True,
                        word_vectors=None, word_locks=None, doctag_vectors=None, doctag_locks=None):
    """Update a distributed-bag-of-words ("PV-DBOW") model from one document.

    Pure-Python fallback used when the cython `doc2vec_inner` routines are
    unavailable; called internally from `Doc2Vec.train()` and
    `Doc2Vec.infer_vector()`.

    Each doctag in `doctag_indexes` is trained, skip-gram style, to predict
    every token of `doc_words` (looked up in the model's vocab). When
    `train_words` is true, ordinary word-to-word skip-gram training is also
    performed on the document. Any of the `learn_*` flags may be set False to
    freeze the corresponding weights, e.g. when inferring vectors with a
    (partially-)frozen model.

    Returns the number of words in the document.
    """
    if doctag_vectors is None:
        doctag_vectors = model.docvecs.doctag_syn0
    if doctag_locks is None:
        doctag_locks = model.docvecs.doctag_syn0_lockf
    # Optional simultaneous word-vector training, exactly as plain skip-gram.
    if train_words and learn_words:
        train_batch_sg(model, [doc_words], alpha, work)
    # Each (doctag, word) pair is trained like a skip-gram (context, word) pair.
    for tag_index in doctag_indexes:
        for token in doc_words:
            train_sg_pair(
                model, token, tag_index, alpha,
                learn_vectors=learn_doctags, learn_hidden=learn_hidden,
                context_vectors=doctag_vectors, context_locks=doctag_locks
            )
    return len(doc_words)
def train_document_dm(model, doc_words, doctag_indexes, alpha, work=None, neu1=None,
                      learn_doctags=True, learn_words=True, learn_hidden=True,
                      word_vectors=None, word_locks=None, doctag_vectors=None, doctag_locks=None):
    """
    Update distributed memory model ("PV-DM") by training on a single document.
    Called internally from `Doc2Vec.train()` and `Doc2Vec.infer_vector()`. This
    method implements the DM model with a projection (input) layer that is
    either the sum or mean of the context vectors, depending on the model's
    `dm_mean` configuration field. See `train_document_dm_concat()` for the DM
    model with a concatenated input layer.
    The document is provided as `doc_words`, a list of word tokens which are looked up
    in the model's vocab dictionary, and `doctag_indexes`, which provide indexes
    into the doctag_vectors array.
    Any of `learn_doctags', `learn_words`, and `learn_hidden` may be set False to
    prevent learning-updates to those respective model weights, as if using the
    (partially-)frozen model to infer other compatible vectors.
    This is the non-optimized, Python version. If you have a C compiler, gensim
    will use the optimized version from doc2vec_inner instead.
    """
    # Fall back to the model's own weight/lock arrays when none are supplied.
    if word_vectors is None:
        word_vectors = model.wv.syn0
    if word_locks is None:
        word_locks = model.syn0_lockf
    if doctag_vectors is None:
        doctag_vectors = model.docvecs.doctag_syn0
    if doctag_locks is None:
        doctag_locks = model.docvecs.doctag_syn0_lockf
    # Keep only in-vocab words that survive frequent-word downsampling.
    word_vocabs = [model.wv.vocab[w] for w in doc_words if w in model.wv.vocab and
                   model.wv.vocab[w].sample_int > model.random.rand() * 2 ** 32]
    for pos, word in enumerate(word_vocabs):
        reduced_window = model.random.randint(model.window)  # `b` in the original doc2vec code
        start = max(0, pos - model.window + reduced_window)
        window_pos = enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start)
        word2_indexes = [word2.index for pos2, word2 in window_pos if pos2 != pos]
        # Input layer: sum of the context word vectors plus the doctag vectors.
        l1 = np_sum(word_vectors[word2_indexes], axis=0) + np_sum(doctag_vectors[doctag_indexes], axis=0)
        count = len(word2_indexes) + len(doctag_indexes)
        if model.cbow_mean and count > 1:
            l1 /= count  # use the mean rather than the sum of the contributing vectors
        neu1e = train_cbow_pair(model, word, word2_indexes, l1, alpha,
                                learn_vectors=False, learn_hidden=learn_hidden)
        if not model.cbow_mean and count > 1:
            neu1e /= count  # spread the error evenly over all contributing vectors
        # Apply the error gradient, honoring per-vector lock factors.
        if learn_doctags:
            for i in doctag_indexes:
                doctag_vectors[i] += neu1e * doctag_locks[i]
        if learn_words:
            for i in word2_indexes:
                word_vectors[i] += neu1e * word_locks[i]
    return len(word_vocabs)
def train_document_dm_concat(model, doc_words, doctag_indexes, alpha, work=None, neu1=None, learn_doctags=True,
                             learn_words=True, learn_hidden=True, word_vectors=None, word_locks=None,
                             doctag_vectors=None, doctag_locks=None):
    """
    Update distributed memory model ("PV-DM") by training on a single document, using a
    concatenation of the context window word vectors (rather than a sum or average).
    Called internally from `Doc2Vec.train()` and `Doc2Vec.infer_vector()`.
    The document is provided as `doc_words`, a list of word tokens which are looked up
    in the model's vocab dictionary, and `doctag_indexes`, which provide indexes
    into the doctag_vectors array.
    Any of `learn_doctags', `learn_words`, and `learn_hidden` may be set False to
    prevent learning-updates to those respective model weights, as if using the
    (partially-)frozen model to infer other compatible vectors.
    This is the non-optimized, Python version. If you have a C compiler, gensim
    will use the optimized version from doc2vec_inner instead.
    """
    # Fall back to the model's own weight/lock arrays when none are supplied.
    if word_vectors is None:
        word_vectors = model.wv.syn0
    if word_locks is None:
        word_locks = model.syn0_lockf
    if doctag_vectors is None:
        doctag_vectors = model.docvecs.doctag_syn0
    if doctag_locks is None:
        doctag_locks = model.docvecs.doctag_syn0_lockf
    # Keep only in-vocab words that survive frequent-word downsampling.
    word_vocabs = [model.wv.vocab[w] for w in doc_words if w in model.wv.vocab and
                   model.wv.vocab[w].sample_int > model.random.rand() * 2 ** 32]
    doctag_len = len(doctag_indexes)
    if doctag_len != model.dm_tag_count:
        return 0  # skip doc without expected number of doctag(s) (TODO: warn/pad?)
    # Pad both ends with the null word so every position has a full-width context.
    null_word = model.wv.vocab['\0']
    pre_pad_count = model.window
    post_pad_count = model.window
    padded_document_indexes = (
        (pre_pad_count * [null_word.index])  # pre-padding
        + [word.index for word in word_vocabs if word is not None]  # elide out-of-Vocabulary words
        + (post_pad_count * [null_word.index])  # post-padding
    )
    for pos in range(pre_pad_count, len(padded_document_indexes) - post_pad_count):
        word_context_indexes = (
            padded_document_indexes[(pos - pre_pad_count): pos]  # preceding words
            + padded_document_indexes[(pos + 1):(pos + 1 + post_pad_count)]  # following words
        )
        predict_word = model.wv.vocab[model.wv.index2word[padded_document_indexes[pos]]]
        # numpy advanced-indexing copies; concatenate, flatten to 1d
        l1 = concatenate((doctag_vectors[doctag_indexes], word_vectors[word_context_indexes])).ravel()
        neu1e = train_cbow_pair(model, predict_word, None, l1, alpha,
                                learn_hidden=learn_hidden, learn_vectors=False)
        # filter by locks and shape for addition to source vectors
        e_locks = concatenate((doctag_locks[doctag_indexes], word_locks[word_context_indexes]))
        neu1e_r = (neu1e.reshape(-1, model.vector_size)
                   * np_repeat(e_locks, model.vector_size).reshape(-1, model.vector_size))
        if learn_doctags:
            np_add.at(doctag_vectors, doctag_indexes, neu1e_r[:doctag_len])
        if learn_words:
            np_add.at(word_vectors, word_context_indexes, neu1e_r[doctag_len:])
    # Number of effective (padded-window) training positions processed.
    return len(padded_document_indexes) - pre_pad_count - post_pad_count
class TaggedDocument(namedtuple('TaggedDocument', 'words tags')):
    """A single document for Doc2Vec training: `words` is a list of unicode
    string tokens and `tags` is a list of tags. Tags may be one or more
    unicode string tokens, but the typical (and most memory-efficient)
    practice is a single unique integer id per document.

    Replaces "sentence as a list of words" from Word2Vec.
    """

    def __str__(self):
        class_name = self.__class__.__name__
        return '%s(%s, %s)' % (class_name, self.words, self.tags)
# for compatibility
@deprecated("Class will be removed in 4.0.0, use TaggedDocument instead")
class LabeledSentence(TaggedDocument):
    """Deprecated backward-compatible alias for :class:`TaggedDocument`."""
    pass
class Doctag(namedtuple('Doctag', 'offset, word_count, doc_count')):
    """Bookkeeping record for a string document tag discovered during the
    initial vocabulary scan — the doctag analogue of a word's Vocab entry.
    Not used when all presented document tags are ints.

    `offset` is the direct index into doctags_syn0/doctags_syn0_lockf only
    when no raw-int tags were used; otherwise string Doctag vectors begin at
    index (max_rawint + 1) and the true index is (rawint_index + 1 + offset).
    See also _index_to_doctag().
    """

    __slots__ = ()

    def repeat(self, word_count):
        # Fold one more document containing `word_count` words into this tag's totals.
        return Doctag(self.offset, self.word_count + word_count, self.doc_count + 1)
class Doc2Vec(BaseWordEmbeddingsModel):
"""Class for training, using and evaluating neural networks described in http://arxiv.org/pdf/1405.4053v2.pdf"""
def __init__(self, documents=None, dm_mean=None, dm=1, dbow_words=0, dm_concat=0, dm_tag_count=1,
             docvecs=None, docvecs_mapfile=None, comment=None, trim_rule=None, callbacks=(), **kwargs):
    """Initialize the model from an iterable of `documents`. Each document is a
    TaggedDocument object that will be used for training.
    Parameters
    ----------
    documents : iterable of iterables
        The `documents` iterable can be simply a list of TaggedDocument elements, but for larger corpora,
        consider an iterable that streams the documents directly from disk/network.
        If you don't supply `documents`, the model is left uninitialized -- use if
        you plan to initialize it in some other way.
    dm : int {1,0}
        Defines the training algorithm. If `dm=1`, 'distributed memory' (PV-DM) is used.
        Otherwise, `distributed bag of words` (PV-DBOW) is employed.
    size : int
        Dimensionality of the feature vectors.
    window : int
        The maximum distance between the current and predicted word within a sentence.
    alpha : float
        The initial learning rate.
    min_alpha : float
        Learning rate will linearly drop to `min_alpha` as training progresses.
    seed : int
        Seed for the random number generator. Initial vectors for each word are seeded with a hash of
        the concatenation of word + `str(seed)`. Note that for a fully deterministically-reproducible run,
        you must also limit the model to a single worker thread (`workers=1`), to eliminate ordering jitter
        from OS thread scheduling. (In Python 3, reproducibility between interpreter launches also requires
        use of the `PYTHONHASHSEED` environment variable to control hash randomization).
    min_count : int
        Ignores all words with total frequency lower than this.
    max_vocab_size : int
        Limits the RAM during vocabulary building; if there are more unique
        words than this, then prune the infrequent ones. Every 10 million word types need about 1GB of RAM.
        Set to `None` for no limit.
    sample : float
        The threshold for configuring which higher-frequency words are randomly downsampled,
        useful range is (0, 1e-5).
    workers : int
        Use these many worker threads to train the model (=faster training with multicore machines).
    iter : int
        Number of iterations (epochs) over the corpus.
    hs : int {1,0}
        If 1, hierarchical softmax will be used for model training.
        If set to 0, and `negative` is non-zero, negative sampling will be used.
    negative : int
        If > 0, negative sampling will be used, the int for negative specifies how many "noise words"
        should be drawn (usually between 5-20).
        If set to 0, no negative sampling is used.
    dm_mean : int {1,0}
        If 0 , use the sum of the context word vectors. If 1, use the mean.
        Only applies when `dm` is used in non-concatenative mode.
    dm_concat : int {1,0}
        If 1, use concatenation of context vectors rather than sum/average;
        Note concatenation results in a much-larger model, as the input
        is no longer the size of one (sampled or arithmetically combined) word vector, but the
        size of the tag(s) and all words in the context strung together.
    dm_tag_count : int
        Expected constant number of document tags per document, when using
        dm_concat mode; default is 1.
    dbow_words : int {1,0}
        If set to 1 trains word-vectors (in skip-gram fashion) simultaneous with DBOW
        doc-vector training; If 0, only trains doc-vectors (faster).
    trim_rule : function
        Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
        be trimmed away, or handled using the default (discard if word count < min_count).
        Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
        or a callable that accepts parameters (word, count, min_count) and returns either
        :attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
        Note: The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as part
        of the model.
    callbacks : :obj: `list` of :obj: `~gensim.models.callbacks.CallbackAny2Vec`
        List of callbacks that need to be executed/run at specific stages during training.
    """
    if 'sentences' in kwargs:
        raise DeprecationWarning(
            "Parameter 'sentences' was renamed to 'documents', and will be removed in 4.0.0, "
            "use 'documents' instead."
        )
    # Remap deprecated kwargs onto their current names before the base class sees them.
    if 'iter' in kwargs:
        warnings.warn("The parameter `iter` is deprecated, will be removed in 4.0.0, use `epochs` instead.")
        kwargs['epochs'] = kwargs['iter']
    if 'size' in kwargs:
        warnings.warn("The parameter `size` is deprecated, will be removed in 4.0.0, use `vector_size` instead.")
        kwargs['vector_size'] = kwargs['size']
    super(Doc2Vec, self).__init__(
        sg=(1 + dm) % 2,  # PV-DM (dm=1) corresponds to sg=0; PV-DBOW (dm=0) to sg=1
        null_word=dm_concat,
        callbacks=callbacks,
        fast_version=FAST_VERSION,
        **kwargs)
    self.load = call_on_class_only
    if dm_mean is not None:
        self.cbow_mean = dm_mean
    self.dbow_words = int(dbow_words)
    self.dm_concat = int(dm_concat)
    self.dm_tag_count = int(dm_tag_count)
    # dm_concat mode relies on the null word '\0' being in the vocab for window padding
    # (see train_document_dm_concat).
    kwargs['null_word'] = dm_concat
    vocabulary_keys = ['max_vocab_size', 'min_count', 'sample', 'sorted_vocab', 'null_word']
    vocabulary_kwargs = dict((k, kwargs[k]) for k in vocabulary_keys if k in kwargs)
    self.vocabulary = Doc2VecVocab(**vocabulary_kwargs)
    trainables_keys = ['seed', 'hashfxn', 'window']
    trainables_kwargs = dict((k, kwargs[k]) for k in trainables_keys if k in kwargs)
    self.trainables = Doc2VecTrainables(
        dm=dm, dm_concat=dm_concat, dm_tag_count=dm_tag_count,
        vector_size=self.vector_size, **trainables_kwargs)
    self.wv = Word2VecKeyedVectors(self.vector_size)
    self.docvecs = docvecs or Doc2VecKeyedVectors(self.vector_size, docvecs_mapfile)
    self.comment = comment
    if documents is not None:
        if isinstance(documents, GeneratorType):
            raise TypeError("You can't pass a generator as the documents argument. Try an iterator.")
        self.build_vocab(documents, trim_rule=trim_rule)
        self.train(
            documents, total_examples=self.corpus_count, epochs=self.epochs,
            start_alpha=self.alpha, end_alpha=self.min_alpha, callbacks=callbacks)
@property
def dm(self):
    """int {1,0} : `dm=1` indicates 'distributed memory' (PV-DM) else
    `distributed bag of words` (PV-DBOW) is used."""
    # `sg` was set to (1 + dm) % 2 in __init__, so dm is simply its inverse.
    return not self.sg  # opposite of SG
@property
def dbow(self):
    """int {1,0} : `dbow=1` indicates `distributed bag of words` (PV-DBOW) else
    'distributed memory' (PV-DM) is used."""
    # PV-DBOW coincides exactly with the base class's skip-gram flag.
    return self.sg  # same as SG
def _set_train_params(self, **kwargs):
    # No Doc2Vec-specific per-train() parameters; hook required by the base class.
    pass
def _clear_post_train(self):
    # Normalized-vector caches are stale once weights change; drop them.
    self.clear_sims()
def clear_sims(self):
    """Discard the cached L2-normalized vector arrays; they are rebuilt lazily on demand."""
    self.wv.vectors_norm = self.wv.vectors_docs_norm = None
def reset_from(self, other_model):
    """Share vocabulary and doctag bookkeeping structures with `other_model`,
    then re-initialize this model's trainable weights from scratch."""
    # Word-level structures.
    self.wv.vocab = other_model.wv.vocab
    self.wv.index2word = other_model.wv.index2word
    self.vocabulary.cum_table = other_model.vocabulary.cum_table
    self.corpus_count = other_model.corpus_count
    # Document-tag structures.
    docvecs, other_docvecs = self.docvecs, other_model.docvecs
    docvecs.count = other_docvecs.count
    docvecs.doctags = other_docvecs.doctags
    docvecs.offset2doctag = other_docvecs.offset2doctag
    # Fresh weight matrices sized to the shared vocabulary.
    self.trainables.reset_weights(self.hs, self.negative, self.wv, self.docvecs)
def _do_train_job(self, job, alpha, inits):
    """Train on a batch of documents, dispatching to the PV-DBOW / PV-DM-concat /
    PV-DM routine selected by the model configuration. Returns a 2-tuple:
    the summed per-document tallies reported by the training routines, and
    the raw word count of the batch."""
    work, neu1 = inits
    tally = 0
    for doc in job:
        doctag_indexes = self.vocabulary.indexed_doctags(doc.tags, self.docvecs)
        doctag_vectors = self.docvecs.vectors_docs
        doctag_locks = self.trainables.vectors_docs_lockf
        if self.sg:
            # PV-DBOW (optionally also training word vectors skip-gram style).
            tally += train_document_dbow(
                self, doc.words, doctag_indexes, alpha, work, train_words=self.dbow_words,
                doctag_vectors=doctag_vectors, doctag_locks=doctag_locks
            )
        elif self.dm_concat:
            # PV-DM with a concatenated context input layer.
            tally += train_document_dm_concat(
                self, doc.words, doctag_indexes, alpha, work, neu1,
                doctag_vectors=doctag_vectors, doctag_locks=doctag_locks
            )
        else:
            # PV-DM with summed/averaged context input layer.
            tally += train_document_dm(
                self, doc.words, doctag_indexes, alpha, work, neu1,
                doctag_vectors=doctag_vectors, doctag_locks=doctag_locks
            )
    return tally, self._raw_word_count(job)
def train(self, documents, total_examples=None, total_words=None,
          epochs=None, start_alpha=None, end_alpha=None,
          word_count=0, queue_factor=2, report_delay=1.0, callbacks=()):
    """Update the model's neural weights from a sequence of sentences (can be a once-only generator stream).
    The `documents` iterable can be simply a list of TaggedDocument elements.
    To support linear learning-rate decay from (initial) alpha to min_alpha, and accurate
    progress-percentage logging, either total_examples (count of sentences) or total_words (count of
    raw words in sentences) **MUST** be provided (if the corpus is the same as was provided to
    :meth:`~gensim.models.word2vec.Word2Vec.build_vocab()`, the count of examples in that corpus
    will be available in the model's :attr:`corpus_count` property).
    To avoid common mistakes around the model's ability to do multiple training passes itself, an
    explicit `epochs` argument **MUST** be provided. In the common and recommended case,
    where :meth:`~gensim.models.word2vec.Word2Vec.train()` is only called once,
    the model's cached `iter` value should be supplied as `epochs` value.
    Parameters
    ----------
    documents : iterable of iterables
        The `documents` iterable can be simply a list of TaggedDocument elements, but for larger corpora,
        consider an iterable that streams the documents directly from disk/network.
        See :class:`~gensim.models.doc2vec.TaggedBrownCorpus` or :class:`~gensim.models.doc2vec.TaggedLineDocument`
        in :mod:`~gensim.models.doc2vec` module for such examples.
    total_examples : int
        Count of sentences.
    total_words : int
        Count of raw words in documents.
    epochs : int
        Number of iterations (epochs) over the corpus.
    start_alpha : float
        Initial learning rate.
    end_alpha : float
        Final learning rate. Drops linearly from `start_alpha`.
    word_count : int
        Count of words already trained. Set this to 0 for the usual
        case of training on all words in sentences.
    queue_factor : int
        Multiplier for size of queue (number of workers * queue_factor).
    report_delay : float
        Seconds to wait before reporting progress.
    callbacks : :obj: `list` of :obj: `~gensim.models.callbacks.CallbackAny2Vec`
        List of callbacks that need to be executed/run at specific stages during training.
    """
    # All scheduling/threading is handled by the shared base-class implementation;
    # Doc2Vec specializes only the per-job work in _do_train_job().
    super(Doc2Vec, self).train(
        documents, total_examples=total_examples, total_words=total_words,
        epochs=epochs, start_alpha=start_alpha, end_alpha=end_alpha, word_count=word_count,
        queue_factor=queue_factor, report_delay=report_delay, callbacks=callbacks)
def _raw_word_count(self, job):
    """Return the total token count across all documents in `job`."""
    total = 0
    for document in job:
        total += len(document.words)
    return total
def estimated_lookup_memory(self):
    """Rough byte estimate of the doctag lookup structures; zero when only
    plain int tags are in use (both mappings are then empty)."""
    offset_bytes = 60 * len(self.docvecs.offset2doctag)
    doctag_bytes = 140 * len(self.docvecs.doctags)
    return offset_bytes + doctag_bytes
def infer_vector(self, doc_words, alpha=0.1, min_alpha=0.0001, steps=5):
    """
    Infer a vector for given post-bulk training document.
    Parameters
    ----------
    doc_words : :obj: `list` of :obj: `str`
        Document should be a list of (word) tokens.
    alpha : float
        The initial learning rate.
    min_alpha : float
        Learning rate will linearly drop to `min_alpha` as training progresses.
    steps : int
        Number of times to train the new document.
    Returns
    -------
    :obj: `numpy.ndarray`
        Returns the inferred vector for the new document.
    """
    # A temporary one-row doctag array holds the vector being inferred (slot 0).
    doctag_vectors, doctag_locks = self.trainables.get_doctag_trainables(doc_words, self.docvecs.vector_size)
    doctag_indexes = [0]
    work = zeros(self.trainables.layer1_size, dtype=REAL)
    if not self.sg:
        neu1 = matutils.zeros_aligned(self.trainables.layer1_size, dtype=REAL)
    for i in range(steps):
        # Only the temporary doctag vector is updated; word vectors and the
        # hidden layer stay frozen (learn_words=False, learn_hidden=False).
        if self.sg:
            train_document_dbow(
                self, doc_words, doctag_indexes, alpha, work,
                learn_words=False, learn_hidden=False, doctag_vectors=doctag_vectors, doctag_locks=doctag_locks
            )
        elif self.dm_concat:
            train_document_dm_concat(
                self, doc_words, doctag_indexes, alpha, work, neu1,
                learn_words=False, learn_hidden=False, doctag_vectors=doctag_vectors, doctag_locks=doctag_locks
            )
        else:
            train_document_dm(
                self, doc_words, doctag_indexes, alpha, work, neu1,
                learn_words=False, learn_hidden=False, doctag_vectors=doctag_vectors, doctag_locks=doctag_locks
            )
        # Shrink the remaining learning-rate gap toward min_alpha each step.
        alpha = ((alpha - min_alpha) / (steps - i)) + min_alpha
    return doctag_vectors[0]
def __getitem__(self, tag):
    """Look up the vector for a word or doctag key; a sequence of keys yields
    their vectors stacked row-wise into a single 2-d array."""
    scalar_key_types = string_types + integer_types + (integer,)
    if not isinstance(tag, scalar_key_types):
        # Sequence of keys: resolve each recursively and stack the rows.
        return vstack([self[key] for key in tag])
    if tag in self.wv.vocab:
        return self.wv[tag]
    # Keys absent from the word vocab are treated as document tags.
    return self.docvecs[tag]
def __str__(self):
    """Abbreviated name reflecting major configuration paramaters."""
    segments = []
    if self.comment:
        segments.append('"%s"' % self.comment)
    # Training-algorithm tag: dbow / dbow+w (PV-DBOW) or dm/{c,m,s} (PV-DM).
    if self.sg:
        if self.dbow_words:
            segments.append('dbow+w')  # also training words
        else:
            segments.append('dbow')  # PV-DBOW (skip-gram-style)
    else:  # PV-DM...
        if self.dm_concat:
            segments.append('dm/c')  # ...with concatenative context layer
        else:
            if self.cbow_mean:
                segments.append('dm/m')
            else:
                segments.append('dm/s')
    segments.append('d%d' % self.docvecs.vector_size)  # dimensions
    if self.negative:
        segments.append('n%d' % self.negative)  # negative samples
    if self.hs:
        segments.append('hs')
    if not self.sg or (self.sg and self.dbow_words):
        segments.append('w%d' % self.window)  # window size, when relevant
    # Only non-default vocab/threading settings are included.
    if self.vocabulary.min_count > 1:
        segments.append('mc%d' % self.vocabulary.min_count)
    if self.vocabulary.sample > 0:
        segments.append('s%g' % self.vocabulary.sample)
    if self.workers > 1:
        segments.append('t%d' % self.workers)
    return '%s(%s)' % (self.__class__.__name__, ','.join(segments))
def delete_temporary_training_data(self, keep_doctags_vectors=True, keep_inference=True):
    """Discard parameters that are used in training and score. Use if you're sure you're done training a model.
    Parameters
    ----------
    keep_doctags_vectors : bool
        Set `keep_doctags_vectors` to False if you don't want to save doctags vectors,
        in this case you can't to use docvecs's most_similar, similarity etc. methods.
    keep_inference : bool
        Set `keep_inference` to False if you don't want to store parameters that is used for infer_vector method
    """
    if not keep_inference:
        # Hidden-layer weights and lock factors are only needed for further
        # training or inference; drop whichever of them exist.
        if hasattr(self.trainables, 'syn1'):
            del self.trainables.syn1
        if hasattr(self.trainables, 'syn1neg'):
            del self.trainables.syn1neg
        if hasattr(self.trainables, 'vectors_lockf'):
            del self.trainables.vectors_lockf
        # Mark the model so later train/infer attempts can be rejected.
        self.model_trimmed_post_training = True
    if self.docvecs and hasattr(self.docvecs, 'vectors_docs') and not keep_doctags_vectors:
        del self.docvecs.vectors_docs
    if self.docvecs and hasattr(self.trainables, 'vectors_docs_lockf'):
        del self.trainables.vectors_docs_lockf
def save_word2vec_format(self, fname, doctag_vec=False, word_vec=True, prefix='*dt_', fvocab=None, binary=False):
"""Store the input-hidden weight matrix in the same format used by the original
C word2vec-tool, for compatibility.
Parameters
----------
fname : str
The file path used to save the vectors in.
doctag_vec : bool
Indicates whether to store document vectors.
word_vec : bool
Indicates whether to store word vectors.
prefix : str
Uniquely identifies doctags from word vocab, and avoids collision
in case of repeated string in doctag and word vocab.
fvocab : str
Optional file path used to save the vocabulary
binary : bool
If True, the data wil be saved in binary word2vec format, else it will be saved in plain text.
"""
total_vec = len(self.wv.vocab) + len(self.docvecs)
write_first_line = False
# save word vectors
if word_vec:
if not doctag_vec:
total_vec = len(self.wv.vocab)
self.wv.save_word2vec_format(fname, fvocab, binary, total_vec)
# save document vectors
if doctag_vec:
if not word_vec:
total_vec = len(self.docvecs)
write_first_line = True
self.docvecs.save_word2vec_format(
fname, prefix=prefix, fvocab=fvocab, total_vec=total_vec,
binary=binary, write_first_line=write_first_line)
def init_sims(self, replace=False):
"""
Precompute L2-normalized vectors.
If `replace` is set, forget the original vectors and only keep the normalized
ones = saves lots of memory!
Note that you **cannot continue training or inference** after doing a replace.
The model becomes effectively read-only = you can call `most_similar`, `similarity`
etc., but not `train` or `infer_vector`.
"""
return self.docvecs.init_sims(replace=replace)
    @classmethod
    def load(cls, *args, **kwargs):
        """Load a previously saved model.

        Falls back to the legacy loader when the pickle was produced by an
        older Gensim version whose class layout no longer matches.
        """
        try:
            return super(Doc2Vec, cls).load(*args, **kwargs)
        except AttributeError:
            # Old pickles reference attributes that no longer exist; the
            # deprecated loader knows how to translate them.
            logger.info('Model saved using code from earlier Gensim Version. Re-loading old model in a compatible way.')
            from gensim.models.deprecated.doc2vec import load_old_doc2vec
            return load_old_doc2vec(*args, **kwargs)
    def estimate_memory(self, vocab_size=None, report=None):
        """Estimate required memory for a model using current settings.

        Parameters
        ----------
        vocab_size : int, optional
            Number of retained vocabulary words; forwarded to the superclass.
        report : dict, optional
            Partially filled memory report to extend; a new dict is created
            when omitted.

        Returns
        -------
        dict
            Per-structure byte estimates, extended with doctag lookup and
            doctag vector costs.
        """
        report = report or {}
        report['doctag_lookup'] = self.estimated_lookup_memory()
        # one REAL-typed vector per doctag
        report['doctag_syn0'] = self.docvecs.count * self.vector_size * dtype(REAL).itemsize
        return super(Doc2Vec, self).estimate_memory(vocab_size, report=report)
    def build_vocab(self, documents, update=False, progress_per=10000, keep_raw_vocab=False, trim_rule=None, **kwargs):
        """Build vocabulary from a sequence of sentences (can be a once-only generator stream).
        Each sentence is an iterable of iterables (can simply be a list of unicode strings too).
        Parameters
        ----------
        documents : iterable of iterables
            The `documents` iterable can be simply a list of TaggedDocument elements, but for larger corpora,
            consider an iterable that streams the documents directly from disk/network.
            See :class:`~gensim.models.doc2vec.TaggedBrownCorpus` or :class:`~gensim.models.doc2vec.TaggedLineDocument`
            in :mod:`~gensim.models.doc2vec` module for such examples.
        keep_raw_vocab : bool
            If not true, delete the raw vocabulary after the scaling is done and free up RAM.
        trim_rule : function
            Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
            be trimmed away, or handled using the default (discard if word count < min_count).
            Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
            or a callable that accepts parameters (word, count, min_count) and returns either
            :attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
            Note: The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as part
            of the model.
        progress_per : int
            Indicates how many words to process before showing/updating the progress.
        update : bool
            If true, the new words in `sentences` will be added to model's vocab.
        """
        # Survey the corpus: count raw word frequencies and register doctags.
        total_words, corpus_count = self.vocabulary.scan_vocab(
            documents, self.docvecs, progress_per=progress_per, trim_rule=trim_rule)
        self.corpus_count = corpus_count
        # Apply min_count / downsampling and build the final vocab structures.
        report_values = self.vocabulary.prepare_vocab(
            self.hs, self.negative, self.wv, update=update, keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule,
            **kwargs)
        report_values['memory'] = self.estimate_memory(vocab_size=report_values['num_retained_words'])
        # Allocate projection / hidden-layer weights for words and doctags.
        self.trainables.prepare_weights(
            self.hs, self.negative, self.wv, self.docvecs, update=update)
    def build_vocab_from_freq(self, word_freq, keep_raw_vocab=False, corpus_count=None, trim_rule=None, update=False):
        """
        Build vocabulary from a dictionary of word frequencies.
        Build model vocabulary from a passed dictionary that contains (word,word count).
        Words must be of type unicode strings.
        Parameters
        ----------
        word_freq : dict
            Word,Word_Count dictionary.
        keep_raw_vocab : bool
            If not true, delete the raw vocabulary after the scaling is done and free up RAM.
        corpus_count : int
            Even if no corpus is provided, this argument can set corpus_count explicitly.
        trim_rule : function
            Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
            be trimmed away, or handled using the default (discard if word count < min_count).
            Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
            or a callable that accepts parameters (word, count, min_count) and returns either
            :attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
            Note: The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as part
            of the model.
        update : bool
            If true, the new provided words in `word_freq` dict will be added to model's vocab.
        Examples
        --------
        >>> from gensim.models.word2vec import Word2Vec
        >>> model= Word2Vec()
        >>> model.build_vocab_from_freq({"Word1": 15, "Word2": 20})
        """
        logger.info("Processing provided word frequencies")
        # Instead of scanning text, this will assign provided word frequencies dictionary(word_freq)
        # to be directly the raw vocab
        raw_vocab = word_freq
        logger.info(
            "collected %i different raw word, with total frequency of %i",
            len(raw_vocab), sum(itervalues(raw_vocab))
        )
        # Since no sentences are provided, this is to control the corpus_count
        self.corpus_count = corpus_count or 0
        self.vocabulary.raw_vocab = raw_vocab
        # trim by min_count & precalculate downsampling
        report_values = self.vocabulary.prepare_vocab(
            self.hs, self.negative, self.wv, keep_raw_vocab=keep_raw_vocab,
            trim_rule=trim_rule, update=update)
        report_values['memory'] = self.estimate_memory(vocab_size=report_values['num_retained_words'])
        # allocate word and doctag weight matrices
        self.trainables.prepare_weights(
            self.hs, self.negative, self.wv, self.docvecs, update=update)
class Doc2VecVocab(Word2VecVocab):
    """Vocabulary-building helper for Doc2Vec: scans a tagged corpus,
    accumulating raw word counts and registering document tags."""
    def __init__(self, max_vocab_size=None, min_count=5, sample=1e-3, sorted_vocab=True, null_word=0):
        super(Doc2VecVocab, self).__init__(
            max_vocab_size=max_vocab_size, min_count=min_count, sample=sample,
            sorted_vocab=sorted_vocab, null_word=null_word)
    def scan_vocab(self, documents, docvecs, progress_per=10000, trim_rule=None):
        """Do an initial scan of `documents`: build `self.raw_vocab` word
        counts and note every document tag in `docvecs`.

        Returns
        -------
        (int, int)
            Total number of words processed and number of documents seen.
        """
        logger.info("collecting all words and their counts")
        document_no = -1
        total_words = 0
        min_reduce = 1
        interval_start = default_timer() - 0.00001  # guard against next sample being identical
        interval_count = 0
        checked_string_types = 0  # warn only once about plain-string `words`
        vocab = defaultdict(int)
        for document_no, document in enumerate(documents):
            if not checked_string_types:
                if isinstance(document.words, string_types):
                    logger.warning(
                        "Each 'words' should be a list of words (usually unicode strings). "
                        "First 'words' here is instead plain %s.",
                        type(document.words)
                    )
                checked_string_types += 1
            if document_no % progress_per == 0:
                interval_rate = (total_words - interval_count) / (default_timer() - interval_start)
                logger.info(
                    "PROGRESS: at example #%i, processed %i words (%i/s), %i word types, %i tags",
                    document_no, total_words, interval_rate, len(vocab), docvecs.count
                )
                interval_start = default_timer()
                interval_count = total_words
            document_length = len(document.words)
            for tag in document.tags:
                self.note_doctag(tag, document_no, document_length, docvecs)
            for word in document.words:
                vocab[word] += 1
            total_words += len(document.words)
            # keep memory bounded: prune rare words once the raw vocab overflows
            if self.max_vocab_size and len(vocab) > self.max_vocab_size:
                utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)
                min_reduce += 1
        logger.info(
            "collected %i word types and %i unique tags from a corpus of %i examples and %i words",
            len(vocab), docvecs.count, document_no + 1, total_words
        )
        corpus_count = document_no + 1
        self.raw_vocab = vocab
        return total_words, corpus_count
    def note_doctag(self, key, document_no, document_length, docvecs):
        """Note a document tag during initial corpus scan, for structure sizing."""
        if isinstance(key, integer_types + (integer,)):
            # plain-int tags index directly into the doctag vector array
            docvecs.max_rawint = max(docvecs.max_rawint, key)
        else:
            if key in docvecs.doctags:
                docvecs.doctags[key] = docvecs.doctags[key].repeat(document_length)
            else:
                docvecs.doctags[key] = Doctag(len(docvecs.offset2doctag), document_length, 1)
                docvecs.offset2doctag.append(key)
        # total slots = raw-int range plus all string doctags
        docvecs.count = docvecs.max_rawint + 1 + len(docvecs.offset2doctag)
    def indexed_doctags(self, doctag_tokens, docvecs):
        """Return indexes and backing-arrays used in training examples."""
        return [
            Doc2VecKeyedVectors._int_index(index, docvecs.doctags, docvecs.max_rawint)
            for index in doctag_tokens if self._tag_seen(index, docvecs)]
    def _tag_seen(self, index, docvecs):
        # int tags are valid iff within the allocated range; string tags must
        # have been registered during the vocabulary scan
        if isinstance(index, integer_types + (integer,)):
            return index < docvecs.count
        else:
            return index in docvecs.doctags
class Doc2VecTrainables(Word2VecTrainables):
    """Trainable-weights helper for Doc2Vec: allocates and resets the word
    and doctag weight matrices."""
    def __init__(self, dm=1, dm_concat=0, dm_tag_count=1, vector_size=100, seed=1, hashfxn=hash, window=5):
        super(Doc2VecTrainables, self).__init__(
            vector_size=vector_size, seed=seed, hashfxn=hashfxn)
        if dm and dm_concat:
            # PV-DM with concatenation: layer1 holds the tag vector(s) plus
            # the full window of context vectors side by side.
            self.layer1_size = (dm_tag_count + (2 * window)) * vector_size
            logger.info("using concatenative %d-dimensional layer1", self.layer1_size)
    def prepare_weights(self, hs, negative, wv, docvecs, update=False):
        """Build tables and model weights based on final vocabulary settings."""
        # set initial input/projection and hidden weights
        if not update:
            self.reset_weights(hs, negative, wv, docvecs)
        else:
            self.update_weights(hs, negative, wv)
    def reset_weights(self, hs, negative, wv, docvecs, vocabulary=None):
        """Reset word weights (superclass) and then the doctag weights."""
        super(Doc2VecTrainables, self).reset_weights(hs, negative, wv)
        self.reset_doc_weights(docvecs)
    def reset_doc_weights(self, docvecs):
        """(Re)allocate and seed the doctag vectors and their lock factors."""
        length = max(len(docvecs.doctags), docvecs.count)
        if docvecs.mapfile_path:
            # back the (potentially huge) arrays with disk-mapped storage
            docvecs.vectors_docs = np_memmap(
                docvecs.mapfile_path + '.vectors_docs', dtype=REAL, mode='w+', shape=(length, docvecs.vector_size)
            )
            self.vectors_docs_lockf = np_memmap(
                docvecs.mapfile_path + '.vectors_docs_lockf', dtype=REAL, mode='w+', shape=(length,)
            )
            self.vectors_docs_lockf.fill(1.0)
        else:
            docvecs.vectors_docs = empty((length, docvecs.vector_size), dtype=REAL)
            self.vectors_docs_lockf = ones((length,), dtype=REAL)  # zeros suppress learning
        for i in xrange(length):
            # construct deterministic seed from index AND model seed
            seed = "%d %s" % (
                self.seed, Doc2VecKeyedVectors._index_to_doctag(i, docvecs.offset2doctag, docvecs.max_rawint))
            docvecs.vectors_docs[i] = self.seeded_vector(seed, docvecs.vector_size)
    def get_doctag_trainables(self, doc_words, vector_size):
        """Create a fresh seeded doctag vector (plus lock factor) for inference."""
        doctag_vectors = zeros((1, vector_size), dtype=REAL)
        doctag_vectors[0] = self.seeded_vector(' '.join(doc_words), vector_size)
        doctag_locks = ones(1, dtype=REAL)
        return doctag_vectors, doctag_locks
class TaggedBrownCorpus(object):
    """Stream the Brown corpus (part of NLTK data) as TaggedDocument objects,
    one per corpus line."""
    def __init__(self, dirname):
        # directory holding the Brown corpus files
        self.dirname = dirname
    def __iter__(self):
        for entry in os.listdir(self.dirname):
            path = os.path.join(self.dirname, entry)
            if not os.path.isfile(path):
                continue
            # each file line is a single document; tokens look like WORD/POS_TAG
            for line_no, raw in enumerate(utils.smart_open(path)):
                text = utils.to_unicode(raw)
                words = []
                for chunk in text.split():
                    parts = chunk.split('/')
                    if len(parts) != 2:
                        continue
                    token, tag = parts
                    # ignore words with non-alphabetic tags like ",", "!" etc (punctuation, weird stuff)
                    if tag[:2].isalpha():
                        words.append("%s/%s" % (token.lower(), tag[:2]))
                if not words:  # don't bother sending out empty documents
                    continue
                yield TaggedDocument(words, ['%s_SENT_%s' % (path, line_no)])
class TaggedLineDocument(object):
    """Simple format: one document = one line = one TaggedDocument object.
    Words must already be preprocessed and whitespace-separated; each
    document's single tag is its (0-based) line number."""
    def __init__(self, source):
        """
        `source` can be either a string (filename) or a file object.
        Example::
            documents = TaggedLineDocument('myfile.txt')
        Or for compressed files::
            documents = TaggedLineDocument('compressed_text.txt.bz2')
            documents = TaggedLineDocument('compressed_text.txt.gz')
        """
        self.source = source
    def __iter__(self):
        """Yield one TaggedDocument per line of the source."""
        try:
            # A file-like source supports seek(); rewind so repeated
            # iteration always starts at the beginning.
            self.source.seek(0)
            for line_no, line in enumerate(self.source):
                yield TaggedDocument(utils.to_unicode(line).split(), [line_no])
        except AttributeError:
            # No seek(): interpret the source as a filename instead.
            with utils.smart_open(self.source) as stream:
                for line_no, line in enumerate(stream):
                    yield TaggedDocument(utils.to_unicode(line).split(), [line_no])
| 47,333 | 47.497951 | 120 | py |
poincare_glove | poincare_glove-master/gensim/models/ldaseqmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
# Based on Copyright (C) 2016 Radim Rehurek <radimrehurek@seznam.cz>
"""
Inspired by the Blei's original DTM code and paper.
Original DTM C/C++ code: https://github.com/blei-lab/dtm
DTM Paper: https://www.cs.princeton.edu/~blei/papers/BleiLafferty2006a.pdf
TODO:
The next steps to take this forward would be:
1) Include DIM mode. Most of the infrastructure for this is in place.
2) See if LdaPost can be replaced by LdaModel completely without breaking anything.
3) Heavy lifting going on in the sslm class - efforts can be made to cythonise mathematical methods.
    - in particular, update_obs and the optimization take a lot of time.
4) Try and make it distributed, especially around the E and M step.
5) Remove all C/C++ coding style/syntax.
"""
from gensim import utils, matutils
from gensim.models import ldamodel
import numpy as np
from scipy.special import digamma, gammaln
from scipy import optimize
import logging
logger = logging.getLogger('gensim.models.ldaseqmodel')
class LdaSeqModel(utils.SaveLoad):
    """
    The constructor estimates Dynamic Topic Model parameters based
    on a training corpus.
    If we have 30 documents, with 5 in the first time-slice, 10 in the second, and 15 in the third, we would
    set up our model like this:
    >>> ldaseq = LdaSeqModel(corpus=corpus, time_slice= [5, 10, 15], num_topics=5)
    Model persistency is achieved through inheriting utils.SaveLoad.
    >>> ldaseq.save("ldaseq")
    saves the model to disk.
    """
    def __init__(self, corpus=None, time_slice=None, id2word=None, alphas=0.01, num_topics=10,
                 initialize='gensim', sstats=None, lda_model=None, obs_variance=0.5, chain_variance=0.005, passes=10,
                 random_state=None, lda_inference_max_iter=25, em_min_iter=6, em_max_iter=20, chunksize=100):
        """
        `corpus` is any iterable gensim corpus
        `time_slice` as described above is a list which contains the number of documents in each time-slice
        `id2word` is a mapping from word ids (integers) to words (strings).
        It is used to determine the vocabulary size and printing topics.
        `alphas` is a prior of your choice and should be a double or float value. default is 0.01
        `num_topics` is the number of requested latent topics to be extracted from the training corpus.
        `initialize` allows the user to decide how he wants to initialise the DTM model. Default is through gensim LDA.
        You can use your own sstats of an LDA model previously trained as well by specifying 'own'
        and passing a np matrix through sstats.
        If you wish to just pass a previously used LDA model, pass it through `lda_model`
        Shape of sstats is (vocab_len, num_topics)
        `chain_variance` is a constant which dictates how the beta values evolve - it is a gaussian parameter
        defined in the beta distribution.
        `passes` is the number of passes of the initial LdaModel.
        `random_state` can be a np.random.RandomState object or the seed for one, for the LdaModel.
        """
        self.id2word = id2word
        if corpus is None and self.id2word is None:
            raise ValueError(
                'at least one of corpus/id2word must be specified, to establish input space dimensionality'
            )
        if self.id2word is None:
            logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
            self.id2word = utils.dict_from_corpus(corpus)
            self.vocab_len = len(self.id2word)
        elif len(self.id2word) > 0:
            self.vocab_len = len(self.id2word)
        else:
            self.vocab_len = 0
        if corpus is not None:
            try:
                self.corpus_len = len(corpus)
            except TypeError:
                logger.warning("input corpus stream has no len(); counting documents")
                self.corpus_len = sum(1 for _ in corpus)
        self.time_slice = time_slice
        if self.time_slice is not None:
            self.num_time_slices = len(time_slice)
        # NOTE(review): this loop iterates `corpus` unconditionally -- it would
        # crash when corpus is None and consumes one pass of a generator
        # stream; confirm callers always pass a re-iterable, non-None corpus.
        max_doc_len = 0
        for line_no, line in enumerate(corpus):
            if len(line) > max_doc_len:
                max_doc_len = len(line)
        self.max_doc_len = max_doc_len
        self.num_topics = num_topics
        self.num_time_slices = len(time_slice)
        self.alphas = np.full(num_topics, alphas)
        # topic_chains contains for each topic a 'state space language model' object
        # which in turn has information about each topic
        # the sslm class is described below and contains information
        # on topic-word probabilities and doc-topic probabilities.
        self.topic_chains = []
        for topic in range(0, num_topics):
            sslm_ = sslm(
                num_time_slices=self.num_time_slices, vocab_len=self.vocab_len, num_topics=self.num_topics,
                chain_variance=chain_variance, obs_variance=obs_variance
            )
            self.topic_chains.append(sslm_)
        # the following are class variables which are to be integrated during Document Influence Model
        self.top_doc_phis = None
        self.influence = None
        self.renormalized_influence = None
        self.influence_sum_lgl = None
        # if a corpus and time_slice is provided, depending on the user choice of initializing LDA, we start DTM.
        if corpus is not None and time_slice is not None:
            if initialize == 'gensim':
                lda_model = ldamodel.LdaModel(
                    corpus, id2word=self.id2word, num_topics=self.num_topics,
                    passes=passes, alpha=self.alphas, random_state=random_state,
                    dtype=np.float64
                )
                self.sstats = np.transpose(lda_model.state.sstats)
            if initialize == 'ldamodel':
                self.sstats = np.transpose(lda_model.state.sstats)
            if initialize == 'own':
                self.sstats = sstats
            # initialize model from sstats
            self.init_ldaseq_ss(chain_variance, obs_variance, self.alphas, self.sstats)
            # fit DTM
            self.fit_lda_seq(corpus, lda_inference_max_iter, em_min_iter, em_max_iter, chunksize)
    def init_ldaseq_ss(self, topic_chain_variance, topic_obs_variance, alpha, init_suffstats):
        """
        Method to initialize State Space Language Model, topic wise.
        """
        self.alphas = alpha
        for k, chain in enumerate(self.topic_chains):
            # column k of the sufficient statistics seeds topic k's chain
            sstats = init_suffstats[:, k]
            sslm.sslm_counts_init(chain, topic_obs_variance, topic_chain_variance, sstats)
            # initialize the below matrices only if running DIM
            # ldaseq.topic_chains[k].w_phi_l = np.zeros((ldaseq.vocab_len, ldaseq.num_time_slices))
            # ldaseq.topic_chains[k].w_phi_sum = np.zeros((ldaseq.vocab_len, ldaseq.num_time_slices))
            # ldaseq.topic_chains[k].w_phi_sq = np.zeros((ldaseq.vocab_len, ldaseq.num_time_slices))
    def fit_lda_seq(self, corpus, lda_inference_max_iter, em_min_iter, em_max_iter, chunksize):
        """
        fit an lda sequence model:
        for each time period:
            set up lda model with E[log p(w|z)] and alpha
            for each document:
                perform posterior inference
                update sufficient statistics/likelihood
        maximize topics
        """
        LDASQE_EM_THRESHOLD = 1e-4
        # if bound is low, then we increase iterations.
        LOWER_ITER = 10
        ITER_MULT_LOW = 2
        MAX_ITER = 500
        num_topics = self.num_topics
        vocab_len = self.vocab_len
        data_len = self.num_time_slices
        corpus_len = self.corpus_len
        bound = 0
        convergence = LDASQE_EM_THRESHOLD + 1
        iter_ = 0
        # run at least em_min_iter EM rounds, at most em_max_iter (unless converged)
        while iter_ < em_min_iter or ((convergence > LDASQE_EM_THRESHOLD) and iter_ <= em_max_iter):
            logger.info(" EM iter %i", iter_)
            logger.info("E Step")
            # TODO: bound is initialized to 0
            old_bound = bound
            # initiate sufficient statistics
            topic_suffstats = []
            for topic in range(0, num_topics):
                topic_suffstats.append(np.resize(np.zeros(vocab_len * data_len), (vocab_len, data_len)))
            # set up variables
            gammas = np.resize(np.zeros(corpus_len * num_topics), (corpus_len, num_topics))
            lhoods = np.resize(np.zeros(corpus_len * num_topics + 1), (corpus_len, num_topics + 1))
            # compute the likelihood of a sequential corpus under an LDA
            # seq model and find the evidence lower bound. This is the E - Step
            bound, gammas = \
                self.lda_seq_infer(corpus, topic_suffstats, gammas, lhoods, iter_, lda_inference_max_iter, chunksize)
            self.gammas = gammas
            logger.info("M Step")
            # fit the variational distribution. This is the M - Step
            topic_bound = self.fit_lda_seq_topics(topic_suffstats)
            bound += topic_bound
            if (bound - old_bound) < 0:
                # if max_iter is too low, increase iterations.
                if lda_inference_max_iter < LOWER_ITER:
                    lda_inference_max_iter *= ITER_MULT_LOW
                logger.info("Bound went down, increasing iterations to %i", lda_inference_max_iter)
            # check for convergence
            convergence = np.fabs((bound - old_bound) / old_bound)
            if convergence < LDASQE_EM_THRESHOLD:
                lda_inference_max_iter = MAX_ITER
                logger.info("Starting final iterations, max iter is %i", lda_inference_max_iter)
                convergence = 1.0
            logger.info("iteration %i iteration lda seq bound is %f convergence is %f", iter_, bound, convergence)
            iter_ += 1
        return bound
    def lda_seq_infer(self, corpus, topic_suffstats, gammas, lhoods,
                      iter_, lda_inference_max_iter, chunksize):
        """
        Inference or E- Step.
        This is used to set up the gensim LdaModel to be used for each time-slice.
        It also allows for Document Influence Model code to be written in.
        """
        num_topics = self.num_topics
        vocab_len = self.vocab_len
        bound = 0.0
        lda = ldamodel.LdaModel(num_topics=num_topics, alpha=self.alphas, id2word=self.id2word, dtype=np.float64)
        lda.topics = np.array(np.split(np.zeros(vocab_len * num_topics), vocab_len))
        ldapost = LdaPost(max_doc_len=self.max_doc_len, num_topics=num_topics, lda=lda)
        # "DIM" is a placeholder for the (unfinished) Document Influence Model path
        model = "DTM"
        if model == "DTM":
            bound, gammas = self.inferDTMseq(
                corpus, topic_suffstats, gammas, lhoods, lda,
                ldapost, iter_, bound, lda_inference_max_iter, chunksize
            )
        elif model == "DIM":
            self.InfluenceTotalFixed(corpus)
            bound, gammas = self.inferDIMseq(
                corpus, topic_suffstats, gammas, lhoods, lda,
                ldapost, iter_, bound, lda_inference_max_iter, chunksize
            )
        return bound, gammas
    def inferDTMseq(self, corpus, topic_suffstats, gammas, lhoods, lda,
                    ldapost, iter_, bound, lda_inference_max_iter, chunksize):
        """
        Computes the likelihood of a sequential corpus under an LDA seq model, and return the likelihood bound.
        Need to pass the LdaSeq model, corpus, sufficient stats, gammas and lhoods matrices previously created,
        and LdaModel and LdaPost class objects.
        """
        doc_index = 0  # overall doc_index in corpus
        time = 0  # current time-slice
        doc_num = 0  # doc-index in current time-slice
        lda = self.make_lda_seq_slice(lda, time)  # create lda_seq slice
        # cumulative slice boundaries, e.g. [5, 15, 30] for slices of 5/10/15 docs
        time_slice = np.cumsum(np.array(self.time_slice))
        for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
            # iterates chunk size for constant memory footprint
            for doc in chunk:
                # this is used to update the time_slice and create a new lda_seq slice every new time_slice
                # NOTE(review): '>' lets the first document of each new slice be
                # processed with the previous slice's topics (looks like an
                # off-by-one; '>=' expected) -- confirm before changing.
                if doc_index > time_slice[time]:
                    time += 1
                    lda = self.make_lda_seq_slice(lda, time)  # create lda_seq slice
                    doc_num = 0
                gam = gammas[doc_index]
                lhood = lhoods[doc_index]
                ldapost.gamma = gam
                ldapost.lhood = lhood
                ldapost.doc = doc
                # TODO: replace fit_lda_post with appropriate ldamodel functions, if possible.
                if iter_ == 0:
                    doc_lhood = LdaPost.fit_lda_post(
                        ldapost, doc_num, time, None, lda_inference_max_iter=lda_inference_max_iter
                    )
                else:
                    doc_lhood = LdaPost.fit_lda_post(
                        ldapost, doc_num, time, self, lda_inference_max_iter=lda_inference_max_iter
                    )
                if topic_suffstats is not None:
                    topic_suffstats = LdaPost.update_lda_seq_ss(ldapost, time, doc, topic_suffstats)
                gammas[doc_index] = ldapost.gamma
                bound += doc_lhood
                doc_index += 1
                doc_num += 1
        return bound, gammas
    def make_lda_seq_slice(self, lda, time):
        """
        set up the LDA model topic-word values with that of ldaseq.
        """
        for k in range(0, self.num_topics):
            lda.topics[:, k] = np.copy(self.topic_chains[k].e_log_prob[:, time])
        lda.alpha = np.copy(self.alphas)
        return lda
    def fit_lda_seq_topics(self, topic_suffstats):
        """
        Fit lda sequence topic wise.
        """
        lhood = 0
        for k, chain in enumerate(self.topic_chains):
            logger.info("Fitting topic number %i", k)
            lhood_term = sslm.fit_sslm(chain, topic_suffstats[k])
            lhood += lhood_term
        return lhood
    def print_topic_times(self, topic, top_terms=20):
        """
        Prints one topic showing each time-slice.
        """
        topics = []
        for time in range(0, self.num_time_slices):
            topics.append(self.print_topic(topic, time, top_terms))
        return topics
    def print_topics(self, time=0, top_terms=20):
        """
        Prints all topics in a particular time-slice.
        """
        topics = []
        for topic in range(0, self.num_topics):
            topics.append(self.print_topic(topic, time, top_terms))
        return topics
    def print_topic(self, topic, time=0, top_terms=20):
        """
        Topic is the topic number
        Time is for a particular time_slice
        top_terms is the number of terms to display
        """
        topic = self.topic_chains[topic].e_log_prob
        topic = np.transpose(topic)
        # exponentiate and normalize the chosen slice into a probability distribution
        topic = np.exp(topic[time])
        topic = topic / topic.sum()
        bestn = matutils.argsort(topic, top_terms, reverse=True)
        beststr = [(self.id2word[id_], topic[id_]) for id_ in bestn]
        return beststr
    def doc_topics(self, doc_number):
        """
        On passing the LdaSeqModel trained ldaseq object, the doc_number of your document in the corpus,
        it returns the doc-topic probabilities of that document.
        """
        doc_topic = np.copy(self.gammas)
        doc_topic /= doc_topic.sum(axis=1)[:, np.newaxis]
        return doc_topic[doc_number]
    def dtm_vis(self, time, corpus):
        """
        returns term_frequency, vocab, doc_lengths, topic-term distributions and doc_topic distributions,
        specified by pyLDAvis format.
        all of these are needed to visualise topics for DTM for a particular time-slice via pyLDAvis.
        input parameter is the year to do the visualisation.
        """
        doc_topic = np.copy(self.gammas)
        doc_topic /= doc_topic.sum(axis=1)[:, np.newaxis]
        # normalized topic-word distribution of the requested slice, per chain
        topic_term = [
            np.exp(np.transpose(chain.e_log_prob)[time]) / np.exp(np.transpose(chain.e_log_prob)[time]).sum()
            for k, chain in enumerate(self.topic_chains)
        ]
        doc_lengths = [len(doc) for doc_no, doc in enumerate(corpus)]
        term_frequency = np.zeros(self.vocab_len)
        for doc_no, doc in enumerate(corpus):
            for pair in doc:
                term_frequency[pair[0]] += pair[1]
        vocab = [self.id2word[i] for i in range(0, len(self.id2word))]
        # returns np arrays for doc_topic proportions, topic_term proportions, and document_lengths, term_frequency.
        # these should be passed to the `pyLDAvis.prepare` method to visualise one time-slice of DTM topics.
        return doc_topic, np.array(topic_term), doc_lengths, term_frequency, vocab
    def dtm_coherence(self, time):
        """
        returns all topics of a particular time-slice without probabilitiy values for it to be used
        for either "u_mass" or "c_v" coherence.
        """
        coherence_topics = []
        for topics in self.print_topics(time):
            coherence_topic = []
            for word, dist in topics:
                coherence_topic.append(word)
            coherence_topics.append(coherence_topic)
        return coherence_topics
    def __getitem__(self, doc):
        """
        Similar to the LdaModel __getitem__ function, it returns topic proportions of a document passed.
        """
        lda_model = \
            ldamodel.LdaModel(num_topics=self.num_topics, alpha=self.alphas, id2word=self.id2word, dtype=np.float64)
        lda_model.topics = np.array(np.split(np.zeros(self.vocab_len * self.num_topics), self.vocab_len))
        ldapost = LdaPost(num_topics=self.num_topics, max_doc_len=len(doc), lda=lda_model, doc=doc)
        time_lhoods = []
        for time in range(0, self.num_time_slices):
            lda_model = self.make_lda_seq_slice(lda_model, time)  # create lda_seq slice
            lhood = LdaPost.fit_lda_post(ldapost, 0, time, self)
            time_lhoods.append(lhood)
        # normalize gamma into topic proportions
        doc_topic = ldapost.gamma / ldapost.gamma.sum()
        # should even the likelihoods be returned?
        return doc_topic
class sslm(utils.SaveLoad):
"""
The sslm class is the State Space Language Model for DTM and contains the following information:
`obs` values contain the doc - topic ratios
`e_log_prob` contains topic - word ratios
`mean`, `fwd_mean` contains the mean values to be used for inference for each word for a time_slice
`variance`, `fwd_variance` contains the variance values to be used for inference for each word in a time_slice
`fwd_mean`, `fwd_variance` are the forward posterior values.
`zeta` is an extra variational parameter with a value for each time-slice
"""
def __init__(self, vocab_len=None, num_time_slices=None, num_topics=None, obs_variance=0.5, chain_variance=0.005):
self.vocab_len = vocab_len
self.num_time_slices = num_time_slices
self.obs_variance = obs_variance
self.chain_variance = chain_variance
self.num_topics = num_topics
# setting up matrices
self.obs = np.array(np.split(np.zeros(num_time_slices * vocab_len), vocab_len))
self.e_log_prob = np.array(np.split(np.zeros(num_time_slices * vocab_len), vocab_len))
self.mean = np.array(np.split(np.zeros((num_time_slices + 1) * vocab_len), vocab_len))
self.fwd_mean = np.array(np.split(np.zeros((num_time_slices + 1) * vocab_len), vocab_len))
self.fwd_variance = np.array(np.split(np.zeros((num_time_slices + 1) * vocab_len), vocab_len))
self.variance = np.array(np.split(np.zeros((num_time_slices + 1) * vocab_len), vocab_len))
self.zeta = np.zeros(num_time_slices)
# the following are class variables which are to be integrated during Document Influence Model
self.m_update_coeff = None
self.mean_t = None
self.variance_t = None
self.influence_sum_lgl = None
self.w_phi_l = None
self.w_phi_sum = None
self.w_phi_l_sq = None
self.m_update_coeff_g = None
def update_zeta(self):
"""
Updates the Zeta Variational Parameter.
Zeta is described in the appendix and is equal
to sum (exp(mean[word] + Variance[word] / 2)), over every time-slice.
It is the value of variational parameter zeta which maximizes the lower bound.
"""
for j, val in enumerate(self.zeta):
self.zeta[j] = np.sum(np.exp(self.mean[:, j + 1] + self.variance[:, j + 1] / 2))
return self.zeta
    def compute_post_variance(self, word, chain_variance):
        """Run the variational Kalman filter's variance recursion for one word.

        Based on the Variational Kalman Filtering approach for Approximate
        Inference [https://www.cs.princeton.edu/~blei/papers/BleiLafferty2006a.pdf].
        Computes Var[beta_{t,w}] for t = 1:T with a forward filtering pass
        (`fwd_variance`) followed by a backward smoothing pass (`variance`):

            fwd_variance[t] = c * (fwd_variance[t-1] + chain_variance),
                where c = obs_variance / (fwd_variance[t-1] + chain_variance + obs_variance)
            variance[t] = c' * (variance[t+1] - chain_variance) + (1 - c') * fwd_variance[t],
                where c' = (fwd_variance[t] / (fwd_variance[t] + chain_variance))^2

        Returns the (variance, fwd_variance) rows for `word`; both are numpy
        views into self.variance / self.fwd_variance, so the update happens
        in place.
        """
        INIT_VARIANCE_CONST = 1000
        T = self.num_time_slices
        # row selections are views: writing to them mutates self.* in place
        variance = self.variance[word]
        fwd_variance = self.fwd_variance[word]
        # forward pass. Set initial variance very high
        fwd_variance[0] = chain_variance * INIT_VARIANCE_CONST
        for t in range(1, T + 1):
            if self.obs_variance:
                c = self.obs_variance / (fwd_variance[t - 1] + chain_variance + self.obs_variance)
            else:
                # zero observation variance: the filter collapses to zero
                c = 0
            fwd_variance[t] = c * (fwd_variance[t - 1] + chain_variance)
        # backward pass
        variance[T] = fwd_variance[T]
        for t in range(T - 1, -1, -1):
            if fwd_variance[t] > 0.0:
                c = np.power((fwd_variance[t] / (fwd_variance[t] + chain_variance)), 2)
            else:
                c = 0
            variance[t] = (c * (variance[t + 1] - chain_variance)) + ((1 - c) * fwd_variance[t])
        return variance, fwd_variance
def compute_post_mean(self, word, chain_variance):
    """
    Compute E[beta_{t,word}] for t = 1..T with a forward-backward pass.

    Based on the variational Kalman filtering approach of Blei & Lafferty,
    "Dynamic Topic Models" (2006),
    https://www.cs.princeton.edu/~blei/papers/BleiLafferty2006a.pdf

    Forward pass (filtering against the variational observations `obs`):
        fwd_mean[t] = c * fwd_mean[t - 1] + (1 - c) * obs[t - 1]
        with c = obs_variance / (fwd_variance[t - 1] + chain_variance + obs_variance)
    Backward pass (smoothing):
        mean[t] = c * fwd_mean[t] + (1 - c) * mean[t + 1]
        with c = chain_variance / (fwd_variance[t] + chain_variance)

    Requires self.fwd_variance[word] to be up to date (see
    compute_post_variance). Returns the (mean, fwd_mean) rows for `word`;
    assuming numpy storage, they are also updated in place via the views.
    """
    T = self.num_time_slices
    obs = self.obs[word]
    fwd_variance = self.fwd_variance[word]
    mean = self.mean[word]
    fwd_mean = self.fwd_mean[word]
    # forward
    fwd_mean[0] = 0
    for t in range(1, T + 1):
        c = self.obs_variance / (fwd_variance[t - 1] + chain_variance + self.obs_variance)
        fwd_mean[t] = c * fwd_mean[t - 1] + (1 - c) * obs[t - 1]
    # backward pass
    mean[T] = fwd_mean[T]
    for t in range(T - 1, -1, -1):
        if chain_variance == 0.0:
            c = 0.0
        else:
            c = chain_variance / (fwd_variance[t] + chain_variance)
        mean[t] = c * fwd_mean[t] + (1 - c) * mean[t + 1]
    return mean, fwd_mean
def compute_expected_log_prob(self):
    """
    Fill e_log_prob in place with the expected log word probabilities
    and return it.

    Implements the closed-form solution of equation 5 of the DTM paper:
        e_log_prob[w][t] = mean[w][t + 1] - log(zeta[t])
    """
    n_words, n_slices = self.e_log_prob.shape
    for w in range(n_words):
        for t in range(n_slices):
            self.e_log_prob[w][t] = self.mean[w][t + 1] - np.log(self.zeta[t])
    return self.e_log_prob
def sslm_counts_init(self, obs_variance, chain_variance, sstats):
    """
    Initialize the State Space Language Model from LDA sufficient statistics.

    Called once per topic chain. Seeds the variational observations with the
    smoothed, log-normalized LDA counts (replicated across all T slices),
    then derives the initial posterior mean/variance, zeta and expected log
    probabilities from them.
    """
    W = self.vocab_len
    T = self.num_time_slices
    # normalize the raw counts, smooth with a uniform 1/W prior,
    # renormalize, then move to log space
    log_norm_counts = np.copy(sstats)
    log_norm_counts = log_norm_counts / sum(log_norm_counts)
    log_norm_counts = log_norm_counts + 1.0 / W
    log_norm_counts = log_norm_counts / sum(log_norm_counts)
    log_norm_counts = np.log(log_norm_counts)
    # setting variational observations to transformed counts
    self.obs = (np.repeat(log_norm_counts, T, axis=0)).reshape(W, T)
    # set variational parameters
    self.obs_variance = obs_variance
    self.chain_variance = chain_variance
    # compute post variance, mean for every word in the vocabulary
    for w in range(0, W):
        self.variance[w], self.fwd_variance[w] = self.compute_post_variance(w, self.chain_variance)
        self.mean[w], self.fwd_mean[w] = self.compute_post_mean(w, self.chain_variance)
    self.zeta = self.update_zeta()
    self.e_log_prob = self.compute_expected_log_prob()
def fit_sslm(self, sstats):
    """
    Fit the variational distribution for one topic (essentially the M-step).

    `sstats` holds this topic's sufficient statistics (vocab x time).
    Alternates between re-optimizing the variational observations
    (update_obs) and re-evaluating the bound until the relative change drops
    below the threshold or the iteration cap is reached. Returns the final
    bound value.
    """
    W = self.vocab_len
    bound = 0
    old_bound = 0
    sslm_fit_threshold = 1e-6
    sslm_max_iter = 2
    # start above the threshold so the loop below runs at least once
    converged = sslm_fit_threshold + 1
    # computing variance, fwd_variance for every word up front
    self.variance, self.fwd_variance = \
        (np.array(x) for x in list(zip(*[self.compute_post_variance(w, self.chain_variance) for w in range(0, W)])))
    # column sum of sstats
    totals = sstats.sum(axis=0)
    iter_ = 0
    # model is hard-coded, so the DIM branches below are currently unreachable
    model = "DTM"
    if model == "DTM":
        bound = self.compute_bound(sstats, totals)
    if model == "DIM":
        bound = self.compute_bound_fixed(sstats, totals)
    logger.info("initial sslm bound is %f", bound)
    while converged > sslm_fit_threshold and iter_ < sslm_max_iter:
        iter_ += 1
        old_bound = bound
        self.obs, self.zeta = self.update_obs(sstats, totals)
        if model == "DTM":
            bound = self.compute_bound(sstats, totals)
        if model == "DIM":
            bound = self.compute_bound_fixed(sstats, totals)
        # relative change of the bound between successive iterations
        converged = np.fabs((bound - old_bound) / old_bound)
        logger.info("iteration %i iteration lda seq bound is %f convergence is %f", iter_, bound, converged)
    self.e_log_prob = self.compute_expected_log_prob()
    return bound
def compute_bound(self, sstats, totals):
    """
    Compute the variational lower bound on the log probability for one topic.

    Implements formula 5 from the appendix of Blei & Lafferty's DTM paper.
    `sstats` is the vocab x time matrix of topic sufficient statistics;
    `totals` holds its per-time-slice column sums.

    Fix over the original: the vocabulary size and time-slice count were
    kept in lowercase `w` / `t` and then shadowed by the loop variables, so
    after the first statement-level `for w in range(0, w)` loop every later
    `range(0, w)` bound shrank by one per pass and words were silently
    dropped from the bound. Distinct `W` / `T` locals keep the loop bounds
    constant.
    """
    W = self.vocab_len
    T = self.num_time_slices
    term_1 = 0
    term_2 = 0
    term_3 = 0
    val = 0
    ent = 0
    chain_variance = self.chain_variance
    # recompute posterior means (and zeta) from the current observations
    self.mean, self.fwd_mean = \
        (np.array(x) for x in zip(*[self.compute_post_mean(w, self.chain_variance) for w in range(0, W)]))
    self.zeta = self.update_zeta()
    # variance reduction between the first and last slice, per word
    for w in range(0, W):
        val += (self.variance[w][0] - self.variance[w][T]) / 2 * chain_variance
    logger.info("Computing bound, all times")
    for t in range(1, T + 1):
        term_1 = 0.0
        term_2 = 0.0
        ent = 0.0
        for w in range(0, W):
            m = self.mean[w][t]
            prev_m = self.mean[w][t - 1]
            v = self.variance[w][t]
            # w_phi_l is only used in Document Influence Model; the values are always zero here
            # w_phi_l = sslm.w_phi_l[w][t - 1]
            # exp_i = np.exp(-prev_m)
            # term_1 += (np.power(m - prev_m - (w_phi_l * exp_i), 2) / (2 * chain_variance)) -
            # (v / chain_variance) - np.log(chain_variance)
            term_1 += \
                (np.power(m - prev_m, 2) / (2 * chain_variance)) - (v / chain_variance) - np.log(chain_variance)
            term_2 += sstats[w][t - 1] * m
            ent += np.log(v) / 2  # note the 2pi's cancel with term1 (see doc)
        term_3 = -totals[t - 1] * np.log(self.zeta[t - 1])
        val += term_2 + term_3 + ent - term_1
    return val
def update_obs(self, sstats, totals):
    """
    Optimize the variational observations `obs` for every word via
    conjugate gradient on f_obs / df_obs, then refresh zeta.

    Returns the updated (obs, zeta) pair.
    TODO:
    This is by far the slowest function in the whole algorithm.
    Replacing or improving the performance of this would greatly speed things up.
    """
    # words whose count L2-norm falls below this are treated as rare
    OBS_NORM_CUTOFF = 2
    STEP_SIZE = 0.01
    TOL = 1e-3
    W = self.vocab_len
    T = self.num_time_slices
    runs = 0
    # row t holds d mean / d obs_t, filled per word below
    mean_deriv_mtx = np.resize(np.zeros(T * (T + 1)), (T, T + 1))
    norm_cutoff_obs = None
    for w in range(0, W):
        w_counts = sstats[w]
        counts_norm = 0
        # now we find L2 norm of w_counts
        for i in range(0, len(w_counts)):
            counts_norm += w_counts[i] * w_counts[i]
        counts_norm = np.sqrt(counts_norm)
        if counts_norm < OBS_NORM_CUTOFF and norm_cutoff_obs is not None:
            # rare word and a previous rare-word solution exists: skip the
            # expensive optimization, keep the current obs and refresh the cache
            obs = self.obs[w]
            norm_cutoff_obs = np.copy(obs)
        else:
            if counts_norm < OBS_NORM_CUTOFF:
                # rare word without a cached solution: optimize with zeroed counts
                w_counts = np.zeros(len(w_counts))
            # TODO: apply lambda function
            for t in range(0, T):
                mean_deriv = mean_deriv_mtx[t]
                mean_deriv = self.compute_mean_deriv(w, t, mean_deriv)
                mean_deriv_mtx[t] = mean_deriv
            deriv = np.zeros(T)
            # packed exactly as f_obs / df_obs unpack their *args
            args = self, w_counts, totals, mean_deriv_mtx, w, deriv
            obs = self.obs[w]
            model = "DTM"
            if model == "DTM":
                # slowest part of method: scipy conjugate-gradient minimization
                obs = optimize.fmin_cg(
                    f=f_obs, fprime=df_obs, x0=obs, gtol=TOL, args=args, epsilon=STEP_SIZE, disp=0
                )
            if model == "DIM":
                pass
            runs += 1
            if counts_norm < OBS_NORM_CUTOFF:
                norm_cutoff_obs = obs
            self.obs[w] = obs
    self.zeta = self.update_zeta()
    return self.obs, self.zeta
def compute_mean_deriv(self, word, time, deriv):
    """
    Forward-backward pass computing d E[beta_{t,word}] / d obs_{time,word}
    for t = 0..T.

    Helper for the conjugate-gradient optimization of obs. Writes the result
    into `deriv` (a pre-allocated T + 1 vector) and returns it.
    """
    T = self.num_time_slices
    # NOTE(review): mirrors the original, which reads self.variance here
    # despite the local name `fwd_variance` -- confirm against upstream.
    fwd_variance = self.variance[word]
    deriv[0] = 0
    # forward recursion
    for t in range(1, T + 1):
        if self.obs_variance > 0.0:
            weight = self.obs_variance / (fwd_variance[t - 1] + self.chain_variance + self.obs_variance)
        else:
            weight = 0.0
        value = weight * deriv[t - 1]
        if time == t - 1:
            # the slice being differentiated contributes directly
            value += 1 - weight
        deriv[t] = value
    # backward recursion
    for t in range(T - 1, -1, -1):
        if self.chain_variance == 0.0:
            weight = 0.0
        else:
            weight = self.chain_variance / (fwd_variance[t] + self.chain_variance)
        deriv[t] = weight * deriv[t] + (1 - weight) * deriv[t + 1]
    return deriv
def compute_obs_deriv(self, word, word_counts, totals, mean_deriv_mtx, deriv):
    """
    Gradient of the bound with respect to obs[word]; consumed by df_obs
    during the conjugate-gradient optimization.

    `mean_deriv_mtx` holds d mean / d obs for every time-slice (from
    compute_mean_deriv). Writes the gradient into `deriv` and returns it.
    """
    # flag -- matches the INIT_VARIANCE_CONST used for the diffuse t=0 prior
    init_mult = 1000
    T = self.num_time_slices
    mean = self.mean[word]
    variance = self.variance[word]
    # only used for DIM mode
    # w_phi_l = self.w_phi_l[word]
    # m_update_coeff = self.m_update_coeff[word]
    # temp_vect caches the per-slice zeta numerators exp(mean + variance / 2)
    self.temp_vect = np.zeros(T)
    for u in range(0, T):
        self.temp_vect[u] = np.exp(mean[u + 1] + variance[u + 1] / 2)
    for t in range(0, T):
        mean_deriv = mean_deriv_mtx[t]
        term1 = 0
        term2 = 0
        # term3/term4 remain zero outside DIM mode
        term3 = 0
        term4 = 0
        for u in range(1, T + 1):
            mean_u = mean[u]
            mean_u_prev = mean[u - 1]
            dmean_u = mean_deriv[u]
            dmean_u_prev = mean_deriv[u - 1]
            # chain (smoothness) contribution
            term1 += (mean_u - mean_u_prev) * (dmean_u - dmean_u_prev)
            # data-fit contribution
            term2 += (word_counts[u - 1] - (totals[u - 1] * self.temp_vect[u - 1] / self.zeta[u - 1])) * dmean_u
        model = "DTM"
        if model == "DIM":
            # do some stuff
            pass
        if self.chain_variance:
            term1 = - (term1 / self.chain_variance)
            # penalty anchoring the diffuse initial state
            term1 = term1 - (mean[0] * mean_deriv[0]) / (init_mult * self.chain_variance)
        else:
            term1 = 0.0
        deriv[t] = term1 + term2 + term3 + term4
    return deriv
# endclass sslm
class LdaPost(utils.SaveLoad):
    """
    Posterior values associated with each set of documents.

    Holds the per-document variational parameters (gamma, phi, log_phi) and
    the routines that update them during inference.
    TODO: use **Hoffman, Blei, Bach: Online Learning for Latent Dirichlet Allocation, NIPS 2010.**
    to update phi, gamma. End game would be to somehow replace LdaPost entirely with LdaModel.
    """

    def __init__(self, doc=None, lda=None, max_doc_len=None, num_topics=None, gamma=None, lhood=None):
        # `doc` is expected to be a bag-of-words list of (word_id, count) pairs
        self.doc = doc
        self.lda = lda
        self.gamma = gamma
        self.lhood = lhood
        if self.gamma is None:
            self.gamma = np.zeros(num_topics)
        if self.lhood is None:
            # one slot per topic plus one extra for the gamma term (see compute_lda_lhood)
            self.lhood = np.zeros(num_topics + 1)
        if max_doc_len is not None and num_topics is not None:
            # per-word-position topic responsibilities and their logs
            self.phi = np.resize(np.zeros(max_doc_len * num_topics), (max_doc_len, num_topics))
            self.log_phi = np.resize(np.zeros(max_doc_len * num_topics), (max_doc_len, num_topics))
        # the following are class variables which are to be integrated during Document Influence Model
        self.doc_weight = None
        self.renormalized_doc_weight = None

    def update_phi(self, doc_number, time):
        """
        Update variational multinomial parameters, based on a document and a time-slice.
        This is done based on the original Blei-LDA paper, where:
        log_phi := beta * exp(Ψ(gamma)), over every topic for every word.
        TODO: incorporate lee-sueng trick used in
        **Lee, Seung: Algorithms for non-negative matrix factorization, NIPS 2001**.
        """
        num_topics = self.lda.num_topics
        # digamma values
        dig = np.zeros(num_topics)
        for k in range(0, num_topics):
            dig[k] = digamma(self.gamma[k])
        n = 0  # keep track of iterations for phi, log_phi
        for word_id, count in self.doc:
            for k in range(0, num_topics):
                self.log_phi[n][k] = dig[k] + self.lda.topics[word_id][k]
            log_phi_row = self.log_phi[n]
            phi_row = self.phi[n]
            # log normalize: v accumulates log(sum(exp(log_phi_row)))
            v = log_phi_row[0]
            for i in range(1, len(log_phi_row)):
                v = np.logaddexp(v, log_phi_row[i])
            # subtract every element by v
            log_phi_row = log_phi_row - v
            phi_row = np.exp(log_phi_row)
            self.log_phi[n] = log_phi_row
            self.phi[n] = phi_row
            n += 1  # increase iteration
        return self.phi, self.log_phi

    def update_gamma(self):
        """
        update variational dirichlet parameters as described in the original Blei LDA paper:
        gamma = alpha + sum(phi), over every topic for every word.
        """
        self.gamma = np.copy(self.lda.alpha)
        n = 0  # keep track of number of iterations for phi, log_phi
        for word_id, count in self.doc:
            phi_row = self.phi[n]
            for k in range(0, self.lda.num_topics):
                self.gamma[k] += phi_row[k] * count
            n += 1
        return self.gamma

    def init_lda_post(self):
        """
        Initialize variational posterior, does not return anything.
        """
        # spread the document's total count evenly over topics for gamma and
        # make phi uniform over topics for every word position
        total = sum(count for word_id, count in self.doc)
        self.gamma.fill(self.lda.alpha[0] + float(total) / self.lda.num_topics)
        self.phi[:len(self.doc), :] = 1.0 / self.lda.num_topics
        # doc_weight used during DIM
        # ldapost.doc_weight = None

    def compute_lda_lhood(self):
        """
        compute the likelihood bound for the current variational parameters
        """
        num_topics = self.lda.num_topics
        gamma_sum = np.sum(self.gamma)
        # to be used in DIM
        # sigma_l = 0
        # sigma_d = 0
        lhood = gammaln(np.sum(self.lda.alpha)) - gammaln(gamma_sum)
        # the extra slot stores the gamma normalization term
        self.lhood[num_topics] = lhood
        # influence_term = 0
        digsum = digamma(gamma_sum)
        model = "DTM"  # noqa:F841
        for k in range(0, num_topics):
            # below code only to be used in DIM mode
            # if ldapost.doc_weight is not None and (model == "DIM" or model == "fixed"):
            # influence_topic = ldapost.doc_weight[k]
            # influence_term = \
            # - ((influence_topic * influence_topic + sigma_l * sigma_l) / 2.0 / (sigma_d * sigma_d))
            e_log_theta_k = digamma(self.gamma[k]) - digsum
            lhood_term = \
                (self.lda.alpha[k] - self.gamma[k]) * e_log_theta_k + \
                gammaln(self.gamma[k]) - gammaln(self.lda.alpha[k])
            # TODO: check why there's an IF
            n = 0
            for word_id, count in self.doc:
                if self.phi[n][k] > 0:
                    lhood_term += \
                        count * self.phi[n][k] * (e_log_theta_k + self.lda.topics[word_id][k] - self.log_phi[n][k])
                n += 1
            self.lhood[k] = lhood_term
            lhood += lhood_term
        # in case of DIM add influence term
        # lhood += influence_term
        return lhood

    def fit_lda_post(self, doc_number, time, ldaseq, LDA_INFERENCE_CONVERGED=1e-8,
                     lda_inference_max_iter=25, g=None, g3_matrix=None, g4_matrix=None, g5_matrix=None):
        """
        Posterior inference for lda: alternate gamma and phi updates until the
        bound converges or the iteration cap is hit; returns the final bound.
        g, g3, g4 and g5 are matrices used in Document Influence Model and not used currently.
        """
        self.init_lda_post()
        # sum of counts in a doc
        total = sum(count for word_id, count in self.doc)
        model = "DTM"
        if model == "DIM":
            # if in DIM then we initialise some variables here
            pass
        lhood = self.compute_lda_lhood()
        lhood_old = 0
        converged = 0
        iter_ = 0
        # first iteration starts here
        iter_ += 1
        lhood_old = lhood
        self.gamma = self.update_gamma()
        model = "DTM"
        # NOTE(review): `sslm` below is the module-level class object, not an
        # instance, so `sslm is None` is always False; with model hard-coded
        # to "DTM" the first branch is always taken.
        if model == "DTM" or sslm is None:
            self.phi, self.log_phi = self.update_phi(doc_number, time)
        elif model == "DIM" and sslm is not None:
            self.phi, self.log_phi = self.update_phi_fixed(doc_number, time, sslm, g3_matrix, g4_matrix, g5_matrix)
        lhood = self.compute_lda_lhood()
        # relative change of the bound, scaled by the document length
        converged = np.fabs((lhood_old - lhood) / (lhood_old * total))
        while converged > LDA_INFERENCE_CONVERGED and iter_ <= lda_inference_max_iter:
            iter_ += 1
            lhood_old = lhood
            self.gamma = self.update_gamma()
            model = "DTM"
            if model == "DTM" or sslm is None:
                self.phi, self.log_phi = self.update_phi(doc_number, time)
            elif model == "DIM" and sslm is not None:
                self.phi, self.log_phi = self.update_phi_fixed(doc_number, time, sslm, g3_matrix, g4_matrix, g5_matrix)
            lhood = self.compute_lda_lhood()
            converged = np.fabs((lhood_old - lhood) / (lhood_old * total))
        return lhood

    def update_lda_seq_ss(self, time, doc, topic_suffstats):
        """
        Update lda sequence sufficient statistics from an lda posterior.
        This is very similar to the update_gamma method and uses the same formula.
        """
        num_topics = self.lda.num_topics
        for k in range(0, num_topics):
            topic_ss = topic_suffstats[k]
            n = 0
            for word_id, count in self.doc:
                topic_ss[word_id][time] += count * self.phi[n][k]
                n += 1
            topic_suffstats[k] = topic_ss
        return topic_suffstats
# the following functions are used in update_obs as the function to optimize
def f_obs(x, *args):
    """
    Objective (negated bound contribution) minimized when optimizing obs.

    `x` is the candidate obs vector for `word`; `args` carries
    (sslm, word_counts, totals, mean_deriv_mtx, word, deriv) exactly as
    packed by sslm.update_obs. Mutates sslm.obs / mean / fwd_mean for `word`
    as a side effect of evaluating the candidate.
    """
    sslm, word_counts, totals, mean_deriv_mtx, word, deriv = args
    # flag -- matches the INIT_VARIANCE_CONST used for the diffuse t=0 prior
    init_mult = 1000
    T = len(x)
    val = 0
    term1 = 0
    term2 = 0
    # term 3 and 4 for DIM
    term3 = 0
    term4 = 0
    # install the candidate and recompute the posterior means for this word
    sslm.obs[word] = x
    sslm.mean[word], sslm.fwd_mean[word] = sslm.compute_post_mean(word, sslm.chain_variance)
    mean = sslm.mean[word]
    variance = sslm.variance[word]
    # only used for DIM mode
    # w_phi_l = sslm.w_phi_l[word]
    # m_update_coeff = sslm.m_update_coeff[word]
    for t in range(1, T + 1):
        mean_t = mean[t]
        mean_t_prev = mean[t - 1]
        # chain (smoothness) penalty accumulates squared slice-to-slice jumps
        val = mean_t - mean_t_prev
        term1 += val * val
        # data-fit term
        term2 += word_counts[t - 1] * mean_t - totals[t - 1] * np.exp(mean_t + variance[t] / 2) / sslm.zeta[t - 1]
    model = "DTM"
    if model == "DIM":
        # stuff happens
        pass
    if sslm.chain_variance > 0.0:
        term1 = - (term1 / (2 * sslm.chain_variance))
        # anchor the diffuse initial state
        term1 = term1 - mean[0] * mean[0] / (2 * init_mult * sslm.chain_variance)
    else:
        term1 = 0.0
    # negated because scipy minimizes, while we maximize the bound
    final = -(term1 + term2 + term3 + term4)
    return final
def df_obs(x, *args):
    """
    Derivative of f_obs with respect to the observed variational parameters;
    used as the `fprime` callback for scipy's conjugate-gradient optimizer.

    `x` is the current obs vector for `word`; `args` carries
    (sslm, word_counts, totals, mean_deriv_mtx, word, deriv) exactly as
    packed by sslm.update_obs. Returns the negated gradient, since f_obs is
    the negated bound.
    """
    sslm, word_counts, totals, mean_deriv_mtx, word, deriv = args
    # install the candidate and refresh the posterior means before differentiating
    sslm.obs[word] = x
    sslm.mean[word], sslm.fwd_mean[word] = sslm.compute_post_mean(word, sslm.chain_variance)
    model = "DTM"
    if model == "DTM":
        deriv = sslm.compute_obs_deriv(word, word_counts, totals, mean_deriv_mtx, deriv)
    elif model == "DIM":
        # fix: the original referenced an undefined name `p` here (flagged
        # noqa:F821); use the unpacked locals so the branch is well-formed
        # if the DIM path is ever enabled
        deriv = sslm.compute_obs_deriv_fixed(word, word_counts, totals, sslm, mean_deriv_mtx, deriv)
    return np.negative(deriv)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automatically detect common phrases (multiword expressions) from a stream of sentences.
The phrases are collocations (frequently co-occurring tokens). See [1]_ for the
exact formula.
For example, if your input stream (=an iterable, with each value a list of token strings) looks like:
>>> print(list(sentence_stream))
[[u'the', u'mayor', u'of', u'new', u'york', u'was', u'there'],
[u'machine', u'learning', u'can', u'be', u'useful', u'sometimes'],
...,
]
you'd train the detector with:
>>> phrases = Phrases(sentence_stream)
and then create a performant Phraser object to transform any sentence (list of token strings)
using the standard gensim syntax:
>>> bigram = Phraser(phrases)
>>> sent = [u'the', u'mayor', u'of', u'new', u'york', u'was', u'there']
>>> print(bigram[sent])
[u'the', u'mayor', u'of', u'new_york', u'was', u'there']
(note `new_york` became a single token). As usual, you can also transform an entire
sentence stream using:
>>> print(list(bigram[any_sentence_stream]))
[[u'the', u'mayor', u'of', u'new_york', u'was', u'there'],
[u'machine_learning', u'can', u'be', u'useful', u'sometimes'],
...,
]
You can also continue updating the collocation counts with new sentences, by:
>>> bigram.add_vocab(new_sentence_stream)
These **phrase streams are meant to be used during text preprocessing, before
converting the resulting tokens into vectors using `Dictionary`**. See the
:mod:`gensim.models.word2vec` module for an example application of using phrase detection.
The detection can also be **run repeatedly**, to get phrases longer than
two tokens (e.g. `new_york_times`):
>>> trigram = Phrases(bigram[sentence_stream])
>>> sent = [u'the', u'new', u'york', u'times', u'is', u'a', u'newspaper']
>>> print(trigram[bigram[sent]])
[u'the', u'new_york_times', u'is', u'a', u'newspaper']
The common_terms parameter add a way to give special treatment to common terms (aka stop words)
such that their presence between two words
won't prevent bigram detection.
It allows to detect expressions like "bank of america" or "eye of the beholder".
>>> common_terms = ["of", "with", "without", "and", "or", "the", "a"]
>>> ct_phrases = Phrases(sentence_stream, common_terms=common_terms)
The phraser will of course inherit the common_terms from Phrases.
>>> ct_bigram = Phraser(ct_phrases)
>>> sent = [u'the', u'mayor', u'shows', u'his', u'lack', u'of', u'interest']
>>> print(bigram[sent])
[u'the', u'mayor', u'shows', u'his', u'lack_of_interest']
.. [1] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean.
Distributed Representations of Words and Phrases and their Compositionality.
In Proceedings of NIPS, 2013.
"""
import sys
import os
import logging
import warnings
from collections import defaultdict
import functools as ft
import itertools as it
from math import log
import pickle
import six
from six import iteritems, string_types, PY2, next
from gensim import utils, interfaces
if PY2:
from inspect import getargspec
else:
from inspect import getfullargspec as getargspec
logger = logging.getLogger(__name__)
def _is_single(obj):
    """
    Decide whether `obj` is a single document or an entire corpus.

    Returns an (is_single, stream) 2-tuple, where `stream` yields the same
    sequence as `obj` (the element consumed while peeking is chained back on).
    `obj` is a single document if it is an iterable of strings; it is a
    corpus if it is an iterable of documents.
    """
    iterator = iter(obj)
    head_probe = iterator
    try:
        first = next(iterator)
    except StopIteration:
        # An empty object is a single document
        return True, obj
    # re-attach the consumed element so the caller sees the full stream
    rebuilt = it.chain([first], iterator)
    if isinstance(first, string_types):
        # the first item is a token, so obj is one document
        return True, rebuilt
    # corpus: when obj was itself an iterator (iter(obj) is obj), hand back
    # the rebuilt chain; otherwise the original object can be re-iterated
    return (False, rebuilt) if head_probe == obj else (False, obj)
class SentenceAnalyzer(object):
    """Mixin with the bigram-detection logic shared by Phrases and Phraser."""

    def score_item(self, worda, wordb, components, scorer):
        """
        Score the candidate bigram formed by `worda` and `wordb` (with any
        common terms in between, listed in `components`). Returns -1 when
        either word or the joined bigram is missing from the vocabulary.
        """
        vocab = self.vocab
        if worda in vocab and wordb in vocab:
            bigram = self.delimiter.join(components)
            if bigram in vocab:
                return scorer(
                    worda_count=float(vocab[worda]),
                    wordb_count=float(vocab[wordb]),
                    bigram_count=float(vocab[bigram]))
        return -1

    def analyze_sentence(self, sentence, threshold, common_terms, scorer):
        """Analyze a sentence, yielding (token, None) for plain words and
        ([token, ...], score) for detected phrases.

        `sentence` a token list representing the sentence to be analyzed.
        `threshold` the minimum score for a bigram to be taken into account
        `common_terms` the list of common terms, they have a special treatment
        `scorer` the scorer function, as given to Phrases
        """
        s = [utils.any2utf8(w) for w in sentence]
        last_uncommon = None  # last non-common token still awaiting pairing
        in_between = []  # common terms collected since last_uncommon
        # adding None is a trick that helps getting an automatic happy ending
        # as it won't be a common word, nor score
        for word in s + [None]:
            is_common = word in common_terms
            if not is_common and last_uncommon:
                chain = [last_uncommon] + in_between + [word]
                # test the span between last_uncommon and this word
                score = self.score_item(
                    worda=last_uncommon,
                    wordb=word,
                    components=chain,
                    scorer=scorer,
                )
                if score > threshold:
                    yield (chain, score)
                    last_uncommon = None
                    in_between = []
                else:
                    # release words individually
                    for w in it.chain([last_uncommon], in_between):
                        yield (w, None)
                    in_between = []
                    last_uncommon = word
            elif not is_common:
                last_uncommon = word
            else:  # common term
                if last_uncommon:
                    # wait for uncommon resolution
                    in_between.append(word)
                else:
                    yield (word, None)
class PhrasesTransformation(interfaces.TransformationABC):
    """Base class adding backward-compatible loading for Phrases/Phraser."""

    @classmethod
    def load(cls, *args, **kwargs):
        """
        Load a previously saved Phrases/Phraser object, upgrading models
        persisted by older gensim versions that predate pluggable scoring
        functions or the `common_terms` attribute. Otherwise relies on
        utils.load.
        """
        model = super(PhrasesTransformation, cls).load(*args, **kwargs)
        if not hasattr(model, 'scoring'):
            # pre-scoring models: fall back to the original Mikolov scorer
            logger.info('older version of %s loaded without scoring function', cls.__name__)
            logger.info('setting pluggable scoring method to original_scorer for compatibility')
            model.scoring = original_scorer
        if hasattr(model, 'scoring') and isinstance(model.scoring, six.string_types):
            # models saved with a textual scoring setting: resolve to a callable
            if model.scoring == 'default':
                logger.info('older version of %s loaded with "default" scoring parameter', cls.__name__)
                logger.info('setting scoring method to original_scorer pluggable scoring method for compatibility')
                model.scoring = original_scorer
            elif model.scoring == 'npmi':
                logger.info('older version of %s loaded with "npmi" scoring parameter', cls.__name__)
                logger.info('setting scoring method to npmi_scorer pluggable scoring method for compatibility')
                model.scoring = npmi_scorer
            else:
                raise ValueError(
                    'failed to load %s model with unknown scoring setting %s' % (cls.__name__, model.scoring))
        if not hasattr(model, "common_terms"):
            # pre-common_terms models: default to "no stop words"
            logger.info('older version of %s loaded without common_terms attribute', cls.__name__)
            logger.info('setting common_terms to empty set')
            model.common_terms = frozenset()
        return model
class Phrases(SentenceAnalyzer, PhrasesTransformation):
"""
Detect phrases, based on collected collocation counts. Adjacent words that appear
together more frequently than expected are joined together with the `_` character.
It can be used to generate phrases on the fly, using the `phrases[sentence]`
and `phrases[corpus]` syntax.
"""
def __init__(self, sentences=None, min_count=5, threshold=10.0,
             max_vocab_size=40000000, delimiter=b'_', progress_per=10000,
             scoring='default', common_terms=frozenset()):
    """
    Initialize the model from an iterable of `sentences` (each a list of
    unicode strings). For larger corpora pass a generator that streams the
    sentences from disk/network, e.g. :class:`BrownCorpus`,
    :class:`Text8Corpus` or :class:`LineSentence` from
    :mod:`gensim.models.word2vec`.

    `min_count` ignore all words and bigrams with total collected count
    lower than this.

    `threshold` score threshold for forming phrases (higher means fewer
    phrases); a phrase of words `a` followed by `b` is accepted if its score
    exceeds it. Its scale depends on `scoring`.

    `max_vocab_size` maximum vocabulary size, used to control pruning of
    less common words to keep memory under control (the default of 40M
    needs about 3.6GB of RAM).

    `delimiter` glue character used to join collocation tokens; should be a
    byte string (e.g. b'_').

    `scoring` how potential phrases are scored against `threshold`. Either
    a built-in name or a callable:

    * 'default': from "Efficient Estimation of Word Representations in
      Vector Space" by Mikolov et al.:
      (bigram_count - min_count) * len_vocab / (worda_count * wordb_count)
    * 'npmi': normalized pointwise mutual information, from "Normalized
      (Pointwise) Mutual Information in Collocation Extraction" by Gerlof
      Bouma; more robust for common words, ranges from -1 to 1, but slower
      to calculate than the default.

    A custom scoring callable must accept (even if it ignores) all of:
    worda_count, wordb_count, bigram_count, len_vocab, min_count,
    corpus_word_count -- and must be picklable.

    `common_terms` optional collection of "stop words" that won't affect
    the frequency count of expressions containing them.

    Raises ValueError for inconsistent parameters or an unusable `scoring`
    value, and pickle.PickleError when a custom scorer cannot be pickled.
    """
    if min_count <= 0:
        raise ValueError("min_count should be at least 1")
    if threshold <= 0 and scoring == 'default':
        raise ValueError("threshold should be positive for default scoring")
    if scoring == 'npmi' and (threshold < -1 or threshold > 1):
        raise ValueError("threshold should be between -1 and 1 for npmi scoring")

    # resolve a string scoring setting to the matching built-in scorer;
    # intentionally rebind `scoring` rather than setting self.scoring here,
    # so the parameter check below also validates the built-ins
    if isinstance(scoring, six.string_types):
        if scoring == 'default':
            scoring = original_scorer
        elif scoring == 'npmi':
            scoring = npmi_scorer
        else:
            raise ValueError('unknown scoring method string %s specified' % (scoring))

    scoring_parameters = [
        'worda_count', 'wordb_count', 'bigram_count', 'len_vocab', 'min_count', 'corpus_word_count'
    ]
    if callable(scoring):
        if all(parameter in getargspec(scoring)[0] for parameter in scoring_parameters):
            self.scoring = scoring
        else:
            raise ValueError('scoring function missing expected parameters')
    else:
        # fix: previously a non-string, non-callable `scoring` fell through
        # silently and only surfaced later as an AttributeError on self.scoring
        raise ValueError('scoring should be a string or a callable')

    self.min_count = min_count
    self.threshold = threshold
    self.max_vocab_size = max_vocab_size
    self.vocab = defaultdict(int)  # mapping between utf8 token => its count
    self.min_reduce = 1  # ignore any tokens with count smaller than this
    self.delimiter = delimiter
    self.progress_per = progress_per
    self.corpus_word_count = 0
    self.common_terms = frozenset(utils.any2utf8(w) for w in common_terms)

    # ensure picklability of the scorer by round-tripping it; the original
    # try/finally deleted locals that were never bound when dumps() itself
    # failed, masking the PickleError with a NameError
    try:
        pickle.loads(pickle.dumps(self.scoring))
    except pickle.PickleError:
        raise pickle.PickleError('unable to pickle custom Phrases scoring function')

    if sentences is not None:
        self.add_vocab(sentences)
@classmethod
def load(cls, *args, **kwargs):
    """
    Load a previously saved Phrases object, backfilling the
    `corpus_word_count` attribute for models saved by older Phrases
    versions that did not track it.
    """
    model = super(Phrases, cls).load(*args, **kwargs)
    if hasattr(model, 'corpus_word_count'):
        return model
    # older model: the count was never collected, so it cannot be recovered
    logger.info('older version of %s loaded without corpus_word_count', cls.__name__)
    logger.info('Setting it to 0, do not use it in your scoring function.')
    model.corpus_word_count = 0
    return model
def __str__(self):
    """Get short string representation of this phrase detector."""
    template = "%s<%i vocab, min_count=%s, threshold=%s, max_vocab_size=%s>"
    fields = (
        self.__class__.__name__, len(self.vocab), self.min_count,
        self.threshold, self.max_vocab_size,
    )
    return template % fields
@staticmethod
def learn_vocab(sentences, max_vocab_size, delimiter=b'_', progress_per=10000,
                common_terms=frozenset()):
    """Collect unigram/bigram counts from the `sentences` iterable.

    Returns (min_reduce, vocab, total_words) where `min_reduce` is the last
    pruning threshold applied, `vocab` maps utf8 token/bigram -> count, and
    `total_words` is the number of (non-unique) tokens seen.
    """
    sentence_no = -1
    total_words = 0
    logger.info("collecting all words and their counts")
    vocab = defaultdict(int)
    min_reduce = 1
    for sentence_no, sentence in enumerate(sentences):
        if sentence_no % progress_per == 0:
            logger.info(
                "PROGRESS: at sentence #%i, processed %i words and %i word types",
                sentence_no, total_words, len(vocab),
            )
        s = [utils.any2utf8(w) for w in sentence]
        last_uncommon = None  # last non-common token seen
        in_between = []  # common terms accumulated since then
        for word in s:
            if word not in common_terms:
                vocab[word] += 1
                if last_uncommon is not None:
                    # count the bigram spanning any intervening common terms
                    components = it.chain([last_uncommon], in_between, [word])
                    vocab[delimiter.join(components)] += 1
                last_uncommon = word
                in_between = []
            elif last_uncommon is not None:
                in_between.append(word)
            total_words += 1
        if len(vocab) > max_vocab_size:
            # prune rare entries to keep memory bounded; raise the bar each time
            utils.prune_vocab(vocab, min_reduce)
            min_reduce += 1
    logger.info(
        "collected %i word types from a corpus of %i words (unigram + bigrams) and %i sentences",
        len(vocab), total_words, sentence_no + 1
    )
    return min_reduce, vocab, total_words
def add_vocab(self, sentences):
    """
    Count n-grams in `sentences` and merge the counts into this detector.

    Counting happens in a separate scratch vocabulary first; this consumes
    more RAM than merging directly into `self.vocab`, but gives the new
    sentences a fighting chance to accumulate sufficient counts before the
    (large) previously collected counts trigger pruning.
    """
    min_reduce, vocab, total_words = self.learn_vocab(
        sentences, self.max_vocab_size, self.delimiter, self.progress_per, self.common_terms)
    self.corpus_word_count += total_words
    if not self.vocab:
        # first batch: adopt the freshly counted vocab outright,
        # avoiding a pointless copy of a potentially gigantic dict
        logger.info("using %i counts as vocab in %s", len(vocab), self)
        self.vocab = vocab
        return
    logger.info("merging %i counts into %s", len(vocab), self)
    self.min_reduce = max(self.min_reduce, min_reduce)
    for word, count in iteritems(vocab):
        self.vocab[word] += count
    if len(self.vocab) > self.max_vocab_size:
        utils.prune_vocab(self.vocab, self.min_reduce)
        self.min_reduce += 1
    logger.info("merged %s", self)
def export_phrases(self, sentences, out_delimiter=b' ', as_tuples=False):
    """Yield every phrase detected in `sentences` together with its score.

    Each yielded item is ``(phrase, score)``. When `as_tuples` is True the
    phrase is a tuple of its component words; otherwise the words are joined
    with `out_delimiter`. Useful for inspecting/debugging the `threshold`
    setting, e.g. by dumping the pairs to a tsv file.
    """
    # Bind the corpus-wide statistics into the scorer once, outside the loop.
    bound_scorer = ft.partial(
        self.scoring,
        len_vocab=float(len(self.vocab)),
        min_count=float(self.min_count),
        corpus_word_count=float(self.corpus_word_count),
    )
    for sentence in sentences:
        scored = self.analyze_sentence(
            sentence,
            threshold=self.threshold,
            common_terms=self.common_terms,
            scorer=bound_scorer,
        )
        for words, score in scored:
            if score is None:
                # unscored tokens are plain words, not phrases
                continue
            if as_tuples:
                yield (tuple(words), score)
            else:
                yield (out_delimiter.join(words), score)
def __getitem__(self, sentence):
    """Convert `sentence` (a list of unicode token strings) into phrase
    tokens, where detected phrases are joined by the configured delimiter.

    When `sentence` is an entire corpus (an iterable of sentences rather
    than a single sentence), return a lazy iterable that converts each
    sentence on the fly.
    """
    warnings.warn("For a faster implementation, use the gensim.models.phrases.Phraser class")
    is_single, sentence = _is_single(sentence)
    if not is_single:
        # corpus input: stream converted sentences one after another
        return self._apply(sentence)
    bound_scorer = ft.partial(
        self.scoring,
        len_vocab=float(len(self.vocab)),
        min_count=float(self.min_count),
        corpus_word_count=float(self.corpus_word_count),
    )
    scored = self.analyze_sentence(
        sentence,
        threshold=self.threshold,
        common_terms=self.common_terms,
        scorer=bound_scorer,
    )
    out_tokens = []
    for words, score in scored:
        # a non-None score marks a detected phrase: join its components
        joined = self.delimiter.join(words) if score is not None else words
        out_tokens.append(joined)
    return [utils.to_unicode(token) for token in out_tokens]
# these two built-in scoring methods don't cast everything to float because the casting is done in the call
# to the scoring method in __getitem__ and export_phrases.
# calculation of score based on original mikolov word2vec paper
def original_scorer(worda_count, wordb_count, bigram_count, len_vocab, min_count, corpus_word_count):
    """Bigram score from the original Mikolov et al. word2vec paper:
    ``(count(ab) - min_count) / count(a) / count(b) * |vocab|``.
    `corpus_word_count` is accepted for signature compatibility and unused.
    Casting to float is done by the caller.
    """
    # evaluate in the same left-to-right order as the canonical formula so
    # floating-point results are bit-identical
    score = (bigram_count - min_count) / worda_count
    score = score / wordb_count
    return score * len_vocab
# normalized PMI, requires corpus size
def npmi_scorer(worda_count, wordb_count, bigram_count, len_vocab, min_count, corpus_word_count):
    """Normalized pointwise mutual information (NPMI) bigram score,
    ``pmi(a, b) / -log p(a, b)``, in [-1, 1]. Requires the corpus size;
    `len_vocab` and `min_count` are accepted for signature compatibility
    and unused.
    """
    p_a = worda_count / corpus_word_count
    p_b = wordb_count / corpus_word_count
    p_ab = bigram_count / corpus_word_count
    pmi = log(p_ab / (p_a * p_b))
    return pmi / -log(p_ab)
def pseudocorpus(source_vocab, sep, common_terms=frozenset()):
    """Feeds source_vocab's compound keys back to it, to discover phrases.

    For every key containing `sep`, yields each way of splitting it into a
    left compound, an optional run of common terms, and a right compound —
    never splitting immediately after a common term.
    """
    for key in source_vocab:
        if sep not in key:
            continue  # unigram key: nothing to split
        parts = key.split(sep)
        for cut in range(1, len(parts)):
            if parts[cut - 1] in common_terms:
                # do not join common terms
                continue
            # gather the run of common terms that starts at the cut point
            run = []
            j = cut
            while j < len(parts) and parts[j] in common_terms:
                run.append(parts[j])
                j += 1
            candidate = [sep.join(parts[:cut])] + run
            if j < len(parts):
                candidate.append(sep.join(parts[j:]))
            yield candidate
class Phraser(SentenceAnalyzer, PhrasesTransformation):
    """Minimal state & functionality needed to apply a trained
    :class:`Phrases` model to token lists.

    After the one-time initialization a Phraser is much smaller and somewhat
    faster than the full Phrases model. It freezes the source model's
    `min_count`, `threshold` and `scoring` settings (tamper with those and
    build a new Phraser to try other values).
    """

    def __init__(self, phrases_model):
        # copy the scoring configuration from the source model; the bulky
        # token vocabulary is replaced by the much smaller phrasegrams table
        self.threshold = phrases_model.threshold
        self.min_count = phrases_model.min_count
        self.delimiter = phrases_model.delimiter
        self.scoring = phrases_model.scoring
        self.common_terms = phrases_model.common_terms
        self.phrasegrams = {}
        logger.info('source_vocab length %i', len(phrases_model.vocab))
        # feed the model's own compound keys back through it to harvest
        # every (phrase, score) pair it can produce
        corpus = self.pseudocorpus(phrases_model)
        count = 0
        for bigram, score in phrases_model.export_phrases(corpus, self.delimiter, as_tuples=True):
            if bigram in self.phrasegrams:
                logger.info('Phraser repeat %s', bigram)
            self.phrasegrams[bigram] = (phrases_model.vocab[self.delimiter.join(bigram)], score)
            count += 1
            if count % 50000 == 0:
                logger.info('Phraser added %i phrasegrams', count)
        logger.info('Phraser built with %i %i phrasegrams', count, len(self.phrasegrams))

    def pseudocorpus(self, phrases_model):
        # delegate to the module-level generator over the model's vocab
        return pseudocorpus(
            phrases_model.vocab, phrases_model.delimiter, phrases_model.common_terms)

    def score_item(self, worda, wordb, components, scorer):
        """Score is retained from the original dataset; -1 when unknown."""
        entry = self.phrasegrams.get(tuple(components))
        return -1 if entry is None else entry[1]

    def __getitem__(self, sentence):
        """Convert `sentence` (a list of unicode token strings) into phrase
        tokens, joining detected phrases with the configured delimiter.

        When `sentence` is an entire corpus (iterable of sentences), return
        an iterable that converts each sentence on the fly.
        """
        is_single, sentence = _is_single(sentence)
        if not is_single:
            # corpus input: stream converted sentences one after another
            return self._apply(sentence)
        # scorer=None: score_item above consults the phrasegrams table instead
        scored = self.analyze_sentence(
            sentence,
            threshold=self.threshold,
            common_terms=self.common_terms,
            scorer=None)
        out_tokens = []
        for words, score in scored:
            joined = self.delimiter.join(words) if score is not None else words
            out_tokens.append(joined)
        return [utils.to_unicode(token) for token in out_tokens]
if __name__ == '__main__':
    # Demo / smoke-test entry point: learn bigram phrases from a corpus in
    # text8 format (path given as argv[1]) and print the phrase-joined
    # sentences to stdout.
    logging.basicConfig(format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s', level=logging.INFO)
    logging.info("running %s", " ".join(sys.argv))
    # check and process cmdline input
    program = os.path.basename(sys.argv[0])
    if len(sys.argv) < 2:
        # no corpus file given: print the module docstring as usage help
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    infile = sys.argv[1]
    from gensim.models import Phrases  # noqa:F811 for pickle
    from gensim.models.word2vec import Text8Corpus
    sentences = Text8Corpus(infile)
    # test_doc = LineSentence('test/test_data/testcorpus.txt')
    bigram = Phrases(sentences, min_count=5, threshold=100)
    # bigram[sentences] streams each sentence with detected phrases joined
    for s in bigram[sentences]:
        print(utils.to_utf8(u' '.join(s)))
| 27,332 | 40.921779 | 119 | py |
poincare_glove | poincare_glove-master/gensim/models/keyedvectors.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Shiva Manne <manneshiva@gmail.com>
# Copyright (C) 2018 RaRe Technologies s.r.o.
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Word vector storage and similarity look-ups.
Common code independent of the way the vectors are trained (Word2Vec, FastText, WordRank, VarEmbed etc.)
The word vectors are considered read-only in this class.
Initialize the vectors by training e.g. Word2Vec::
>>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)
>>> word_vectors = model.wv
Persist the word vectors to disk with::
>>> word_vectors.save(fname)
>>> word_vectors = KeyedVectors.load(fname)
The vectors can also be instantiated from an existing file on disk
in the original Google's word2vec C format as a KeyedVectors instance::
>>> from gensim.models import KeyedVectors
>>> word_vectors = KeyedVectors.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
>>> word_vectors = KeyedVectors.load_word2vec_format('/tmp/vectors.bin', binary=True) # C binary format
You can perform various syntactic/semantic NLP word tasks with the vectors. Some of them
are already built-in::
>>> word_vectors.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
>>> word_vectors.most_similar_cosmul(positive=['woman', 'king'], negative=['man'])
[('queen', 0.71382287), ...]
>>> word_vectors.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
>>> word_vectors.similarity('woman', 'man')
0.73723527
Correlation with human opinion on word similarity::
>>> word_vectors.evaluate_word_pairs(os.path.join(module_path, 'test_data','wordsim353.tsv'))
0.51, 0.62, 0.13
And on analogies::
>>> word_vectors.accuracy(os.path.join(module_path, 'test_data', 'questions-words.txt'))
and so on.
"""
from __future__ import division # py3 "true division"
import logging
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty # noqa:F401
# If pyemd C extension is available, import it.
# If pyemd is attempted to be used, but isn't installed, ImportError will be raised in wmdistance
try:
from pyemd import emd
PYEMD_EXT = True
except ImportError:
PYEMD_EXT = False
from numpy import dot, zeros, float32 as REAL, float64 as DOUBLE, empty, memmap as np_memmap, \
double, array, vstack, sqrt, newaxis, integer, \
ndarray, sum as np_sum, average, prod, argmax, divide as np_divide, tanh, arctanh, arccosh, cos, log
from numpy.linalg import norm
import numpy as np
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from gensim.corpora.dictionary import Dictionary
from six import string_types, integer_types
from six.moves import xrange, zip
from scipy import sparse, stats
from gensim.utils import deprecated
from gensim.models.utils_any2vec import _save_word2vec_format, _load_word2vec_format, _compute_ngrams, _ft_hash
logger = logging.getLogger(__name__)
# Small positive constant — presumably a numerical-stability epsilon for the
# distance/similarity computations; its uses are outside this file section
# (TODO confirm against the methods that reference EPS).
EPS = 1e-10
class Vocab(object):
    """A single vocabulary item.

    Used internally to hold per-word frequency/sampling info and for
    constructing binary trees (both word leaves and inner nodes). Arbitrary
    attributes are attached via keyword arguments; `count` defaults to 0.
    """

    def __init__(self, **kwargs):
        self.count = 0
        self.__dict__.update(kwargs)

    def __lt__(self, other):
        # priority-queue ordering: compare by raw count only
        return self.count < other.count

    def __str__(self):
        # show public attributes only, in sorted order, as name:repr pairs
        public_keys = sorted(k for k in self.__dict__ if not k.startswith('_'))
        pairs = ['%s:%r' % (k, self.__dict__[k]) for k in public_keys]
        return "%s(%s)" % (self.__class__.__name__, ', '.join(pairs))
class BaseKeyedVectors(utils.SaveLoad):
    """Abstract base class for sets of vectors keyed by string entities.

    Concrete subclasses must implement the similarity/distance methods;
    this base provides storage, save/load, lookup and ranking helpers.
    """

    def __init__(self, vector_size, vector_dtype=REAL):
        self.vectors = []           # vector storage, indexed by Vocab.index
        self.vocab = {}             # entity (str) -> Vocab
        self.vector_size = vector_size
        self.vector_dtype = vector_dtype
        self.index2entity = []      # index -> entity (str)

    def save(self, fname_or_handle, **kwargs):
        super(BaseKeyedVectors, self).save(fname_or_handle, **kwargs)

    @classmethod
    def load(cls, fname_or_handle, **kwargs):
        return super(BaseKeyedVectors, cls).load(fname_or_handle, **kwargs)

    def similarity(self, entity1, entity2):
        """Compute cosine similarity between entities, specified by string tag.
        """
        raise NotImplementedError()

    def most_similar(self, **kwargs):
        """Find the top-N most similar entities.
        Possibly have `positive` and `negative` list of entities in `**kwargs`.
        """
        # BUGFIX: this previously *returned* a NotImplementedError instance
        # instead of raising it, silently handing callers an exception object.
        # Raise it, consistent with the other abstract methods of this class.
        raise NotImplementedError()

    def distance(self, entity1, entity2):
        """Compute distance between vectors of two input entities, specified by string tag.
        """
        raise NotImplementedError()

    def distances(self, entity1, other_entities=()):
        """Compute distances from given entity (string tag) to all entities in `other_entity`.
        If `other_entities` is empty, return distance between `entity1` and all entities in vocab.
        """
        raise NotImplementedError()

    def embedding_norm(self, word):
        """Compute the norm of the target embedding for a given word
        """
        raise NotImplementedError()

    def get_vector(self, entity):
        """Accept a single entity as input, specified by string tag.
        Returns the entity's representations in vector space, as a 1D numpy array.

        Raises KeyError if `entity` is not in the vocabulary.
        """
        if entity in self.vocab:
            result = self.vectors[self.vocab[entity].index]
            # guard callers from accidentally mutating shared storage
            result.setflags(write=False)
            return result
        else:
            raise KeyError("'%s' not in vocabulary" % entity)

    def __getitem__(self, entities):
        """
        Accept a single entity (string tag) or list of entities as input.

        If a single string or int, return designated tag's vector
        representation, as a 1D numpy array.

        If a list, return designated tags' vector representations as a
        2D numpy array: #tags x #vector_size.
        """
        if isinstance(entities, string_types):
            # allow calls like trained_model['office'], as a shorthand for trained_model[['office']]
            return self.get_vector(entities)
        return vstack([self.get_vector(entity) for entity in entities])

    def __contains__(self, entity):
        return entity in self.vocab

    def most_similar_to_given(self, entity1, entities_list):
        """Return the entity from entities_list most similar to entity1."""
        return entities_list[argmax([self.similarity(entity1, entity) for entity in entities_list])]

    def closer_than(self, entity1, entity2):
        """Returns all entities that are closer to `entity1` than `entity2` is to `entity1`."""
        all_distances = self.distances(entity1)
        e1_index = self.vocab[entity1].index
        e2_index = self.vocab[entity2].index
        closer_node_indices = np.where(all_distances < all_distances[e2_index])[0]
        # exclude entity1 itself from the result
        return [self.index2entity[index] for index in closer_node_indices if index != e1_index]

    def rank(self, entity1, entity2):
        """Rank of the distance of `entity2` from `entity1`, in relation to distances of all entities from `entity1`."""
        return len(self.closer_than(entity1, entity2)) + 1
class WordEmbeddingsKeyedVectors(BaseKeyedVectors):
"""Class containing common methods for operations over word vectors."""
def __init__(self, vector_size, vector_dtype=REAL, init_pretrained_config=None):
    """Set up empty word-vector storage of dimensionality `vector_size`.

    `vector_dtype` is the numpy dtype used for the stored vectors.
    `init_pretrained_config` is kept on the instance untouched; its
    semantics are not visible in this section — confirm against the
    training code that consumes it.
    """
    super(WordEmbeddingsKeyedVectors, self).__init__(vector_size=vector_size, vector_dtype=vector_dtype)
    # lazily built cache of normalized vectors (read by word_vec(use_norm=True))
    self.vectors_norm = None
    self.index2word = []   # index -> word string
    self.index2freq = []   # index -> word frequency (parallel to index2word)
    self.init_pretrained_config = init_pretrained_config
# --- Deprecated/compatibility aliases for pre-4.0.0 attribute names. ---
# `wv`, `syn0` and `syn0norm` mirror `self`, `self.vectors` and
# `self.vectors_norm` respectively; `index2entity` aliases `index2word`.

@property
@deprecated("Attribute will be removed in 4.0.0, use self instead")
def wv(self):
    # historical alias: model.wv used to hold the KeyedVectors object
    return self

@property
def index2entity(self):
    return self.index2word

@index2entity.setter
def index2entity(self, value):
    self.index2word = value

@property
@deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors instead")
def syn0(self):
    # historical word2vec name for the embedding matrix
    return self.vectors

@syn0.setter
@deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors instead")
def syn0(self, value):
    self.vectors = value

@property
@deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors_norm instead")
def syn0norm(self):
    # historical name for the cached normalized embedding matrix
    return self.vectors_norm

@syn0norm.setter
@deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors_norm instead")
def syn0norm(self, value):
    self.vectors_norm = value

def __contains__(self, word):
    # membership test: `word in kv` checks the vocabulary
    return word in self.vocab
def save(self, *args, **kwargs):
    """Save the keyed vectors to disk.

    The saved model can be loaded again with
    :func:`~gensim.models.*2vec.*2VecKeyedVectors.load`, which supports
    operations on trained word vectors such as `most_similar`.

    Parameters
    ----------
    fname : str
        Path to the file.
    """
    # the normalized-vector cache is derived data; skip persisting it
    kwargs.setdefault('ignore', ['vectors_norm'])
    super(WordEmbeddingsKeyedVectors, self).save(*args, **kwargs)
def word_vec(self, word, use_norm=False):
    """Return the vector representation of `word` as a 1D numpy array.

    With `use_norm=True` the normalized vector (from the `vectors_norm`
    cache) is returned instead. Raises KeyError for out-of-vocabulary words.

    Examples
    --------
    >>> trained_model['office']
    array([ -1.40128313e-02, ...])
    """
    if word not in self.vocab:
        raise KeyError("word '%s' not in vocabulary" % word)
    source = self.vectors_norm if use_norm else self.vectors
    result = source[self.vocab[word].index]
    # guard callers from accidentally mutating shared storage
    result.setflags(write=False)
    return result
def get_vector(self, word):
    """Alias for :meth:`word_vec`: the (un-normalized) vector for `word`."""
    return self.word_vec(word)
def words_closer_than(self, w1, w2):
    """
    Returns all words that are closer to `w1` than `w2` is to `w1`.

    Thin word-typed wrapper over :meth:`BaseKeyedVectors.closer_than`.

    Parameters
    ----------
    w1 : str
        Input word.
    w2 : str
        Input word.

    Returns
    -------
    list (str)
        List of words that are closer to `w1` than `w2` is to `w1`.

    Examples
    --------
    >>> model.words_closer_than('carnivore', 'mammal')
    ['dog', 'canine']
    """
    return super(WordEmbeddingsKeyedVectors, self).closer_than(w1, w2)
def similar_by_word(self, word, topn=10, restrict_vocab=None):
    """
    Find the top-N most similar words to `word`.

    Delegates to :meth:`most_similar` with `word` as the single positive
    example.

    Parameters
    ----------
    word : str
        Word
    topn : int
        Number of top-N similar words to return. If topn is False, similar_by_word returns
        the vector of similarity scores.
    restrict_vocab : int
        Optional integer which limits the range of vectors which
        are searched for most-similar values. For example, restrict_vocab=10000 would
        only check the first 10000 word vectors in the vocabulary order. (This may be
        meaningful if you've sorted the vocabulary by descending frequency.)

    Returns
    -------
    :obj: `list` of :obj: `tuple`
        Returns a list of tuples (word, similarity)

    Example::

      >>> trained_model.similar_by_word('graph')
      [('user', 0.9999163150787354), ...]
    """
    return self.most_similar(positive=[word], topn=topn, restrict_vocab=restrict_vocab)
def similar_by_vector(self, vector, topn=10, restrict_vocab=None):
    """
    Find the top-N most similar words to a raw `vector`.

    Delegates to :meth:`most_similar` with `vector` as the single positive
    example.

    Parameters
    ----------
    vector : numpy.array
        vector from which similarities are to be computed.
        expected shape (dim,)
    topn : int
        Number of top-N similar words to return. If topn is False, similar_by_vector returns
        the vector of similarity scores.
    restrict_vocab : int
        Optional integer which limits the range of vectors which
        are searched for most-similar values. For example, restrict_vocab=10000 would
        only check the first 10000 word vectors in the vocabulary order. (This may be
        meaningful if you've sorted the vocabulary by descending frequency.)

    Returns
    -------
    :obj: `list` of :obj: `tuple`
        Returns a list of tuples (word, similarity)
    """
    return self.most_similar(positive=[vector], topn=topn, restrict_vocab=restrict_vocab)
def similarity_matrix(self, dictionary, tfidf=None, threshold=0.0, exponent=2.0, nonzero_limit=100, dtype=REAL):
    """Constructs a term similarity matrix for computing Soft Cosine Measure.

    Constructs a sparse term similarity matrix in the :class:`scipy.sparse.csc_matrix` format for computing
    Soft Cosine Measure between documents.

    Parameters
    ----------
    dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
        A dictionary that specifies a mapping between words and the indices of rows and columns
        of the resulting term similarity matrix.
    tfidf : :class:`gensim.models.tfidfmodel.TfidfModel`, optional
        A model that specifies the relative importance of the terms in the dictionary. The rows
        of the term similarity matrix will be build in an increasing order of importance of terms,
        or in the order of term identifiers if None.
    threshold : float, optional
        Only pairs of words whose embeddings are more similar than `threshold` are considered
        when building the sparse term similarity matrix.
    exponent : float, optional
        The exponent applied to the similarity between two word embeddings when building the term similarity matrix.
    nonzero_limit : int, optional
        The maximum number of non-zero elements outside the diagonal in a single row or column
        of the term similarity matrix. Setting `nonzero_limit` to a constant ensures that the
        time complexity of computing the Soft Cosine Measure will be linear in the document
        length rather than quadratic.
    dtype : numpy.dtype, optional
        Data-type of the term similarity matrix.

    Returns
    -------
    :class:`scipy.sparse.csc_matrix`
        Term similarity matrix.

    See Also
    --------
    :func:`gensim.matutils.softcossim`
        The Soft Cosine Measure.
    :class:`gensim.similarities.docsim.SoftCosineSimilarity`
        A class for performing corpus-based similarity queries with Soft Cosine Measure.

    Notes
    -----
    The constructed matrix corresponds to the matrix Mrel defined in section 2.1 of
    `Delphine Charlet and Geraldine Damnati, "SimBow at SemEval-2017 Task 3: Soft-Cosine Semantic Similarity
    between Questions for Community Question Answering", 2017
    <http://www.aclweb.org/anthology/S/S17/S17-2051.pdf>`__.
    """
    logger.info("constructing a term similarity matrix")
    matrix_order = len(dictionary)
    # per-row count of stored elements; starts at 1 for the identity diagonal
    matrix_nonzero = [1] * matrix_order
    # DOK format for cheap incremental writes; converted to CSC on return
    matrix = sparse.identity(matrix_order, dtype=dtype, format="dok")
    num_skipped = 0
    # Decide the order of rows.
    if tfidf is None:
        word_indices = range(matrix_order)
    else:
        assert max(tfidf.idfs) < matrix_order
        # most important terms (highest idf) first, so they claim their
        # nonzero budget before less important rows
        word_indices = [
            index for index, _ in sorted(tfidf.idfs.items(), key=lambda x: x[1], reverse=True)
        ]
    # Traverse rows.
    for row_number, w1_index in enumerate(word_indices):
        if row_number % 1000 == 0:
            logger.info(
                "PROGRESS: at %.02f%% rows (%d / %d, %d skipped, %.06f%% density)",
                100.0 * (row_number + 1) / matrix_order, row_number + 1, matrix_order,
                num_skipped, 100.0 * matrix.getnnz() / matrix_order**2)
        w1 = dictionary[w1_index]
        if w1 not in self.vocab:
            num_skipped += 1
            continue  # A word from the dictionary is not present in the word2vec model.
        # Traverse upper triangle columns.
        if matrix_order <= nonzero_limit + 1:  # Traverse all columns.
            columns = (
                (w2_index, self.similarity(w1, dictionary[w2_index]))
                for w2_index in range(w1_index + 1, matrix_order)
                if w1_index != w2_index and dictionary[w2_index] in self.vocab)
        else:  # Traverse only columns corresponding to the embeddings closest to w1.
            num_nonzero = matrix_nonzero[w1_index] - 1
            columns = (
                (dictionary.token2id[w2], similarity)
                for _, (w2, similarity)
                in zip(
                    range(nonzero_limit - num_nonzero),
                    self.most_similar(positive=[w1], topn=nonzero_limit - num_nonzero)
                )
                if w2 in dictionary.token2id
            )
            columns = sorted(columns, key=lambda x: x[0])
        for w2_index, similarity in columns:
            # Ensure that we don't exceed `nonzero_limit` by mirroring the upper triangle.
            if similarity > threshold and matrix_nonzero[w2_index] <= nonzero_limit:
                element = similarity**exponent
                matrix[w1_index, w2_index] = element
                matrix_nonzero[w1_index] += 1
                matrix[w2_index, w1_index] = element
                matrix_nonzero[w2_index] += 1
    logger.info(
        "constructed a term similarity matrix with %0.6f %% nonzero elements",
        100.0 * matrix.getnnz() / matrix_order**2
    )
    return matrix.tocsc()
def wmdistance(self, document1, document2):
    """
    Compute the Word Mover's Distance between two documents. When using this
    code, please consider citing the following papers:

    .. Ofir Pele and Michael Werman, "A linear time histogram metric for improved SIFT matching".
    .. Ofir Pele and Michael Werman, "Fast and robust earth mover's distances".
    .. Matt Kusner et al. "From Word Embeddings To Document Distances".

    Note that if one of the documents have no words that exist in the
    Word2Vec vocab, `float('inf')` (i.e. infinity) will be returned.

    This method only works if `pyemd` is installed (can be installed via pip, but requires a C compiler).

    Example:
        >>> # Train word2vec model.
        >>> model = Word2Vec(sentences)

        >>> # Some sentences to test.
        >>> sentence_obama = 'Obama speaks to the media in Illinois'.lower().split()
        >>> sentence_president = 'The president greets the press in Chicago'.lower().split()

        >>> # Remove their stopwords.
        >>> from nltk.corpus import stopwords
        >>> stopwords = nltk.corpus.stopwords.words('english')
        >>> sentence_obama = [w for w in sentence_obama if w not in stopwords]
        >>> sentence_president = [w for w in sentence_president if w not in stopwords]

        >>> # Compute WMD.
        >>> distance = model.wmdistance(sentence_obama, sentence_president)
    """
    if not PYEMD_EXT:
        raise ImportError("Please install pyemd Python package to compute WMD.")

    # Remove out-of-vocabulary words.
    len_pre_oov1 = len(document1)
    len_pre_oov2 = len(document2)
    document1 = [token for token in document1 if token in self]
    document2 = [token for token in document2 if token in self]
    diff1 = len_pre_oov1 - len(document1)
    diff2 = len_pre_oov2 - len(document2)
    if diff1 > 0 or diff2 > 0:
        logger.info('Removed %d and %d OOV words from document 1 and 2 (respectively).', diff1, diff2)

    if len(document1) == 0 or len(document2) == 0:
        # BUGFIX: log message previously read "werein" (missing space).
        logger.info(
            "At least one of the documents had no words that were in the vocabulary. "
            "Aborting (returning inf)."
        )
        return float('inf')

    dictionary = Dictionary(documents=[document1, document2])
    vocab_len = len(dictionary)

    if vocab_len == 1:
        # Both documents are composed by a single unique token
        return 0.0

    # Sets for faster look-up.
    docset1 = set(document1)
    docset2 = set(document2)

    # Compute distance matrix between every pair of (doc1 word, doc2 word).
    distance_matrix = zeros((vocab_len, vocab_len), dtype=double)
    for i, t1 in dictionary.items():
        for j, t2 in dictionary.items():
            if t1 not in docset1 or t2 not in docset2:
                continue
            # Compute Euclidean distance between word vectors.
            distance_matrix[i, j] = sqrt(np_sum((self[t1] - self[t2])**2))

    if np_sum(distance_matrix) == 0.0:
        # `emd` gets stuck if the distance matrix contains only zeros.
        logger.info('The distance matrix is all zeros. Aborting (returning inf).')
        return float('inf')

    def nbow(document):
        # normalized bag-of-words: word-frequency histogram over `dictionary`
        d = zeros(vocab_len, dtype=double)
        nbow = dictionary.doc2bow(document)  # Word frequencies.
        doc_len = len(document)
        for idx, freq in nbow:
            d[idx] = freq / float(doc_len)  # Normalized word frequencies.
        return d

    # Compute nBOW representation of documents.
    d1 = nbow(document1)
    d2 = nbow(document2)

    # Compute WMD as the earth mover's distance between the two histograms.
    return emd(d1, d2, distance_matrix)
@staticmethod
def log_accuracy(section):
    """Print the section's best accuracy over all evaluated thresholds.

    `section["correct"]`/`section["incorrect"]` hold per-threshold answer
    counts (rather than lists of the words themselves); the index of the
    best correct count maps to threshold t = index * 0.1.
    """
    best = argmax(section["correct"])
    n_right = section["correct"][best]
    n_wrong = section["incorrect"][best]
    total = n_right + n_wrong
    if total > 0:
        print("{}: {:.1f}% ({}/{}) for t={:.2f}".format(
            section['section'], 100.0 * n_right / total, n_right, total, best * 0.1
        ))
def eval_accuracy_for_batch(self, batch, section, most_similar, restrict_vocab, case_insensitive, debug=False):
    """Evaluate a batch of analogy questions and record counts in `section`.

    `batch` is a flat list of [a, b, c, expected] vocab-index quadruplets;
    it is cleared in place after evaluation. `most_similar` is a batched
    analogy solver returning, per result set, predicted words in position 0.
    For each result set, one (correct, incorrect) count pair is appended to
    `section["correct"]`/`section["incorrect"]` — counts, not word lists.
    """
    if len(batch) == 0:
        return
    # columns: A (negative), B and C (positives), expected answer index
    batch_arr = np.array(batch).reshape((-1, 4))
    A = batch_arr[:, 0]
    B = batch_arr[:, 1]
    C = batch_arr[:, 2]
    expected = [self.index2word[i].upper() if case_insensitive else self.index2word[i] for i in batch_arr[:, 3]]
    # find the most likely prediction, ignoring OOV words and input words
    results = most_similar(self, positive=[B, C], negative=A, restrict_vocab=restrict_vocab, debug=debug)
    for result in results:
        correct, incorrect = 0, 0
        predicted = [word.upper() for word in result[0]] if case_insensitive else result[0]
        for i, info in enumerate(zip(expected, predicted, A, B, C)):
            exp, pred, a, b, c = info
            # count right/wrong answers instead of collecting the words
            if pred == exp:
                correct += 1
            else:
                incorrect += 1
        section["correct"].append(correct)
        section["incorrect"].append(incorrect)
    batch.clear()
def accuracy(self, questions, restrict_vocab=30000, most_similar=None, case_insensitive=True, debug=False, verbose=True):
    """
    Compute accuracy of the model. `questions` is a filename where lines are
    4-tuples of words, split into sections by ": SECTION NAME" lines.
    See questions-words.txt in
    https://storage.googleapis.com/google-code-archive-source/v2/code.google.com/word2vec/source-archive.zip
    for an example.

    The accuracy is reported (=printed to log and returned as a list) for each
    section separately, plus there's one aggregate summary at the end.

    Use `restrict_vocab` to ignore all questions containing a word not in the first `restrict_vocab`
    words (default 30,000). This may be meaningful if you've sorted the vocabulary by descending frequency.
    In case `case_insensitive` is True, the first `restrict_vocab` words are taken first, and then
    case normalization is performed.

    Use `case_insensitive` to convert all words in questions and vocab to their uppercase form before
    evaluating the accuracy (default True). Useful in case of case-mismatch between training tokens
    and question words. In case of multiple case variants of a single word, the vector for the first
    occurrence (also the most frequent if vocabulary is sorted) is taken.

    This method corresponds to the `compute-accuracy` script of the original C word2vec.
    """
    # restrict candidates to the most frequent `restrict_vocab` words;
    # reversed() so that on uppercase collisions the earliest (most
    # frequent) variant wins the dict insertion
    ok_vocab = [(w, self.vocab[w]) for w in self.index2word[:restrict_vocab]]
    ok_vocab = {w.upper(): v for w, v in reversed(ok_vocab)} if case_insensitive else dict(ok_vocab)

    if not most_similar:
        most_similar = VanillaWordEmbeddingsKeyedVectors.batch_most_similar_analogy

    sections, section = [], None
    # vocab indices of pending questions, 4 per question, flushed per section
    batch = []
    # temporarily swap in the restricted vocab; restored after the loop
    original_vocab = self.vocab
    self.vocab = ok_vocab
    for line_no, line in enumerate(utils.smart_open(questions)):
        line = utils.to_unicode(line)
        if line.startswith(': '):
            # Evaluate previous section.
            self.eval_accuracy_for_batch(batch=batch, section=section, most_similar=most_similar,
                                         restrict_vocab=restrict_vocab, case_insensitive=case_insensitive,
                                         debug=debug)
            # a new section starts => store the old section
            if section:
                sections.append(section)
                if verbose:
                    self.log_accuracy(section)
                # Only evaluate one section when running in debug mode.
                if debug:
                    return sections
            section = {'section': line.lstrip(': ').strip(), 'correct': [], 'incorrect': []}
        else:
            if not section:
                raise ValueError("missing section header before line #%i in %s" % (line_no, questions))
            try:
                if case_insensitive:
                    a, b, c, expected = [word.upper() for word in line.split()]
                else:
                    a, b, c, expected = [word for word in line.split()]
            except ValueError:
                logger.info("skipping invalid line #%i in %s", line_no, questions)
                continue
            if a not in ok_vocab or b not in ok_vocab or c not in ok_vocab or expected not in ok_vocab:
                logger.debug("skipping line #%i with OOV words: %s", line_no, line.strip())
                continue
            batch.append([self.vocab[w].index for w in [a, b, c, expected]])

    # Evaluate last section.
    self.eval_accuracy_for_batch(batch=batch, section=section, most_similar=most_similar,
                                 restrict_vocab=restrict_vocab, case_insensitive=case_insensitive, debug=debug)
    self.vocab = original_vocab
    if section:
        # store the last section, too
        sections.append(section)
        if verbose:
            self.log_accuracy(section)

    # "correct"/"incorrect" hold per-threshold answer *counts*; aggregate
    # them element-wise across sections, then keep only the counts at the
    # best threshold (argmax over correct; t = index * 0.1).
    if len(sections) > 1:
        sem = {
            'section': 'semantic',
            'correct': np.array([np.array(section["correct"])
                                 for section in filter(lambda s: not s['section'].startswith('gram'), sections)]).sum(axis=0),
            'incorrect': np.array([np.array(section["incorrect"])
                                   for section in filter(lambda s: not s['section'].startswith('gram'), sections)]).sum(axis=0),
        }
        syn = {
            'section': 'syntactic',
            'correct': np.array([np.array(section["correct"])
                                 for section in filter(lambda s: s['section'].startswith('gram'), sections)]).sum(axis=0),
            'incorrect': np.array([np.array(section["incorrect"])
                                   for section in filter(lambda s: s['section'].startswith('gram'), sections)]).sum(axis=0),
        }
    total = {
        'section': 'total',
        'correct': np.array([np.array(section["correct"]) for section in sections]).sum(axis=0),
        'incorrect': np.array([np.array(section["incorrect"]) for section in sections]).sum(axis=0),
    }

    if len(sections) > 1:
        if verbose:
            self.log_accuracy(sem)
            self.log_accuracy(syn)
        idx = argmax(sem["correct"])
        sem["correct"] = [sem["correct"][idx]]
        sem["incorrect"] = [sem["incorrect"][idx]]
        sem["t_argmax"] = [idx * 0.1]
        sections.append(sem)
        idx = argmax(syn["correct"])
        syn["correct"] = [syn["correct"][idx]]
        syn["incorrect"] = [syn["incorrect"][idx]]
        syn["t_argmax"] = [idx * 0.1]
        sections.append(syn)
    if verbose:
        self.log_accuracy(total)
    idx = argmax(total["correct"])
    total["correct"] = [total["correct"][idx]]
    total["incorrect"] = [total["incorrect"][idx]]
    total["t_argmax"] = [idx * 0.1]
    sections.append(total)
    return sections
@staticmethod
def log_evaluate_word_pairs(pearson, spearman, oov, pairs):
    """Log word-pair evaluation results at DEBUG level.

    `pearson` and `spearman` are (statistic, p-value)-style tuples (only
    index 0 is read — presumably scipy.stats results, confirm against the
    caller); `oov` is the percentage of pairs with unknown words; `pairs`
    is the path of the evaluated dataset.
    """
    logger.debug('Pearson correlation coefficient against %s: %.4f', pairs, pearson[0])
    logger.debug('Spearman rank-order correlation coefficient against %s: %.4f', pairs, spearman[0])
    logger.debug('Pairs with unknown words ratio: %.1f%%', oov)
def evaluate_word_pairs(self, pairs, delimiter='\t', restrict_vocab=300000,
case_insensitive=True, dummy4unknown=False, debug_file=None):
"""
Compute correlation of the model with human similarity judgments. `pairs` is a filename of a dataset where
lines are 3-tuples, each consisting of a word pair and a similarity value, separated by `delimiter`.
An example dataset is included in Gensim (test/test_data/wordsim353.tsv). More datasets can be found at
http://technion.ac.il/~ira.leviant/MultilingualVSMdata.html or https://www.cl.cam.ac.uk/~fh295/simlex.html.
The model is evaluated using Pearson correlation coefficient and Spearman rank-order correlation coefficient
between the similarities from the dataset and the similarities produced by the model itself.
The results are printed to log and returned as a triple (pearson, spearman, ratio of pairs with unknown words).
Use `restrict_vocab` to ignore all word pairs containing a word not in the first `restrict_vocab`
words (default 300,000). This may be meaningful if you've sorted the vocabulary by descending frequency.
If `case_insensitive` is True, the first `restrict_vocab` words are taken, and then case normalization
is performed.
Use `case_insensitive` to convert all words in the pairs and vocab to their uppercase form before
evaluating the model (default True). Useful when you expect case-mismatch between training tokens
and words pairs in the dataset. If there are multiple case variants of a single word, the vector for the first
occurrence (also the most frequent if vocabulary is sorted) is taken.
Use `dummy4unknown=True` to produce zero-valued similarities for pairs with out-of-vocabulary words.
Otherwise (default False), these pairs are skipped entirely.
"""
ok_vocab = [(w, self.vocab[w]) for w in self.index2word[:restrict_vocab]]
ok_vocab = {w.upper(): v for w, v in reversed(ok_vocab)} if case_insensitive else dict(ok_vocab)
similarity_gold = []
similarity_model = []
oov = 0
original_vocab = self.vocab
self.vocab = ok_vocab
if debug_file:
f = open(debug_file, "w")
f.write("Word1,Word2,Gold standard(0-10),Model similarity (-hyp_dist^2)\n")
for line_no, line in enumerate(utils.smart_open(pairs)):
line = utils.to_unicode(line)
if line.startswith('#'):
# May be a comment
continue
else:
try:
if case_insensitive:
a, b, sim = [word.upper() for word in line.split(delimiter)]
else:
a, b, sim = [word for word in line.split(delimiter)]
sim = float(sim)
except (ValueError, TypeError):
logger.info('Skipping invalid line #%d in %s', line_no, pairs)
continue
if a not in ok_vocab or b not in ok_vocab:
oov += 1
if dummy4unknown:
logger.debug('Zero similarity for line #%d with OOV words: %s', line_no, line.strip())
similarity_model.append(0.0)
similarity_gold.append(sim)
continue
else:
logger.debug('Skipping line #%d with OOV words: %s', line_no, line.strip())
continue
similarity_gold.append(sim) # Similarity from the dataset
model_sim = self.similarity(a, b)
similarity_model.append(model_sim) # Similarity from the model
if debug_file:
f.write(a.lower() + "," + b.lower() + "," + str(sim) + "," + str(model_sim) + "\n")
if debug_file:
f.close()
self.vocab = original_vocab
spearman = stats.spearmanr(similarity_gold, similarity_model)
pearson = stats.pearsonr(similarity_gold, similarity_model)
if dummy4unknown:
oov_ratio = float(oov) / len(similarity_gold) * 100
else:
oov_ratio = float(oov) / (len(similarity_gold) + oov) * 100
logger.debug('Pearson correlation coefficient against %s: %f with p-value %f', pairs, pearson[0], pearson[1])
logger.debug(
'Spearman rank-order correlation coefficient against %s: %f with p-value %f',
pairs, spearman[0], spearman[1]
)
logger.debug('Pairs with unknown words: %d', oov)
self.log_evaluate_word_pairs(pearson, spearman, oov_ratio, pairs)
return pearson, spearman, oov_ratio
def init_sims(self, replace=False):
"""
Precompute L2-normalized vectors.
If `replace` is set, forget the original vectors and only keep the normalized
ones = saves lots of memory!
Note that you **cannot continue training** after doing a replace. The model becomes
effectively read-only = you can call `most_similar`, `similarity` etc., but not `train`.
"""
if getattr(self, 'vectors_norm', None) is None or replace:
print("init_sims from WordEmbeddings")
logger.info("precomputing L2-norms of word weight vectors; replace={}".format(replace))
dtype = REAL
if hasattr(self, 'vector_dtype'):
dtype = self.vector_dtype
if replace:
for i in xrange(self.vectors.shape[0]):
self.vectors[i, :] /= sqrt((self.vectors[i, :] ** 2).sum(-1))
self.vectors_norm = self.vectors
else:
self.vectors_norm = (self.vectors / sqrt((self.vectors ** 2).sum(-1))[..., newaxis]).astype(dtype)
class PoincareWordEmbeddingsKeyedVectors(WordEmbeddingsKeyedVectors):
    """
    Class used for word embeddings on the Poincare ball which use the Poincare geodesic distance for the similarity
    metric (instead of the cosine similarity).
    """
    def __init__(self, vector_size, vector_dtype=REAL, trainables=None, init_near_border=False,
                 init_pretrained_config=False):
        super(PoincareWordEmbeddingsKeyedVectors, self).__init__(vector_size=vector_size, vector_dtype=vector_dtype)
        # If True, use Poincare distance to measure similarity between words. Otherwise, use cosine distance.
        self.use_poincare_distance = True
        # Stored as-is and not used inside this class; presumably the training-state
        # object of the owning model -- semantics defined by the caller (TODO confirm).
        self.trainables = trainables
        # Initialization flags, only recorded here; presumably consumed by the trainer
        # when embeddings are (re)initialized (TODO confirm against caller).
        self.init_near_border = init_near_border
        self.init_pretrained_config = init_pretrained_config
def batch_most_similar_hyperbolic_analogy(self, positive=None, negative=None, restrict_vocab=None, debug=False):
"""
Solve an analogy task. The result should be similar to the positive words and unlike the negative word.
This method computes the similarity (according to the formula defined for the hyperbolic space) between
the parallel transport of the input vector and the vectors for each word in the model and selects the word
that is closest to the position of the parallel transported vector.
Parameters
----------
positive : list of two numpy.array
List of two 2D numpy arrays. Each of them contains positive instances. The number of rows is equal to the
number of questions in a batch.
negative : numpy.array
2D array that contains on each row the embedding of the negative word from that question. The number of
rows is equal to the number of questions in a batch.
restrict_vocab : int
Optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Returns
-------
:obj: `list` of :obj: `tuple`
Returns a list of tuples (word, similarity)
"""
batch_size = len(negative)
# XXX: before calling this method, #accuracy is setting self.vocab to be only the restricted vocab.
# So here, self.vocab is actually self.vocab[:restricted]
if not self.use_poincare_distance:
self.init_sims()
# Retrieve embeddings.
pos_emb = [
self.vectors[positive[0]],
self.vectors[positive[1]]
]
neg_emb = self.vectors[negative]
# Compute the parallel transport of the positive vector in the analogy question (i.e. c) using the new formula
parallel_transp1 = self.moebius_add_mat(
pos_emb[1],
self.gyr_mat(pos_emb[1], -neg_emb, self.moebius_add_mat(-neg_emb, pos_emb[0]))) # batch_size x vector_size
# Compute the parallel transport of the other positive vector (i.e. b) so the alternative formulation of the
# analogy question.
parallel_transp2 = self.moebius_add_mat(
pos_emb[0],
self.gyr_mat(pos_emb[0], -neg_emb, self.moebius_add_mat(-neg_emb, pos_emb[1]))) # batch_size x vector_size
# Compute the gyrolinear combination between the two parallel
# transported points.
t = 0.3
aux = self.moebius_add_mat(-parallel_transp1, parallel_transp2)
results = []
lin_comb_point = self.moebius_add_mat(parallel_transp1, self.moebius_mul_mat(aux, t))
# Compute similarity between parallel transported input and all words in the vocabulary.
if self.use_poincare_distance:
limited = self.vectors if restrict_vocab is None else self.vectors[:restrict_vocab] # vocab_size * vector_size
# NOTE!!! This is not actually the distance, but cosh(distance) (so only the argument of arccosh in the
# Poincare distance formula). However, cosh(x) is monotonous (for positive x) which means that we will get
# the same argmax in the end.
dists = self.cosh_distances_mat(lin_comb_point, limited) # batch_size * vocab_size
else:
# Get normalized vectors, if we use cosine distance.
limited_norm = self.vectors_norm if restrict_vocab is None else self.vectors_norm[:restrict_vocab] # vocab_size * vector_size
# Normalize parallel transported vector.
lin_comb_point = lin_comb_point / norm(lin_comb_point, axis=1)[:, None]
dists = -dot(lin_comb_point, limited_norm.T) # batch_size * vocab_size
max_float = np.finfo(dists.dtype).max
batch_size_range = np.arange(batch_size)
x = np.concatenate((batch_size_range, batch_size_range, batch_size_range))
y = np.concatenate((positive[0], positive[1], negative))
dists[x, y] = max_float # batch_size * (vocab_size - 3)
dists[x, y] = max_float # batch_size * (vocab_size - 3)
best = []
if debug:
for i in batch_size_range:
top_ids = matutils.argsort(dists[i], topn=10)
curr_best = [(self.index2word[idx],
self.distances(limited[idx], np.array([neg_emb[i], pos_emb[0][i], pos_emb[1][i]]))[0])
for idx in top_ids]
best.append(curr_best)
best_ids = np.argmin(dists, axis=1)
result = (
[self.index2word[i] for i in best_ids],
dists[batch_size_range, best_ids].astype(np.float32),
best)
results.append(result)
return results
def batch_most_similar_3distadd_analogy(self, positive=None, negative=None, restrict_vocab=None, debug=False):
"""
Solve an analogy task. The result should be similar to the positive words and unlike the negative word.
Implements 3DISTADD. This replaces the cosine similarities in the 3COSADD formula with -dist.
Parameters
----------
positive : list of two numpy.array
List of two 2D numpy arrays. Each of them contains positive instances. The number of rows is equal to the
number of questions in a batch.
negative : numpy.array
2D array that contains on each row the embedding of the negative word from that question. The number of
rows is equal to the number of questions in a batch.
restrict_vocab : int
Optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Returns
-------
:obj: `list` of :obj: `tuple`
Returns a list of tuples (word, similarity)
"""
batch_size = len(negative)
# XXX: before calling this method, #accuracy is setting self.vocab to be only the restricted vocab.
# So here, self.vocab is actually self.vocab[:restricted]
if not self.use_poincare_distance:
self.init_sims()
# Retrieve embeddings.
pos_emb = [
self.vectors[positive[0]],
self.vectors[positive[1]]
]
neg_emb = self.vectors[negative]
results = []
# Compute similarity between parallel transported input and all words in the vocabulary.
if self.use_poincare_distance:
limited = self.vectors if restrict_vocab is None else self.vectors[:restrict_vocab] # vocab_size * vector_size
# NOTE!!! This is not actually the distance, but cosh(distance) (so only the argument of arccosh in the
# Poincare distance formula). However, cosh(x) is monotonous (for positive x) which means that we will get
# the same argmax in the end.
if isinstance(self, MixPoincareWordEmbeddingsKeyedVectors):
dists = (self.mix_distances_mat(pos_emb[0], limited) + self.mix_distances_mat(pos_emb[1], limited) -
self.mix_distances_mat(neg_emb, limited)) # batch_size * vocab_size
else:
dists = (self.cosh_distances_mat(pos_emb[0], limited) + self.cosh_distances_mat(pos_emb[1], limited) -
self.cosh_distances_mat(neg_emb, limited)) # batch_size * vocab_size
else:
# Get normalized vectors, if we use cosine distance.
limited_norm = self.vectors_norm if restrict_vocab is None else self.vectors_norm[:restrict_vocab] # vocab_size * vector_size
# Normalize parallel transported vector.
pos_emb[0] = pos_emb[0] / norm(pos_emb[0], axis=1)[:, None]
pos_emb[1] = pos_emb[1] / norm(pos_emb[1], axis=1)[:, None]
neg_emb = neg_emb / norm(neg_emb, axis=1)[:, None]
if isinstance(self, MixPoincareWordEmbeddingsKeyedVectors):
dists = (self.mix_distances_mat(pos_emb[0], limited_norm) + self.mix_distances_mat(pos_emb[1], limited_norm) -
self.mix_distances_mat(neg_emb, limited_norm)) # batch_size * vocab_size
else:
dists = -(dot(pos_emb[0], limited_norm.T) + dot(pos_emb[1], limited_norm.T) - dot(neg_emb, limited_norm.T)) # batch_size * vocab_size
max_float = np.finfo(dists.dtype).max
batch_size_range = np.arange(batch_size)
x = np.concatenate((batch_size_range, batch_size_range, batch_size_range))
y = np.concatenate((positive[0], positive[1], negative))
dists[x, y] = max_float # batch_size * (vocab_size - 3)
dists[x, y] = max_float # batch_size * (vocab_size - 3)
best = []
if debug:
for i in batch_size_range:
top_ids = matutils.argsort(dists[i], topn=10)
curr_best = [(self.index2word[idx],
self.distances(limited[idx], np.array([neg_emb[i], pos_emb[0][i], pos_emb[1][i]]))[0])
for idx in top_ids]
best.append(curr_best)
best_ids = np.argmin(dists, axis=1)
result = (
[self.index2word[i] for i in best_ids],
dists[batch_size_range, best_ids].astype(np.float32),
best)
results.append(result)
return results
def cosh_distances_mat(self, vectors, other_vectors=None):
"""
Returns the argument of the arccosh function in the Poincare distance formula. Since arccosh(x) is a monotonous
function for x >= 1, this is enough to create a ranking and select the closest point to another reference point.
Parameters
----------
vectors: numpy.array
Vectors from which distances are to be computed.
other_vectors: numpy.array
For each vector in `other_vectors` distance from each vector in `vectors` is computed.
If None or empty, all words in vocab are considered (including the vectors in `vectors`).
Returns
-------
np.array
Returns a numpy.array that contains the distance between each row in `vectors`
and each row in `other_vectors`
"""
if other_vectors is None:
other_vectors = self.vectors
dot_ww = (other_vectors * other_vectors).sum(axis=1) # vocab_size * 1
beta_w = 1.0 / (1 - dot_ww) # vocab_size * 1
dot_vv = (vectors * vectors).sum(axis=1) # batch_size * 1
alpha_v = 1.0 / (1 - dot_vv) # batch_size * 1
dot_vw = dot(vectors, other_vectors.T) # batch_size * vocab_size
cosh_dists = 1 + (-2 * dot_vw + dot_ww + dot_vv[:, None]) * alpha_v[:, None] * beta_w # batch_size * vocab
return cosh_dists
def distances(self, word_or_vector, other_vectors=None):
"""
Compute the cosh of the Poincare distances from given word or vector to all words in `other_words`.
If `other_words` is empty, return distance between `word_or_vectors` and all words in vocab.
Parameters
----------
word_or_vector : str or numpy.array
Word or vector from which distances are to be computed.
other_vectors: numpy.array or None
For each vector in `other_vectors` distance from `word_or_vector` is computed.
If None or empty, distance of `word_or_vector` from all words in vocab is computed (including itself).
Returns
-------
numpy.array
Array containing distances to all words in `other_words` from input `word_or_vector`,
in the same order as `other_words`.
Notes
-----
Raises KeyError if either `word_or_vector` or any word in `other_words` is absent from vocab.
"""
if isinstance(word_or_vector, string_types):
input_vector = self.word_vec(word_or_vector)
else:
input_vector = word_or_vector
if other_vectors is None:
other_vectors = self.vectors
if self.use_poincare_distance:
return self.cosh_distances_mat(np.array([input_vector]), other_vectors)
else:
return 1 - VanillaWordEmbeddingsKeyedVectors.cosine_similarities(input_vector, other_vectors)
def distance(self, word_or_vector1, word_or_vector2):
"""
Compute distance between two words or vectors inside the Poincare ball.
Example
--------
>>> trained_model.distance('woman', 'man')
"""
v1 = self.word_vec(word_or_vector1) if isinstance(word_or_vector1, string_types) else word_or_vector1
v2 = self.word_vec(word_or_vector2) if isinstance(word_or_vector2, string_types) else word_or_vector2
if self.use_poincare_distance:
diff = v1 - v2
dist = arccosh(1 + 2 * dot(diff, diff) / (1 - dot(v1, v1) + EPS) / (1 - dot(v2, v2) + EPS))
return dist
else:
return 1 - dot(matutils.unitvec(v1), matutils.unitvec(v2))
def similarity(self, w1, w2):
"""
Compute similarity between two words based on the Poincare distance between them.
Example
--------
>>> trained_model.similarity('woman', 'man')
"""
if self.use_poincare_distance:
return -self.distance(w1, w2)**2
# return -self.distance(w1, w2)**2 * norm(self[w1] - self[w2])**2
# return -norm(self[w1] - self[w2])**2
else:
return dot(matutils.unitvec(self[w1]), matutils.unitvec(self[w2]))
def embedding_norm(self, word_or_vector):
"""
Compute embedding Poincare norm for a given word.
Parameters
----------
w : string
word
"""
v = self[word_or_vector] if isinstance(word_or_vector, string_types) else word_or_vector
return arccosh(1 + 2 * dot(v, v) / (1 - dot(v, v)))
def init_sims(self, replace=False):
"""
Precompute L2-normalized vectors.
If `replace` is set, forget the original vectors and only keep the normalized
ones = saves lots of memory!
Note that you **cannot continue training** after doing a replace. The model becomes
effectively read-only = you can call `most_similar`, `similarity` etc., but not `train`.
"""
if getattr(self, 'vectors_norm', None) is None or replace:
print("init_sims from PoincareWordEmbeddings")
logger.info("precomputing L2-norms of word weight vectors; replace={}".format(replace))
dtype = REAL
if hasattr(self, 'vector_dtype'):
dtype = self.vector_dtype
self.vectors_norm = np.empty_like(self.vectors, dtype=dtype)
# XXX: uncomment this line to compute gyrocosine
# norms = self.embedding_norms_mat(self.vectors)
norms = norm(self.vectors, axis=1)
self.vectors_norm = (self.vectors / norms[:, None]).astype(dtype)
@staticmethod
def moebius_add_mat(A, B):
"""
Return the result of the Moebius addition of the rows of matrix A with the rows of B.
Parameters
----------
A : numpy.array
matrix, first argument of addition
B : numpy.array
matrix, second argument of addition
Returns
-------
:obj: `numpy.array`
matrix; Result of Moebius addition of the rows of matrix A with the rows of B
"""
dot_aa = np.sum(A*A, axis=1)
dot_bb = np.sum(B*B, axis=1)
dot_ab = np.sum(A*B, axis=1)
denominator = 1 + 2 * dot_ab + dot_aa * dot_bb
coef_a = (1 + 2 * dot_ab + dot_bb) / denominator
coef_b = (1 - dot_aa) / denominator
return A * coef_a[:, None] + B * coef_b[:, None]
@staticmethod
def moebius_add(a, b):
"""
Return the result of the Moebius addition of the two vectors, a + b
Parameters
----------
a : numpy.array
vector, first argument of addition
b : numpy.array
vector, second argument of addition
Returns
-------
:obj: `numpy.array`
Result of Moebius addition a + b
"""
dot_aa = dot(a, a)
dot_bb = dot(b, b)
dot_ab = dot(a, b)
return ((1 + 2 * dot_ab + dot_bb) * a + (1 - dot_aa) * b) / (1 + 2 * dot_ab + dot_aa * dot_bb)
@staticmethod
def moebius_mul_mat(A, r):
"""
Return the result of the Moebius scalar multiplication of vector v with scalar r
Parameters
----------
A : numpy.array (2D matrix)
r : scalar
Returns
-------
:obj: `numpy.array`
Result of Moebius scalar multiplication between r and each of the rows of A
"""
norm_v = norm(A, axis=1)
return A * (tanh(r * arctanh(norm_v)) / (norm_v + 1e-10))[:, None]
@staticmethod
def moebius_mul(v, r):
"""
Return the result of the Moebius scalar multiplication of vector v with scalar r
Parameters
----------
v : numpy.array (1D vector)
r : scalar
Returns
-------
:obj: `numpy.array`
Result of Moebius scalar multiplication r * v
"""
norm_v = norm(v)
return tanh(r * arctanh(norm_v)) / norm_v * v
@staticmethod
def embedding_norms_mat(vectors):
"""
Compute embedding Poincare norm for a set of vectors.
Parameters
----------
vectors : matrix
np.array
"""
dot_vv = (vectors * vectors).sum(axis=1)
return arccosh(1 + 2 * dot_vv / (1 - dot_vv))
@staticmethod
def gyr(u, v, x):
"""
Return the result of gyr[u, v](x).
u : numpy.array (1D vector)
v : numpy.array (1D vector)
x : numpy.array (1D vector)
Returns
-------
:obj: `numpy.array`
Result of gyr[u, v](x)
"""
a = PoincareWordEmbeddingsKeyedVectors.moebius_add(u, v)
b = PoincareWordEmbeddingsKeyedVectors.moebius_add(u, PoincareWordEmbeddingsKeyedVectors.moebius_add(v, x))
return PoincareWordEmbeddingsKeyedVectors.moebius_add(-a, b)
@staticmethod
def gyr_mat(u, v, x):
"""
Return the result of gyr[u, v](x).
u : numpy.array (2D matrix)
v : numpy.array (2D matrix)
x : numpy.array (2D matrix)
Returns
-------
:obj: `numpy.array` (2D matrix)
Result of gyr[u, v](x)
"""
dot_uu = (u * u).sum(axis=1) # batch_size x 1
dot_vv = (v * v).sum(axis=1) # batch_size x 1
dot_uv = (u * v).sum(axis=1) # batch_size x 1
dot_ux = (u * x).sum(axis=1) # batch_size x 1
dot_vx = (v * x).sum(axis=1) # batch_size x 1
A = -dot_ux * dot_vv + dot_vx + 2 * dot_uv * dot_vx
B = -dot_vx * dot_uu - dot_ux
D = 1 + 2 * dot_uv + dot_uu * dot_vv
coef_u = 2 * A / D
coef_v = 2 * B / D
return x + u * coef_u[:, None] + v * coef_v[:, None]
    @staticmethod
    def exp_map_mat(V, X):
        """
        Return the result of the exponential map applied from the tangent plane at point x, on the vector v that belongs
        to the tangent plane
        Parameters
        ----------
        V : numpy.array
            matrix, the rows are vectors that belong in the tangent plane at x
        X : numpy.array
            matrix, the rows are points on the manifold, where the tangent plane is considered
        Returns
        -------
        :obj: `numpy.array`
            Result of the exponential map on each of the rows of the output matrix
        """
        norm_v = np.linalg.norm(V, axis=1)
        dot_xx = np.sum(X*X, axis=1)
        # NOTE(review): the scaling uses 1/||x||^2, whereas the textbook Poincare-ball
        # exponential map uses the conformal factor lambda_x = 2 / (1 - ||x||^2) --
        # confirm this deviation is intentional. Also note that a zero tangent vector
        # (norm_v == 0) or x at the origin (dot_xx == 0) produces NaN/inf here.
        coef = tanh(1.0/dot_xx * norm_v) / norm_v
        second_term = V * coef[:, None]
        # Moebius-translate the scaled tangent vector back onto the manifold at x.
        return PoincareWordEmbeddingsKeyedVectors.moebius_add_mat(X, second_term)
    @staticmethod
    def log_map_mat(V, X):
        """
        Return the result of the logarithmic map. The resulting point belongs to the tangent plane at point x.
        Both x and v are points on the manifold
        Parameters
        ----------
        V : numpy.array
            matrix, the rows are vectors that belong to the manifold
        X : numpy.array
            matrix, the rows are points on the manifold, where the tangent plane is considered
        Returns
        -------
        :obj: `numpy.array`
            Result of the logarithmic map on each of the rows of the output matrix
        """
        # (-x) (+) v, the Moebius translation of v to the tangent space anchor.
        add_result = PoincareWordEmbeddingsKeyedVectors.moebius_add_mat(-X, V)
        norm_add_result = np.linalg.norm(add_result, axis=1)
        dot_xx = np.sum(X*X, axis=1)
        # NOTE(review): the ||x||^2 scaling mirrors exp_map_mat's 1/||x||^2 rather than the
        # textbook conformal factor -- confirm intentional. If a row of V equals the
        # corresponding row of X, norm_add_result == 0 and this divides by zero.
        coef = dot_xx * arctanh(norm_add_result) / norm_add_result
        return add_result * coef[:, None]
class MixPoincareWordEmbeddingsKeyedVectors(PoincareWordEmbeddingsKeyedVectors):
    """
    Word embeddings living in a Cartesian product of `num_embs` Poincare balls: each
    embedding of size `vector_size` is treated as `num_embs` independent chunks of
    `vector_size / num_embs` coordinates, one per component ball.
    """
    def __init__(self, vector_size, num_embs, vector_dtype=REAL, trainables=None, init_near_border=False,
                 init_pretrained_config=False):
        super(MixPoincareWordEmbeddingsKeyedVectors, self).__init__(
            vector_size=vector_size, vector_dtype=vector_dtype, trainables=trainables,
            init_near_border=init_near_border, init_pretrained_config=init_pretrained_config)
        # Number of component Poincare balls in the product space.
        self.num_embs = num_embs
        # Dimensionality of each component ball. Assumes vector_size is divisible by
        # num_embs; otherwise the trailing vector_size % num_embs coordinates are
        # silently ignored by the per-ball loops in this class.
        self.small_emb_size = int(vector_size / num_embs)
def batch_most_similar_mix_hyperbolic_analogy(self, positive=None, negative=None, restrict_vocab=None, debug=False):
"""
Solve an analogy task. The result should be similar to the positive words and unlike the negative word.
This method computes the similarity (according to the formula defined for the hyperbolic space) between
the parallel transport of the input vector and the vectors for each word in the model and selects the word
that is closest to the position of the parallel transported vector.
Parameters
----------
positive : list of two numpy.array
List of two 2D numpy arrays. Each of them contains positive instances. The number of rows is equal to the
number of questions in a batch.
negative : numpy.array
2D array that contains on each row the embedding of the negative word from that question. The number of
rows is equal to the number of questions in a batch.
restrict_vocab : int
Optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Returns
-------
:obj: `list` of :obj: `tuple`
Returns a list of tuples (word, similarity)
"""
batch_size = len(negative)
# XXX: before calling this method, #accuracy is setting self.vocab to be only the restricted vocab.
# So here, self.vocab is actually self.vocab[:restricted]
if not self.use_poincare_distance:
self.init_sims()
# Retrieve embeddings.
pos_emb = [
self.vectors[positive[0]],
self.vectors[positive[1]]
]
neg_emb = self.vectors[negative]
parallel_transp1 = empty((batch_size, self.vector_size), dtype=self.vector_dtype)
parallel_transp2 = empty((batch_size, self.vector_size), dtype=self.vector_dtype)
aux = empty((batch_size, self.vector_size), dtype=self.vector_dtype)
lin_comb_point = empty((batch_size, self.vector_size), dtype=self.vector_dtype)
small_emb_size = int(self.vector_size / self.num_embs)
# Compute gyro-parallel transport in each of the small dimensional spaces.
for i in range(self.num_embs):
# Compute the parallel transport of the positive vector in the analogy question (i.e. c) using the new
# formula
start = small_emb_size * i
end = small_emb_size * (i+1)
parallel_transp1[:, start:end] = self.moebius_add_mat(
pos_emb[1][:, start:end],
self.gyr_mat(pos_emb[1][:, start:end],
-neg_emb[:, start:end],
self.moebius_add_mat(-neg_emb[:, start:end],
pos_emb[0][:, start:end]))) # batch_size x vector_size
# Compute the parallel transport of the other positive vector (i.e. b) so the alternative formulation of the
# analogy question.
parallel_transp2[:, start:end] = self.moebius_add_mat(
pos_emb[0][:, start:end],
self.gyr_mat(pos_emb[0][:, start:end], -neg_emb[:, start:end],
self.moebius_add_mat(-neg_emb[:, start:end],
pos_emb[1][:, start:end]))) # batch_size x vector_size
aux[:, start:end] = self.moebius_add_mat(-parallel_transp1[:, start:end], parallel_transp2[:, start:end])
# Compute the gyrolinear combination between the two parallel
# transported points.
t = 0.3
results = []
for i in range(self.num_embs):
start = small_emb_size * i
end = small_emb_size * (i+1)
lin_comb_point[:, start:end] = self.moebius_add_mat(parallel_transp1[:, start:end],
self.moebius_mul_mat(aux[:, start:end], t))
# Compute similarity between parallel transported input and all words in the vocabulary.
if self.use_poincare_distance:
limited = self.vectors if restrict_vocab is None else self.vectors[:restrict_vocab] # vocab_size * vector_size
else:
# Get normalized vectors, if we use cosine distance.
limited = self.vectors_norm if restrict_vocab is None else self.vectors_norm[:restrict_vocab] # vocab_size * vector_size
# Normalize parallel transported vector.
for i in range(self.num_embs):
start = small_emb_size * i
end = small_emb_size * (i+1)
lin_comb_point[:, start:end] = lin_comb_point[:, start:end] / (norm(lin_comb_point[:, start:end], axis=1)[:, None] + 1e-5)
dists = self.mix_distances_mat(lin_comb_point, limited) # batch_size * vocab_size
max_float = np.finfo(dists.dtype).max
batch_size_range = np.arange(batch_size)
x = np.concatenate((batch_size_range, batch_size_range, batch_size_range))
y = np.concatenate((positive[0], positive[1], negative))
dists[x, y] = max_float # batch_size * (vocab_size - 3)
dists[x, y] = max_float # batch_size * (vocab_size - 3)
best = []
if debug:
for i in batch_size_range:
top_ids = matutils.argsort(dists[i], topn=10)
curr_best = [(self.index2word[idx],
self.distances(limited[idx], np.array([neg_emb[i], pos_emb[0][i], pos_emb[1][i]]))[0])
for idx in top_ids]
best.append(curr_best)
best_ids = np.argmin(dists, axis=1)
result = (
[self.index2word[i] for i in best_ids],
dists[batch_size_range, best_ids].astype(np.float32),
best)
results.append(result)
return results
def mix_distances_mat(self, vectors, other_vectors=None):
"""
Return distance in the product of hyperbolic spaces, between the rows of `vectors` and the rows of
`other_vectors`.
Parameters
----------
vectors: numpy.array
Vectors from which distances are to be computed.
other_vectors: numpy.array
For each vector in `other_vectors` distance from each vector in `vectors` is computed.
If None or empty, all words in vocab are considered (including the vectors in `vectors`).
Returns
-------
np.array
Returns a numpy.array that contains the distance between each row in `vectors`
and each row in `other_vectors`
"""
dists = zeros((vectors.shape[0], other_vectors.shape[0]), dtype=self.vector_dtype)
small_emb_size = int(self.vector_size / self.num_embs)
if self.use_poincare_distance == True:
for i in range(self.num_embs):
start = small_emb_size * i
end = small_emb_size * (i+1)
curr_dists = np.arccosh(
self.cosh_distances_mat(vectors[:, start:end], other_vectors[:, start:end]))
dists += curr_dists * curr_dists
dists = np.sqrt(dists)
else:
# The vectors need to be normalized!!!
for i in range(self.num_embs):
start = small_emb_size * i
end = small_emb_size * (i+1)
curr_dists = -dot(vectors[:, start:end], other_vectors[:, start:end].T)
dists += curr_dists
return dists
def distances(self, word_or_vector, other_vectors=None):
"""
Compute the distance in a product of Poincare balls from given word or vector to all words in `other_words`.
If `other_words` is empty, return distance between `word_or_vectors` and all words in vocab.
Parameters
----------
word_or_vector : str or numpy.array
Word or vector from which distances are to be computed.
other_vectors: numpy.array or None
For each vector in `other_vectors` distance from `word_or_vector` is computed.
If None or empty, distance of `word_or_vector` from all words in vocab is computed (including itself).
Returns
-------
numpy.array
Array containing distances to all words in `other_words` from input `word_or_vector`,
in the same order as `other_words`.
Notes
-----
Raises KeyError if either `word_or_vector` or any word in `other_words` is absent from vocab.
"""
if self.use_poincare_distance:
use_norm = False
else:
use_norm = True
if isinstance(word_or_vector, string_types):
input_vector = self.word_vec(word_or_vector, use_norm=use_norm)
else:
input_vector = word_or_vector
if other_vectors is None:
if use_norm:
other_vectors = self.vectors_norm
else:
other_vectors = self.vectors
return self.mix_distances_mat(np.array([input_vector]), other_vectors)
def distance(self, word_or_vector1, word_or_vector2):
"""
Compute distance between two words or vectors represented in a Cartesian product of Poincare balls.
Example
--------
>>> trained_model.distance('woman', 'man')
"""
if self.use_poincare_distance:
use_norm = False
else:
use_norm = True
v1 = self.word_vec(word_or_vector1, use_norm=use_norm) if isinstance(word_or_vector1, string_types) else word_or_vector1
v2 = self.word_vec(word_or_vector2, use_norm=use_norm) if isinstance(word_or_vector2, string_types) else word_or_vector2
return self.mix_distances_mat(np.array([v1]), np.array([v2]))[0][0]
def similarity(self, w1, w2):
"""
Compute similarity between two words based on the Poincare distance between them.
Example
--------
>>> trained_model.similarity('woman', 'man')
"""
return -self.distance(w1, w2)
def embedding_norm(self, word_or_vector):
"""
Compute embedding norm in product of Poincare balls for a given word.
Parameters
----------
w : string
word
"""
v = self[word_or_vector] if isinstance(word_or_vector, string_types) else word_or_vector
small_emb_size = int(self.vector_size / self.num_embs)
norms = empty(self.num_embs)
for i in range(self.num_embs):
start = small_emb_size * i
end = small_emb_size * (i+1)
if self.use_poincare_distance:
norms[i] = arccosh(1 + 2 * dot(v[start:end], v[start:end]) / (1 - dot(v[start:end], v[start:end])))
else:
norms[i] = norm(v[start:end])
return norm(norms)
def init_sims(self, replace=False):
"""
Precompute L2-normalized vectors.
If `replace` is set, forget the original vectors and only keep the normalized
ones = saves lots of memory!
Note that you **cannot continue training** after doing a replace. The model becomes
effectively read-only = you can call `most_similar`, `similarity` etc., but not `train`.
"""
if getattr(self, 'vectors_norm', None) is None or replace:
print("init_sims from MixPoincareWordEmbeddings")
logger.info("precomputing L2-norms of word weight vectors; replace={}".format(replace))
dtype = REAL
if hasattr(self, 'vector_dtype'):
dtype = self.vector_dtype
self.vectors_norm = np.empty_like(self.vectors, dtype=dtype)
small_emb_size = int(self.vector_size / self.num_embs)
for i in range(self.num_embs):
start = small_emb_size * i
end = small_emb_size * (i+1)
# norms = PoincareWordEmbeddingsKeyedVectors.embedding_norms_mat(self.vectors[:, start:end]) + 1e-5
norms = norm(self.vectors[:, start:end], axis=1) + 1e-5
self.vectors_norm[:, start:end] = (self.vectors[:, start:end] / norms[:, None]).astype(dtype)
class VanillaWordEmbeddingsKeyedVectors(WordEmbeddingsKeyedVectors):
    """
    Class used as base class for vanilla word embeddings that use cosine similarity (e.g. word2vec, fasttext).
    """

    def most_similar(self, positive=None, negative=None, topn=10, restrict_vocab=None, indexer=None, debug=False):
        """
        Find the top-N most similar words. Positive words contribute positively towards the
        similarity, negative words negatively.
        This method computes cosine similarity between a simple mean of the projection
        weight vectors of the given words and the vectors for each word in the model.
        The method corresponds to the `word-analogy` and `distance` scripts in the original
        word2vec implementation.
        Parameters
        ----------
        positive : :obj: `list` of :obj: `str`
            List of words that contribute positively.
        negative : :obj: `list` of :obj: `str`
            List of words that contribute negatively.
        topn : int
            Number of top-N similar words to return.
        restrict_vocab : int
            Optional integer which limits the range of vectors which
            are searched for most-similar values. For example, restrict_vocab=10000 would
            only check the first 10000 word vectors in the vocabulary order. (This may be
            meaningful if you've sorted the vocabulary by descending frequency.)
        Returns
        -------
        :obj: `list` of :obj: `tuple`
            Returns a list of tuples (word, similarity)
        Examples
        --------
        >>> trained_model.most_similar(positive=['woman', 'king'], negative=['man'])
        [('queen', 0.50882536), ...]
        """
        if positive is None:
            positive = []
        if negative is None:
            negative = []
        self.init_sims()
        if isinstance(positive, string_types) and not negative:
            # allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
            positive = [positive]
        # add weights for each word, if not already present; default to 1.0 for positive and -1.0 for negative words
        positive = [
            (word, 1.0) if isinstance(word, string_types + (ndarray,)) else word
            for word in positive
        ]
        negative = [
            (word, -1.0) if isinstance(word, string_types + (ndarray,)) else word
            for word in negative
        ]
        # compute the weighted average of all words
        all_words, mean = set(), []
        for word, weight in positive + negative:
            if isinstance(word, ndarray):
                mean.append(weight * word)
            else:
                mean.append(weight * self.word_vec(word, use_norm=True))
                if word in self.vocab:
                    all_words.add(self.vocab[word].index)
        if not mean:
            raise ValueError("cannot compute similarity with no input")
        dtype = REAL
        if hasattr(self, "vector_dtype"):
            dtype = self.vector_dtype
        mean = matutils.unitvec(array(mean).mean(axis=0)).astype(dtype)
        if indexer is not None:
            return indexer.most_similar(mean, topn)
        limited = self.vectors_norm if restrict_vocab is None else self.vectors_norm[:restrict_vocab]
        # Compute 3COSADD.
        dists = dot(limited, mean)
        if not topn:
            return dists
        # Fetch extra candidates so that dropping the query words still leaves topn results.
        best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
        # ignore (don't return) words from the input
        result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
        return result[:topn]

    def batch_most_similar_analogy(self, positive=None, negative=None, restrict_vocab=None, debug=False):
        """
        Solve an analogy task. The result should be similar to the positive words and unlike the negative word.
        This method computes cosine similarity between a simple mean of the projection
        weight vectors of the given words and the vectors for each word in the model.
        The method corresponds to the `word-analogy` and `distance` scripts in the original
        word2vec implementation.
        Parameters
        ----------
        positive : list of two numpy.array
            List of two 2D numpy arrays. Each of them contains positive instances. The number of rows is equal to the
            number of questions in a batch.
        negative : numpy.array
            2D array that contains on each row the embedding of the negative word from that question. The number of
            rows is equal to the number of questions in a batch.
        restrict_vocab : int
            Optional integer which limits the range of vectors which
            are searched for most-similar values. For example, restrict_vocab=10000 would
            only check the first 10000 word vectors in the vocabulary order. (This may be
            meaningful if you've sorted the vocabulary by descending frequency.)
        Returns
        -------
        :obj: `list` of :obj: `tuple`
            Returns a list of tuples (word, similarity)
        """
        self.init_sims()
        batch_size = len(negative)
        # XXX: before calling this method, #accuracy is setting self.vocab to be only the restricted vocab.
        # So here, self.vocab is actually self.vocab[:restricted]
        # Retrieve embeddings.
        pos_emb = [
            self.vectors_norm[positive[0]],
            self.vectors_norm[positive[1]]
        ]
        neg_emb = self.vectors_norm[negative]
        # compute the weighted average of all input words, where positive words have weight 1
        # and negative words have weight -1
        weighted_mean = (pos_emb[0] + pos_emb[1] - neg_emb) / 3  # batch_size * vector_size
        mean_norm = norm(weighted_mean, axis=1)
        weighted_mean = weighted_mean / mean_norm[:, None]
        limited = self.vectors_norm if restrict_vocab is None else self.vectors_norm[:restrict_vocab]  # vocab_size * vector_size
        # Compute 3COSADD.
        sims = dot(weighted_mean, limited.T)  # batch_size * vocab_size
        # Mask out the three query words of each question so they can never win.
        min_float = np.finfo(sims.dtype).min
        batch_size_range = np.arange(batch_size)
        x = np.concatenate((batch_size_range, batch_size_range, batch_size_range))
        y = np.concatenate((positive[0], positive[1], negative))
        sims[x, y] = min_float  # batch_size * (vocab_size - 3)
        best = []
        if debug:
            for i in batch_size_range:
                top_ids = matutils.argsort(sims[i], topn=10, reverse=True)
                curr_best = [(self.index2word[idx],
                              self.distances(limited[idx], np.array([neg_emb[i], pos_emb[0][i], pos_emb[1][i]])))
                             for idx in top_ids]
                best.append(curr_best)
        best_ids = np.argmax(sims, axis=1)
        result = (
            [self.index2word[i] for i in best_ids],
            sims[batch_size_range, best_ids].astype(np.float32),
            best)
        return [result]

    def batch_most_similar_cosmul_analogy(self, positive=None, negative=None, restrict_vocab=None, debug=False):
        """
        Solve an analogy task. The result should be similar to the positive words and unlike the negative word.
        Find the top-N most similar words, using the multiplicative combination objective
        proposed by Omer Levy and Yoav Goldberg. Positive words still contribute
        positively towards the similarity, negative words negatively, but with less
        susceptibility to one large distance dominating the calculation.
        Parameters
        ----------
        positive : list of two numpy.array
            List of two 2D numpy arrays. Each of them contains positive instances. The number of rows is equal to the
            number of questions in a batch.
        negative : numpy.array
            2D array that contains on each row the embedding of the negative word from that question. The number of
            rows is equal to the number of questions in a batch.
        restrict_vocab : int
            Optional integer which limits the range of vectors which
            are searched for most-similar values. For example, restrict_vocab=10000 would
            only check the first 10000 word vectors in the vocabulary order. (This may be
            meaningful if you've sorted the vocabulary by descending frequency.)
        Returns
        -------
        :obj: `list` of :obj: `tuple`
            Returns a list of tuples (word, similarity)
        """
        self.init_sims()
        batch_size = len(negative)
        # XXX: before calling this method, #accuracy is setting self.vocab to be only the restricted vocab.
        # So here, self.vocab is actually self.vocab[:restricted]
        # Retrieve embeddings.
        pos_emb = [
            self.vectors_norm[positive[0]],
            self.vectors_norm[positive[1]]
        ]
        neg_emb = self.vectors_norm[negative]
        limited = self.vectors_norm if restrict_vocab is None else self.vectors_norm[:restrict_vocab]  # vocab_size * vector_size
        # equation (4) of Levy & Goldberg "Linguistic Regularities...",
        # with distances shifted to [0,1] per footnote (7)
        pos_dists = [
            (1 + dot(pos_emb[0], limited.T)) / 2,  # batch_size * vocab_size
            (1 + dot(pos_emb[1], limited.T)) / 2  # batch_size * vocab_size
        ]
        neg_dists = (1 + dot(neg_emb, limited.T)) / 2  # batch_size * vocab_size
        sims = pos_dists[0] * pos_dists[1] / (neg_dists + 0.000001)  # batch_size * vocab_size
        # Mask out the three query words of each question so they can never win.
        min_float = np.finfo(sims.dtype).min
        batch_size_range = np.arange(batch_size)
        x = np.concatenate((batch_size_range, batch_size_range, batch_size_range))
        y = np.concatenate((positive[0], positive[1], negative))
        sims[x, y] = min_float  # batch_size * (vocab_size - 3)
        best_ids = np.argmax(sims, axis=1)
        result = (
            [self.index2word[i] for i in best_ids],
            sims[batch_size_range, best_ids].astype(np.float32))
        return [result]

    def most_similar_cosmul(self, positive=None, negative=None, topn=10, restrict_vocab=None, debug=False):
        """
        Find the top-N most similar words, using the multiplicative combination objective
        proposed by Omer Levy and Yoav Goldberg. Positive words still contribute
        positively towards the similarity, negative words negatively, but with less
        susceptibility to one large distance dominating the calculation.
        In the common analogy-solving case, of two positive and one negative examples,
        this method is equivalent to the "3CosMul" objective (equation (4)) of Levy and Goldberg.
        Additional positive or negative examples contribute to the numerator or denominator,
        respectively – a potentially sensible but untested extension of the method. (With
        a single positive example, rankings will be the same as in the default most_similar.)
        Example::
          >>> trained_model.most_similar_cosmul(positive=['baghdad', 'england'], negative=['london'])
          [(u'iraq', 0.8488819003105164), ...]
        .. Omer Levy and Yoav Goldberg. Linguistic Regularities in Sparse and Explicit Word Representations, 2014.
        """
        if positive is None:
            positive = []
        if negative is None:
            negative = []
        self.init_sims()
        if isinstance(positive, string_types) and not negative:
            # allow calls like most_similar_cosmul('dog'), as a shorthand for most_similar_cosmul(['dog'])
            positive = [positive]
        all_words = {
            self.vocab[word].index for word in positive + negative
            if not isinstance(word, ndarray) and word in self.vocab
        }
        positive = [
            self.word_vec(word, use_norm=True) if isinstance(word, string_types) else word
            for word in positive
        ]
        negative = [
            self.word_vec(word, use_norm=True) if isinstance(word, string_types) else word
            for word in negative
        ]
        if not positive:
            raise ValueError("cannot compute similarity with no input")
        limited = self.vectors_norm if restrict_vocab is None else self.vectors_norm[:restrict_vocab]
        # equation (4) of Levy & Goldberg "Linguistic Regularities...",
        # with distances shifted to [0,1] per footnote (7)
        pos_dists = [((1 + dot(limited, term)) / 2) for term in positive]
        neg_dists = [((1 + dot(limited, term)) / 2) for term in negative]
        dists = prod(pos_dists, axis=0) / (prod(neg_dists, axis=0) + 0.000001)
        if not topn:
            return dists
        best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
        # ignore (don't return) words from the input
        result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
        return result[:topn]

    @staticmethod
    def cosine_similarities(vector_1, vectors_all):
        """
        Return cosine similarities between one vector and a set of other vectors.
        Parameters
        ----------
        vector_1 : numpy.array
            vector from which similarities are to be computed.
            expected shape (dim,)
        vectors_all : numpy.array
            for each row in vectors_all, distance from vector_1 is computed.
            expected shape (num_vectors, dim)
        Returns
        -------
        :obj: `numpy.array`
            Contains cosine distance between vector_1 and each row in vectors_all.
            shape (num_vectors,)
        """
        # Renamed the local from `norm` so it no longer shadows the module-level
        # numpy.linalg.norm function.
        vec_norm = np.linalg.norm(vector_1)
        all_norms = np.linalg.norm(vectors_all, axis=1)
        dot_products = dot(vectors_all, vector_1)
        similarities = dot_products / (vec_norm * all_norms)
        return similarities

    def distances(self, word_or_vector, other_words_or_vectors=()):
        """
        Compute cosine distances from given word or vector to all words in `other_words`.
        If `other_words` is empty, return distance between `word_or_vectors` and all words in vocab.
        Parameters
        ----------
        word_or_vector : str or numpy.array
            Word or vector from which distances are to be computed.
        other_words_or_vectors : iterable(str) or numpy.array
            For each word in `other_words_or_vectors` distance from `word_or_vector` is computed.
            If None or empty, distance of `word_or_vector` from all words in vocab is computed (including itself).
        Returns
        -------
        numpy.array
            Array containing distances to all words in `other_words_or_vectors` from input `word_or_vector`,
            in the same order as `other_words`.
        Notes
        -----
        Raises KeyError if either `word_or_vector` or any word in `other_words_or_vectors` is absent from vocab.
        """
        if isinstance(word_or_vector, string_types):
            input_vector = self.word_vec(word_or_vector)
        else:
            input_vector = word_or_vector
        if not len(other_words_or_vectors):
            other_vectors = self.vectors
        else:
            # Accept either a list of words or an array of raw vectors.
            if isinstance(other_words_or_vectors[0], string_types):
                other_indices = [self.vocab[word].index for word in other_words_or_vectors]
                other_vectors = self.vectors[other_indices]
            else:
                other_vectors = other_words_or_vectors
        return 1 - self.cosine_similarities(input_vector, other_vectors)

    def distance(self, word_or_vector1, word_or_vector2):
        """
        Compute cosine distance between two words.
        Examples
        --------
        >>> trained_model.distance('woman', 'man')
        0.34
        >>> trained_model.distance('woman', 'woman')
        0.0
        """
        v1 = self.word_vec(word_or_vector1) if isinstance(word_or_vector1, string_types) else word_or_vector1
        v2 = self.word_vec(word_or_vector2) if isinstance(word_or_vector2, string_types) else word_or_vector2
        return 1 - dot(matutils.unitvec(v1), matutils.unitvec(v2))

    def similarity(self, w1, w2):
        """
        Compute cosine similarity between two words.
        Examples
        --------
        >>> trained_model.similarity('woman', 'man')
        0.73723527
        >>> trained_model.similarity('woman', 'woman')
        1.0
        """
        return dot(matutils.unitvec(self[w1]), matutils.unitvec(self[w2]))

    def embedding_norm(self, word_or_vector):
        """
        Compute embedding norm for a given word.
        Parameters
        ----------
        word_or_vector : string or array
            word or vector
        """
        v = self.word_vec(word_or_vector) if isinstance(word_or_vector, string_types) else word_or_vector
        return norm(v)

    def n_similarity(self, ws1, ws2):
        """
        Compute cosine similarity between two sets of words.
        Examples
        --------
        >>> trained_model.n_similarity(['sushi', 'shop'], ['japanese', 'restaurant'])
        0.61540466561049689
        >>> trained_model.n_similarity(['restaurant', 'japanese'], ['japanese', 'restaurant'])
        1.0000000000000004
        >>> trained_model.n_similarity(['sushi'], ['restaurant']) == trained_model.similarity('sushi', 'restaurant')
        True
        """
        if not (len(ws1) and len(ws2)):
            raise ZeroDivisionError('At least one of the passed list is empty.')
        v1 = [self[word] for word in ws1]
        v2 = [self[word] for word in ws2]
        return dot(matutils.unitvec(array(v1).mean(axis=0)), matutils.unitvec(array(v2).mean(axis=0)))

    def doesnt_match(self, words):
        """
        Which word from the given list doesn't go with the others?
        Parameters
        ----------
        words : :obj: `list` of :obj: `str`
            List of words
        Returns
        -------
        str
            The word further away from the mean of all words.
        Example
        -------
        >>> trained_model.doesnt_match("breakfast cereal dinner lunch".split())
        'cereal'
        """
        self.init_sims()
        used_words = [word for word in words if word in self]
        if len(used_words) != len(words):
            ignored_words = set(words) - set(used_words)
            logger.warning("vectors for words %s are not present in the model, ignoring these words", ignored_words)
        if not used_words:
            raise ValueError("cannot select a word from an empty list")
        # Pass a list, not a generator: numpy's vstack requires a sequence of arrays.
        vectors = vstack([self.word_vec(word, use_norm=True) for word in used_words]).astype(self.vector_dtype)
        mean = matutils.unitvec(vectors.mean(axis=0)).astype(self.vector_dtype)
        dists = dot(vectors, mean)
        return sorted(zip(dists, used_words))[0][1]
class Word2VecKeyedVectors(VanillaWordEmbeddingsKeyedVectors):
    """Vectors and vocabulary for a word2vec model.

    Used to perform operations on the vectors such as vector lookup, distance,
    similarity etc., and to read/write the original C word2vec tool's format.
    """

    def save_word2vec_format(self, fname, fvocab=None, binary=False, total_vec=None):
        """Write the input-hidden weight matrix in the format used by the original
        C word2vec tool, for compatibility.

        Parameters
        ----------
        fname : str
            Destination file path for the vectors.
        fvocab : str
            Optional destination file path for the vocabulary.
        binary : bool
            If True, save in binary word2vec format; otherwise plain text.
        total_vec : int
            Optional explicit total number of vectors (useful when document
            vectors are appended after the word vectors).
        """
        _save_word2vec_format(
            fname, self.vocab, self.vectors,
            fvocab=fvocab, binary=binary, total_vec=total_vec)

    @classmethod
    def load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
                             limit=None, datatype=REAL):
        """Load the input-hidden weight matrix from the original C word2vec-tool format.

        The file does not contain the binary tree, so a model loaded this way
        supports similarity queries but cannot continue training.

        Parameters
        ----------
        fname : str
            Path to the saved word2vec-format file.
        fvocab : str
            Optional path to the vocabulary file produced by the C tool's
            `-save-vocab` flag; word counts are read from it when given.
        binary : bool
            If True, the data is in binary word2vec format.
        encoding : str
            Encoding of the words, for models trained with a non-utf8 C build.
        unicode_errors : str
            Error handler passed to unicode()/str(); 'ignore' or 'replace' can
            help with word tokens truncated mid-multibyte-character.
        limit : int
            Maximum number of word-vectors to read; None reads all.
        datatype : :class: `numpy.float*`
            (Experimental) Coerce dimensions to a non-default float type (such
            as np.float16) to save memory, possibly at a speed cost.

        Returns
        -------
        :obj: `~gensim.models.word2vec.Word2Vec`
            The loaded model.
        """
        return _load_word2vec_format(
            Word2VecKeyedVectors, fname,
            fvocab=fvocab, binary=binary, encoding=encoding, unicode_errors=unicode_errors,
            limit=limit, datatype=datatype)

    def get_keras_embedding(self, train_embeddings=False):
        """Return a Keras 'Embedding' layer with weights set to this model's word embeddings.

        Parameters
        ----------
        train_embeddings : bool
            If False, the weights are frozen; if True, they can be further trained.

        Returns
        -------
        :obj: `keras.layers.Embedding`
            Embedding layer
        """
        try:
            from keras.layers import Embedding
        except ImportError:
            raise ImportError("Please install Keras to use this function")
        weights = self.vectors
        # The layer reuses `weights` directly, so no extra memory is allocated.
        return Embedding(
            input_dim=weights.shape[0],
            output_dim=weights.shape[1],
            weights=[weights],
            trainable=train_embeddings,
        )
# Historical alias: older code imports `KeyedVectors` directly.
KeyedVectors = Word2VecKeyedVectors  # alias for backward compatibility
class Doc2VecKeyedVectors(BaseKeyedVectors):
def __init__(self, vector_size, mapfile_path):
super(Doc2VecKeyedVectors, self).__init__(vector_size=vector_size)
self.doctags = {} # string -> Doctag (only filled if necessary)
self.max_rawint = -1 # highest rawint-indexed doctag
self.offset2doctag = [] # int offset-past-(max_rawint+1) -> String (only filled if necessary)
self.count = 0
self.vectors_docs = []
self.mapfile_path = mapfile_path
self.vector_size = vector_size
self.vectors_docs_norm = None
@property
def index2entity(self):
return self.offset2doctag
@index2entity.setter
def index2entity(self, value):
self.offset2doctag = value
@property
@deprecated("Attribute will be removed in 4.0.0, use docvecs.vectors_docs instead")
def doctag_syn0(self):
return self.vectors_docs
@property
@deprecated("Attribute will be removed in 4.0.0, use docvecs.vectors_docs_norm instead")
def doctag_syn0norm(self):
return self.vectors_docs_norm
def __getitem__(self, index):
"""
Accept a single key (int or string tag) or list of keys as input.
If a single string or int, return designated tag's vector
representation, as a 1D numpy array.
If a list, return designated tags' vector representations as a
2D numpy array: #tags x #vector_size.
"""
if index in self:
if isinstance(index, string_types + integer_types + (integer,)):
return self.vectors_docs[self._int_index(index, self.doctags, self.max_rawint)]
return vstack([self[i] for i in index])
raise KeyError("tag '%s' not seen in training corpus/invalid" % index)
def __contains__(self, index):
if isinstance(index, integer_types + (integer,)):
return index < self.count
else:
return index in self.doctags
def __len__(self):
return self.count
def save(self, *args, **kwargs):
"""Saves the keyedvectors. This saved model can be loaded again using
:func:`~gensim.models.doc2vec.Doc2VecKeyedVectors.load` which supports
operations on trained document vectors like `most_similar`.
Parameters
----------
fname : str
Path to the file.
"""
# don't bother storing the cached normalized vectors
kwargs['ignore'] = kwargs.get('ignore', ['vectors_docs_norm'])
super(Doc2VecKeyedVectors, self).save(*args, **kwargs)
def init_sims(self, replace=False):
"""
Precompute L2-normalized vectors.
If `replace` is set, forget the original vectors and only keep the normalized
ones = saves lots of memory!
Note that you **cannot continue training or inference** after doing a replace.
The model becomes effectively read-only = you can call `most_similar`, `similarity`
etc., but not `train` or `infer_vector`.
"""
if getattr(self, 'vectors_docs_norm', None) is None or replace:
logger.info("precomputing L2-norms of doc weight vectors")
if replace:
for i in xrange(self.vectors_docs.shape[0]):
self.vectors_docs[i, :] /= sqrt((self.vectors_docs[i, :] ** 2).sum(-1))
self.vectors_docs_norm = self.vectors_docs
else:
if self.mapfile_path:
self.vectors_docs_norm = np_memmap(
self.mapfile_path + '.vectors_docs_norm', dtype=REAL,
mode='w+', shape=self.vectors_docs.shape)
else:
self.vectors_docs_norm = empty(self.vectors_docs.shape, dtype=REAL)
np_divide(
self.vectors_docs, sqrt((self.vectors_docs ** 2).sum(-1))[..., newaxis], self.vectors_docs_norm)
def most_similar(self, positive=None, negative=None, topn=10, clip_start=0, clip_end=None, indexer=None):
"""
Find the top-N most similar docvecs known from training. Positive docs contribute
positively towards the similarity, negative docs negatively.
This method computes cosine similarity between a simple mean of the projection
weight vectors of the given docs. Docs may be specified as vectors, integer indexes
of trained docvecs, or if the documents were originally presented with string tags,
by the corresponding tags.
The 'clip_start' and 'clip_end' allow limiting results to a particular contiguous
range of the underlying `vectors_docs_norm` vectors. (This may be useful if the ordering
there was chosen to be significant, such as more popular tag IDs in lower indexes.)
Parameters
----------
positive : :obj: `list`
List of Docs specifed as vectors, integer indexes of trained docvecs or string tags
that contribute positively.
negative : :obj: `list`
List of Docs specifed as vectors, integer indexes of trained docvecs or string tags
that contribute negatively.
topn : int
Number of top-N similar docvecs to return.
clip_start : int
Start clipping index.
clip_end : int
End clipping index.
Returns
-------
:obj: `list` of :obj: `tuple`
Returns a list of tuples (doc, similarity)
"""
if positive is None:
positive = []
if negative is None:
negative = []
self.init_sims()
clip_end = clip_end or len(self.vectors_docs_norm)
if isinstance(positive, string_types + integer_types + (integer,)) and not negative:
# allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
positive = [positive]
# add weights for each doc, if not already present; default to 1.0 for positive and -1.0 for negative docs
positive = [
(doc, 1.0) if isinstance(doc, string_types + integer_types + (ndarray, integer))
else doc for doc in positive
]
negative = [
(doc, -1.0) if isinstance(doc, string_types + integer_types + (ndarray, integer))
else doc for doc in negative
]
# compute the weighted average of all docs
all_docs, mean = set(), []
for doc, weight in positive + negative:
if isinstance(doc, ndarray):
mean.append(weight * doc)
elif doc in self.doctags or doc < self.count:
mean.append(weight * self.vectors_docs_norm[self._int_index(doc, self.doctags, self.max_rawint)])
all_docs.add(self._int_index(doc, self.doctags, self.max_rawint))
else:
raise KeyError("doc '%s' not in trained set" % doc)
if not mean:
raise ValueError("cannot compute similarity with no input")
mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
if indexer is not None:
return indexer.most_similar(mean, topn)
dists = dot(self.vectors_docs_norm[clip_start:clip_end], mean)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_docs), reverse=True)
# ignore (don't return) docs from the input
result = [
(self._index_to_doctag(sim + clip_start, self.offset2doctag, self.max_rawint), float(dists[sim]))
for sim in best
if (sim + clip_start) not in all_docs
]
return result[:topn]
def doesnt_match(self, docs):
"""
Which doc from the given list doesn't go with the others?
(TODO: Accept vectors of out-of-training-set docs, as if from inference.)
Parameters
----------
docs : :obj: `list` of (str or int)
List of seen documents specified by their corresponding string tags or integer indices.
Returns
-------
str or int
The document further away from the mean of all the documents.
"""
self.init_sims()
docs = [doc for doc in docs if doc in self.doctags or 0 <= doc < self.count] # filter out unknowns
logger.debug("using docs %s", docs)
if not docs:
raise ValueError("cannot select a doc from an empty list")
vectors = vstack(
self.vectors_docs_norm[self._int_index(doc, self.doctags, self.max_rawint)] for doc in docs).astype(REAL)
mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL)
dists = dot(vectors, mean)
return sorted(zip(dists, docs))[0][1]
def similarity(self, d1, d2):
"""
Compute cosine similarity between two docvecs in the trained set, specified by int index or
string tag. (TODO: Accept vectors of out-of-training-set docs, as if from inference.)
Parameters
----------
d1 : int or str
Indicate the first document by it's string tag or integer index.
d2 : int or str
Indicate the second document by it's string tag or integer index.
Returns
-------
float
The cosine similarity between the vectors of the two documents.
"""
return dot(matutils.unitvec(self[d1]), matutils.unitvec(self[d2]))
def n_similarity(self, ds1, ds2):
"""
Compute cosine similarity between two sets of docvecs from the trained set, specified by int
index or string tag. (TODO: Accept vectors of out-of-training-set docs, as if from inference.)
Parameters
----------
ds1 : :obj: `list` of (str or int)
Specify the first set of documents as a list of their integer indices or string tags.
ds2 : :obj: `list` of (str or int)
Specify the second set of documents as a list of their integer indices or string tags.
Returns
-------
float
The cosine similarity between the means of the documents in each of the two sets.
"""
v1 = [self[doc] for doc in ds1]
v2 = [self[doc] for doc in ds2]
return dot(matutils.unitvec(array(v1).mean(axis=0)), matutils.unitvec(array(v2).mean(axis=0)))
def distance(self, d1, d2):
"""
Compute cosine distance between two documents.
"""
return 1 - self.similarity(d1, d2)
# required by base keyed vectors class
def distances(self, d1, other_docs=()):
"""Compute distances from given document (string tag or int index) to all documents in `other_docs`.
If `other_docs` is empty, return distance between `d1` and all documents seen during training.
"""
input_vector = self[d1]
if not other_docs:
other_vectors = self.vectors_docs
else:
other_vectors = self[other_docs]
return 1 - WordEmbeddingsKeyedVectors.cosine_similarities(input_vector, other_vectors)
def similarity_unseen_docs(self, model, doc_words1, doc_words2, alpha=0.1, min_alpha=0.0001, steps=5):
"""
Compute cosine similarity between two post-bulk out of training documents.
Parameters
----------
model : :obj: `~gensim.models.doc2vec.Doc2Vec`
An instance of a trained `Doc2Vec` model.
doc_words1 : :obj: `list` of :obj: `str`
The first document. Document should be a list of (word) tokens.
doc_words2 : :obj: `list` of :obj: `str`
The second document. Document should be a list of (word) tokens.
alpha : float
The initial learning rate.
min_alpha : float
Learning rate will linearly drop to `min_alpha` as training progresses.
steps : int
Number of times to train the new document.
Returns
-------
float
The cosine similarity between the unseen documents.
"""
d1 = model.infer_vector(doc_words=doc_words1, alpha=alpha, min_alpha=min_alpha, steps=steps)
d2 = model.infer_vector(doc_words=doc_words2, alpha=alpha, min_alpha=min_alpha, steps=steps)
return dot(matutils.unitvec(d1), matutils.unitvec(d2))
def save_word2vec_format(self, fname, prefix='*dt_', fvocab=None,
total_vec=None, binary=False, write_first_line=True):
"""Store the input-hidden weight matrix in the same format used by the original
C word2vec-tool, for compatibility.
Parameters
----------
fname : str
The file path used to save the vectors in.
prefix : str
Uniquely identifies doctags from word vocab, and avoids collision
in case of repeated string in doctag and word vocab.
fvocab : str
Optional file path used to save the vocabulary
binary : bool
If True, the data wil be saved in binary word2vec format, else it will be saved in plain text.
total_vec : int
Optional parameter to explicitly specify total no. of vectors
(in case word vectors are appended with document vectors afterwards)
write_first_line : bool
Whether to print the first line in the file. Useful when saving doc-vectors after word-vectors.
"""
total_vec = total_vec or len(self)
with utils.smart_open(fname, 'ab') as fout:
if write_first_line:
logger.info("storing %sx%s projection weights into %s", total_vec, self.vectors_docs.shape[1], fname)
fout.write(utils.to_utf8("%s %s\n" % (total_vec, self.vectors_docs.shape[1])))
# store as in input order
for i in range(len(self)):
doctag = u"%s%s" % (prefix, self._index_to_doctag(i, self.offset2doctag, self.max_rawint))
row = self.vectors_docs[i]
if binary:
fout.write(utils.to_utf8(doctag) + b" " + row.tostring())
else:
fout.write(utils.to_utf8("%s %s\n" % (doctag, ' '.join("%f" % val for val in row))))
@staticmethod
def _int_index(index, doctags, max_rawint):
    """Return int index for either string or int index"""
    # Integer (including numpy integer) indices pass straight through;
    # string doctags resolve to their stored offset past the raw-int range.
    if not isinstance(index, integer_types + (integer,)):
        return max_rawint + 1 + doctags[index].offset
    return index
@staticmethod
def _index_to_doctag(i_index, offset2doctag, max_rawint):
    """Return string key for given i_index, if available. Otherwise return raw int doctag (same int)."""
    offset = i_index - max_rawint - 1
    # Offsets inside the stored range carry a string doctag; everything else
    # is a raw integer doctag and is echoed back unchanged.
    if not (0 <= offset < len(offset2doctag)):
        return i_index
    return offset2doctag[offset]
# for backward compatibility
def index_to_doctag(self, i_index):
    """Return string key for given i_index, if available. Otherwise return raw int doctag (same int).

    Parameters
    ----------
    i_index : int
        Internal (contiguous) index of the doctag.

    Returns
    -------
    str or int
        The string doctag stored at the corresponding offset, or `i_index`
        itself when the index maps to a raw integer doctag.
    """
    candidate_offset = i_index - self.max_rawint - 1
    if 0 <= candidate_offset < len(self.offset2doctag):
        # Fixed: was `self.ffset2doctag`, which raised AttributeError for
        # every string doctag lookup.
        return self.offset2doctag[candidate_offset]
    else:
        return i_index
# for backward compatibility
def int_index(self, index, doctags, max_rawint):
    """Return int index for either string or int index"""
    # Mirrors the static `_int_index`: ints (including numpy ints) are
    # returned unchanged, string keys map past the raw-int range.
    if not isinstance(index, integer_types + (integer,)):
        return max_rawint + 1 + doctags[index].offset
    return index
class FastTextKeyedVectors(VanillaWordEmbeddingsKeyedVectors):
    """
    Class to contain vectors and vocab for the FastText training class and other methods not directly
    involved in training such as most_similar()
    """
    def __init__(self, vector_size, min_n, max_n):
        # vector_size: dimensionality of the embeddings.
        # min_n / max_n: inclusive bounds on character-ngram lengths used for
        # subword (out-of-vocabulary) vector lookup.
        super(FastTextKeyedVectors, self).__init__(vector_size=vector_size)
        # Trained weight matrices; populated during training/loading, so None here.
        self.vectors_vocab = None
        self.vectors_vocab_norm = None
        self.vectors_ngrams = None
        self.vectors_ngrams_norm = None
        self.buckets_word = None
        # Maps (ngram hash % bucket count) -> row index into vectors_ngrams.
        self.hash2index = {}
        self.min_n = min_n
        self.max_n = max_n
        self.num_ngram_vectors = 0

    @property
    @deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors_vocab instead")
    def syn0_vocab(self):
        # Deprecated alias kept for backward compatibility.
        return self.vectors_vocab

    @property
    @deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors_vocab_norm instead")
    def syn0_vocab_norm(self):
        # Deprecated alias kept for backward compatibility.
        return self.vectors_vocab_norm

    @property
    @deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors_ngrams instead")
    def syn0_ngrams(self):
        # Deprecated alias kept for backward compatibility.
        return self.vectors_ngrams

    @property
    @deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors_ngrams_norm instead")
    def syn0_ngrams_norm(self):
        # Deprecated alias kept for backward compatibility.
        return self.vectors_ngrams_norm

    def __contains__(self, word):
        """
        Check if `word` or any character ngrams in `word` are present in the vocabulary.
        A vector for the word is guaranteed to exist if `__contains__` returns True.
        """
        if word in self.vocab:
            return True
        else:
            # OOV word counts as "contained" when at least one of its
            # character ngrams has a trained vector.
            # NOTE(review): assumes `self.bucket` is set elsewhere (e.g. by the
            # training/loading code) — it is not initialized in __init__.
            char_ngrams = _compute_ngrams(word, self.min_n, self.max_n)
            return any(_ft_hash(ng) % self.bucket in self.hash2index for ng in char_ngrams)

    def save(self, *args, **kwargs):
        """Saves the keyedvectors. This saved model can be loaded again using
        :func:`~gensim.models.fasttext.FastTextKeyedVectors.load` which supports
        getting vectors for out-of-vocabulary words.

        Parameters
        ----------
        fname : str
            Path to the file.
        """
        # don't bother storing the cached normalized vectors
        kwargs['ignore'] = kwargs.get(
            'ignore', ['vectors_norm', 'vectors_vocab_norm', 'vectors_ngrams_norm', 'buckets_word'])
        super(FastTextKeyedVectors, self).save(*args, **kwargs)

    def word_vec(self, word, use_norm=False):
        """
        Accept a single word as input.
        Returns the word's representations in vector space, as a 1D numpy array.
        If `use_norm` is True, returns the normalized word vector.

        For an out-of-vocabulary word, the returned vector is the average of
        the vectors of its known character ngrams; raises KeyError if none of
        the word's ngrams are present in the model.
        """
        if word in self.vocab:
            return super(FastTextKeyedVectors, self).word_vec(word, use_norm)
        else:
            # from gensim.models.fasttext import compute_ngrams
            word_vec = np.zeros(self.vectors_ngrams.shape[1], dtype=np.float32)
            ngrams = _compute_ngrams(word, self.min_n, self.max_n)
            if use_norm:
                ngram_weights = self.vectors_ngrams_norm
            else:
                ngram_weights = self.vectors_ngrams
            ngrams_found = 0
            for ngram in ngrams:
                ngram_hash = _ft_hash(ngram) % self.bucket
                if ngram_hash in self.hash2index:
                    word_vec += ngram_weights[self.hash2index[ngram_hash]]
                    ngrams_found += 1
            if word_vec.any():
                # max(1, ...) guards against division by zero, though
                # ngrams_found > 0 whenever word_vec is non-zero here.
                return word_vec / max(1, ngrams_found)
            else:  # No ngrams of the word are present in self.ngrams
                raise KeyError('all ngrams for word %s absent from model' % word)

    def init_sims(self, replace=False):
        """
        Precompute L2-normalized vectors.
        If `replace` is set, forget the original vectors and only keep the normalized
        ones = saves lots of memory!
        Note that you **cannot continue training** after doing a replace. The model becomes
        effectively read-only = you can only call `most_similar`, `similarity` etc.
        """
        super(FastTextKeyedVectors, self).init_sims(replace)
        if getattr(self, 'vectors_ngrams_norm', None) is None or replace:
            logger.info("precomputing L2-norms of ngram weight vectors")
            if replace:
                # In-place normalization: overwrites the trained ngram vectors.
                for i in range(self.vectors_ngrams.shape[0]):
                    self.vectors_ngrams[i, :] /= sqrt((self.vectors_ngrams[i, :] ** 2).sum(-1))
                self.vectors_ngrams_norm = self.vectors_ngrams
            else:
                self.vectors_ngrams_norm = \
                    (self.vectors_ngrams / sqrt((self.vectors_ngrams ** 2).sum(-1))[..., newaxis]).astype(REAL)

    def save_word2vec_format(self, fname, fvocab=None, binary=False, total_vec=None):
        """Store the input-hidden weight matrix in the same format used by the original
        C word2vec-tool, for compatibility.

        Parameters
        ----------
        fname : str
            The file path used to save the vectors in.
        fvocab : str
            Optional file path used to save the vocabulary.
        binary : bool
            If True, the data wil be saved in binary word2vec format, else it will be saved in plain text.
        total_vec : int
            Optional parameter to explicitly specify total no. of vectors
            (in case word vectors are appended with document vectors afterwards).
        """
        # from gensim.models.word2vec import save_word2vec_format
        # Delegates to the shared module-level writer.
        _save_word2vec_format(
            fname, self.vocab, self.vectors, fvocab=fvocab, binary=binary, total_vec=total_vec)
| 117,148 | 41.185452 | 150 | py |
poincare_glove | poincare_glove-master/gensim/models/tfidfmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Radim Rehurek <radimrehurek@seznam.cz>
# Copyright (C) 2017 Mohit Rathore <mrmohitrathoremr@gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
import logging
from functools import partial
from gensim import interfaces, matutils, utils
from six import iteritems
import numpy as np
logger = logging.getLogger(__name__)
def resolve_weights(smartirs):
    """Validate a SMART tf-idf mnemonic and split it into its components.

    Parameters
    ----------
    smartirs : str
        Three-letter SMART (System for the Mechanical Analysis and Retrieval
        of Text) mnemonic, e.g. ``'ntc'``: term-frequency weighting, document
        frequency weighting and document normalization, in that order.
        See https://en.wikipedia.org/wiki/SMART_Information_Retrieval_System

    Returns
    -------
    (str, str, str)
        The ``(w_tf, w_df, w_n)`` letters:
        term frequency — n(atural), l(ogarithm), a(ugmented), b(oolean), L(og average);
        document frequency — n(one), t (idf), p(rob idf);
        normalization — n(one), c(osine).

    Raises
    ------
    ValueError
        If `smartirs` is not a length-3 string, or any letter is outside the
        permissible alphabet for its position.
    """
    if not isinstance(smartirs, str) or len(smartirs) != 3:
        raise ValueError("Expected a string of length 3 except got " + smartirs)

    w_tf, w_df, w_n = smartirs

    # Validate each letter against its allowed alphabet with a single loop.
    checks = (
        (w_tf, 'nlabL', "Expected term frequency weight to be one of 'nlabL', except got {}"),
        (w_df, 'ntp', "Expected inverse document frequency weight to be one of 'ntp', except got {}"),
        (w_n, 'ncb', "Expected normalization weight to be one of 'ncb', except got {}"),
    )
    for letter, allowed, message in checks:
        if letter not in allowed:
            raise ValueError(message.format(letter))

    return w_tf, w_df, w_n
def df2idf(docfreq, totaldocs, log_base=2.0, add=0.0):
    """Compute the default inverse document frequency for a term:
    :math:`idf = add + log_{log\_base} \\frac{totaldocs}{doc\_freq}`

    Parameters
    ----------
    docfreq : float
        Number of documents the term occurs in.
    totaldocs : int
        Total number of documents in the corpus.
    log_base : float, optional
        Base of the logarithm.
    add : float, optional
        Constant offset added to the result.

    Returns
    -------
    float
        Inverse document frequency of the term.
    """
    ratio = float(totaldocs) / docfreq
    return add + np.log(ratio) / np.log(log_base)
def precompute_idfs(wglobal, dfs, total_docs):
    """Pre-compute the inverse document frequency mapping for all terms.

    Parameters
    ----------
    wglobal : function
        Custom function for calculating idf, e.g.
        :func:`~gensim.models.tfidfmodel.updated_wglobal`; called as
        ``wglobal(df, total_docs)``.
    dfs : dict
        Mapping from term_id to the number of documents the term appeared in.
    total_docs : int
        Total number of documents.

    Returns
    -------
    dict
        Precomputed idfs in format {term_id_1: idfs_1, term_id_2: idfs_2, ...}
    """
    # not strictly necessary and could be computed on the fly in TfidfModel__getitem__.
    # this method is here just to speed things up a little.
    # `dict.items()` yields the same (key, value) pairs on both Python 2 and 3,
    # so the six.iteritems compatibility shim is unnecessary here.
    return {termid: wglobal(df, total_docs) for termid, df in dfs.items()}
def updated_wlocal(tf, n_tf):
    """Transform term frequency `tf` according to SMART scheme `n_tf`.

    Parameters
    ----------
    tf : int or numpy.ndarray
        Term frequency (the 'a', 'b' and 'L' schemes require a numpy array,
        since they call array methods like ``max``/``mean``/``astype``).
    n_tf : {'n', 'l', 'a', 'b', 'L'}
        Scheme: natural, logarithm, augmented, boolean, or log average.

    Returns
    -------
    float or numpy.ndarray
        Transformed term frequency (None for an unrecognized scheme).
    """
    if n_tf == "n":
        return tf
    elif n_tf == "l":
        return 1 + np.log(tf) / np.log(2)
    elif n_tf == "a":
        return 0.5 + (0.5 * tf / tf.max(axis=0))
    elif n_tf == "b":
        return tf.astype('bool').astype('int')
    elif n_tf == "L":
        # SMART log average: (1 + log2(tf)) / (1 + log2(mean(tf))).
        # Fixed: the denominator previously computed log(mean(tf) / log(2)) —
        # a misplaced parenthesis that broke the log-average weighting.
        return (1 + np.log(tf) / np.log(2)) / (1 + np.log(tf.mean(axis=0)) / np.log(2))
def updated_wglobal(docfreq, totaldocs, n_df):
    """Transform document frequency into a global (idf-like) weight.

    Parameters
    ----------
    docfreq : int
        Number of documents the term appears in.
    totaldocs : int
        Total number of documents in the corpus.
    n_df : {'n', 't', 'p'}
        SMART document-frequency scheme: none, idf, or probabilistic idf.

    Returns
    -------
    float
        The transformed weight, using base-2 logarithms for 't' and 'p'
        (None for an unrecognized scheme).
    """
    if n_df == "n":
        # No transformation; utils.identity returns its argument unchanged.
        return utils.identity(docfreq)
    if n_df == "t":
        # Classic idf: log2(N / df).
        return np.log(1.0 * totaldocs / docfreq) / np.log(2)
    if n_df == "p":
        # Probabilistic idf: log2((N - df) / df).
        return np.log((1.0 * totaldocs - docfreq) / docfreq) / np.log(2)
def updated_normalize(x, n_n):
    """Normalize the final tf-idf value according to scheme `n_n`.

    Parameters
    ----------
    x : numpy.ndarray
        Input array.
    n_n : {'n', 'c'}
        'n' — no normalization, 'c' — cosine (unit-length) normalization.

    Returns
    -------
    numpy.ndarray
        The (possibly normalized) array; None for an unrecognized scheme.
    """
    if n_n == "c":
        return matutils.unitvec(x)
    if n_n == "n":
        return x
class TfidfModel(interfaces.TransformationABC):
    """Objects of this class realize the transformation between word-document co-occurrence matrix (int)
    into a locally/globally weighted TF_IDF matrix (positive floats).

    Examples
    --------
    >>> import gensim.downloader as api
    >>> from gensim.models import TfidfModel
    >>> from gensim.corpora import Dictionary
    >>>
    >>> dataset = api.load("text8")
    >>> dct = Dictionary(dataset)  # fit dictionary
    >>> corpus = [dct.doc2bow(line) for line in dataset]  # convert dataset to BoW format
    >>>
    >>> model = TfidfModel(corpus)  # fit model
    >>> vector = model[corpus[0]]  # apply model
    """
    def __init__(self, corpus=None, id2word=None, dictionary=None, wlocal=utils.identity,
                 wglobal=df2idf, normalize=True, smartirs=None):
        """Compute tf-idf by multiplying a local component (term frequency) with a global component
        (inverse document frequency), and normalizing the resulting documents to unit length.
        Formula for non-normalized weight of term :math:`i` in document :math:`j` in a corpus of :math:`D` documents

        .. math:: weight_{i,j} = frequency_{i,j} * log_2 \\frac{D}{document\_freq_{i}}

        or, more generally

        .. math:: weight_{i,j} = wlocal(frequency_{i,j}) * wglobal(document\_freq_{i}, D)

        so you can plug in your own custom :math:`wlocal` and :math:`wglobal` functions.

        Parameters
        ----------
        corpus : iterable of iterable of (int, int), optional
            Input corpus
        id2word : {dict, :class:`~gensim.corpora.Dictionary`}, optional
            Mapping token - id, that was used for converting input data to bag of words format.
        dictionary : :class:`~gensim.corpora.Dictionary`
            If `dictionary` is specified, it must be a `corpora.Dictionary` object and it will be used
            to directly construct the inverse document frequency mapping (then `corpus`, if specified, is ignored).
        wlocal : function, optional
            Function for local weighting, default for `wlocal` is :func:`~gensim.utils.identity`
            (other options: :func:`math.sqrt`, :func:`math.log1p`, etc).
        wglobal : function, optional
            Function for global weighting, default is :func:`~gensim.models.tfidfmodel.df2idf`.
        normalize : bool, optional
            It dictates how the final transformed vectors will be normalized. `normalize=True` means set to unit length
            (default); `False` means don't normalize. You can also set `normalize` to your own function that accepts
            and returns a sparse vector.
        smartirs : str, optional
            SMART (System for the Mechanical Analysis and Retrieval of Text) Information Retrieval System,
            a mnemonic scheme for denoting tf-idf weighting variants in the vector space model.
            The mnemonic for representing a combination of weights takes the form XYZ,
            for example 'ntc', 'bpn' and so on, where the letters represents the term weighting of the document vector.

            Term frequency weighing:
                * `n` - natural,
                * `l` - logarithm,
                * `a` - augmented,
                * `b` - boolean,
                * `L` - log average.

            Document frequency weighting:
                * `n` - none,
                * `t` - idf,
                * `p` - prob idf.

            Document normalization:
                * `n` - none,
                * `c` - cosine.

            For more information visit [1]_.
        """
        self.id2word = id2word
        self.wlocal, self.wglobal, self.normalize = wlocal, wglobal, normalize
        self.num_docs, self.num_nnz, self.idfs = None, None, None
        self.smartirs = smartirs

        # If smartirs is not None, override wlocal, wglobal and normalize
        if smartirs is not None:
            n_tf, n_df, n_n = resolve_weights(smartirs)

            self.wlocal = partial(updated_wlocal, n_tf=n_tf)
            self.wglobal = partial(updated_wglobal, n_df=n_df)
            self.normalize = partial(updated_normalize, n_n=n_n)

        if dictionary is not None:
            # user supplied a Dictionary object, which already contains all the
            # statistics we need to construct the IDF mapping. we can skip the
            # step that goes through the corpus (= an optimization).
            if corpus is not None:
                logger.warning(
                    "constructor received both corpus and explicit inverse document frequencies; ignoring the corpus"
                )
            self.num_docs, self.num_nnz = dictionary.num_docs, dictionary.num_nnz
            self.dfs = dictionary.dfs.copy()
            self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)
            if id2word is None:
                self.id2word = dictionary
        elif corpus is not None:
            self.initialize(corpus)
        else:
            # NOTE: everything is left uninitialized; presumably the model will
            # be initialized in some other way
            pass

    def __str__(self):
        return "TfidfModel(num_docs=%s, num_nnz=%s)" % (self.num_docs, self.num_nnz)

    def initialize(self, corpus):
        """Compute inverse document weights, which will be used to modify term frequencies for documents.

        Parameters
        ----------
        corpus : iterable of iterable of (int, int)
            Input corpus.
        """
        logger.info("collecting document frequencies")
        dfs = {}
        numnnz, docno = 0, -1

        for docno, bow in enumerate(corpus):
            if docno % 10000 == 0:
                logger.info("PROGRESS: processing document #%i", docno)
            # numnnz counts the total number of (term, freq) entries seen.
            numnnz += len(bow)
            for termid, _ in bow:
                dfs[termid] = dfs.get(termid, 0) + 1

        # keep some stats about the training corpus
        self.num_docs = docno + 1
        self.num_nnz = numnnz
        self.dfs = dfs

        # and finally compute the idf weights
        n_features = max(dfs) if dfs else 0
        logger.info(
            "calculating IDF weights for %i documents and %i features (%i matrix non-zeros)",
            self.num_docs, n_features, self.num_nnz
        )
        self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)

    def __getitem__(self, bow, eps=1e-12):
        """Get tf-idf representation of the input vector and/or corpus.

        Parameters
        ----------
        bow : {list of (int, int), iterable of iterable of (int, int)}
            Input document or corpus in BoW format.
        eps : float
            Threshold value, will remove all position that have tfidf-value less than `eps`.

        Returns
        -------
        vector : list of (int, float)
            TfIdf vector, if `bow` is document **OR**
        :class:`~gensim.interfaces.TransformedCorpus`
            TfIdf corpus, if `bow` is corpus.
        """
        # if the input vector is in fact a corpus, return a transformed corpus as a result
        is_corpus, bow = utils.is_corpus(bow)
        if is_corpus:
            return self._apply(bow)

        # unknown (new) terms will be given zero weight (NOT infinity/huge weight,
        # as strict application of the IDF formula would dictate)
        termid_array, tf_array = [], []
        for termid, tf in bow:
            termid_array.append(termid)
            tf_array.append(tf)

        tf_array = self.wlocal(np.array(tf_array))

        # Terms missing from self.idfs are filtered out by the `if` clause,
        # so the unguarded .get() in the value expression never sees them.
        vector = [
            (termid, tf * self.idfs.get(termid))
            for termid, tf in zip(termid_array, tf_array) if abs(self.idfs.get(termid, 0.0)) > eps
        ]

        # NOTE: on the first call the boolean normalize flag is replaced by the
        # actual normalization callable, so subsequent calls skip these checks.
        if self.normalize is True:
            self.normalize = matutils.unitvec
        elif self.normalize is False:
            self.normalize = utils.identity

        # and finally, normalize the vector either to unit length, or use a
        # user-defined normalization function
        vector = self.normalize(vector)

        # make sure there are no explicit zeroes in the vector (must be sparse)
        vector = [(termid, weight) for termid, weight in vector if abs(weight) > eps]
        return vector
| 13,748 | 33.807595 | 119 | py |
poincare_glove | poincare_glove-master/gensim/models/deprecated/word2vec.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Warnings
--------
.. deprecated:: 3.3.0
Use :mod:`gensim.models.word2vec` instead.
Produce word vectors with deep learning via word2vec's "skip-gram and CBOW models", using either
hierarchical softmax or negative sampling [1]_ [2]_.
NOTE: There are more ways to get word vectors in Gensim than just Word2Vec.
See wrappers for FastText, VarEmbed and WordRank.
The training algorithms were originally ported from the C package https://code.google.com/p/word2vec/
and extended with additional functionality.
For a blog tutorial on gensim word2vec, with an interactive web app trained on GoogleNews,
visit http://radimrehurek.com/2014/02/word2vec-tutorial/
**Make sure you have a C compiler before installing gensim, to use optimized (compiled) word2vec training**
(70x speedup compared to plain NumPy implementation [3]_).
Initialize a model with e.g.::
>>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)
Persist a model to disk with::
>>> model.save(fname)
>>> model = Word2Vec.load(fname) # you can continue training with the loaded model!
The word vectors are stored in a KeyedVectors instance in model.wv.
This separates the read-only word vector lookup operations in KeyedVectors from the training code in Word2Vec::
>>> model.wv['computer'] # numpy vector of a word
array([-0.00449447, -0.00310097, 0.02421786, ...], dtype=float32)
The word vectors can also be instantiated from an existing file on disk in the word2vec C format
as a KeyedVectors instance::
NOTE: It is impossible to continue training the vectors loaded from the C format because hidden weights,
vocabulary frequency and the binary tree is missing::
>>> from gensim.models.keyedvectors import KeyedVectors
>>> word_vectors = KeyedVectors.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
>>> word_vectors = KeyedVectors.load_word2vec_format('/tmp/vectors.bin', binary=True) # C binary format
You can perform various NLP word tasks with the model. Some of them
are already built-in::
>>> model.wv.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
>>> model.wv.most_similar_cosmul(positive=['woman', 'king'], negative=['man'])
[('queen', 0.71382287), ...]
>>> model.wv.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
>>> model.wv.similarity('woman', 'man')
0.73723527
Probability of a text under the model::
>>> model.score(["The fox jumped over a lazy dog".split()])
0.2158356
Correlation with human opinion on word similarity::
>>> model.wv.evaluate_word_pairs(os.path.join(module_path, 'test_data','wordsim353.tsv'))
0.51, 0.62, 0.13
And on analogies::
>>> model.wv.accuracy(os.path.join(module_path, 'test_data', 'questions-words.txt'))
and so on.
If you're finished training a model (i.e. no more updates, only querying),
then switch to the :mod:`gensim.models.KeyedVectors` instance in wv
>>> word_vectors = model.wv
>>> del model
to trim unneeded model memory = use much less RAM.
Note that there is a :mod:`gensim.models.phrases` module which lets you automatically
detect phrases longer than one word. Using phrases, you can learn a word2vec model
where "words" are actually multiword expressions, such as `new_york_times` or `financial_crisis`:
>>> bigram_transformer = gensim.models.Phrases(sentences)
>>> model = Word2Vec(bigram_transformer[sentences], size=100, ...)
.. [1] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean.
Efficient Estimation of Word Representations in Vector Space. In Proceedings of Workshop at ICLR, 2013.
.. [2] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean.
Distributed Representations of Words and Phrases and their Compositionality. In Proceedings of NIPS, 2013.
.. [3] Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/
"""
from __future__ import division # py3 "true division"
import logging
import sys
import os
import heapq
from timeit import default_timer
from copy import deepcopy
from collections import defaultdict
import threading
import itertools
import warnings
from gensim.utils import keep_vocab_item, call_on_class_only
from gensim.models.deprecated.keyedvectors import KeyedVectors, Vocab
from gensim.models.word2vec import Word2Vec as NewWord2Vec
from gensim.models.deprecated.old_saveload import SaveLoad
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
from numpy import exp, log, dot, zeros, outer, random, dtype, float32 as REAL,\
uint32, seterr, array, uint8, vstack, fromstring, sqrt,\
empty, sum as np_sum, ones, logaddexp
from scipy.special import expit
from gensim import utils
from gensim import matutils # utility fnc for pickling, common scipy operations etc
from six import iteritems, itervalues, string_types
from six.moves import xrange
from types import GeneratorType
logger = logging.getLogger(__name__)
# failed... fall back to plain numpy (20-80x slower training than the above)
FAST_VERSION = -1  # -1 signals that no compiled (cython) training routines are available
MAX_WORDS_IN_BATCH = 10000  # upper bound on the number of words per training job batch
def load_old_word2vec(*args, **kwargs):
    """Load a model saved by this deprecated `Word2Vec` implementation and
    convert it into a new-style :class:`gensim.models.word2vec.Word2Vec`.

    All positional and keyword arguments are forwarded unchanged to
    `Word2Vec.load`. Returns the converted new-style model.
    """
    old_model = Word2Vec.load(*args, **kwargs)
    # Rebuild the constructor parameters from the old model's attributes;
    # `__dict__.get` guards attributes that older pickles may lack.
    params = {
        'size': old_model.vector_size,
        'alpha': old_model.alpha,
        'window': old_model.window,
        'min_count': old_model.min_count,
        'max_vocab_size': old_model.__dict__.get('max_vocab_size', None),
        'sample': old_model.sample,
        'seed': old_model.seed,
        'workers': old_model.workers,
        'min_alpha': old_model.min_alpha,
        'sg': old_model.sg,
        'hs': old_model.hs,
        'negative': old_model.negative,
        'cbow_mean': old_model.cbow_mean,
        'hashfxn': old_model.hashfxn,
        'iter': old_model.iter,
        'null_word': old_model.null_word,
        'sorted_vocab': old_model.sorted_vocab,
        'batch_words': old_model.batch_words,
        'compute_loss': old_model.__dict__.get('compute_loss', None)
    }
    new_model = NewWord2Vec(**params)
    # set trainables attributes
    new_model.wv.vectors = old_model.wv.syn0
    if hasattr(old_model.wv, 'syn0norm'):
        new_model.wv.vectors_norm = old_model.wv.syn0norm
    if hasattr(old_model, 'syn1'):
        new_model.trainables.syn1 = old_model.syn1
    if hasattr(old_model, 'syn1neg'):
        new_model.trainables.syn1neg = old_model.syn1neg
    if hasattr(old_model, 'syn0_lockf'):
        new_model.trainables.vectors_lockf = old_model.syn0_lockf

    # set vocabulary attributes
    new_model.wv.vocab = old_model.wv.vocab
    new_model.wv.index2word = old_model.wv.index2word
    new_model.vocabulary.cum_table = old_model.cum_table

    # carry over training bookkeeping state
    new_model.train_count = old_model.train_count
    new_model.corpus_count = old_model.corpus_count
    new_model.running_training_loss = old_model.__dict__.get('running_training_loss', None)
    new_model.total_train_time = old_model.total_train_time
    new_model.min_alpha_yet_reached = old_model.min_alpha_yet_reached
    new_model.model_trimmed_post_training = old_model.__dict__.get('model_trimmed_post_training', None)

    return new_model
def train_batch_sg(model, sentences, alpha, work=None, compute_loss=False):
    """
    Update skip-gram model by training on a sequence of sentences.

    Each sentence is a list of string tokens, which are looked up in the model's
    vocab dictionary. Called internally from `Word2Vec.train()`.

    This is the non-optimized, Python version. If you have cython installed, gensim
    will use the optimized version from word2vec_inner instead.

    Returns the number of (subsampled) in-vocabulary words processed.
    """
    tally = 0
    vocab = model.wv.vocab
    for sentence in sentences:
        # Keep only in-vocabulary words that survive frequency subsampling.
        kept = [vocab[w] for w in sentence
                if w in vocab and vocab[w].sample_int > model.random.rand() * 2**32]
        for center_pos, center in enumerate(kept):
            shrink = model.random.randint(model.window)  # `b` in the original word2vec code
            # Predict every word in the (randomly reduced) window around the center.
            window_start = max(0, center_pos - model.window + shrink)
            window_end = center_pos + model.window + 1 - shrink
            for ctx_pos, ctx in enumerate(kept[window_start:window_end], window_start):
                if ctx_pos == center_pos:
                    continue  # don't train on the center word itself
                train_sg_pair(
                    model, model.wv.index2word[center.index], ctx.index, alpha, compute_loss=compute_loss
                )
        tally += len(kept)
    return tally
def train_batch_cbow(model, sentences, alpha, work=None, neu1=None, compute_loss=False):
    """
    Update CBOW model by training on a sequence of sentences.

    Each sentence is a list of string tokens, which are looked up in the model's
    vocab dictionary. Called internally from `Word2Vec.train()`.

    This is the non-optimized, Python version. If you have cython installed, gensim
    will use the optimized version from word2vec_inner instead.

    Returns the number of (subsampled) in-vocabulary words processed.
    """
    tally = 0
    vocab = model.wv.vocab
    for sentence in sentences:
        # Keep only in-vocabulary words that survive frequency subsampling.
        kept = [vocab[w] for w in sentence
                if w in vocab and vocab[w].sample_int > model.random.rand() * 2**32]
        for center_pos, center in enumerate(kept):
            shrink = model.random.randint(model.window)  # `b` in the original word2vec code
            window_start = max(0, center_pos - model.window + shrink)
            window = enumerate(kept[window_start:(center_pos + model.window + 1 - shrink)], window_start)
            context_indices = [w2.index for pos2, w2 in window if (w2 is not None and pos2 != center_pos)]
            # Sum (optionally average) the context word vectors into the projection layer.
            l1 = np_sum(model.wv.syn0[context_indices], axis=0)  # 1 x vector_size
            if context_indices and model.cbow_mean:
                l1 /= len(context_indices)
            train_cbow_pair(model, center, context_indices, l1, alpha, compute_loss=compute_loss)
        tally += len(kept)
    return tally
def score_sentence_sg(model, sentence, work=None):
    """
    Obtain likelihood score for a single sentence in a fitted skip-gram representation.

    The sentence is a list of Vocab objects (or None, when the corresponding
    word is not in the vocabulary). Called internally from `Word2Vec.score()`.

    This is the non-optimized, Python version. If you have cython installed, gensim
    will use the optimized version from word2vec_inner instead.
    """
    log_prob = 0.0
    if model.negative:
        raise RuntimeError("scoring is only available for HS=True")

    word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab]
    for pos, word in enumerate(word_vocabs):
        if word is None:
            continue  # OOV word in the input sentence => skip
        # score each word in the window around `word`
        window_start = max(0, pos - model.window)
        for pos2, word2 in enumerate(word_vocabs[window_start: pos + model.window + 1], window_start):
            if word2 is None or pos2 == pos:
                continue  # skip OOV words and the center word itself
            log_prob += score_sg_pair(model, word, word2)

    return log_prob
def score_sentence_cbow(model, sentence, work=None, neu1=None):
    """
    Obtain likelihood score for a single sentence in a fitted CBOW representation.

    The sentence is a list of Vocab objects (or None, where the corresponding
    word is not in the vocabulary). Called internally from `Word2Vec.score()`.

    This is the non-optimized, Python version. If you have cython installed, gensim
    will use the optimized version from word2vec_inner instead.
    """
    log_prob = 0.0
    if model.negative:
        raise RuntimeError("scoring is only available for HS=True")

    word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab]
    for pos, word in enumerate(word_vocabs):
        if word is None:
            continue  # OOV word in the input sentence => skip
        window_start = max(0, pos - model.window)
        window = enumerate(word_vocabs[window_start:(pos + model.window + 1)], window_start)
        context_indices = [w2.index for pos2, w2 in window if (w2 is not None and pos2 != pos)]
        # Sum (optionally average) the context vectors into the projection layer.
        l1 = np_sum(model.wv.syn0[context_indices], axis=0)  # 1 x layer1_size
        if context_indices and model.cbow_mean:
            l1 /= len(context_indices)
        log_prob += score_cbow_pair(model, word, l1)

    return log_prob
def train_sg_pair(model, word, context_index, alpha, learn_vectors=True, learn_hidden=True,
                  context_vectors=None, context_locks=None, compute_loss=False, is_ft=False):
    """Train a single skip-gram (predicted word, context word) pair.

    Updates the model's input (projection) vectors and, optionally, its
    hidden->output weights, using hierarchical softmax and/or negative
    sampling depending on `model.hs` / `model.negative`.

    Parameters
    ----------
    model : Word2Vec
        Model whose weights are updated in place.
    word : str
        The target (predicted) word; silently skipped when not in the vocab.
    context_index : int or list of int
        Index of the context word. In fastText mode (`is_ft`) a list whose
        first entry is the vocab index and remaining entries are ngram indices.
    alpha : float
        Learning rate.
    learn_vectors : bool
        Whether to update the input (projection) vectors.
    learn_hidden : bool
        Whether to update the hidden->output weights.
    context_vectors : numpy.ndarray, optional
        Override for the input vector matrix; defaults to the model's own.
    context_locks : numpy.ndarray, optional
        Override for the per-vector learning-rate lock factors.
    compute_loss : bool
        If True, accumulate the training loss into `model.running_training_loss`.
    is_ft : bool
        FastText mode: the input is a word vector averaged with ngram vectors.

    Returns
    -------
    numpy.ndarray or None
        The accumulated input-layer error `neu1e`, or None when `word` is OOV.
    """
    if context_vectors is None:
        if is_ft:
            context_vectors_vocab = model.wv.syn0_vocab
            context_vectors_ngrams = model.wv.syn0_ngrams
        else:
            context_vectors = model.wv.syn0
    if context_locks is None:
        if is_ft:
            context_locks_vocab = model.syn0_vocab_lockf
            context_locks_ngrams = model.syn0_ngrams_lockf
        else:
            context_locks = model.syn0_lockf

    if word not in model.wv.vocab:
        return
    predict_word = model.wv.vocab[word]  # target word (NN output)

    if is_ft:
        # fastText: input is the average of the word vector and its ngram vectors.
        l1_vocab = context_vectors_vocab[context_index[0]]
        l1_ngrams = np_sum(context_vectors_ngrams[context_index[1:]], axis=0)
        if context_index:
            l1 = np_sum([l1_vocab, l1_ngrams], axis=0) / len(context_index)
    else:
        l1 = context_vectors[context_index]  # input word (NN input/projection layer)
        lock_factor = context_locks[context_index]

    neu1e = zeros(l1.shape)

    if model.hs:
        # work on the entire tree at once, to push as much work into numpy's C routines as possible (performance)
        l2a = deepcopy(model.syn1[predict_word.point])  # 2d matrix, codelen x layer1_size
        prod_term = dot(l1, l2a.T)
        fa = expit(prod_term)  # propagate hidden -> output
        ga = (1 - predict_word.code - fa) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1[predict_word.point] += outer(ga, l1)  # learn hidden -> output
        neu1e += dot(ga, l2a)  # save error

        # loss component corresponding to hierarchical softmax
        if compute_loss:
            sgn = (-1.0)**predict_word.code  # `ch` function, 0 -> 1, 1 -> -1
            lprob = -log(expit(-sgn * prod_term))
            model.running_training_loss += sum(lprob)

    if model.negative:
        # use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
        word_indices = [predict_word.index]
        while len(word_indices) < model.negative + 1:
            # sample negatives from the unigram noise distribution (cum_table)
            w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
            if w != predict_word.index:
                word_indices.append(w)
        l2b = model.syn1neg[word_indices]  # 2d matrix, k+1 x layer1_size
        prod_term = dot(l1, l2b.T)
        fb = expit(prod_term)  # propagate hidden -> output
        gb = (model.neg_labels - fb) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1neg[word_indices] += outer(gb, l1)  # learn hidden -> output
        neu1e += dot(gb, l2b)  # save error

        # loss component corresponding to negative sampling
        if compute_loss:
            model.running_training_loss -= sum(log(expit(-1 * prod_term[1:])))  # for the sampled words
            model.running_training_loss -= log(expit(prod_term[0]))  # for the output word

    if learn_vectors:
        if is_ft:
            model.wv.syn0_vocab[context_index[0]] += neu1e * context_locks_vocab[context_index[0]]
            for i in context_index[1:]:
                model.wv.syn0_ngrams[i] += neu1e * context_locks_ngrams[i]
        else:
            l1 += neu1e * lock_factor  # learn input -> hidden (mutates model.wv.syn0[word2.index], if that is l1)
    return neu1e
def train_cbow_pair(model, word, input_word_indices, l1, alpha, learn_vectors=True, learn_hidden=True,
                    compute_loss=False, context_vectors=None, context_locks=None, is_ft=False):
    """Perform one CBOW training step: predict `word` from the pre-combined context
    projection `l1`, and backpropagate the error into the hidden and/or input layers.

    `alpha` is the learning rate; `learn_vectors` / `learn_hidden` toggle updates of
    the input (context) vectors and the output-layer weights respectively.
    `compute_loss` accumulates into `model.running_training_loss`.  When `is_ft` is
    True the FastText layout is used, where `input_word_indices` is a pair of
    (vocab indices, ngram indices) and vocab/ngram matrices are updated separately.

    Returns the accumulated error vector `neu1e` (same shape as `l1`).
    NOTE(review): assumes `l1` is a 1-D float array of layer1_size -- matches the
    batch-level callers in this file; confirm for any external caller.
    """
    # Resolve the input-layer matrices/lock factors to operate on; FastText keeps
    # separate vocab and ngram matrices, plain word2vec a single syn0.
    if context_vectors is None:
        if is_ft:
            context_vectors_vocab = model.wv.syn0_vocab
            context_vectors_ngrams = model.wv.syn0_ngrams
        else:
            context_vectors = model.wv.syn0
    if context_locks is None:
        if is_ft:
            context_locks_vocab = model.syn0_vocab_lockf
            context_locks_ngrams = model.syn0_ngrams_lockf
        else:
            context_locks = model.syn0_lockf
    neu1e = zeros(l1.shape)  # accumulated error to push back into the input layer
    if model.hs:
        # hierarchical softmax: update every inner node on the word's Huffman path
        l2a = model.syn1[word.point]  # 2d matrix, codelen x layer1_size
        prod_term = dot(l1, l2a.T)
        fa = expit(prod_term)  # propagate hidden -> output
        ga = (1. - word.code - fa) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1[word.point] += outer(ga, l1)  # learn hidden -> output
        neu1e += dot(ga, l2a)  # save error
        # loss component corresponding to hierarchical softmax
        if compute_loss:
            sgn = (-1.0)**word.code  # ch function, 0-> 1, 1 -> -1
            model.running_training_loss += sum(-log(expit(-sgn * prod_term)))
    if model.negative:
        # use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
        word_indices = [word.index]
        while len(word_indices) < model.negative + 1:
            # draw negatives from the unigram^0.75 distribution encoded in cum_table
            w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
            if w != word.index:
                word_indices.append(w)
        l2b = model.syn1neg[word_indices]  # 2d matrix, k+1 x layer1_size
        prod_term = dot(l1, l2b.T)
        fb = expit(prod_term)  # propagate hidden -> output
        gb = (model.neg_labels - fb) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1neg[word_indices] += outer(gb, l1)  # learn hidden -> output
        neu1e += dot(gb, l2b)  # save error
        # loss component corresponding to negative sampling
        if compute_loss:
            model.running_training_loss -= sum(log(expit(-1 * prod_term[1:])))  # for the sampled words
            model.running_training_loss -= log(expit(prod_term[0]))  # for the output word
    if learn_vectors:
        # learn input -> hidden, here for all words in the window separately
        if is_ft:
            # with sum (not mean) combination, spread the error over all contributors
            if not model.cbow_mean and input_word_indices:
                neu1e /= (len(input_word_indices[0]) + len(input_word_indices[1]))
            for i in input_word_indices[0]:
                context_vectors_vocab[i] += neu1e * context_locks_vocab[i]
            for i in input_word_indices[1]:
                context_vectors_ngrams[i] += neu1e * context_locks_ngrams[i]
        else:
            if not model.cbow_mean and input_word_indices:
                neu1e /= len(input_word_indices)
            for i in input_word_indices:
                context_vectors[i] += neu1e * context_locks[i]
    return neu1e
def score_sg_pair(model, word, word2):
    """Return the hierarchical-softmax log-probability contribution of predicting
    `word` from `word2`'s input vector. Only valid for models trained with hs=1.
    """
    input_vector = model.wv.syn0[word2.index]
    # copy the inner-node vectors so scoring never mutates the model
    node_vectors = deepcopy(model.syn1[word.point])  # 2d matrix, codelen x layer1_size
    signs = (-1.0) ** word.code  # `ch` function: code 0 -> +1, code 1 -> -1
    # log sigmoid(sign * <l1, node>) computed stably via logaddexp
    log_probs = -logaddexp(0, -signs * dot(input_vector, node_vectors.T))
    return sum(log_probs)
def score_cbow_pair(model, word, l1):
    """Return the hierarchical-softmax log-probability contribution of predicting
    `word` from the combined context projection `l1`. Only valid for hs=1 models.
    """
    node_vectors = model.syn1[word.point]  # 2d matrix, codelen x layer1_size
    signs = (-1.0) ** word.code  # `ch` function: code 0 -> +1, code 1 -> -1
    # log sigmoid(sign * <l1, node>) computed stably via logaddexp
    log_probs = -logaddexp(0, -signs * dot(l1, node_vectors.T))
    return sum(log_probs)
class Word2Vec(SaveLoad):
"""
Class for training, using and evaluating neural networks described in https://code.google.com/p/word2vec/
If you're finished training a model (=no more updates, only querying)
then switch to the :mod:`gensim.models.KeyedVectors` instance in wv
The model can be stored/loaded via its `save()` and `load()` methods, or stored/loaded in a format
compatible with the original word2vec implementation via `wv.save_word2vec_format()`
and `KeyedVectors.load_word2vec_format()`.
"""
def __init__(self, sentences=None, size=100, alpha=0.025, window=5, min_count=5,
             max_vocab_size=None, sample=1e-3, seed=1, workers=3, min_alpha=0.0001,
             sg=0, hs=0, negative=5, cbow_mean=1, hashfxn=hash, iter=5, null_word=0,
             trim_rule=None, sorted_vocab=1, batch_words=MAX_WORDS_IN_BATCH, compute_loss=False):
    """
    Initialize the model from an iterable of `sentences`. Each sentence is a
    list of words (unicode strings) that will be used for training.
    The `sentences` iterable can be simply a list, but for larger corpora,
    consider an iterable that streams the sentences directly from disk/network.
    See :class:`BrownCorpus`, :class:`Text8Corpus` or :class:`LineSentence` in
    this module for such examples.
    If you don't supply `sentences`, the model is left uninitialized -- use if
    you plan to initialize it in some other way.
    `sg` defines the training algorithm. By default (`sg=0`), CBOW is used.
    Otherwise (`sg=1`), skip-gram is employed.
    `size` is the dimensionality of the feature vectors.
    `window` is the maximum distance between the current and predicted word within a sentence.
    `alpha` is the initial learning rate (will linearly drop to `min_alpha` as training progresses).
    `seed` = for the random number generator. Initial vectors for each
    word are seeded with a hash of the concatenation of word + str(seed).
    Note that for a fully deterministically-reproducible run, you must also limit the model to
    a single worker thread, to eliminate ordering jitter from OS thread scheduling. (In Python
    3, reproducibility between interpreter launches also requires use of the PYTHONHASHSEED
    environment variable to control hash randomization.)
    `min_count` = ignore all words with total frequency lower than this.
    `max_vocab_size` = limit RAM during vocabulary building; if there are more unique
    words than this, then prune the infrequent ones. Every 10 million word types
    need about 1GB of RAM. Set to `None` for no limit (default).
    `sample` = threshold for configuring which higher-frequency words are randomly downsampled;
    default is 1e-3, useful range is (0, 1e-5).
    `workers` = use this many worker threads to train the model (=faster training with multicore machines).
    `hs` = if 1, hierarchical softmax will be used for model training.
    If set to 0 (default), and `negative` is non-zero, negative sampling will be used.
    `negative` = if > 0, negative sampling will be used, the int for negative
    specifies how many "noise words" should be drawn (usually between 5-20).
    Default is 5. If set to 0, no negative sampling is used.
    `cbow_mean` = if 0, use the sum of the context word vectors. If 1 (default), use the mean.
    Only applies when cbow is used.
    `hashfxn` = hash function to use to randomly initialize weights, for increased
    training reproducibility. Default is Python's rudimentary built in hash function.
    `iter` = number of iterations (epochs) over the corpus. Default is 5.
    `trim_rule` = vocabulary trimming rule, specifies whether certain words should remain
    in the vocabulary, be trimmed away, or handled using the default (discard if word count < min_count).
    Can be None (min_count will be used), or a callable that accepts parameters (word, count, min_count) and
    returns either `utils.RULE_DISCARD`, `utils.RULE_KEEP` or `utils.RULE_DEFAULT`.
    Note: The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as part
    of the model.
    `sorted_vocab` = if 1 (default), sort the vocabulary by descending frequency before
    assigning word indexes.
    `batch_words` = target size (in words) for batches of examples passed to worker threads (and
    thus cython routines). Default is 10000. (Larger batches will be passed if individual
    texts are longer than 10000 words, but the standard cython code truncates to that maximum.)
    """
    # instance-level `load` is disabled; loading must go through the class
    self.load = call_on_class_only
    if FAST_VERSION == -1:
        logger.warning('Slow version of %s is being used', __name__)
    else:
        logger.debug('Fast version of %s is being used', __name__)
    self.initialize_word_vectors()
    self.sg = int(sg)
    self.cum_table = None  # for negative sampling; built later by make_cum_table()
    self.vector_size = int(size)
    self.layer1_size = int(size)
    if size % 4 != 0:
        # the optimized C routines vectorize best on multiples of 4
        logger.warning("consider setting layer size to a multiple of 4 for greater performance")
    self.alpha = float(alpha)
    self.min_alpha_yet_reached = float(alpha)  # To warn user if alpha increases
    self.window = int(window)
    self.max_vocab_size = max_vocab_size
    self.seed = seed
    self.random = random.RandomState(seed)
    self.min_count = min_count
    self.sample = sample
    self.workers = int(workers)
    self.min_alpha = float(min_alpha)
    self.hs = hs
    self.negative = negative
    self.cbow_mean = int(cbow_mean)
    self.hashfxn = hashfxn
    self.iter = iter
    self.null_word = null_word
    self.train_count = 0
    self.total_train_time = 0
    self.sorted_vocab = sorted_vocab
    self.batch_words = batch_words
    self.model_trimmed_post_training = False
    self.compute_loss = compute_loss
    self.running_training_loss = 0
    if sentences is not None:
        # generators are rejected because training iterates the corpus multiple times
        if isinstance(sentences, GeneratorType):
            raise TypeError("You can't pass a generator as the sentences argument. Try an iterator.")
        self.build_vocab(sentences, trim_rule=trim_rule)
        self.train(
            sentences, total_examples=self.corpus_count, epochs=self.iter,
            start_alpha=self.alpha, end_alpha=self.min_alpha
        )
    else:
        if trim_rule is not None:
            logger.warning(
                "The rule, if given, is only used to prune vocabulary during build_vocab() "
                "and is not stored as part of the model. Model initialized without sentences. "
                "trim_rule provided, if any, will be ignored."
            )
def initialize_word_vectors(self):
    """Create an empty KeyedVectors container; populated later by build_vocab()/reset_weights()."""
    self.wv = KeyedVectors()
def make_cum_table(self, power=0.75, domain=2**31 - 1):
    """
    Create a cumulative-distribution table using stored vocabulary word counts for
    drawing random words in the negative-sampling training routines.
    To draw a word index, choose a random integer up to the maximum value in the
    table (cum_table[-1]), then finding that integer's sorted insertion point
    (as if by bisect_left or ndarray.searchsorted()). That insertion point is the
    drawn index, coming up in proportion equal to the increment at that slot.
    Called internally from 'build_vocab()'.

    `power` smooths the unigram distribution (0.75 per the word2vec paper);
    `domain` scales the table into uint32 range.
    """
    vocab_size = len(self.wv.index2word)
    self.cum_table = zeros(vocab_size, dtype=uint32)
    # compute sum of all power (Z in paper)
    train_words_pow = 0.0
    for word_index in xrange(vocab_size):
        train_words_pow += self.wv.vocab[self.wv.index2word[word_index]].count**power
    # fill the table with monotonically increasing, normalized cumulative counts
    cumulative = 0.0
    for word_index in xrange(vocab_size):
        cumulative += self.wv.vocab[self.wv.index2word[word_index]].count**power
        self.cum_table[word_index] = round(cumulative / train_words_pow * domain)
    if len(self.cum_table) > 0:
        # last entry must equal the domain or searchsorted sampling would be biased
        assert self.cum_table[-1] == domain
def create_binary_tree(self):
    """
    Create a binary Huffman tree using stored vocabulary word counts. Frequent words
    will have shorter binary codes. Called internally from `build_vocab()`.

    Each vocab entry gets `code` (the bit path from the root) and `point`
    (the indices of the inner nodes along that path) for hierarchical softmax.
    """
    logger.info("constructing a huffman tree from %i words", len(self.wv.vocab))
    # build the huffman tree: repeatedly merge the two least-frequent nodes
    heap = list(itervalues(self.wv.vocab))
    heapq.heapify(heap)
    for i in xrange(len(self.wv.vocab) - 1):
        min1, min2 = heapq.heappop(heap), heapq.heappop(heap)
        # inner nodes get indices >= vocab size to distinguish them from leaves
        heapq.heappush(
            heap, Vocab(count=min1.count + min2.count, index=i + len(self.wv.vocab), left=min1, right=min2)
        )
    # recurse over the tree, assigning a binary code to each vocabulary word
    if heap:
        max_depth, stack = 0, [(heap[0], [], [])]
        while stack:
            node, codes, points = stack.pop()
            if node.index < len(self.wv.vocab):
                # leaf node => store its path from the root
                node.code, node.point = codes, points
                max_depth = max(len(codes), max_depth)
            else:
                # inner node => continue recursion
                points = array(list(points) + [node.index - len(self.wv.vocab)], dtype=uint32)
                stack.append((node.left, array(list(codes) + [0], dtype=uint8), points))
                stack.append((node.right, array(list(codes) + [1], dtype=uint8), points))
    # NOTE(review): if the vocabulary is empty, `max_depth` is never bound and this
    # line raises NameError -- verify callers guarantee a non-empty vocab.
    logger.info("built huffman tree with maximum node depth %i", max_depth)
def build_vocab(self, sentences, keep_raw_vocab=False, trim_rule=None, progress_per=10000, update=False):
    """
    Build vocabulary from a sequence of sentences (can be a once-only generator stream).
    Each sentence must be a list of unicode strings.

    The three phases must run in this order: scan (count raw words), scale
    (apply min_count/sample trimming), finalize (build HS tree / NS table / weights).
    Set `update=True` to extend an existing vocabulary instead of replacing it.
    """
    self.scan_vocab(sentences, progress_per=progress_per, trim_rule=trim_rule)  # initial survey
    # trim by min_count & precalculate downsampling
    self.scale_vocab(keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule, update=update)
    self.finalize_vocab(update=update)  # build tables & arrays
def build_vocab_from_freq(self, word_freq, keep_raw_vocab=False, corpus_count=None, trim_rule=None, update=False):
    """
    Build vocabulary from a dictionary of word frequencies.
    Build model vocabulary from a passed dictionary that contains (word,word count).
    Words must be of type unicode strings.
    Parameters
    ----------
    `word_freq` : dict
        Word,Word_Count dictionary.
    `keep_raw_vocab` : bool
        If not true, delete the raw vocabulary after the scaling is done and free up RAM.
    `corpus_count`: int
        Even if no corpus is provided, this argument can set corpus_count explicitly.
    `trim_rule` = vocabulary trimming rule, specifies whether certain words should remain
    in the vocabulary, be trimmed away, or handled using the default (discard if word count < min_count).
    Can be None (min_count will be used), or a callable that accepts parameters (word, count, min_count) and
    returns either `utils.RULE_DISCARD`, `utils.RULE_KEEP` or `utils.RULE_DEFAULT`.
    `update`: bool
        If true, the new provided words in `word_freq` dict will be added to model's vocab.
    Returns
    --------
    None
    Examples
    --------
    >>> from gensim.models.word2vec import Word2Vec
    >>> model= Word2Vec()
    >>> model.build_vocab_from_freq({"Word1": 15, "Word2": 20})
    """
    logger.info("Processing provided word frequencies")
    # Instead of scanning text, this will assign provided word frequencies dictionary(word_freq)
    # to be directly the raw vocab
    raw_vocab = word_freq
    logger.info(
        "collected %i different raw word, with total frequency of %i",
        len(raw_vocab), sum(itervalues(raw_vocab))
    )
    # Since no sentences are provided, this is to control the corpus_count
    self.corpus_count = corpus_count if corpus_count else 0
    self.raw_vocab = raw_vocab
    # trim by min_count & precalculate downsampling
    self.scale_vocab(keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule, update=update)
    self.finalize_vocab(update=update)  # build tables & arrays
def scan_vocab(self, sentences, progress_per=10000, trim_rule=None):
    """Do an initial scan of all words appearing in sentences.

    Populates `self.raw_vocab` (word -> raw count) and `self.corpus_count`
    (number of sentences); returns the total raw word count.  If the vocabulary
    grows past `self.max_vocab_size`, infrequent entries are pruned on the fly.
    """
    logger.info("collecting all words and their counts")
    sentence_no = -1  # stays -1 for an empty corpus, so corpus_count becomes 0
    total_words = 0
    min_reduce = 1  # pruning threshold; raised after each prune pass
    vocab = defaultdict(int)
    checked_string_types = 0  # warn only once about a common misuse (plain strings)
    for sentence_no, sentence in enumerate(sentences):
        if not checked_string_types:
            if isinstance(sentence, string_types):
                logger.warning(
                    "Each 'sentences' item should be a list of words (usually unicode strings). "
                    "First item here is instead plain %s.",
                    type(sentence)
                )
            checked_string_types += 1
        if sentence_no % progress_per == 0:
            logger.info(
                "PROGRESS: at sentence #%i, processed %i words, keeping %i word types",
                sentence_no, total_words, len(vocab)
            )
        for word in sentence:
            vocab[word] += 1
        total_words += len(sentence)
        if self.max_vocab_size and len(vocab) > self.max_vocab_size:
            # RAM cap reached: drop words rarer than min_reduce, then raise the bar
            utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)
            min_reduce += 1
    logger.info(
        "collected %i word types from a corpus of %i raw words and %i sentences",
        len(vocab), total_words, sentence_no + 1
    )
    self.corpus_count = sentence_no + 1
    self.raw_vocab = vocab
    return total_words
def scale_vocab(self, min_count=None, sample=None, dry_run=False,
                keep_raw_vocab=False, trim_rule=None, update=False):
    """
    Apply vocabulary settings for `min_count` (discarding less-frequent words)
    and `sample` (controlling the downsampling of more-frequent words).
    Calling with `dry_run=True` will only simulate the provided settings and
    report the size of the retained vocabulary, effective corpus length, and
    estimated memory requirements. Results are both printed via logging and
    returned as a dict.
    Delete the raw vocabulary after the scaling is done to free up RAM,
    unless `keep_raw_vocab` is set.

    With `update=True`, counts from `self.raw_vocab` are merged into the
    existing vocabulary instead of replacing it.
    Returns a dict with keys 'drop_unique', 'retain_total', 'downsample_unique',
    'downsample_total' and 'memory'.
    """
    min_count = min_count or self.min_count
    sample = sample or self.sample
    drop_total = drop_unique = 0
    if not update:
        logger.info("Loading a fresh vocabulary")
        retain_total, retain_words = 0, []
        # Discard words less-frequent than min_count
        if not dry_run:
            self.wv.index2word = []
            # make stored settings match these applied settings
            self.min_count = min_count
            self.sample = sample
            self.wv.vocab = {}
        for word, v in iteritems(self.raw_vocab):
            if keep_vocab_item(word, v, min_count, trim_rule=trim_rule):
                retain_words.append(word)
                retain_total += v
                if not dry_run:
                    self.wv.vocab[word] = Vocab(count=v, index=len(self.wv.index2word))
                    self.wv.index2word.append(word)
            else:
                drop_unique += 1
                drop_total += v
        original_unique_total = len(retain_words) + drop_unique
        retain_unique_pct = len(retain_words) * 100 / max(original_unique_total, 1)
        logger.info(
            "min_count=%d retains %i unique words (%i%% of original %i, drops %i)",
            min_count, len(retain_words), retain_unique_pct, original_unique_total, drop_unique
        )
        original_total = retain_total + drop_total
        retain_pct = retain_total * 100 / max(original_total, 1)
        logger.info(
            "min_count=%d leaves %i word corpus (%i%% of original %i, drops %i)",
            min_count, retain_total, retain_pct, original_total, drop_total
        )
    else:
        logger.info("Updating model with new vocabulary")
        new_total = pre_exist_total = 0
        # BUGFIX: the original `new_words = pre_exist_words = []` bound BOTH names
        # to the SAME list, so every append went to both, the unique/percentage
        # stats were doubled, and `retain_words` contained each word twice
        # (double-processing them in the downsampling pass below).
        new_words, pre_exist_words = [], []
        for word, v in iteritems(self.raw_vocab):
            if keep_vocab_item(word, v, min_count, trim_rule=trim_rule):
                if word in self.wv.vocab:
                    pre_exist_words.append(word)
                    pre_exist_total += v
                    if not dry_run:
                        self.wv.vocab[word].count += v
                else:
                    new_words.append(word)
                    new_total += v
                    if not dry_run:
                        self.wv.vocab[word] = Vocab(count=v, index=len(self.wv.index2word))
                        self.wv.index2word.append(word)
            else:
                drop_unique += 1
                drop_total += v
        original_unique_total = len(pre_exist_words) + len(new_words) + drop_unique
        pre_exist_unique_pct = len(pre_exist_words) * 100 / max(original_unique_total, 1)
        new_unique_pct = len(new_words) * 100 / max(original_unique_total, 1)
        logger.info(
            "New added %i unique words (%i%% of original %i) "
            "and increased the count of %i pre-existing words (%i%% of original %i)",
            len(new_words), new_unique_pct, original_unique_total, len(pre_exist_words),
            pre_exist_unique_pct, original_unique_total
        )
        retain_words = new_words + pre_exist_words
        retain_total = new_total + pre_exist_total
    # Precalculate each vocabulary item's threshold for sampling
    if not sample:
        # no words downsampled
        threshold_count = retain_total
    elif sample < 1.0:
        # traditional meaning: set parameter as proportion of total
        threshold_count = sample * retain_total
    else:
        # new shorthand: sample >= 1 means downsample all words with higher count than sample
        threshold_count = int(sample * (3 + sqrt(5)) / 2)
    downsample_total, downsample_unique = 0, 0
    for w in retain_words:
        v = self.raw_vocab[w]
        word_probability = (sqrt(v / threshold_count) + 1) * (threshold_count / v)
        if word_probability < 1.0:
            downsample_unique += 1
            downsample_total += word_probability * v
        else:
            word_probability = 1.0
            downsample_total += v
        if not dry_run:
            # store as a uint32-scaled probability for fast comparison during training
            self.wv.vocab[w].sample_int = int(round(word_probability * 2**32))
    if not dry_run and not keep_raw_vocab:
        logger.info("deleting the raw counts dictionary of %i items", len(self.raw_vocab))
        self.raw_vocab = defaultdict(int)
    logger.info("sample=%g downsamples %i most-common words", sample, downsample_unique)
    logger.info(
        "downsampling leaves estimated %i word corpus (%.1f%% of prior %i)",
        downsample_total, downsample_total * 100.0 / max(retain_total, 1), retain_total
    )
    # return from each step: words-affected, resulting-corpus-size, extra memory estimates
    report_values = {
        'drop_unique': drop_unique, 'retain_total': retain_total, 'downsample_unique': downsample_unique,
        'downsample_total': int(downsample_total), 'memory': self.estimate_memory(vocab_size=len(retain_words))
    }
    return report_values
def finalize_vocab(self, update=False):
    """Build tables and model weights based on final vocabulary settings.

    Runs scale_vocab() if it was skipped, optionally sorts the vocab, then
    builds whatever the chosen training mode needs (Huffman tree for hs,
    cumulative table for negative sampling) before allocating weights.
    """
    if not self.wv.index2word:
        # scale_vocab() was never called explicitly; apply default trimming now
        self.scale_vocab()
    if self.sorted_vocab and not update:
        self.sort_vocab()
    if self.hs:
        # add info about each word's Huffman encoding
        self.create_binary_tree()
    if self.negative:
        # build the table for drawing random words (for negative sampling)
        self.make_cum_table()
    if self.null_word:
        # create null pseudo-word for padding when using concatenative L1 (run-of-words)
        # this word is only ever input – never predicted – so count, huffman-point, etc doesn't matter
        word, v = '\0', Vocab(count=1, sample_int=0)
        v.index = len(self.wv.vocab)
        self.wv.index2word.append(word)
        self.wv.vocab[word] = v
    # set initial input/projection and hidden weights
    if not update:
        self.reset_weights()
    else:
        self.update_weights()
def sort_vocab(self):
"""Sort the vocabulary so the most frequent words have the lowest indexes."""
if len(self.wv.syn0):
raise RuntimeError("cannot sort vocabulary after model weights already initialized.")
self.wv.index2word.sort(key=lambda word: self.wv.vocab[word].count, reverse=True)
for i, word in enumerate(self.wv.index2word):
self.wv.vocab[word].index = i
def reset_from(self, other_model):
"""
Borrow shareable pre-built structures (like vocab) from the other_model. Useful
if testing multiple models in parallel on the same corpus.
"""
self.wv.vocab = other_model.wv.vocab
self.wv.index2word = other_model.wv.index2word
self.cum_table = other_model.cum_table
self.corpus_count = other_model.corpus_count
self.reset_weights()
def _do_train_job(self, sentences, alpha, inits):
    """
    Train a single batch of sentences. Return 2-tuple `(effective word count after
    ignoring unknown words and sentence length trimming, total word count)`.
    """
    work, neu1 = inits
    # dispatch to the configured algorithm: skip-gram or CBOW
    if self.sg:
        tally = train_batch_sg(self, sentences, alpha, work, self.compute_loss)
    else:
        tally = train_batch_cbow(self, sentences, alpha, work, neu1, self.compute_loss)
    return tally, self._raw_word_count(sentences)
def _raw_word_count(self, job):
"""Return the number of words in a given job."""
return sum(len(sentence) for sentence in job)
def train(self, sentences, total_examples=None, total_words=None,
          epochs=None, start_alpha=None, end_alpha=None, word_count=0,
          queue_factor=2, report_delay=1.0, compute_loss=None):
    """
    Update the model's neural weights from a sequence of sentences (can be a once-only generator stream).
    For Word2Vec, each sentence must be a list of unicode strings. (Subclasses may accept other examples.)
    To support linear learning-rate decay from (initial) alpha to min_alpha, and accurate
    progres-percentage logging, either total_examples (count of sentences) or total_words (count of
    raw words in sentences) MUST be provided. (If the corpus is the same as was provided to
    `build_vocab()`, the count of examples in that corpus will be available in the model's
    `corpus_count` property.)
    To avoid common mistakes around the model's ability to do multiple training passes itself, an
    explicit `epochs` argument MUST be provided. In the common and recommended case, where `train()`
    is only called once, the model's cached `iter` value should be supplied as `epochs` value.

    `word_count` offsets the raw-word progress counter; `queue_factor` sizes the
    job/progress queues relative to worker count; `report_delay` is the minimum
    number of seconds between progress log lines. Returns the number of
    effective (in-vocab, sampled) words trained on.
    """
    if self.model_trimmed_post_training:
        raise RuntimeError("Parameters for training were discarded using model_trimmed_post_training method")
    if FAST_VERSION < 0:
        warnings.warn(
            "C extension not loaded for Word2Vec, training will be slow. "
            "Install a C compiler and reinstall gensim for fast training."
        )
    self.neg_labels = []
    if self.negative > 0:
        # precompute negative labels optimization for pure-python training
        self.neg_labels = zeros(self.negative + 1)
        self.neg_labels[0] = 1.
    if compute_loss:
        self.compute_loss = compute_loss
    self.running_training_loss = 0
    logger.info(
        "training model with %i workers on %i vocabulary and %i features, "
        "using sg=%s hs=%s sample=%s negative=%s window=%s",
        self.workers, len(self.wv.vocab), self.layer1_size, self.sg,
        self.hs, self.sample, self.negative, self.window
    )
    if not self.wv.vocab:
        raise RuntimeError("you must first build vocabulary before training the model")
    if not len(self.wv.syn0):
        raise RuntimeError("you must first finalize vocabulary before training the model")
    if not hasattr(self, 'corpus_count'):
        raise ValueError(
            "The number of sentences in the training corpus is missing. "
            "Did you load the model via KeyedVectors.load_word2vec_format?"
            "Models loaded via load_word2vec_format don't support further training. "
            "Instead start with a blank model, scan_vocab on the new corpus, "
            "intersect_word2vec_format with the old model, then train."
        )
    if total_words is None and total_examples is None:
        raise ValueError(
            "You must specify either total_examples or total_words, for proper alpha and progress calculations. "
            "The usual value is total_examples=model.corpus_count."
        )
    if epochs is None:
        raise ValueError("You must specify an explict epochs count. The usual value is epochs=model.iter.")
    start_alpha = start_alpha or self.alpha
    end_alpha = end_alpha or self.min_alpha
    job_tally = 0
    if epochs > 1:
        # multiple passes are implemented by repeating the corpus; totals scale accordingly
        sentences = utils.RepeatCorpusNTimes(sentences, epochs)
        total_words = total_words and total_words * epochs
        total_examples = total_examples and total_examples * epochs

    def worker_loop():
        """Train the model, lifting lists of sentences from the job_queue."""
        work = matutils.zeros_aligned(self.layer1_size, dtype=REAL)  # per-thread private work memory
        neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
        jobs_processed = 0
        while True:
            job = job_queue.get()
            if job is None:
                progress_queue.put(None)
                break  # no more jobs => quit this worker
            sentences, alpha = job
            tally, raw_tally = self._do_train_job(sentences, alpha, (work, neu1))
            progress_queue.put((len(sentences), tally, raw_tally))  # report back progress
            jobs_processed += 1
        logger.debug("worker exiting, processed %i jobs", jobs_processed)

    def job_producer():
        """Fill jobs queue using the input `sentences` iterator."""
        job_batch, batch_size = [], 0
        pushed_words, pushed_examples = 0, 0
        next_alpha = start_alpha
        if next_alpha > self.min_alpha_yet_reached:
            logger.warning("Effective 'alpha' higher than previous training cycles")
        self.min_alpha_yet_reached = next_alpha
        job_no = 0
        for sent_idx, sentence in enumerate(sentences):
            sentence_length = self._raw_word_count([sentence])
            # can we fit this sentence into the existing job batch?
            if batch_size + sentence_length <= self.batch_words:
                # yes => add it to the current job
                job_batch.append(sentence)
                batch_size += sentence_length
            else:
                # no => submit the existing job
                logger.debug(
                    "queueing job #%i (%i words, %i sentences) at alpha %.05f",
                    job_no, batch_size, len(job_batch), next_alpha
                )
                job_no += 1
                job_queue.put((job_batch, next_alpha))
                # update the learning rate for the next job
                if end_alpha < next_alpha:
                    if total_examples:
                        # examples-based decay
                        pushed_examples += len(job_batch)
                        progress = 1.0 * pushed_examples / total_examples
                    else:
                        # words-based decay
                        pushed_words += self._raw_word_count(job_batch)
                        progress = 1.0 * pushed_words / total_words
                    next_alpha = start_alpha - (start_alpha - end_alpha) * progress
                    next_alpha = max(end_alpha, next_alpha)
                # add the sentence that didn't fit as the first item of a new job
                job_batch, batch_size = [sentence], sentence_length
        # add the last job too (may be significantly smaller than batch_words)
        if job_batch:
            logger.debug(
                "queueing job #%i (%i words, %i sentences) at alpha %.05f",
                job_no, batch_size, len(job_batch), next_alpha
            )
            job_no += 1
            job_queue.put((job_batch, next_alpha))
        if job_no == 0 and self.train_count == 0:
            logger.warning(
                "train() called with an empty iterator (if not intended, "
                "be sure to provide a corpus that offers restartable iteration = an iterable)."
            )
        # give the workers heads up that they can finish -- no more work!
        for _ in xrange(self.workers):
            job_queue.put(None)
        logger.debug("job loop exiting, total %i jobs", job_no)

    # buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
    job_queue = Queue(maxsize=queue_factor * self.workers)
    progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)
    workers = [threading.Thread(target=worker_loop) for _ in xrange(self.workers)]
    unfinished_worker_count = len(workers)
    workers.append(threading.Thread(target=job_producer))
    for thread in workers:
        thread.daemon = True  # make interrupting the process with ctrl+c easier
        thread.start()
    example_count, trained_word_count, raw_word_count = 0, 0, word_count
    start, next_report = default_timer() - 0.00001, 1.0
    # the main thread consumes progress reports until every worker signals completion (None)
    while unfinished_worker_count > 0:
        report = progress_queue.get()  # blocks if workers too slow
        if report is None:  # a thread reporting that it finished
            unfinished_worker_count -= 1
            logger.info("worker thread finished; awaiting finish of %i more threads", unfinished_worker_count)
            continue
        examples, trained_words, raw_words = report
        job_tally += 1
        # update progress stats
        example_count += examples
        trained_word_count += trained_words  # only words in vocab & sampled
        raw_word_count += raw_words
        # log progress once every report_delay seconds
        elapsed = default_timer() - start
        if elapsed >= next_report:
            if total_examples:
                # examples-based progress %
                logger.info(
                    "PROGRESS: at %.2f%% examples, %.0f words/s, in_qsize %i, out_qsize %i",
                    100.0 * example_count / total_examples, trained_word_count / elapsed,
                    utils.qsize(job_queue), utils.qsize(progress_queue)
                )
            else:
                # words-based progress %
                logger.info(
                    "PROGRESS: at %.2f%% words, %.0f words/s, in_qsize %i, out_qsize %i",
                    100.0 * raw_word_count / total_words, trained_word_count / elapsed,
                    utils.qsize(job_queue), utils.qsize(progress_queue)
                )
            next_report = elapsed + report_delay
    # all done; report the final stats
    elapsed = default_timer() - start
    logger.info(
        "training on %i raw words (%i effective words) took %.1fs, %.0f effective words/s",
        raw_word_count, trained_word_count, elapsed, trained_word_count / elapsed
    )
    if job_tally < 10 * self.workers:
        logger.warning(
            "under 10 jobs per worker: consider setting a smaller `batch_words' for smoother alpha decay"
        )
    # check that the input corpus hasn't changed during iteration
    if total_examples and total_examples != example_count:
        logger.warning(
            "supplied example count (%i) did not equal expected count (%i)", example_count, total_examples
        )
    if total_words and total_words != raw_word_count:
        logger.warning(
            "supplied raw word count (%i) did not equal expected count (%i)", raw_word_count, total_words
        )
    self.train_count += 1  # number of times train() has been called
    self.total_train_time += elapsed
    self.clear_sims()
    return trained_word_count
# basics copied from the train() function
    def score(self, sentences, total_sentences=int(1e6), chunksize=100, queue_factor=2, report_delay=1):
        """
        Score the log probability for a sequence of sentences (can be a once-only generator stream).
        Each sentence must be a list of unicode strings.
        This does not change the fitted model in any way (see Word2Vec.train() for that).
        We have currently only implemented score for the hierarchical softmax scheme,
        so you need to have run word2vec with hs=1 and negative=0 for this to work.
        Note that you should specify total_sentences; we'll run into problems if you ask to
        score more than this number of sentences but it is inefficient to set the value too high.
        See the article by [#taddy]_ and the gensim demo at [#deepir]_ for examples of
        how to use such scores in document classification.

        Parameters
        ----------
        sentences : iterable of list of str
            Sentences to score; may be a once-only generator.
        total_sentences : int
            Upper bound on the number of sentences scored (also sizes the result buffer).
        chunksize : int
            Number of sentences per job handed to a worker thread.
        queue_factor : int
            Multiplier (times `self.workers`) for the job/progress queue sizes.
        report_delay : float
            Minimum seconds between progress log lines.

        Returns
        -------
        numpy.ndarray
            Log-probability score per sentence, in input order, truncated to the
            number of sentences actually scored.

        .. [#taddy] Taddy, Matt. Document Classification by Inversion of Distributed Language Representations,
           in Proceedings of the 2015 Conference of the Association of Computational Linguistics.
        .. [#deepir] https://github.com/piskvorky/gensim/blob/develop/docs/notebooks/deepir.ipynb
        """
        if FAST_VERSION < 0:
            warnings.warn(
                "C extension compilation failed, scoring will be slow. "
                "Install a C compiler and reinstall gensim for fastness."
            )
        logger.info(
            "scoring sentences with %i workers on %i vocabulary and %i features, "
            "using sg=%s hs=%s sample=%s and negative=%s",
            self.workers, len(self.wv.vocab), self.layer1_size, self.sg, self.hs, self.sample, self.negative
        )
        if not self.wv.vocab:
            raise RuntimeError("you must first build vocabulary before scoring new data")
        if not self.hs:
            raise RuntimeError(
                "We have currently only implemented score for the hierarchical softmax scheme, "
                "so you need to have run word2vec with hs=1 and negative=0 for this to work."
            )

        def worker_loop():
            """Compute log probability for each sentence, lifting lists of sentences from the jobs queue."""
            work = zeros(1, dtype=REAL)  # for sg hs, we actually only need one memory loc (running sum)
            neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
            while True:
                job = job_queue.get()
                if job is None:  # signal to finish
                    break
                ns = 0
                for sentence_id, sentence in job:
                    if sentence_id >= total_sentences:
                        break
                    if self.sg:
                        score = score_sentence_sg(self, sentence, work)
                    else:
                        score = score_sentence_cbow(self, sentence, work, neu1)
                    sentence_scores[sentence_id] = score
                    ns += 1
                progress_queue.put(ns)  # report progress

        start, next_report = default_timer(), 1.0
        # buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
        job_queue = Queue(maxsize=queue_factor * self.workers)
        progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)
        # NOTE: workers start before `sentence_scores` is bound below; they only touch it
        # after receiving a job, and jobs are only enqueued later, so this is safe.
        workers = [threading.Thread(target=worker_loop) for _ in xrange(self.workers)]
        for thread in workers:
            thread.daemon = True  # make interrupting the process with ctrl+c easier
            thread.start()
        sentence_count = 0
        sentence_scores = matutils.zeros_aligned(total_sentences, dtype=REAL)
        push_done = False
        done_jobs = 0
        jobs_source = enumerate(utils.grouper(enumerate(sentences), chunksize))
        # fill jobs queue with (id, sentence) job items
        while True:
            try:
                job_no, items = next(jobs_source)
                if (job_no - 1) * chunksize > total_sentences:
                    logger.warning(
                        "terminating after %i sentences (set higher total_sentences if you want more).",
                        total_sentences
                    )
                    job_no -= 1
                    raise StopIteration()
                logger.debug("putting job #%i in the queue", job_no)
                job_queue.put(items)
            except StopIteration:
                logger.info("reached end of input; waiting to finish %i outstanding jobs", job_no - done_jobs + 1)
                for _ in xrange(self.workers):
                    job_queue.put(None)  # give the workers heads up that they can finish -- no more work!
                push_done = True
            try:
                # drain progress reports; get() only blocks once all jobs are pushed
                while done_jobs < (job_no + 1) or not push_done:
                    ns = progress_queue.get(push_done)  # only block after all jobs pushed
                    sentence_count += ns
                    done_jobs += 1
                    elapsed = default_timer() - start
                    if elapsed >= next_report:
                        logger.info(
                            "PROGRESS: at %.2f%% sentences, %.0f sentences/s",
                            100.0 * sentence_count, sentence_count / elapsed
                        )
                        next_report = elapsed + report_delay  # don't flood log, wait report_delay seconds
                else:
                    # loop ended by job count; really done
                    break
            except Empty:
                pass  # already out of loop; continue to next push
        elapsed = default_timer() - start
        self.clear_sims()
        logger.info(
            "scoring %i sentences took %.1fs, %.0f sentences/s",
            sentence_count, elapsed, sentence_count / elapsed
        )
        return sentence_scores[:sentence_count]
def clear_sims(self):
"""
Removes all L2-normalized vectors for words from the model.
You will have to recompute them using init_sims method.
"""
self.wv.syn0norm = None
def update_weights(self):
"""
Copy all the existing weights, and reset the weights for the newly
added vocabulary.
"""
logger.info("updating layer weights")
gained_vocab = len(self.wv.vocab) - len(self.wv.syn0)
newsyn0 = empty((gained_vocab, self.vector_size), dtype=REAL)
# randomize the remaining words
for i in xrange(len(self.wv.syn0), len(self.wv.vocab)):
# construct deterministic seed from word AND seed argument
newsyn0[i - len(self.wv.syn0)] = self.seeded_vector(self.wv.index2word[i] + str(self.seed))
# Raise an error if an online update is run before initial training on a corpus
if not len(self.wv.syn0):
raise RuntimeError(
"You cannot do an online vocabulary-update of a model which has no prior vocabulary. "
"First build the vocabulary of your model with a corpus before doing an online update."
)
self.wv.syn0 = vstack([self.wv.syn0, newsyn0])
if self.hs:
self.syn1 = vstack([self.syn1, zeros((gained_vocab, self.layer1_size), dtype=REAL)])
if self.negative:
self.syn1neg = vstack([self.syn1neg, zeros((gained_vocab, self.layer1_size), dtype=REAL)])
self.wv.syn0norm = None
# do not suppress learning for already learned words
self.syn0_lockf = ones(len(self.wv.vocab), dtype=REAL) # zeros suppress learning
def reset_weights(self):
"""Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary."""
logger.info("resetting layer weights")
self.wv.syn0 = empty((len(self.wv.vocab), self.vector_size), dtype=REAL)
# randomize weights vector by vector, rather than materializing a huge random matrix in RAM at once
for i in xrange(len(self.wv.vocab)):
# construct deterministic seed from word AND seed argument
self.wv.syn0[i] = self.seeded_vector(self.wv.index2word[i] + str(self.seed))
if self.hs:
self.syn1 = zeros((len(self.wv.vocab), self.layer1_size), dtype=REAL)
if self.negative:
self.syn1neg = zeros((len(self.wv.vocab), self.layer1_size), dtype=REAL)
self.wv.syn0norm = None
self.syn0_lockf = ones(len(self.wv.vocab), dtype=REAL) # zeros suppress learning
def seeded_vector(self, seed_string):
"""Create one 'random' vector (but deterministic by seed_string)"""
# Note: built-in hash() may vary by Python version or even (in Py3.x) per launch
once = random.RandomState(self.hashfxn(seed_string) & 0xffffffff)
return (once.rand(self.vector_size) - 0.5) / self.vector_size
    def intersect_word2vec_format(self, fname, lockf=0.0, binary=False, encoding='utf8', unicode_errors='strict'):
        """
        Merge the input-hidden weight matrix from the original C word2vec-tool format
        given, where it intersects with the current vocabulary. (No words are added to the
        existing vocabulary, but intersecting words adopt the file's weights, and
        non-intersecting words are left alone.)

        Parameters
        ----------
        fname : str
            Path to the word2vec-format file.
        lockf : float
            Lock-factor value to be set for any imported word-vectors; the
            default value of 0.0 prevents further updating of the vector during subsequent
            training. Use 1.0 to allow further training updates of merged vectors.
        binary : bool
            Whether the data is in binary word2vec format.
        encoding, unicode_errors : str
            Passed to the unicode decoder for word strings.
        """
        overlap_count = 0
        logger.info("loading projection weights from %s", fname)
        with utils.smart_open(fname) as fin:
            header = utils.to_unicode(fin.readline(), encoding=encoding)
            vocab_size, vector_size = (int(x) for x in header.split())  # throws for invalid file format
            if not vector_size == self.vector_size:
                raise ValueError("incompatible vector size %d in file %s" % (vector_size, fname))
                # TOCONSIDER: maybe mismatched vectors still useful enough to merge (truncating/padding)?
            if binary:
                binary_len = dtype(REAL).itemsize * vector_size
                for _ in xrange(vocab_size):
                    # mixed text and binary: read text first, then binary
                    word = []
                    while True:
                        ch = fin.read(1)
                        if ch == b' ':
                            break
                        if ch != b'\n':  # ignore newlines in front of words (some binary files have)
                            word.append(ch)
                    word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)
                    weights = fromstring(fin.read(binary_len), dtype=REAL)
                    if word in self.wv.vocab:
                        overlap_count += 1
                        self.wv.syn0[self.wv.vocab[word].index] = weights
                        self.syn0_lockf[self.wv.vocab[word].index] = lockf  # lock-factor: 0.0 stops further changes
            else:
                # text format: one "word v1 v2 ... vN" line per vocabulary entry
                for line_no, line in enumerate(fin):
                    parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
                    if len(parts) != vector_size + 1:
                        raise ValueError("invalid vector on line %s (is this really the text format?)" % line_no)
                    word, weights = parts[0], [REAL(x) for x in parts[1:]]
                    if word in self.wv.vocab:
                        overlap_count += 1
                        self.wv.syn0[self.wv.vocab[word].index] = weights
                        self.syn0_lockf[self.wv.vocab[word].index] = lockf  # lock-factor: 0.0 stops further changes
        logger.info("merged %d vectors into %s matrix from %s", overlap_count, self.wv.syn0.shape, fname)
def most_similar(self, positive=None, negative=None, topn=10, restrict_vocab=None, indexer=None):
"""
Deprecated. Use self.wv.most_similar() instead.
Refer to the documentation for `gensim.models.KeyedVectors.most_similar`
"""
return self.wv.most_similar(positive, negative, topn, restrict_vocab, indexer)
def wmdistance(self, document1, document2):
"""
Deprecated. Use self.wv.wmdistance() instead.
Refer to the documentation for `gensim.models.KeyedVectors.wmdistance`
"""
return self.wv.wmdistance(document1, document2)
def most_similar_cosmul(self, positive=None, negative=None, topn=10):
"""
Deprecated. Use self.wv.most_similar_cosmul() instead.
Refer to the documentation for `gensim.models.KeyedVectors.most_similar_cosmul`
"""
return self.wv.most_similar_cosmul(positive, negative, topn)
def similar_by_word(self, word, topn=10, restrict_vocab=None):
"""
Deprecated. Use self.wv.similar_by_word() instead.
Refer to the documentation for `gensim.models.KeyedVectors.similar_by_word`
"""
return self.wv.similar_by_word(word, topn, restrict_vocab)
def similar_by_vector(self, vector, topn=10, restrict_vocab=None):
"""
Deprecated. Use self.wv.similar_by_vector() instead.
Refer to the documentation for `gensim.models.KeyedVectors.similar_by_vector`
"""
return self.wv.similar_by_vector(vector, topn, restrict_vocab)
def doesnt_match(self, words):
"""
Deprecated. Use self.wv.doesnt_match() instead.
Refer to the documentation for `gensim.models.KeyedVectors.doesnt_match`
"""
return self.wv.doesnt_match(words)
def __getitem__(self, words):
"""
Deprecated. Use self.wv.__getitem__() instead.
Refer to the documentation for `gensim.models.KeyedVectors.__getitem__`
"""
return self.wv.__getitem__(words)
def __contains__(self, word):
"""
Deprecated. Use self.wv.__contains__() instead.
Refer to the documentation for `gensim.models.KeyedVectors.__contains__`
"""
return self.wv.__contains__(word)
def similarity(self, w1, w2):
"""
Deprecated. Use self.wv.similarity() instead.
Refer to the documentation for `gensim.models.KeyedVectors.similarity`
"""
return self.wv.similarity(w1, w2)
def n_similarity(self, ws1, ws2):
"""
Deprecated. Use self.wv.n_similarity() instead.
Refer to the documentation for `gensim.models.KeyedVectors.n_similarity`
"""
return self.wv.n_similarity(ws1, ws2)
    def predict_output_word(self, context_words_list, topn=10):
        """Report the probability distribution of the center word given the context words
        as input to the trained model.

        Parameters
        ----------
        context_words_list : list of str
            Context words; out-of-vocabulary words are silently dropped.
        topn : int
            Number of most-probable candidates to return.

        Returns
        -------
        list of (str, float) or None
            `(word, probability)` pairs sorted by descending probability, or
            None (with a warning) if no context word is in the vocabulary.
        """
        if not self.negative:
            raise RuntimeError(
                "We have currently only implemented predict_output_word for the negative sampling scheme, "
                "so you need to have run word2vec with negative > 0 for this to work."
            )
        if not hasattr(self.wv, 'syn0') or not hasattr(self, 'syn1neg'):
            raise RuntimeError("Parameters required for predicting the output words not found.")
        word_vocabs = [self.wv.vocab[w] for w in context_words_list if w in self.wv.vocab]
        if not word_vocabs:
            warnings.warn("All the input context words are out-of-vocabulary for the current model.")
            return None
        word2_indices = [word.index for word in word_vocabs]
        # hidden layer = sum (or mean, for CBOW-mean models) of the context vectors
        l1 = np_sum(self.wv.syn0[word2_indices], axis=0)
        if word2_indices and self.cbow_mean:
            l1 /= len(word2_indices)
        prob_values = exp(dot(l1, self.syn1neg.T))  # propagate hidden -> output and take softmax to get probabilities
        prob_values /= sum(prob_values)
        top_indices = matutils.argsort(prob_values, topn=topn, reverse=True)
        # returning the most probable output words with their probabilities
        return [(self.wv.index2word[index1], prob_values[index1]) for index1 in top_indices]
def init_sims(self, replace=False):
"""
init_sims() resides in KeyedVectors because it deals with syn0 mainly, but because syn1 is not an attribute
of KeyedVectors, it has to be deleted in this class, and the normalizing of syn0 happens inside of KeyedVectors
"""
if replace and hasattr(self, 'syn1'):
del self.syn1
return self.wv.init_sims(replace)
def estimate_memory(self, vocab_size=None, report=None):
"""Estimate required memory for a model using current settings and provided vocabulary size."""
vocab_size = vocab_size or len(self.wv.vocab)
report = report or {}
report['vocab'] = vocab_size * (700 if self.hs else 500)
report['syn0'] = vocab_size * self.vector_size * dtype(REAL).itemsize
if self.hs:
report['syn1'] = vocab_size * self.layer1_size * dtype(REAL).itemsize
if self.negative:
report['syn1neg'] = vocab_size * self.layer1_size * dtype(REAL).itemsize
report['total'] = sum(report.values())
logger.info(
"estimated required memory for %i words and %i dimensions: %i bytes",
vocab_size, self.vector_size, report['total']
)
return report
    @staticmethod
    def log_accuracy(section):
        """Deprecated static forwarder to `gensim.models.KeyedVectors.log_accuracy`."""
        return KeyedVectors.log_accuracy(section)
def accuracy(self, questions, restrict_vocab=30000, most_similar=None, case_insensitive=True):
most_similar = most_similar or KeyedVectors.most_similar
return self.wv.accuracy(questions, restrict_vocab, most_similar, case_insensitive)
    @staticmethod
    def log_evaluate_word_pairs(pearson, spearman, oov, pairs):
        """
        Deprecated. Use KeyedVectors.log_evaluate_word_pairs() instead.
        Refer to the documentation for `gensim.models.KeyedVectors.log_evaluate_word_pairs`
        """
        return KeyedVectors.log_evaluate_word_pairs(pearson, spearman, oov, pairs)
def evaluate_word_pairs(self, pairs, delimiter='\t', restrict_vocab=300000,
case_insensitive=True, dummy4unknown=False):
"""
Deprecated. Use self.wv.evaluate_word_pairs() instead.
Refer to the documentation for `gensim.models.KeyedVectors.evaluate_word_pairs`
"""
return self.wv.evaluate_word_pairs(pairs, delimiter, restrict_vocab, case_insensitive, dummy4unknown)
def __str__(self):
return "%s(vocab=%s, size=%s, alpha=%s)" % (
self.__class__.__name__, len(self.wv.index2word), self.vector_size, self.alpha
)
def _minimize_model(self, save_syn1=False, save_syn1neg=False, save_syn0_lockf=False):
warnings.warn(
"This method would be deprecated in the future. "
"Keep just_word_vectors = model.wv to retain just the KeyedVectors instance "
"for read-only querying of word vectors."
)
if save_syn1 and save_syn1neg and save_syn0_lockf:
return
if hasattr(self, 'syn1') and not save_syn1:
del self.syn1
if hasattr(self, 'syn1neg') and not save_syn1neg:
del self.syn1neg
if hasattr(self, 'syn0_lockf') and not save_syn0_lockf:
del self.syn0_lockf
self.model_trimmed_post_training = True
def delete_temporary_training_data(self, replace_word_vectors_with_normalized=False):
"""
Discard parameters that are used in training and score. Use if you're sure you're done training a model.
If `replace_word_vectors_with_normalized` is set, forget the original vectors and only keep the normalized
ones = saves lots of memory!
"""
if replace_word_vectors_with_normalized:
self.init_sims(replace=True)
self._minimize_model()
def save(self, *args, **kwargs):
# don't bother storing the cached normalized vectors, recalculable table
kwargs['ignore'] = kwargs.get('ignore', ['syn0norm', 'table', 'cum_table'])
super(Word2Vec, self).save(*args, **kwargs)
save.__doc__ = SaveLoad.save.__doc__
    @classmethod
    def load(cls, *args, **kwargs):
        """Load a previously saved model, upgrading artifacts from older gensim versions in place."""
        model = super(Word2Vec, cls).load(*args, **kwargs)
        # update older models
        if hasattr(model, 'table'):
            delattr(model, 'table')  # discard in favor of cum_table
        if model.negative and hasattr(model.wv, 'index2word'):
            model.make_cum_table()  # rebuild cum_table from vocabulary
        if not hasattr(model, 'corpus_count'):
            model.corpus_count = None
        # upgrade pre-0.12.0 per-word sampling probabilities to int form
        for v in model.wv.vocab.values():
            if hasattr(v, 'sample_int'):
                break  # already 0.12.0+ style int probabilities
            elif hasattr(v, 'sample_probability'):
                v.sample_int = int(round(v.sample_probability * 2**32))
                del v.sample_probability
        if not hasattr(model, 'syn0_lockf') and hasattr(model, 'syn0'):
            model.syn0_lockf = ones(len(model.wv.syn0), dtype=REAL)
        if not hasattr(model, 'random'):
            model.random = random.RandomState(model.seed)
        if not hasattr(model, 'train_count'):
            model.train_count = 0
            model.total_train_time = 0
        return model
def _load_specials(self, *args, **kwargs):
super(Word2Vec, self)._load_specials(*args, **kwargs)
# loading from a pre-KeyedVectors word2vec model
if not hasattr(self, 'wv'):
wv = KeyedVectors()
wv.syn0 = self.__dict__.get('syn0', [])
wv.syn0norm = self.__dict__.get('syn0norm', None)
wv.vocab = self.__dict__.get('vocab', {})
wv.index2word = self.__dict__.get('index2word', [])
self.wv = wv
@classmethod
def load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
limit=None, datatype=REAL):
"""Deprecated. Use gensim.models.KeyedVectors.load_word2vec_format instead."""
raise DeprecationWarning("Deprecated. Use gensim.models.KeyedVectors.load_word2vec_format instead.")
def save_word2vec_format(self, fname, fvocab=None, binary=False):
"""Deprecated. Use model.wv.save_word2vec_format instead."""
raise DeprecationWarning("Deprecated. Use model.wv.save_word2vec_format instead.")
def get_latest_training_loss(self):
return self.running_training_loss
class BrownCorpus(object):
    """Stream sentences from the Brown corpus (part of NLTK data).

    Each yielded sentence is a list of "word/POS" tokens, lowercased, with the
    POS tag truncated to its first two characters; tokens whose tag prefix is
    not alphabetic (punctuation and similar) are dropped.
    """
    def __init__(self, dirname):
        self.dirname = dirname
    def __iter__(self):
        for entry in os.listdir(self.dirname):
            path = os.path.join(self.dirname, entry)
            if not os.path.isfile(path):
                continue
            for raw_line in utils.smart_open(path):
                text = utils.to_unicode(raw_line)
                # each file line is a single sentence; tokens look like WORD/POS_TAG
                pairs = [piece.split('/') for piece in text.split() if len(piece.split('/')) == 2]
                sentence = [
                    "%s/%s" % (word.lower(), tag[:2])
                    for word, tag in pairs
                    if tag[:2].isalpha()
                ]
                # don't bother sending out empty sentences
                if sentence:
                    yield sentence
class Text8Corpus(object):
    """Iterate over sentences from the "text8" corpus, unzipped from http://mattmahoney.net/dc/text8.zip ."""
    def __init__(self, fname, max_sentence_length=MAX_WORDS_IN_BATCH):
        # fname: path to the text8 file; max_sentence_length: tokens per yielded chunk
        self.fname = fname
        self.max_sentence_length = max_sentence_length
    def __iter__(self):
        # the entire corpus is one gigantic line -- there are no sentence marks at all
        # so just split the sequence of tokens arbitrarily: 1 sentence = 1000 tokens
        sentence, rest = [], b''
        with utils.smart_open(self.fname) as fin:
            while True:
                text = rest + fin.read(8192)  # avoid loading the entire file (=1 line) into RAM
                if text == rest:  # EOF
                    words = utils.to_unicode(text).split()
                    sentence.extend(words)  # return the last chunk of words, too (may be shorter/longer)
                    if sentence:
                        yield sentence
                    break
                last_token = text.rfind(b' ')  # last token may have been split in two... keep for next iteration
                words, rest = (utils.to_unicode(text[:last_token]).split(),
                               text[last_token:].strip()) if last_token >= 0 else ([], text)
                sentence.extend(words)
                # emit full-size chunks; any remainder is carried into the next read
                while len(sentence) >= self.max_sentence_length:
                    yield sentence[:self.max_sentence_length]
                    sentence = sentence[self.max_sentence_length:]
class LineSentence(object):
    """
    Simple format: one sentence = one line; words already preprocessed and separated by whitespace.
    """
    def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):
        """
        `source` can be either a string (a filename) or a file object. Only the
        first `limit` lines are read (all lines when `limit` is None, the default).

        Example::
            sentences = LineSentence('myfile.txt')

        Or for compressed files::
            sentences = LineSentence('compressed_text.txt.bz2')
            sentences = LineSentence('compressed_text.txt.gz')
        """
        self.source = source
        self.max_sentence_length = max_sentence_length
        self.limit = limit
    def __iter__(self):
        """Iterate through the lines in the source, chunking long lines."""
        try:
            # Assume a file-like object; objects without seek() raise AttributeError
            # and fall through to the filename branch below.
            self.source.seek(0)
            for raw in itertools.islice(self.source, self.limit):
                tokens = utils.to_unicode(raw).split()
                for start in range(0, len(tokens), self.max_sentence_length):
                    yield tokens[start:start + self.max_sentence_length]
        except AttributeError:
            # If it didn't work like a file, use it as a string filename
            with utils.smart_open(self.source) as fin:
                for raw in itertools.islice(fin, self.limit):
                    tokens = utils.to_unicode(raw).split()
                    for start in range(0, len(tokens), self.max_sentence_length):
                        yield tokens[start:start + self.max_sentence_length]
class PathLineSentences(object):
    """
    Works like word2vec.LineSentence, but will process all files in a directory in alphabetical order by filename.
    The directory can only contain files that can be read by LineSentence: .bz2, .gz, and text files.
    Any file not ending with .bz2 or .gz is assumed to be a text file. Does not work with subdirectories.
    The format of files (either text, or compressed text files) in the path is one sentence = one line,
    with words already preprocessed and separated by whitespace.
    """
    def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):
        """
        `source` should be a path to a directory (as a string) where all files can be opened by the
        LineSentence class. Each file will be read up to `limit` lines (or not clipped if limit is None, the default).
        Example::
            sentences = PathLineSentences(os.getcwd() + '\\corpus\\')
        The files in the directory should be either text files, .bz2 files, or .gz files.
        """
        self.source = source
        self.max_sentence_length = max_sentence_length
        self.limit = limit
        if os.path.isfile(self.source):
            logger.debug('single file given as source, rather than a directory of files')
            logger.debug('consider using models.word2vec.LineSentence for a single file')
            self.input_files = [self.source]  # force code compatibility with list of files
        elif os.path.isdir(self.source):
            self.source = os.path.join(self.source, '')  # ensures os-specific slash at end of path
            logger.info('reading directory %s', self.source)
            # NOTE(review): os.listdir also returns subdirectory names; presumably the
            # directory holds only readable corpus files -- confirm with callers
            self.input_files = os.listdir(self.source)
            self.input_files = [self.source + filename for filename in self.input_files]  # make full paths
            self.input_files.sort()  # makes sure it happens in filename order
        else:  # not a file or a directory, then we can't do anything with it
            raise ValueError('input is neither a file nor a path')
        logger.info('files read into PathLineSentences:%s', '\n'.join(self.input_files))
    def __iter__(self):
        """iterate through the files, yielding each line as a token list (chunked to max_sentence_length)"""
        for file_name in self.input_files:
            logger.info('reading file %s', file_name)
            with utils.smart_open(file_name) as fin:
                for line in itertools.islice(fin, self.limit):
                    line = utils.to_unicode(line).split()
                    i = 0
                    while i < len(line):
                        yield line[i:i + self.max_sentence_length]
                        i += self.max_sentence_length
# Example: ./word2vec.py -train data.txt -output vec.txt -size 200 -window 5 -sample 1e-4 \
# -negative 5 -hs 0 -binary 0 -cbow 1 -iter 3
if __name__ == "__main__":
    import argparse
    logging.basicConfig(
        format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s',
        level=logging.INFO
    )
    logger.info("running %s", " ".join(sys.argv))
    logger.info("using optimization %s", FAST_VERSION)
    # check and process cmdline input
    program = os.path.basename(sys.argv[0])
    if len(sys.argv) < 2:
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    from gensim.models.word2vec import Word2Vec  # noqa:F811 avoid referencing __main__ in pickle
    seterr(all='raise')  # don't ignore numpy errors
    # flags mirror the original C word2vec tool's command line
    parser = argparse.ArgumentParser()
    parser.add_argument("-train", help="Use text data from file TRAIN to train the model", required=True)
    parser.add_argument("-output", help="Use file OUTPUT to save the resulting word vectors")
    parser.add_argument("-window", help="Set max skip length WINDOW between words; default is 5", type=int, default=5)
    parser.add_argument("-size", help="Set size of word vectors; default is 100", type=int, default=100)
    parser.add_argument(
        "-sample",
        help="Set threshold for occurrence of words. "
             "Those that appear with higher frequency in the training data will be randomly down-sampled;"
             " default is 1e-3, useful range is (0, 1e-5)",
        type=float, default=1e-3
    )
    parser.add_argument(
        "-hs", help="Use Hierarchical Softmax; default is 0 (not used)",
        type=int, default=0, choices=[0, 1]
    )
    parser.add_argument(
        "-negative", help="Number of negative examples; default is 5, common values are 3 - 10 (0 = not used)",
        type=int, default=5
    )
    parser.add_argument("-threads", help="Use THREADS threads (default 12)", type=int, default=12)
    parser.add_argument("-iter", help="Run more training iterations (default 5)", type=int, default=5)
    parser.add_argument(
        "-min_count", help="This will discard words that appear less than MIN_COUNT times; default is 5",
        type=int, default=5
    )
    parser.add_argument(
        "-cbow", help="Use the continuous bag of words model; default is 1 (use 0 for skip-gram model)",
        type=int, default=1, choices=[0, 1]
    )
    parser.add_argument(
        "-binary", help="Save the resulting vectors in binary mode; default is 0 (off)",
        type=int, default=0, choices=[0, 1]
    )
    parser.add_argument("-accuracy", help="Use questions from file ACCURACY to evaluate the model")
    args = parser.parse_args()
    # -cbow 0 selects the skip-gram architecture
    if args.cbow == 0:
        skipgram = 1
    else:
        skipgram = 0
    corpus = LineSentence(args.train)
    model = Word2Vec(
        corpus, size=args.size, min_count=args.min_count, workers=args.threads,
        window=args.window, sample=args.sample, sg=skipgram, hs=args.hs,
        negative=args.negative, cbow_mean=1, iter=args.iter
    )
    if args.output:
        outfile = args.output
        model.wv.save_word2vec_format(outfile, binary=args.binary)
    else:
        # no -output given: save full model plus vectors next to the training file
        outfile = args.train
        model.save(outfile + '.model')
        if args.binary == 1:
            model.wv.save_word2vec_format(outfile + '.model.bin', binary=True)
        else:
            model.wv.save_word2vec_format(outfile + '.model.txt', binary=False)
    if args.accuracy:
        model.accuracy(args.accuracy)
    logger.info("finished running %s", program)
| 87,512 | 44.938583 | 119 | py |
poincare_glove | poincare_glove-master/gensim/models/deprecated/old_saveload.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Warnings
--------
.. deprecated:: 3.3.0
Use :mod:`gensim.utils` instead.
Contains the old SaveLoad class with a modified `unpickle` function, to support loading models saved using
an older gensim version.
"""
from __future__ import with_statement
import logging
try:
import cPickle as _pickle
except ImportError:
import pickle as _pickle
import re
import sys
import numpy as np
import scipy.sparse
from six import iteritems
from smart_open import smart_open
if sys.version_info[0] >= 3:
unicode = str
logger = logging.getLogger(__name__)
PAT_ALPHABETIC = re.compile(r'(((?![\d])\w)+)', re.UNICODE)
RE_HTML_ENTITY = re.compile(r'&(#?)([xX]?)(\w{1,8});', re.UNICODE)
class SaveLoad(object):
"""Class which inherit from this class have save/load functions, which un/pickle them to disk.
Warnings
--------
This uses pickle for de/serializing, so objects must not contain unpicklable attributes,
such as lambda functions etc.
"""
    @classmethod
    def load(cls, fname, mmap=None):
        """Load a previously saved object (using :meth:`~gensim.utils.SaveLoad.save`) from file.

        Parameters
        ----------
        fname : str
            Path to file that contains needed object.
        mmap : str, optional
            Memory-map option. If the object was saved with large arrays stored separately, you can load these arrays
            via mmap (shared memory) using `mmap='r'`.
            If the file being loaded is compressed (either '.gz' or '.bz2'), then `mmap=None` **must be** set.

        See Also
        --------
        :meth:`~gensim.utils.SaveLoad.save`

        Returns
        -------
        object
            Object loaded from `fname`.

        Raises
        ------
        IOError
            When methods are called on instance (should be called from class).
        """
        logger.info("loading %s object from %s", cls.__name__, fname)
        # compress tells _load_specials whether side arrays are .npz; subname builds their filenames
        compress, subname = SaveLoad._adapt_by_suffix(fname)
        obj = unpickle(fname)
        obj._load_specials(fname, mmap, compress, subname)
        logger.info("loaded %s", fname)
        return obj
    def _load_specials(self, fname, mmap, compress, subname):
        """Loads any attributes that were stored specially, and gives the same opportunity
        to recursively included :class:`~gensim.utils.SaveLoad` instances.

        Parameters
        ----------
        fname : str
            Path to file that contains needed object.
        mmap : str
            Memory-map option.
        compress : bool
            Set to True if file is compressed.
        subname : function
            Builds the per-attribute side-file name (produced by `_adapt_by_suffix`).
        """
        def mmap_error(obj, filename):
            # mmap is incompatible with compressed (.npz) storage
            return IOError(
                'Cannot mmap compressed object %s in file %s. ' % (obj, filename) +
                'Use `load(fname, mmap=None)` or uncompress files manually.'
            )

        # first let nested SaveLoad attributes restore their own specials
        for attrib in getattr(self, '__recursive_saveloads', []):
            cfname = '.'.join((fname, attrib))
            logger.info("loading %s recursively from %s.* with mmap=%s", attrib, cfname, mmap)
            getattr(self, attrib)._load_specials(cfname, mmap, compress, subname)
        # plain numpy arrays stored in side files
        for attrib in getattr(self, '__numpys', []):
            logger.info("loading %s from %s with mmap=%s", attrib, subname(fname, attrib), mmap)
            if compress:
                if mmap:
                    raise mmap_error(attrib, subname(fname, attrib))
                val = np.load(subname(fname, attrib))['val']
            else:
                val = np.load(subname(fname, attrib), mmap_mode=mmap)
            setattr(self, attrib, val)
        # scipy sparse matrices: the pickled shell plus data/indptr/indices arrays
        for attrib in getattr(self, '__scipys', []):
            logger.info("loading %s from %s with mmap=%s", attrib, subname(fname, attrib), mmap)
            sparse = unpickle(subname(fname, attrib))
            if compress:
                if mmap:
                    raise mmap_error(attrib, subname(fname, attrib))
                with np.load(subname(fname, attrib, 'sparse')) as f:
                    sparse.data = f['data']
                    sparse.indptr = f['indptr']
                    sparse.indices = f['indices']
            else:
                sparse.data = np.load(subname(fname, attrib, 'data'), mmap_mode=mmap)
                sparse.indptr = np.load(subname(fname, attrib, 'indptr'), mmap_mode=mmap)
                sparse.indices = np.load(subname(fname, attrib, 'indices'), mmap_mode=mmap)
            setattr(self, attrib, sparse)
        # attributes that were deliberately not persisted come back as None
        for attrib in getattr(self, '__ignoreds', []):
            logger.info("setting ignored attribute %s to None", attrib)
            setattr(self, attrib, None)
@staticmethod
def _adapt_by_suffix(fname):
"""Give appropriate compress setting and filename formula.
Parameters
----------
fname : str
Input filename.
Returns
-------
(bool, function)
First argument will be True if `fname` compressed.
"""
compress, suffix = (True, 'npz') if fname.endswith('.gz') or fname.endswith('.bz2') else (False, 'npy')
return compress, lambda *args: '.'.join(args + (suffix,))
    def _smart_save(self, fname, separately=None, sep_limit=10 * 1024**2, ignore=frozenset(), pickle_protocol=2):
        """Save the object to file.

        Parameters
        ----------
        fname : str
            Path to file.
        separately : list, optional
            Iterable of attributes than need to store distinctly.
        sep_limit : int, optional
            Limit for separation.
        ignore : frozenset, optional
            Attributes that shouldn't be store.
        pickle_protocol : int, optional
            Protocol number for pickle.

        Notes
        -----
        If `separately` is None, automatically detect large
        numpy/scipy.sparse arrays in the object being stored, and store
        them into separate files. This avoids pickle memory errors and
        allows mmap'ing large arrays back on load efficiently.
        You can also set `separately` manually, in which case it must be
        a list of attribute names to be stored in separate files. The
        automatic check is not performed in this case.

        See Also
        --------
        :meth:`~gensim.utils.SaveLoad.load`
        """
        logger.info("saving %s object under %s, separately %s", self.__class__.__name__, fname, separately)
        compress, subname = SaveLoad._adapt_by_suffix(fname)
        # temporarily strips big arrays off the object; `restores` records how to put them back
        restores = self._save_specials(fname, separately, sep_limit, ignore, pickle_protocol,
                                       compress, subname)
        try:
            pickle(self, fname, protocol=pickle_protocol)
        finally:
            # restore attribs handled specially
            for obj, asides in restores:
                for attrib, val in iteritems(asides):
                    setattr(obj, attrib, val)
        logger.info("saved %s", fname)
def _save_specials(self, fname, separately, sep_limit, ignore, pickle_protocol, compress, subname):
    """Save aside any attributes that need to be handled separately, including
    by recursion any attributes that are themselves :class:`~gensim.utils.SaveLoad` instances.

    Parameters
    ----------
    fname : str
        Output filename.
    separately : list or None
        Iterable of attributes that need to be stored distinctly; if None,
        large numpy/scipy.sparse arrays are auto-detected.
    sep_limit : int
        Size threshold (in elements / stored non-zeros) for auto-separation.
    ignore : iterable of str
        Attributes that shouldn't be stored.
    pickle_protocol : int
        Protocol number for pickle.
    compress : bool
        If True - compress output with :func:`numpy.savez_compressed`.
    subname : function
        Produced by :meth:`~gensim.utils.SaveLoad._adapt_by_suffix`.

    Returns
    -------
    list of (obj, {attrib: value, ...})
        Settings that the caller should use to restore each object's attributes that were set aside
        during the default :func:`~gensim.utils.pickle`.

    """
    asides = {}
    sparse_matrices = (scipy.sparse.csr_matrix, scipy.sparse.csc_matrix)
    if separately is None:
        # auto-detect: any numpy/scipy array with at least `sep_limit` stored
        # elements gets its own file
        separately = []
        for attrib, val in iteritems(self.__dict__):
            if isinstance(val, np.ndarray) and val.size >= sep_limit:
                separately.append(attrib)
            elif isinstance(val, sparse_matrices) and val.nnz >= sep_limit:
                separately.append(attrib)

    # whatever's in `separately` or `ignore` at this point won't get pickled
    for attrib in separately + list(ignore):
        if hasattr(self, attrib):
            asides[attrib] = getattr(self, attrib)
            delattr(self, attrib)

    # recurse into nested SaveLoad-like attributes so their big arrays are
    # split off too, under a "<fname>.<attrib>" prefix
    recursive_saveloads = []
    restores = []
    for attrib, val in iteritems(self.__dict__):
        if hasattr(val, '_save_specials'):  # better than 'isinstance(val, SaveLoad)' if IPython reloading
            recursive_saveloads.append(attrib)
            cfname = '.'.join((fname, attrib))
            restores.extend(val._save_specials(cfname, None, sep_limit, ignore, pickle_protocol, compress, subname))

    try:
        numpys, scipys, ignoreds = [], [], []
        for attrib, val in iteritems(asides):
            if isinstance(val, np.ndarray) and attrib not in ignore:
                numpys.append(attrib)
                logger.info("storing np array '%s' to %s", attrib, subname(fname, attrib))
                if compress:
                    np.savez_compressed(subname(fname, attrib), val=np.ascontiguousarray(val))
                else:
                    np.save(subname(fname, attrib), np.ascontiguousarray(val))
            elif isinstance(val, sparse_matrices) and attrib not in ignore:
                # reuse the `sparse_matrices` tuple defined above for consistency
                scipys.append(attrib)
                logger.info("storing scipy.sparse array '%s' under %s", attrib, subname(fname, attrib))
                if compress:
                    np.savez_compressed(
                        subname(fname, attrib, 'sparse'),
                        data=val.data,
                        indptr=val.indptr,
                        indices=val.indices
                    )
                else:
                    np.save(subname(fname, attrib, 'data'), val.data)
                    np.save(subname(fname, attrib, 'indptr'), val.indptr)
                    np.save(subname(fname, attrib, 'indices'), val.indices)

                # temporarily detach the index/data arrays so the pickled
                # sparse object is array-less; restored right after
                data, indptr, indices = val.data, val.indptr, val.indices
                val.data, val.indptr, val.indices = None, None, None
                try:
                    # store array-less object
                    pickle(val, subname(fname, attrib), protocol=pickle_protocol)
                finally:
                    val.data, val.indptr, val.indices = data, indptr, indices
            else:
                logger.info("not storing attribute %s", attrib)
                ignoreds.append(attrib)

        # bookkeeping consumed by _load_specials on the way back in
        self.__dict__['__numpys'] = numpys
        self.__dict__['__scipys'] = scipys
        self.__dict__['__ignoreds'] = ignoreds
        self.__dict__['__recursive_saveloads'] = recursive_saveloads
    except Exception:
        # restore the attributes if exception-interrupted
        for attrib, val in iteritems(asides):
            setattr(self, attrib, val)
        raise

    return restores + [(self, asides)]
def save(self, fname_or_handle, separately=None, sep_limit=10 * 1024**2, ignore=frozenset(), pickle_protocol=2):
    """Save the object to file.

    Parameters
    ----------
    fname_or_handle : str or file-like
        Path to output file or already opened file-like object. If the object is a file handle,
        no special array handling will be performed, all attributes will be saved to the same file.
    separately : list of str or None, optional
        If None - automatically detect large numpy/scipy.sparse arrays in the object being stored, and store
        them into separate files. This avoids pickle memory errors and allows mmap'ing large arrays
        back on load efficiently.
        If list of str - this attributes will be stored in separate files, the automatic check
        is not performed in this case.
    sep_limit : int
        Limit for automatic separation.
    ignore : frozenset of str
        Attributes that shouldn't be serialize/store.
    pickle_protocol : int
        Protocol number for pickle.

    See Also
    --------
    :meth:`~gensim.utils.SaveLoad.load`

    """
    # EAFP dispatch: first assume `fname_or_handle` is a writeable file-like
    # object and dump everything into it in one piece; pickle raises
    # TypeError when the argument has no `write` method, in which case we
    # treat it as a filesystem path and use the array-splitting save.
    try:
        _pickle.dump(self, fname_or_handle, protocol=pickle_protocol)
        logger.info("saved %s object", self.__class__.__name__)
    except TypeError:  # `fname_or_handle` does not have write attribute
        self._smart_save(fname_or_handle, separately, sep_limit, ignore, pickle_protocol=pickle_protocol)
def unpickle(fname):
    """Load a pickled object from `fname`, remapping pre-deprecation gensim
    module paths to their `gensim.models.deprecated` equivalents.

    Parameters
    ----------
    fname : str
        Path to pickle file.

    Returns
    -------
    object
        Python object loaded from `fname`.

    """
    # old module path -> deprecated module path, patched directly in the
    # pickle byte stream before unpickling
    module_renames = (
        (b'gensim.models.word2vec', b'gensim.models.deprecated.word2vec'),
        (b'gensim.models.keyedvectors', b'gensim.models.deprecated.keyedvectors'),
        (b'gensim.models.doc2vec', b'gensim.models.deprecated.doc2vec'),
        (b'gensim.models.fasttext', b'gensim.models.deprecated.fasttext'),
        (b'gensim.models.wrappers.fasttext', b'gensim.models.deprecated.fasttext_wrapper'),
    )
    with smart_open(fname, 'rb') as f:
        # Because of loading from S3 load can't be used (missing readline in smart_open)
        payload = f.read()
    for old_path, new_path in module_renames:
        payload = payload.replace(old_path, new_path)
    if sys.version_info > (3, 0):
        return _pickle.loads(payload, encoding='latin1')
    return _pickle.loads(payload)
def pickle(obj, fname, protocol=2):
    """Serialize `obj` to the file at path `fname`.

    Parameters
    ----------
    obj : object
        Any python object.
    fname : str
        Path to pickle file.
    protocol : int, optional
        Pickle protocol number; defaults to 2 so the dump stays loadable
        under both python 2.x and 3.x.

    """
    # binary mode is required on Windows
    with smart_open(fname, 'wb') as handle:
        _pickle.dump(obj, handle, protocol=protocol)
| 15,023 | 36.56 | 120 | py |
poincare_glove | poincare_glove-master/gensim/models/deprecated/fasttext.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Authors: Chinmaya Pancholi <chinmayapancholi13@gmail.com>, Shiva Manne <s.manne@rare-technologies.com>
# Copyright (C) 2017 RaRe Technologies s.r.o.
"""
Warnings
--------
.. deprecated:: 3.3.0
Use :mod:`gensim.models.fasttext` instead.
Learn word representations via fasttext's "skip-gram and CBOW models", using either
hierarchical softmax or negative sampling [1]_.
Notes
-----
There are more ways to get word vectors in Gensim than just FastText.
See wrappers for VarEmbed and WordRank or Word2Vec
This module allows training a word embedding from a training corpus with the additional ability
to obtain word vectors for out-of-vocabulary words.
For a tutorial on gensim's native fasttext, refer to the notebook -- [2]_
**Make sure you have a C compiler before installing gensim, to use optimized (compiled) fasttext training**
.. [1] P. Bojanowski, E. Grave, A. Joulin, T. Mikolov
Enriching Word Vectors with Subword Information. In arXiv preprint arXiv:1607.04606.
https://arxiv.org/abs/1607.04606
.. [2] https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/FastText_Tutorial.ipynb
"""
import logging
import numpy as np
from numpy import zeros, ones, vstack, sum as np_sum, empty, float32 as REAL
from gensim.models.deprecated.word2vec import Word2Vec, train_sg_pair, train_cbow_pair
from gensim.models.deprecated.fasttext_wrapper import FastTextKeyedVectors
from gensim.models.deprecated.fasttext_wrapper import FastText as Ft_Wrapper, compute_ngrams, ft_hash
from gensim.models.fasttext import FastText as NewFastText
logger = logging.getLogger(__name__)
FAST_VERSION = -1
MAX_WORDS_IN_BATCH = 10000
def load_old_fasttext(*args, **kwargs):
    """Load a deprecated :class:`FastText` model and migrate it into a new
    :class:`gensim.models.fasttext.FastText` instance, copying hyperparameters,
    trainable weights and vocabulary state across.
    """
    legacy = FastText.load(*args, **kwargs)
    migrated = NewFastText(
        size=legacy.vector_size,
        alpha=legacy.alpha,
        window=legacy.window,
        min_count=legacy.min_count,
        max_vocab_size=legacy.__dict__.get('max_vocab_size', None),
        sample=legacy.sample,
        seed=legacy.seed,
        workers=legacy.workers,
        min_alpha=legacy.min_alpha,
        sg=legacy.sg,
        hs=legacy.hs,
        negative=legacy.negative,
        cbow_mean=legacy.cbow_mean,
        hashfxn=legacy.hashfxn,
        iter=legacy.iter,
        null_word=legacy.null_word,
        sorted_vocab=legacy.sorted_vocab,
        batch_words=legacy.batch_words,
        min_n=legacy.min_n,
        max_n=legacy.max_n,
        word_ngrams=legacy.word_ngrams,
        bucket=legacy.bucket,
    )

    # trainable weights that always exist on the old model
    migrated.wv.vectors = legacy.wv.syn0
    migrated.wv.vectors_vocab = legacy.wv.syn0_vocab
    migrated.wv.vectors_ngrams = legacy.wv.syn0_ngrams

    # optional weights: copy only when present on the legacy model
    optional_weights = (
        (legacy.wv, 'syn0norm', migrated.wv, 'vectors_norm'),
        (legacy, 'syn1', migrated.trainables, 'syn1'),
        (legacy, 'syn1neg', migrated.trainables, 'syn1neg'),
        (legacy, 'syn0_lockf', migrated.trainables, 'vectors_lockf'),
        (legacy, 'syn0_vocab_lockf', migrated.trainables, 'vectors_vocab_lockf'),
        (legacy, 'syn0_ngrams_lockf', migrated.trainables, 'vectors_ngrams_lockf'),
        (legacy.wv, 'syn0_vocab_norm', migrated.trainables, 'vectors_vocab_norm'),
        (legacy.wv, 'syn0_ngrams_norm', migrated.trainables, 'vectors_ngrams_norm'),
    )
    for source, source_attr, target, target_attr in optional_weights:
        if hasattr(source, source_attr):
            setattr(target, target_attr, getattr(source, source_attr))

    # vocabulary and bookkeeping state
    migrated.wv.vocab = legacy.wv.vocab
    migrated.wv.index2word = legacy.wv.index2word
    migrated.vocabulary.cum_table = legacy.cum_table
    migrated.wv.hash2index = legacy.wv.hash2index
    migrated.train_count = legacy.train_count
    migrated.corpus_count = legacy.corpus_count
    migrated.running_training_loss = legacy.running_training_loss
    migrated.total_train_time = legacy.total_train_time
    migrated.min_alpha_yet_reached = legacy.min_alpha_yet_reached
    migrated.model_trimmed_post_training = legacy.model_trimmed_post_training
    migrated.trainables.num_ngram_vectors = legacy.num_ngram_vectors
    return migrated
def train_batch_cbow(model, sentences, alpha, work=None, neu1=None):
    """Update CBOW model by training on a sequence of sentences.

    Each sentence is a list of string tokens, which are looked up in the model's
    vocab dictionary. Called internally from :meth:`gensim.models.fasttext.FastText.train()`.

    This is the non-optimized, Python version. If you have cython installed, gensim
    will use the optimized version from fasttext_inner instead.

    Parameters
    ----------
    model : :class:`~gensim.models.fasttext.FastText`
        `FastText` instance.
    sentences : iterable of iterables
        Iterable of the sentences directly from disk/network.
    alpha : float
        Learning rate.
    work : :class:`numpy.ndarray`
        Private working memory for each worker.
    neu1 : :class:`numpy.ndarray`
        Private working memory for each worker.

    Returns
    -------
    int
        Effective number of words trained.

    """
    result = 0
    for sentence in sentences:
        # drop OOV words and randomly down-sample frequent words via sample_int
        word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab and
                       model.wv.vocab[w].sample_int > model.random.rand() * 2**32]
        for pos, word in enumerate(word_vocabs):
            # shrink the effective window randomly, as in the original word2vec
            reduced_window = model.random.randint(model.window)
            start = max(0, pos - model.window + reduced_window)
            window_pos = enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start)
            # context word indices, excluding the target word itself
            word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]

            word2_subwords = []
            vocab_subwords_indices = []
            ngrams_subwords_indices = []

            # collect the full-word vector indices plus the char-ngram indices
            # of every context word
            for index in word2_indices:
                vocab_subwords_indices += [index]
                word2_subwords += model.wv.ngrams_word[model.wv.index2word[index]]

            for subword in word2_subwords:
                ngrams_subwords_indices.append(model.wv.ngrams[subword])

            l1_vocab = np_sum(model.wv.syn0_vocab[vocab_subwords_indices], axis=0)  # 1 x vector_size
            l1_ngrams = np_sum(model.wv.syn0_ngrams[ngrams_subwords_indices], axis=0)  # 1 x vector_size

            # combined context representation: sum of word and ngram vectors
            l1 = np_sum([l1_vocab, l1_ngrams], axis=0)
            subwords_indices = [vocab_subwords_indices] + [ngrams_subwords_indices]
            if (subwords_indices[0] or subwords_indices[1]) and model.cbow_mean:
                # cbow_mean: average instead of sum over all contributing vectors
                l1 /= (len(subwords_indices[0]) + len(subwords_indices[1]))

            # train on the sliding window for target word
            train_cbow_pair(model, word, subwords_indices, l1, alpha, is_ft=True)
        result += len(word_vocabs)
    return result
def train_batch_sg(model, sentences, alpha, work=None, neu1=None):
    """Update skip-gram model by training on a sequence of sentences.

    Each sentence is a list of string tokens, which are looked up in the model's
    vocab dictionary. Called internally from :meth:`gensim.models.fasttext.FastText.train()`.

    This is the non-optimized, Python version. If you have cython installed, gensim
    will use the optimized version from fasttext_inner instead.

    Parameters
    ----------
    model : :class:`~gensim.models.fasttext.FastText`
        `FastText` instance.
    sentences : iterable of iterables
        Iterable of the sentences directly from disk/network.
    alpha : float
        Learning rate.
    work : :class:`numpy.ndarray`
        Private working memory for each worker.
    neu1 : :class:`numpy.ndarray`
        Private working memory for each worker.

    Returns
    -------
    int
        Effective number of words trained.

    """
    result = 0
    for sentence in sentences:
        # drop OOV words and randomly down-sample frequent words via sample_int
        word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab and
                       model.wv.vocab[w].sample_int > model.random.rand() * 2**32]
        for pos, word in enumerate(word_vocabs):
            reduced_window = model.random.randint(model.window)  # `b` in the original word2vec code
            # now go over all words from the (reduced) window, predicting each one in turn
            start = max(0, pos - model.window + reduced_window)

            # the input representation of `word` is its own index plus the
            # indices of all its character ngrams
            subwords_indices = [word.index]
            word2_subwords = model.wv.ngrams_word[model.wv.index2word[word.index]]

            for subword in word2_subwords:
                subwords_indices.append(model.wv.ngrams[subword])

            for pos2, word2 in enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start):
                if pos2 != pos:  # don't train on the `word` itself
                    train_sg_pair(model, model.wv.index2word[word2.index], subwords_indices, alpha, is_ft=True)

        result += len(word_vocabs)
    return result
class FastText(Word2Vec):
    """Class for training, using and evaluating word representations learned using method
    described in [1]_ aka Fasttext.

    The model can be stored/loaded via its :meth:`~gensim.models.fasttext.FastText.save()` and
    :meth:`~gensim.models.fasttext.FastText.load()` methods, or loaded in a format compatible with the original
    fasttext implementation via :meth:`~gensim.models.fasttext.FastText.load_fasttext_format()`.
    """

    def __init__(
            self, sentences=None, sg=0, hs=0, size=100, alpha=0.025, window=5, min_count=5,
            max_vocab_size=None, word_ngrams=1, sample=1e-3, seed=1, workers=3, min_alpha=0.0001,
            negative=5, cbow_mean=1, hashfxn=hash, iter=5, null_word=0, min_n=3, max_n=6, sorted_vocab=1,
            bucket=2000000, trim_rule=None, batch_words=MAX_WORDS_IN_BATCH):
        """Initialize the model from an iterable of `sentences`. Each sentence is a
        list of words (unicode strings) that will be used for training.

        Parameters
        ----------
        sentences : iterable of iterables
            The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
            consider an iterable that streams the sentences directly from disk/network.
            See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
            or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such
            examples. If you don't supply `sentences`, the model is left uninitialized -- use if you plan to
            initialize it in some other way.
        sg : int {1, 0}
            Defines the training algorithm. If 1, skip-gram is used, otherwise, CBOW is employed.
        size : int
            Dimensionality of the feature vectors.
        window : int
            The maximum distance between the current and predicted word within a sentence.
        alpha : float
            The initial learning rate.
        min_alpha : float
            Learning rate will linearly drop to `min_alpha` as training progresses.
        seed : int
            Seed for the random number generator. Initial vectors for each word are seeded with a hash of
            the concatenation of word + `str(seed)`. Note that for a fully deterministically-reproducible run,
            you must also limit the model to a single worker thread (`workers=1`), to eliminate ordering jitter
            from OS thread scheduling. (In Python 3, reproducibility between interpreter launches also requires
            use of the `PYTHONHASHSEED` environment variable to control hash randomization).
        min_count : int
            Ignores all words with total frequency lower than this.
        max_vocab_size : int
            Limits the RAM during vocabulary building; if there are more unique
            words than this, then prune the infrequent ones. Every 10 million word types need about 1GB of RAM.
            Set to `None` for no limit.
        sample : float
            The threshold for configuring which higher-frequency words are randomly downsampled,
            useful range is (0, 1e-5).
        workers : int
            Use these many worker threads to train the model (=faster training with multicore machines).
        hs : int {1,0}
            If 1, hierarchical softmax will be used for model training.
            If set to 0, and `negative` is non-zero, negative sampling will be used.
        negative : int
            If > 0, negative sampling will be used, the int for negative specifies how many "noise words"
            should be drawn (usually between 5-20).
            If set to 0, no negative sampling is used.
        cbow_mean : int {1,0}
            If 0, use the sum of the context word vectors. If 1, use the mean, only applies when cbow is used.
        hashfxn : function
            Hash function to use to randomly initialize weights, for increased training reproducibility.
        iter : int
            Number of iterations (epochs) over the corpus.
        trim_rule : function
            Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
            be trimmed away, or handled using the default (discard if word count < min_count).
            Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
            or a callable that accepts parameters (word, count, min_count) and returns either
            :attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
            Note: The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as
            part of the model.
        sorted_vocab : int {1,0}
            If 1, sort the vocabulary by descending frequency before assigning word indexes.
        batch_words : int
            Target size (in words) for batches of examples passed to worker threads (and
            thus cython routines).(Larger batches will be passed if individual
            texts are longer than 10000 words, but the standard cython code truncates to that maximum.)
        min_n : int
            Min length of char ngrams to be used for training word representations.
        max_n : int
            Max length of char ngrams to be used for training word representations. Set `max_n` to be
            lesser than `min_n` to avoid char ngrams being used.
        word_ngrams : int {1,0}
            If 1, uses enriches word vectors with subword(ngrams) information.
            If 0, this is equivalent to word2vec.
        bucket : int
            Character ngrams are hashed into a fixed number of buckets, in order to limit the
            memory usage of the model. This option specifies the number of buckets used by the model.

        Examples
        --------
        Initialize and train a `FastText` model

        >>> from gensim.models import FastText
        >>> sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
        >>>
        >>> model = FastText(sentences, min_count=1)
        >>> say_vector = model['say']  # get vector for word
        >>> of_vector = model['of']  # get vector for out-of-vocab word

        """
        # fastText specific params
        self.bucket = bucket
        self.word_ngrams = word_ngrams
        self.min_n = min_n
        self.max_n = max_n
        # no subword information requested -> no ngram buckets needed at all
        if self.word_ngrams <= 1 and self.max_n == 0:
            self.bucket = 0

        super(FastText, self).__init__(
            sentences=sentences, size=size, alpha=alpha, window=window, min_count=min_count,
            max_vocab_size=max_vocab_size, sample=sample, seed=seed, workers=workers, min_alpha=min_alpha,
            sg=sg, hs=hs, negative=negative, cbow_mean=cbow_mean, hashfxn=hashfxn, iter=iter, null_word=null_word,
            trim_rule=trim_rule, sorted_vocab=sorted_vocab, batch_words=batch_words)

    def initialize_word_vectors(self):
        """Initializes FastTextKeyedVectors instance to store all vocab/ngram vectors for the model."""
        self.wv = FastTextKeyedVectors()
        # propagate the ngram length bounds so the keyed vectors can compute
        # subwords for out-of-vocabulary lookups
        self.wv.min_n = self.min_n
        self.wv.max_n = self.max_n

    def build_vocab(self, sentences, keep_raw_vocab=False, trim_rule=None, progress_per=10000, update=False):
        """Build vocabulary from a sequence of sentences (can be a once-only generator stream).
        Each sentence must be a list of unicode strings.

        Parameters
        ----------
        sentences : iterable of iterables
            The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
            consider an iterable that streams the sentences directly from disk/network.
            See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
            or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such
            examples.
        keep_raw_vocab : bool
            If not true, delete the raw vocabulary after the scaling is done and free up RAM.
        trim_rule : function
            Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
            be trimmed away, or handled using the default (discard if word count < min_count).
            Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
            or a callable that accepts parameters (word, count, min_count) and returns either
            :attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
            Note: The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as
            part of the model.
        progress_per : int
            Indicates how many words to process before showing/updating the progress.
        update: bool
            If true, the new words in `sentences` will be added to model's vocab.

        Example
        -------
        Train a model and update vocab for online training

        >>> from gensim.models import FastText
        >>> sentences_1 = [["cat", "say", "meow"], ["dog", "say", "woof"]]
        >>> sentences_2 = [["dude", "say", "wazzup!"]]
        >>>
        >>> model = FastText(min_count=1)
        >>> model.build_vocab(sentences_1)
        >>> model.train(sentences_1, total_examples=model.corpus_count, epochs=model.iter)
        >>> model.build_vocab(sentences_2, update=True)
        >>> model.train(sentences_2, total_examples=model.corpus_count, epochs=model.iter)

        """
        if update:
            if not len(self.wv.vocab):
                raise RuntimeError(
                    "You cannot do an online vocabulary-update of a model which has no prior vocabulary. "
                    "First build the vocabulary of your model with a corpus "
                    "before doing an online update.")
            # remember pre-update sizes; init_ngrams uses them to size the new rows
            self.old_vocab_len = len(self.wv.vocab)
            self.old_hash2index_len = len(self.wv.hash2index)

        super(FastText, self).build_vocab(
            sentences, keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule, progress_per=progress_per, update=update)
        self.init_ngrams(update=update)

    def init_ngrams(self, update=False):
        """Compute ngrams of all words present in vocabulary and stores vectors for only those ngrams.
        Vectors for other ngrams are initialized with a random uniform distribution in FastText.

        Parameters
        ----------
        update : bool
            If True, the new vocab words and their new ngrams word vectors are initialized
            with random uniform distribution and updated/added to the existing vocab word and ngram vectors.

        """
        if not update:
            self.wv.ngrams = {}
            self.wv.syn0_vocab = empty((len(self.wv.vocab), self.vector_size), dtype=REAL)
            self.syn0_vocab_lockf = ones((len(self.wv.vocab), self.vector_size), dtype=REAL)

            self.wv.syn0_ngrams = empty((self.bucket, self.vector_size), dtype=REAL)
            self.syn0_ngrams_lockf = ones((self.bucket, self.vector_size), dtype=REAL)

            # gather the char-ngrams of every vocabulary word (deduplicated)
            all_ngrams = []
            for w, v in self.wv.vocab.items():
                self.wv.ngrams_word[w] = compute_ngrams(w, self.min_n, self.max_n)
                all_ngrams += self.wv.ngrams_word[w]

            all_ngrams = list(set(all_ngrams))
            self.num_ngram_vectors = len(all_ngrams)
            logger.info("Total number of ngrams is %d", len(all_ngrams))

            # map each ngram's bucket hash to a compact row index; colliding
            # ngrams share the same row (standard fastText hashing trick)
            self.wv.hash2index = {}
            ngram_indices = []
            new_hash_count = 0
            for i, ngram in enumerate(all_ngrams):
                ngram_hash = ft_hash(ngram) % self.bucket
                if ngram_hash in self.wv.hash2index:
                    self.wv.ngrams[ngram] = self.wv.hash2index[ngram_hash]
                else:
                    ngram_indices.append(ngram_hash % self.bucket)  # already reduced modulo bucket
                    self.wv.hash2index[ngram_hash] = new_hash_count
                    self.wv.ngrams[ngram] = self.wv.hash2index[ngram_hash]
                    new_hash_count = new_hash_count + 1

            # keep only the rows for buckets that are actually used
            self.wv.syn0_ngrams = self.wv.syn0_ngrams.take(ngram_indices, axis=0)
            self.syn0_ngrams_lockf = self.syn0_ngrams_lockf.take(ngram_indices, axis=0)
            self.reset_ngram_weights()
        else:
            # online update: register only ngrams not seen before
            new_ngrams = []
            for w, v in self.wv.vocab.items():
                self.wv.ngrams_word[w] = compute_ngrams(w, self.min_n, self.max_n)
                new_ngrams += [ng for ng in self.wv.ngrams_word[w] if ng not in self.wv.ngrams]

            new_ngrams = list(set(new_ngrams))
            logger.info("Number of new ngrams is %d", len(new_ngrams))
            new_hash_count = 0
            for i, ngram in enumerate(new_ngrams):
                ngram_hash = ft_hash(ngram) % self.bucket
                if ngram_hash not in self.wv.hash2index:
                    # new rows are appended after the pre-update ones
                    self.wv.hash2index[ngram_hash] = new_hash_count + self.old_hash2index_len
                    self.wv.ngrams[ngram] = self.wv.hash2index[ngram_hash]
                    new_hash_count = new_hash_count + 1
                else:
                    self.wv.ngrams[ngram] = self.wv.hash2index[ngram_hash]

            # initialize rows for the new vocab words and ngram buckets with the
            # same uniform(-1/size, 1/size) scheme used for the initial weights
            rand_obj = np.random
            rand_obj.seed(self.seed)
            new_vocab_rows = rand_obj.uniform(
                -1.0 / self.vector_size, 1.0 / self.vector_size,
                (len(self.wv.vocab) - self.old_vocab_len, self.vector_size)
            ).astype(REAL)
            new_vocab_lockf_rows = ones((len(self.wv.vocab) - self.old_vocab_len, self.vector_size), dtype=REAL)
            new_ngram_rows = rand_obj.uniform(
                -1.0 / self.vector_size, 1.0 / self.vector_size,
                (len(self.wv.hash2index) - self.old_hash2index_len, self.vector_size)
            ).astype(REAL)
            new_ngram_lockf_rows = ones(
                (len(self.wv.hash2index) - self.old_hash2index_len,
                 self.vector_size),
                dtype=REAL)

            self.wv.syn0_vocab = vstack([self.wv.syn0_vocab, new_vocab_rows])
            self.syn0_vocab_lockf = vstack([self.syn0_vocab_lockf, new_vocab_lockf_rows])
            self.wv.syn0_ngrams = vstack([self.wv.syn0_ngrams, new_ngram_rows])
            self.syn0_ngrams_lockf = vstack([self.syn0_ngrams_lockf, new_ngram_lockf_rows])

    def reset_ngram_weights(self):
        """Reset all projection weights to an initial (untrained) state,
        but keep the existing vocabulary and their ngrams.
        """
        rand_obj = np.random
        rand_obj.seed(self.seed)
        # uniform(-1/size, 1/size) init, matching the original fastText scheme
        for index in range(len(self.wv.vocab)):
            self.wv.syn0_vocab[index] = rand_obj.uniform(
                -1.0 / self.vector_size, 1.0 / self.vector_size, self.vector_size
            ).astype(REAL)
        for index in range(len(self.wv.hash2index)):
            self.wv.syn0_ngrams[index] = rand_obj.uniform(
                -1.0 / self.vector_size, 1.0 / self.vector_size, self.vector_size
            ).astype(REAL)

    def _do_train_job(self, sentences, alpha, inits):
        """Train a single batch of sentences. Return 2-tuple `(effective word count after
        ignoring unknown words and sentence length trimming, total word count)`.

        Parameters
        ----------
        sentences : iterable of iterables
            The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
            consider an iterable that streams the sentences directly from disk/network.
            See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
            or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such
            examples.
        alpha : float
            The current learning rate.
        inits : (:class:`numpy.ndarray`, :class:`numpy.ndarray`)
            Each worker's private work memory.

        Returns
        -------
        (int, int)
            Tuple of (effective word count after ignoring unknown words and sentence length trimming,
            total word count)

        """
        work, neu1 = inits
        tally = 0
        # dispatch to the skip-gram or CBOW batch trainer based on model config
        if self.sg:
            tally += train_batch_sg(self, sentences, alpha, work, neu1)
        else:
            tally += train_batch_cbow(self, sentences, alpha, work, neu1)

        return tally, self._raw_word_count(sentences)

    def train(self, sentences, total_examples=None, total_words=None,
              epochs=None, start_alpha=None, end_alpha=None,
              word_count=0, queue_factor=2, report_delay=1.0):
        """Update the model's neural weights from a sequence of sentences (can be a once-only generator stream).
        For FastText, each sentence must be a list of unicode strings. (Subclasses may accept other examples.)

        To support linear learning-rate decay from (initial) alpha to min_alpha, and accurate
        progress-percentage logging, either total_examples (count of sentences) or total_words (count of
        raw words in sentences) **MUST** be provided (if the corpus is the same as was provided to
        :meth:`~gensim.models.fasttext.FastText.build_vocab()`, the count of examples in that corpus
        will be available in the model's :attr:`corpus_count` property).

        To avoid common mistakes around the model's ability to do multiple training passes itself, an
        explicit `epochs` argument **MUST** be provided. In the common and recommended case,
        where :meth:`~gensim.models.fasttext.FastText.train()` is only called once,
        the model's cached `iter` value should be supplied as `epochs` value.

        Parameters
        ----------
        sentences : iterable of iterables
            The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
            consider an iterable that streams the sentences directly from disk/network.
            See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
            or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such
            examples.
        total_examples : int
            Count of sentences.
        total_words : int
            Count of raw words in sentences.
        epochs : int
            Number of iterations (epochs) over the corpus.
        start_alpha : float
            Initial learning rate.
        end_alpha : float
            Final learning rate. Drops linearly from `start_alpha`.
        word_count : int
            Count of words already trained. Set this to 0 for the usual
            case of training on all words in sentences.
        queue_factor : int
            Multiplier for size of queue (number of workers * queue_factor).
        report_delay : float
            Seconds to wait before reporting progress.

        Examples
        --------
        >>> from gensim.models import FastText
        >>> sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
        >>>
        >>> model = FastText(min_count=1)
        >>> model.build_vocab(sentences)
        >>> model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)

        """
        self.neg_labels = []
        if self.negative > 0:
            # precompute negative labels optimization for pure-python training
            self.neg_labels = zeros(self.negative + 1)
            self.neg_labels[0] = 1.

        # NOTE(review): the explicit `total_examples`, `total_words`, `epochs`,
        # `start_alpha`, `end_alpha`, `word_count`, `queue_factor` and
        # `report_delay` arguments are NOT forwarded here -- the call below
        # always passes the model's own corpus_count/iter/alpha/min_alpha.
        Word2Vec.train(
            self, sentences, total_examples=self.corpus_count, epochs=self.iter,
            start_alpha=self.alpha, end_alpha=self.min_alpha)
        self.get_vocab_word_vecs()

    def __getitem__(self, word):
        """Get `word` representations in vector space, as a 1D numpy array.

        Parameters
        ----------
        word : str
            A single word whose vector needs to be returned.

        Returns
        -------
        :class:`numpy.ndarray`
            The word's representations in vector space, as a 1D numpy array.

        Raises
        ------
        KeyError
            For words with all ngrams absent, a KeyError is raised.

        Example
        -------
        >>> from gensim.models import FastText
        >>> from gensim.test.utils import datapath
        >>>
        >>> trained_model = FastText.load_fasttext_format(datapath('lee_fasttext'))
        >>> meow_vector = trained_model['hello']  # get vector for word

        """
        return self.word_vec(word)

    def get_vocab_word_vecs(self):
        """Calculate vectors for words in vocabulary and stores them in `wv.syn0`."""
        for w, v in self.wv.vocab.items():
            word_vec = np.copy(self.wv.syn0_vocab[v.index])
            ngrams = self.wv.ngrams_word[w]
            ngram_weights = self.wv.syn0_ngrams
            for ngram in ngrams:
                word_vec += ngram_weights[self.wv.ngrams[ngram]]
            # average over the word vector itself plus all its ngram vectors
            word_vec /= (len(ngrams) + 1)
            self.wv.syn0[v.index] = word_vec

    def word_vec(self, word, use_norm=False):
        """Get the word's representations in vector space, as a 1D numpy array.

        Parameters
        ----------
        word : str
            A single word whose vector needs to be returned.
        use_norm : bool
            If True, returns normalized vector.

        Returns
        -------
        :class:`numpy.ndarray`
            The word's representations in vector space, as a 1D numpy array.

        Raises
        ------
        KeyError
            For words with all ngrams absent, a KeyError is raised.

        Example
        -------
        >>> from gensim.models import FastText
        >>> sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
        >>>
        >>> model = FastText(sentences, min_count=1)
        >>> meow_vector = model.word_vec('meow')  # get vector for word

        """
        # delegate to the keyed-vectors implementation, which falls back to
        # ngram composition for out-of-vocabulary words
        return FastTextKeyedVectors.word_vec(self.wv, word, use_norm=use_norm)

    @classmethod
    def load_fasttext_format(cls, *args, **kwargs):
        """Load a :class:`~gensim.models.fasttext.FastText` model from a format compatible with
        the original fasttext implementation.

        Parameters
        ----------
        fname : str
            Path to the file.

        """
        return Ft_Wrapper.load_fasttext_format(*args, **kwargs)

    def save(self, *args, **kwargs):
        """Save the model. This saved model can be loaded again using :func:`~gensim.models.fasttext.FastText.load`,
        which supports online training and getting vectors for out-of-vocabulary words.

        Parameters
        ----------
        fname : str
            Path to the file.

        """
        # normalized-vector caches are recomputable, so don't persist them
        kwargs['ignore'] = kwargs.get('ignore', ['syn0norm', 'syn0_vocab_norm', 'syn0_ngrams_norm'])
        super(FastText, self).save(*args, **kwargs)
| 32,204 | 44.876068 | 120 | py |
poincare_glove | poincare_glove-master/gensim/models/deprecated/__init__.py | """This package contains some deprecated implementations of algorithm, will be removed soon."""
| 96 | 47.5 | 95 | py |
poincare_glove | poincare_glove-master/gensim/models/deprecated/fasttext_wrapper.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Jayant Jain <jayantjain1992@gmail.com>
# Copyright (C) 2017 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Warnings
--------
.. deprecated:: 3.2.0
Use :mod:`gensim.models.fasttext` instead.
Python wrapper around word representation learning from FastText, a library for efficient learning
of word representations and sentence classification [1].
This module allows training a word embedding from a training corpus with the additional ability
to obtain word vectors for out-of-vocabulary words, using the fastText C implementation.
The wrapped model can NOT be updated with new documents for online training -- use gensim's
`Word2Vec` for that.
Example:
>>> from gensim.models.wrappers import FastText
>>> model = FastText.train('/Users/kofola/fastText/fasttext', corpus_file='text8')
>>> print model['forests'] # prints vector for given out-of-vocabulary word
.. [1] https://github.com/facebookresearch/fastText#enriching-word-vectors-with-subword-information
"""
import logging
import tempfile
import os
import struct
import numpy as np
from numpy import float32 as REAL, sqrt, newaxis
from gensim import utils
from gensim.models.deprecated.keyedvectors import KeyedVectors, Vocab
from gensim.models.deprecated.word2vec import Word2Vec
logger = logging.getLogger(__name__)
try:
    FileNotFoundError
except NameError:
    # Python 2 compatibility: py2 has no FileNotFoundError, so alias its
    # closest built-in ancestor instead.
    FileNotFoundError = IOError
# Magic number identifying the newer fastText binary model file format.
FASTTEXT_FILEFORMAT_MAGIC = 793712314
class FastTextKeyedVectors(KeyedVectors):
    """
    Class to contain vectors, vocab and ngrams for the FastText training class and other methods not directly
    involved in training such as most_similar().
    Subclasses KeyedVectors to implement oov lookups, storing ngrams and other FastText specific methods
    """
    def __init__(self):
        super(FastTextKeyedVectors, self).__init__()
        # Vectors for in-vocab words and for char ngrams, plus their
        # lazily-computed L2-normalized copies (None until init_sims runs).
        self.syn0_vocab = None
        self.syn0_vocab_norm = None
        self.syn0_ngrams = None
        self.syn0_ngrams_norm = None
        self.ngrams = {}  # ngram string -> row index into syn0_ngrams
        self.hash2index = {}
        self.ngrams_word = {}  # word -> list of its char ngrams
        # Inclusive char-ngram length bounds used when decomposing words.
        self.min_n = 0
        self.max_n = 0
    def save(self, *args, **kwargs):
        # don't bother storing the cached normalized vectors
        kwargs['ignore'] = kwargs.get('ignore', ['syn0norm', 'syn0_vocab_norm', 'syn0_ngrams_norm'])
        super(FastTextKeyedVectors, self).save(*args, **kwargs)
    def word_vec(self, word, use_norm=False):
        """
        Accept a single word as input.
        Returns the word's representations in vector space, as a 1D numpy array.
        The word can be out-of-vocabulary as long as ngrams for the word are present.
        For words with all ngrams absent, a KeyError is raised.
        Example::
            >>> trained_model['office']
            array([ -1.40128313e-02, ...])
        """
        if word in self.vocab:
            # Known word: delegate to the plain KeyedVectors lookup.
            return super(FastTextKeyedVectors, self).word_vec(word, use_norm)
        else:
            # OOV word: average the vectors of its known character ngrams.
            word_vec = np.zeros(self.syn0_ngrams.shape[1], dtype=np.float32)
            ngrams = compute_ngrams(word, self.min_n, self.max_n)
            ngrams = [ng for ng in ngrams if ng in self.ngrams]
            if use_norm:
                ngram_weights = self.syn0_ngrams_norm
            else:
                ngram_weights = self.syn0_ngrams
            for ngram in ngrams:
                word_vec += ngram_weights[self.ngrams[ngram]]
            if word_vec.any():
                return word_vec / len(ngrams)
            else:  # No ngrams of the word are present in self.ngrams
                raise KeyError('all ngrams for word %s absent from model' % word)
    def init_sims(self, replace=False):
        """
        Precompute L2-normalized vectors.
        If `replace` is set, forget the original vectors and only keep the normalized
        ones = saves lots of memory!
        Note that you **cannot continue training** after doing a replace. The model becomes
        effectively read-only = you can only call `most_similar`, `similarity` etc.
        """
        super(FastTextKeyedVectors, self).init_sims(replace)
        if getattr(self, 'syn0_ngrams_norm', None) is None or replace:
            logger.info("precomputing L2-norms of ngram weight vectors")
            if replace:
                # Normalize rows in place, then alias — avoids a second matrix.
                for i in range(self.syn0_ngrams.shape[0]):
                    self.syn0_ngrams[i, :] /= sqrt((self.syn0_ngrams[i, :] ** 2).sum(-1))
                self.syn0_ngrams_norm = self.syn0_ngrams
            else:
                self.syn0_ngrams_norm = \
                    (self.syn0_ngrams / sqrt((self.syn0_ngrams ** 2).sum(-1))[..., newaxis]).astype(REAL)
    def __contains__(self, word):
        """
        Check if `word` or any character ngrams in `word` are present in the vocabulary.
        A vector for the word is guaranteed to exist if `__contains__` returns True.
        """
        if word in self.vocab:
            return True
        else:
            # A single known ngram is enough to synthesize a vector for `word`.
            char_ngrams = compute_ngrams(word, self.min_n, self.max_n)
            return any(ng in self.ngrams for ng in char_ngrams)
    @classmethod
    def load_word2vec_format(cls, *args, **kwargs):
        """Not suppported. Use gensim.models.KeyedVectors.load_word2vec_format instead."""
        raise NotImplementedError("Not supported. Use gensim.models.KeyedVectors.load_word2vec_format instead.")
class FastText(Word2Vec):
    """
    Class for word vector training using FastText. Communication between FastText and Python
    takes place by working with data files on disk and calling the FastText binary with
    subprocess.call().
    Implements functionality similar to [fasttext.py](https://github.com/salestock/fastText.py),
    improving speed and scope of functionality like `most_similar`, `similarity` by extracting vectors
    into numpy matrix.
    Warnings
    --------
    .. deprecated:: 3.2.0
        Use :class:`gensim.models.fasttext.FastText` instead of :class:`gensim.models.wrappers.fasttext.FastText`.
    """
    def initialize_word_vectors(self):
        # Use the FastText-aware keyed vectors (supports OOV ngram lookups).
        self.wv = FastTextKeyedVectors()
    @classmethod
    def train(cls, ft_path, corpus_file, output_file=None, model='cbow', size=100, alpha=0.025, window=5, min_count=5,
              word_ngrams=1, loss='ns', sample=1e-3, negative=5, iter=5, min_n=3, max_n=6, sorted_vocab=1, threads=12):
        """
        `ft_path` is the path to the FastText executable, e.g. `/home/kofola/fastText/fasttext`.
        `corpus_file` is the filename of the text file to be used for training the FastText model.
        Expects file to contain utf-8 encoded text.
        `model` defines the training algorithm. By default, cbow is used. Accepted values are
        'cbow', 'skipgram'.
        `size` is the dimensionality of the feature vectors.
        `window` is the maximum distance between the current and predicted word within a sentence.
        `alpha` is the initial learning rate.
        `min_count` = ignore all words with total occurrences lower than this.
        `word_ngram` = max length of word ngram
        `loss` = defines training objective. Allowed values are `hs` (hierarchical softmax),
        `ns` (negative sampling) and `softmax`. Defaults to `ns`
        `sample` = threshold for configuring which higher-frequency words are randomly downsampled;
            default is 1e-3, useful range is (0, 1e-5).
        `negative` = the value for negative specifies how many "noise words" should be drawn
        (usually between 5-20). Default is 5. If set to 0, no negative samping is used.
        Only relevant when `loss` is set to `ns`
        `iter` = number of iterations (epochs) over the corpus. Default is 5.
        `min_n` = min length of char ngrams to be used for training word representations. Default is 3.
        `max_n` = max length of char ngrams to be used for training word representations. Set `max_n` to be
        lesser than `min_n` to avoid char ngrams being used. Default is 6.
        `sorted_vocab` = if 1 (default), sort the vocabulary by descending frequency before
        assigning word indexes.
        `threads` = number of threads to use. Default is 12.
        """
        ft_path = ft_path
        output_file = output_file or os.path.join(tempfile.gettempdir(), 'ft_model')
        # Map the gensim-style keyword arguments onto fastText CLI flags.
        ft_args = {
            'input': corpus_file,
            'output': output_file,
            'lr': alpha,
            'dim': size,
            'ws': window,
            'epoch': iter,
            'minCount': min_count,
            'wordNgrams': word_ngrams,
            'neg': negative,
            'loss': loss,
            'minn': min_n,
            'maxn': max_n,
            'thread': threads,
            't': sample
        }
        cmd = [ft_path, model]
        for option, value in ft_args.items():
            cmd.append("-%s" % option)
            cmd.append(str(value))
        # Run the external fastText binary, then load the files it produced.
        utils.check_output(args=cmd)
        model = cls.load_fasttext_format(output_file)
        cls.delete_training_files(output_file)
        return model
    def save(self, *args, **kwargs):
        # don't bother storing the cached normalized vectors
        kwargs['ignore'] = kwargs.get('ignore', ['syn0norm', 'syn0_vocab_norm', 'syn0_ngrams_norm'])
        super(FastText, self).save(*args, **kwargs)
    @classmethod
    def load_fasttext_format(cls, model_file, encoding='utf8'):
        """
        Load the input-hidden weight matrix from the fast text output files.
        Note that due to limitations in the FastText API, you cannot continue training
        with a model loaded this way, though you can query for word similarity etc.
        `model_file` is the path to the FastText output files.
        FastText outputs two model files - `/path/to/model.vec` and `/path/to/model.bin`
        Expected value for this example: `/path/to/model` or `/path/to/model.bin`,
        as gensim requires only `.bin` file to load entire fastText model.
        """
        model = cls()
        if not model_file.endswith('.bin'):
            model_file += '.bin'
        model.file_name = model_file
        model.load_binary_data(encoding=encoding)
        return model
    @classmethod
    def load(cls, *args, **kwargs):
        model = super(FastText, cls).load(*args, **kwargs)
        # Older pickles stored the ngram matrix under `syn0_all`; rename it.
        if hasattr(model.wv, 'syn0_all'):
            setattr(model.wv, 'syn0_ngrams', model.wv.syn0_all)
            delattr(model.wv, 'syn0_all')
        return model
    @classmethod
    def delete_training_files(cls, model_file):
        """Deletes the files created by FastText training"""
        try:
            os.remove('%s.vec' % model_file)
            os.remove('%s.bin' % model_file)
        except FileNotFoundError:
            logger.debug('Training files %s not found when attempting to delete', model_file)
            pass
    def load_binary_data(self, encoding='utf8'):
        """Loads data from the output binary file created by FastText training"""
        # The three sections must be read in exactly this order: header
        # params, then the dictionary, then the weight vectors.
        with utils.smart_open(self.file_name, 'rb') as f:
            self.load_model_params(f)
            self.load_dict(f, encoding=encoding)
            self.load_vectors(f)
    def load_model_params(self, file_handle):
        # Read the hyperparameter header of the .bin file into model attrs.
        magic, version = self.struct_unpack(file_handle, '@2i')
        if magic == FASTTEXT_FILEFORMAT_MAGIC:  # newer format
            self.new_format = True
            dim, ws, epoch, min_count, neg, _, loss, model, bucket, minn, maxn, _, t = \
                self.struct_unpack(file_handle, '@12i1d')
        else:  # older format
            # The old format has no magic/version: the first two ints already
            # are `dim` and `ws`.
            self.new_format = False
            dim = magic
            ws = version
            epoch, min_count, neg, _, loss, model, bucket, minn, maxn, _, t = self.struct_unpack(file_handle, '@10i1d')
        # Parameters stored by [Args::save](https://github.com/facebookresearch/fastText/blob/master/src/args.cc)
        self.vector_size = dim
        self.window = ws
        self.iter = epoch
        self.min_count = min_count
        self.negative = neg
        self.hs = loss == 1
        self.sg = model == 2
        self.bucket = bucket
        self.wv.min_n = minn
        self.wv.max_n = maxn
        self.sample = t
    def load_dict(self, file_handle, encoding='utf8'):
        vocab_size, nwords, nlabels = self.struct_unpack(file_handle, '@3i')
        # Vocab stored by [Dictionary::save](https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc)
        if nlabels > 0:
            raise NotImplementedError("Supervised fastText models are not supported")
        logger.info("loading %s words for fastText model from %s", vocab_size, self.file_name)
        self.struct_unpack(file_handle, '@1q')  # number of tokens
        if self.new_format:
            pruneidx_size, = self.struct_unpack(file_handle, '@q')
        for i in range(vocab_size):
            word_bytes = b''
            char_byte = file_handle.read(1)
            # Read vocab word
            while char_byte != b'\x00':
                word_bytes += char_byte
                char_byte = file_handle.read(1)
            word = word_bytes.decode(encoding)
            count, _ = self.struct_unpack(file_handle, '@qb')
            self.wv.vocab[word] = Vocab(index=i, count=count)
            self.wv.index2word.append(word)
        assert len(self.wv.vocab) == nwords, (
            'mismatch between final vocab size ({} words), '
            'and expected number of words ({} words)'.format(len(self.wv.vocab), nwords))
        if len(self.wv.vocab) != vocab_size:
            # expecting to log this warning only for pretrained french vector, wiki.fr
            logger.warning(
                "mismatch between final vocab size (%s words), and expected vocab size (%s words)",
                len(self.wv.vocab), vocab_size
            )
        if self.new_format:
            # Skip the (id, count) pruning index — unused here.
            for j in range(pruneidx_size):
                self.struct_unpack(file_handle, '@2i')
    def load_vectors(self, file_handle):
        if self.new_format:
            self.struct_unpack(file_handle, '@?')  # bool quant_input in fasttext.cc
        num_vectors, dim = self.struct_unpack(file_handle, '@2q')
        # Vectors stored by [Matrix::save](https://github.com/facebookresearch/fastText/blob/master/src/matrix.cc)
        assert self.vector_size == dim, (
            'mismatch between vector size in model params ({}) and model vectors ({})'
            .format(self.vector_size, dim)
        )
        # fastText writes C floats; pick the numpy dtype of matching width.
        float_size = struct.calcsize('@f')
        if float_size == 4:
            dtype = np.dtype(np.float32)
        elif float_size == 8:
            dtype = np.dtype(np.float64)
        self.num_original_vectors = num_vectors
        self.wv.syn0_ngrams = np.fromfile(file_handle, dtype=dtype, count=num_vectors * dim)
        self.wv.syn0_ngrams = self.wv.syn0_ngrams.reshape((num_vectors, dim))
        assert self.wv.syn0_ngrams.shape == (self.bucket + len(self.wv.vocab), self.vector_size), \
            'mismatch between actual weight matrix shape {} and expected shape {}'\
            .format(
                self.wv.syn0_ngrams.shape, (self.bucket + len(self.wv.vocab), self.vector_size)
            )
        self.init_ngrams()
    def struct_unpack(self, file_handle, fmt):
        # Read exactly as many bytes as `fmt` describes and unpack them.
        num_bytes = struct.calcsize(fmt)
        return struct.unpack(fmt, file_handle.read(num_bytes))
    def init_ngrams(self):
        """
        Computes ngrams of all words present in vocabulary and stores vectors for only those ngrams.
        Vectors for other ngrams are initialized with a random uniform distribution in FastText. These
        vectors are discarded here to save space.
        """
        self.wv.ngrams = {}
        all_ngrams = []
        self.wv.syn0 = np.zeros((len(self.wv.vocab), self.vector_size), dtype=REAL)
        # First pass: collect every ngram used by the vocab, and seed each
        # word's vector with the word's own row of the loaded matrix.
        for w, vocab in self.wv.vocab.items():
            all_ngrams += compute_ngrams(w, self.wv.min_n, self.wv.max_n)
            self.wv.syn0[vocab.index] += np.array(self.wv.syn0_ngrams[vocab.index])
        all_ngrams = set(all_ngrams)
        self.num_ngram_vectors = len(all_ngrams)
        # Map each kept ngram to its hashed bucket row in the loaded matrix.
        ngram_indices = []
        for i, ngram in enumerate(all_ngrams):
            ngram_hash = ft_hash(ngram)
            ngram_indices.append(len(self.wv.vocab) + ngram_hash % self.bucket)
            self.wv.ngrams[ngram] = i
        # Keep only the ngram rows actually referenced by the vocabulary.
        self.wv.syn0_ngrams = self.wv.syn0_ngrams.take(ngram_indices, axis=0)
        ngram_weights = self.wv.syn0_ngrams
        logger.info(
            "loading weights for %s words for fastText model from %s",
            len(self.wv.vocab), self.file_name
        )
        # Second pass: add all ngram vectors to each word and average.
        for w, vocab in self.wv.vocab.items():
            word_ngrams = compute_ngrams(w, self.wv.min_n, self.wv.max_n)
            for word_ngram in word_ngrams:
                self.wv.syn0[vocab.index] += np.array(ngram_weights[self.wv.ngrams[word_ngram]])
            self.wv.syn0[vocab.index] /= (len(word_ngrams) + 1)
        logger.info(
            "loaded %s weight matrix for fastText model from %s",
            self.wv.syn0.shape, self.file_name
        )
def compute_ngrams(word, min_n, max_n):
    """Return all character ngrams of `word`, of lengths `min_n` through `max_n`.

    The word is first wrapped in the `<` / `>` markers fastText uses to
    denote word boundaries, so the markers participate in the ngrams.
    Ngrams are ordered by length, then by start position.
    """
    wrapped = '<' + word + '>'  # BOW/EOW boundary markers, as in fastText
    longest = min(len(wrapped), max_n)
    return [
        wrapped[start:start + size]
        for size in range(min_n, longest + 1)
        for start in range(len(wrapped) - size + 1)
    ]
def ft_hash(string):
    """
    Reproduces [hash method](https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc)
    used in fastText (a 32-bit FNV-1a style hash over the characters).
    """
    # uint32 wrap-around is intentional here; temporarily silence numpy's
    # overflow warnings and restore the previous error state afterwards.
    previous_settings = np.seterr(all='ignore')
    result = np.uint32(2166136261)
    for char in string:
        result = (result ^ np.uint32(ord(char))) * np.uint32(16777619)
    np.seterr(**previous_settings)
    return result
| 17,995 | 38.378556 | 120 | py |
poincare_glove | poincare_glove-master/gensim/models/deprecated/doc2vec.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Warnings
--------
.. deprecated:: 3.3.0
Use :mod:`gensim.models.doc2vec` instead.
Deep learning via the distributed memory and distributed bag of words models from
[1]_, using either hierarchical softmax or negative sampling [2]_ [3]_. See [#tutorial]_
**Make sure you have a C compiler before installing gensim, to use optimized (compiled)
doc2vec training** (70x speedup [blog]_).
Initialize a model with e.g.::
>>> model = Doc2Vec(documents, size=100, window=8, min_count=5, workers=4)
Persist a model to disk with::
>>> model.save(fname)
>>> model = Doc2Vec.load(fname) # you can continue training with the loaded model!
If you're finished training a model (=no more updates, only querying), you can do
>>> model.delete_temporary_training_data(keep_doctags_vectors=True, keep_inference=True):
to trim unneeded model memory = use (much) less RAM.
.. [1] Quoc Le and Tomas Mikolov. Distributed Representations of Sentences and Documents.
http://arxiv.org/pdf/1405.4053v2.pdf
.. [2] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean.
Efficient Estimation of Word Representations in Vector Space. In Proceedings of Workshop at ICLR, 2013.
.. [3] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean.
Distributed Representations of Words and Phrases and their Compositionality. In Proceedings of NIPS, 2013.
.. [blog] Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/
.. [#tutorial] Doc2vec in gensim tutorial,
https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/doc2vec-lee.ipynb
"""
import logging
import os
try:
from queue import Queue
except ImportError:
from Queue import Queue # noqa:F401
from collections import namedtuple, defaultdict
from timeit import default_timer
from numpy import zeros, sum as np_sum, add as np_add, concatenate, \
repeat as np_repeat, array, float32 as REAL, empty, ones, memmap as np_memmap, \
sqrt, newaxis, ndarray, dot, vstack, dtype, divide as np_divide, integer
from gensim import utils
from gensim.utils import call_on_class_only, deprecated
from gensim.models.deprecated.word2vec import Word2Vec, train_cbow_pair, train_sg_pair, train_batch_sg
from gensim.models.deprecated.keyedvectors import KeyedVectors
from gensim.models.doc2vec import Doc2Vec as NewDoc2Vec
from gensim.models.deprecated.old_saveload import SaveLoad
from gensim import matutils # utility fnc for pickling, common scipy operations etc
from six.moves import xrange, zip
from six import string_types, integer_types
logger = logging.getLogger(__name__)
def load_old_doc2vec(*args, **kwargs):
    """Load a pre-3.3 pickled `Doc2Vec` model and convert it to the new
    `gensim.models.doc2vec.Doc2Vec` class, copying hyperparameters, weights,
    vocabularies and training bookkeeping across. Accepts the same arguments
    as `Doc2Vec.load`."""
    old_model = Doc2Vec.load(*args, **kwargs)
    # Attributes that may be missing from very old pickles are fetched
    # defensively via __dict__.get with a None fallback.
    params = {
        'dm_mean': old_model.__dict__.get('dm_mean', None),
        'dm': old_model.dm,
        'dbow_words': old_model.dbow_words,
        'dm_concat': old_model.dm_concat,
        'dm_tag_count': old_model.dm_tag_count,
        'docvecs': old_model.__dict__.get('docvecs', None),
        'docvecs_mapfile': old_model.__dict__.get('docvecs_mapfile', None),
        'comment': old_model.__dict__.get('comment', None),
        'size': old_model.vector_size,
        'alpha': old_model.alpha,
        'window': old_model.window,
        'min_count': old_model.min_count,
        'max_vocab_size': old_model.__dict__.get('max_vocab_size', None),
        'sample': old_model.sample,
        'seed': old_model.seed,
        'workers': old_model.workers,
        'min_alpha': old_model.min_alpha,
        'hs': old_model.hs,
        'negative': old_model.negative,
        'cbow_mean': old_model.cbow_mean,
        'hashfxn': old_model.hashfxn,
        'iter': old_model.iter,
        'sorted_vocab': old_model.sorted_vocab,
        'batch_words': old_model.batch_words,
        'compute_loss': old_model.__dict__.get('compute_loss', None)
    }
    new_model = NewDoc2Vec(**params)
    # set word2vec trainables attributes
    new_model.wv.vectors = old_model.wv.syn0
    if hasattr(old_model.wv, 'syn0norm'):
        new_model.docvecs.vectors_norm = old_model.wv.syn0norm
    if hasattr(old_model, 'syn1'):
        new_model.trainables.syn1 = old_model.syn1
    if hasattr(old_model, 'syn1neg'):
        new_model.trainables.syn1neg = old_model.syn1neg
    if hasattr(old_model, 'syn0_lockf'):
        new_model.trainables.vectors_lockf = old_model.syn0_lockf
    # set doc2vec trainables attributes
    new_model.docvecs.vectors_docs = old_model.docvecs.doctag_syn0
    if hasattr(old_model.docvecs, 'doctag_syn0norm'):
        new_model.docvecs.vectors_docs_norm = old_model.docvecs.doctag_syn0norm
    if hasattr(old_model.docvecs, 'doctag_syn0_lockf'):
        new_model.trainables.vectors_docs_lockf = old_model.docvecs.doctag_syn0_lockf
    if hasattr(old_model.docvecs, 'mapfile_path'):
        new_model.docvecs.mapfile_path = old_model.docvecs.mapfile_path
    # set word2vec vocabulary attributes
    new_model.wv.vocab = old_model.wv.vocab
    new_model.wv.index2word = old_model.wv.index2word
    new_model.vocabulary.cum_table = old_model.cum_table
    # set doc2vec vocabulary attributes
    new_model.docvecs.doctags = old_model.docvecs.doctags
    new_model.docvecs.max_rawint = old_model.docvecs.max_rawint
    new_model.docvecs.offset2doctag = old_model.docvecs.offset2doctag
    new_model.docvecs.count = old_model.docvecs.count
    # carry over training progress/bookkeeping counters
    new_model.train_count = old_model.train_count
    new_model.corpus_count = old_model.corpus_count
    new_model.running_training_loss = old_model.running_training_loss
    new_model.total_train_time = old_model.total_train_time
    new_model.min_alpha_yet_reached = old_model.min_alpha_yet_reached
    new_model.model_trimmed_post_training = old_model.model_trimmed_post_training
    return new_model
def train_document_dbow(model, doc_words, doctag_indexes, alpha, work=None,
                        train_words=False, learn_doctags=True, learn_words=True, learn_hidden=True,
                        word_vectors=None, word_locks=None, doctag_vectors=None, doctag_locks=None):
    """Update a distributed bag-of-words ("PV-DBOW") model from one document.

    Called internally from `Doc2Vec.train()` and `Doc2Vec.infer_vector()`.
    `doc_words` is a list of word tokens looked up in the model's vocab;
    `doctag_indexes` index into the `doctag_vectors` array.

    If `train_words` is True, word-to-word skip-gram examples are trained as
    well; otherwise word vectors are neither consulted nor updated here.
    Any of `learn_doctags`, `learn_words` and `learn_hidden` may be set False
    to freeze the corresponding weights, e.g. when inferring vectors with a
    (partially-)frozen model.

    Returns the number of words in the document.

    This is the non-optimized, Python version. If you have cython installed,
    gensim will use the optimized version from doc2vec_inner instead.
    """
    # Fall back to the model's own doctag arrays when none were supplied.
    if doctag_vectors is None:
        doctag_vectors = model.docvecs.doctag_syn0
    if doctag_locks is None:
        doctag_locks = model.docvecs.doctag_syn0_lockf
    # Optionally interleave plain word-to-word skip-gram training.
    if train_words and learn_words:
        train_batch_sg(model, [doc_words], alpha, work)
    # Every (doctag, word) combination is trained exactly like a skip-gram
    # (context, word) pair, with the doctag playing the context role.
    for tag_index in doctag_indexes:
        for token in doc_words:
            train_sg_pair(
                model, token, tag_index, alpha,
                learn_vectors=learn_doctags, learn_hidden=learn_hidden,
                context_vectors=doctag_vectors, context_locks=doctag_locks,
            )
    return len(doc_words)
def train_document_dm(model, doc_words, doctag_indexes, alpha, work=None, neu1=None,
                      learn_doctags=True, learn_words=True, learn_hidden=True,
                      word_vectors=None, word_locks=None, doctag_vectors=None, doctag_locks=None):
    """
    Update distributed memory model ("PV-DM") by training on a single document.
    Called internally from `Doc2Vec.train()` and `Doc2Vec.infer_vector()`. This
    method implements the DM model with a projection (input) layer that is
    either the sum or mean of the context vectors, depending on the model's
    `dm_mean` configuration field. See `train_document_dm_concat()` for the DM
    model with a concatenated input layer.
    The document is provided as `doc_words`, a list of word tokens which are looked up
    in the model's vocab dictionary, and `doctag_indexes`, which provide indexes
    into the doctag_vectors array.
    Any of `learn_doctags', `learn_words`, and `learn_hidden` may be set False to
    prevent learning-updates to those respective model weights, as if using the
    (partially-)frozen model to infer other compatible vectors.
    This is the non-optimized, Python version. If you have a C compiler, gensim
    will use the optimized version from doc2vec_inner instead.
    """
    # Default to the model's own weight/lock arrays when not overridden.
    if word_vectors is None:
        word_vectors = model.wv.syn0
    if word_locks is None:
        word_locks = model.syn0_lockf
    if doctag_vectors is None:
        doctag_vectors = model.docvecs.doctag_syn0
    if doctag_locks is None:
        doctag_locks = model.docvecs.doctag_syn0_lockf
    # Keep only in-vocab words that survive frequency downsampling
    # (sample_int is compared against a random 32-bit draw).
    word_vocabs = [model.wv.vocab[w] for w in doc_words if w in model.wv.vocab and
                   model.wv.vocab[w].sample_int > model.random.rand() * 2**32]
    for pos, word in enumerate(word_vocabs):
        reduced_window = model.random.randint(model.window)  # `b` in the original doc2vec code
        start = max(0, pos - model.window + reduced_window)
        window_pos = enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start)
        word2_indexes = [word2.index for pos2, word2 in window_pos if pos2 != pos]
        # Projection layer: sum of window word vectors plus doctag vectors,
        # optionally averaged when the model uses cbow_mean.
        l1 = np_sum(word_vectors[word2_indexes], axis=0) + np_sum(doctag_vectors[doctag_indexes], axis=0)
        count = len(word2_indexes) + len(doctag_indexes)
        if model.cbow_mean and count > 1:
            l1 /= count
        neu1e = train_cbow_pair(model, word, word2_indexes, l1, alpha,
                                learn_vectors=False, learn_hidden=learn_hidden)
        if not model.cbow_mean and count > 1:
            neu1e /= count
        # Distribute the error gradient over doctag and word vectors,
        # scaled per-row by the corresponding lock factors.
        if learn_doctags:
            for i in doctag_indexes:
                doctag_vectors[i] += neu1e * doctag_locks[i]
        if learn_words:
            for i in word2_indexes:
                word_vectors[i] += neu1e * word_locks[i]
    return len(word_vocabs)
def train_document_dm_concat(model, doc_words, doctag_indexes, alpha, work=None, neu1=None, learn_doctags=True,
                             learn_words=True, learn_hidden=True, word_vectors=None, word_locks=None,
                             doctag_vectors=None, doctag_locks=None):
    """
    Update distributed memory model ("PV-DM") by training on a single document, using a
    concatenation of the context window word vectors (rather than a sum or average).
    Called internally from `Doc2Vec.train()` and `Doc2Vec.infer_vector()`.
    The document is provided as `doc_words`, a list of word tokens which are looked up
    in the model's vocab dictionary, and `doctag_indexes`, which provide indexes
    into the doctag_vectors array.
    Any of `learn_doctags', `learn_words`, and `learn_hidden` may be set False to
    prevent learning-updates to those respective model weights, as if using the
    (partially-)frozen model to infer other compatible vectors.
    This is the non-optimized, Python version. If you have a C compiler, gensim
    will use the optimized version from doc2vec_inner instead.
    """
    # Default to the model's own weight/lock arrays when not overridden.
    if word_vectors is None:
        word_vectors = model.wv.syn0
    if word_locks is None:
        word_locks = model.syn0_lockf
    if doctag_vectors is None:
        doctag_vectors = model.docvecs.doctag_syn0
    if doctag_locks is None:
        doctag_locks = model.docvecs.doctag_syn0_lockf
    # Keep only in-vocab words that survive frequency downsampling.
    word_vocabs = [model.wv.vocab[w] for w in doc_words if w in model.wv.vocab and
                   model.wv.vocab[w].sample_int > model.random.rand() * 2**32]
    doctag_len = len(doctag_indexes)
    if doctag_len != model.dm_tag_count:
        return 0  # skip doc without expected number of doctag(s) (TODO: warn/pad?)
    null_word = model.wv.vocab['\0']
    pre_pad_count = model.window
    post_pad_count = model.window
    # Pad with the null word so every position has a full-width context.
    padded_document_indexes = (
        (pre_pad_count * [null_word.index])  # pre-padding
        + [word.index for word in word_vocabs if word is not None]  # elide out-of-Vocabulary words
        + (post_pad_count * [null_word.index])  # post-padding
    )
    for pos in range(pre_pad_count, len(padded_document_indexes) - post_pad_count):
        word_context_indexes = (
            padded_document_indexes[(pos - pre_pad_count): pos]  # preceding words
            + padded_document_indexes[(pos + 1):(pos + 1 + post_pad_count)]  # following words
        )
        predict_word = model.wv.vocab[model.wv.index2word[padded_document_indexes[pos]]]
        # numpy advanced-indexing copies; concatenate, flatten to 1d
        l1 = concatenate((doctag_vectors[doctag_indexes], word_vectors[word_context_indexes])).ravel()
        neu1e = train_cbow_pair(model, predict_word, None, l1, alpha,
                                learn_hidden=learn_hidden, learn_vectors=False)
        # filter by locks and shape for addition to source vectors
        e_locks = concatenate((doctag_locks[doctag_indexes], word_locks[word_context_indexes]))
        neu1e_r = (neu1e.reshape(-1, model.vector_size)
                   * np_repeat(e_locks, model.vector_size).reshape(-1, model.vector_size))
        # The first doctag_len rows of the gradient belong to the doctags,
        # the remaining rows to the context words, in concatenation order.
        if learn_doctags:
            np_add.at(doctag_vectors, doctag_indexes, neu1e_r[:doctag_len])
        if learn_words:
            np_add.at(word_vectors, word_context_indexes, neu1e_r[doctag_len:])
    return len(padded_document_indexes) - pre_pad_count - post_pad_count
class TaggedDocument(namedtuple('TaggedDocument', 'words tags')):
    """A single labelled document for Doc2Vec training.

    `words` is a list of unicode string tokens and `tags` a list of tokens
    labelling the document. Tags may be one or more unicode string tokens,
    but typical practice (which will also be most memory-efficient) is for
    the tags list to include a unique integer id as the only tag.
    Replaces "sentence as a list of words" from Word2Vec.
    """
    def __str__(self):
        # Render under the concrete subclass name, e.g. TaggedDocument([...], [...]).
        return '{}({}, {})'.format(self.__class__.__name__, self.words, self.tags)
# for compatibility
@deprecated("Class will be removed in 4.0.0, use TaggedDocument instead")
class LabeledSentence(TaggedDocument):
    # Deprecated alias kept so code/pickles using the pre-TaggedDocument name
    # keep working; the decorator emits a deprecation warning on use.
    pass
class DocvecsArray(SaveLoad):
"""
Default storage of doc vectors during/after training, in a numpy array.
As the 'docvecs' property of a Doc2Vec model, allows access and
comparison of document vectors.
>>> docvec = d2v_model.docvecs[99]
>>> docvec = d2v_model.docvecs['SENT_99'] # if string tag used in training
>>> sims = d2v_model.docvecs.most_similar(99)
>>> sims = d2v_model.docvecs.most_similar('SENT_99')
>>> sims = d2v_model.docvecs.most_similar(docvec)
If only plain int tags are presented during training, the dict (of
string tag -> index) and list (of index -> string tag) stay empty,
saving memory.
Supplying a mapfile_path (as by initializing a Doc2Vec model with a
'docvecs_mapfile' value) will use a pair of memory-mapped
files as the array backing for doctag_syn0/doctag_syn0_lockf values.
The Doc2Vec model automatically uses this class, but a future alternative
implementation, based on another persistence mechanism like LMDB, LevelDB,
or SQLite, should also be possible.
"""
    def __init__(self, mapfile_path=None):
        self.doctags = {}  # string -> Doctag (only filled if necessary)
        self.max_rawint = -1  # highest rawint-indexed doctag
        self.offset2doctag = []  # int offset-past-(max_rawint+1) -> String (only filled if necessary)
        self.count = 0  # total number of doctags (raw-int range plus string tags)
        self.mapfile_path = mapfile_path  # optional path prefix for memory-mapped backing arrays
def note_doctag(self, key, document_no, document_length):
"""Note a document tag during initial corpus scan, for structure sizing."""
if isinstance(key, integer_types + (integer,)):
self.max_rawint = max(self.max_rawint, key)
else:
if key in self.doctags:
self.doctags[key] = self.doctags[key].repeat(document_length)
else:
self.doctags[key] = Doctag(len(self.offset2doctag), document_length, 1)
self.offset2doctag.append(key)
self.count = self.max_rawint + 1 + len(self.offset2doctag)
def indexed_doctags(self, doctag_tokens):
"""Return indexes and backing-arrays used in training examples."""
return ([self._int_index(index) for index in doctag_tokens if index in self],
self.doctag_syn0, self.doctag_syn0_lockf, doctag_tokens)
    def trained_item(self, indexed_tuple):
        """Persist any changes made to the given indexes (matching tuple previously
        returned by indexed_doctags()); a no-op for this implementation"""
        # The numpy backing arrays are updated in place during training,
        # so there is nothing to flush here.
        pass
def _int_index(self, index):
"""Return int index for either string or int index"""
if isinstance(index, integer_types + (integer,)):
return index
else:
return self.max_rawint + 1 + self.doctags[index].offset
    @deprecated("Method will be removed in 4.0.0, use self.index_to_doctag instead")
    def _key_index(self, i_index, missing=None):
        """Return string index for given int index, if available"""
        # `missing` is accepted for backward compatibility but never used.
        return self.index_to_doctag(i_index)
def index_to_doctag(self, i_index):
    """Return the string key for `i_index` if one exists; otherwise echo the raw int back."""
    slot = i_index - self.max_rawint - 1
    if 0 <= slot < len(self.offset2doctag):
        return self.offset2doctag[slot]
    return i_index
def __getitem__(self, index):
    """
    Accept a single key (int or string tag) or list of keys as input.
    If a single string or int, return designated tag's vector
    representation, as a 1D numpy array.
    If a list, return designated tags' vector representations as a
    2D numpy array: #tags x #vector_size.
    """
    if isinstance(index, string_types + integer_types + (integer,)):
        # single key: translate to its int slot and return that row
        return self.doctag_syn0[self._int_index(index)]
    # sequence of keys: recurse per key and stack rows into a 2D array
    return vstack([self[i] for i in index])
def __len__(self):
    """Return the total number of tag slots (raw ints plus string tags)."""
    return self.count
def __contains__(self, index):
    """True if `index` (raw int below count, or known string tag) has a vector."""
    if isinstance(index, integer_types + (integer,)):
        return index < self.count
    else:
        return index in self.doctags
def save(self, *args, **kwargs):
    """Persist this object, skipping the cached L2-normalized vectors (recomputable)."""
    # don't bother storing the cached normalized vectors
    kwargs['ignore'] = kwargs.get('ignore', ['syn0norm'])
    super(DocvecsArray, self).save(*args, **kwargs)
def borrow_from(self, other_docvecs):
    """Share tag bookkeeping (count, doctags, offset2doctag) with another instance."""
    for attr in ('count', 'doctags', 'offset2doctag'):
        setattr(self, attr, getattr(other_docvecs, attr))
def clear_sims(self):
    """Drop the cached L2-normalized doctag vectors; init_sims() rebuilds them on demand."""
    self.doctag_syn0norm = None
def estimated_lookup_memory(self):
    """Rough bytes needed for tag lookup structures; 0 when only plain-int tags are used."""
    n_offsets = len(self.offset2doctag)
    n_entries = len(self.doctags)
    # ~60 bytes per list slot, ~140 bytes per dict entry (rough CPython costs)
    return 60 * n_offsets + 140 * n_entries
def reset_weights(self, model):
    """(Re)initialize doctag vectors, either in RAM or as memory-mapped files.

    Each row is seeded deterministically from the model seed plus the tag,
    so repeated initializations are reproducible.
    """
    length = max(len(self.doctags), self.count)
    if self.mapfile_path:
        # disk-backed arrays: allows doctag sets larger than RAM
        self.doctag_syn0 = np_memmap(
            self.mapfile_path + '.doctag_syn0', dtype=REAL, mode='w+', shape=(length, model.vector_size)
        )
        self.doctag_syn0_lockf = np_memmap(
            self.mapfile_path + '.doctag_syn0_lockf', dtype=REAL, mode='w+', shape=(length,)
        )
        self.doctag_syn0_lockf.fill(1.0)
    else:
        self.doctag_syn0 = empty((length, model.vector_size), dtype=REAL)
        self.doctag_syn0_lockf = ones((length,), dtype=REAL)  # zeros suppress learning
    for i in xrange(length):
        # construct deterministic seed from index AND model seed
        seed = "%d %s" % (model.seed, self.index_to_doctag(i))
        self.doctag_syn0[i] = model.seeded_vector(seed)
def init_sims(self, replace=False):
    """
    Precompute L2-normalized vectors.
    If `replace` is set, forget the original vectors and only keep the normalized
    ones = saves lots of memory!
    Note that you **cannot continue training or inference** after doing a replace.
    The model becomes effectively read-only = you can call `most_similar`, `similarity`
    etc., but not `train` or `infer_vector`.
    """
    if getattr(self, 'doctag_syn0norm', None) is None or replace:
        logger.info("precomputing L2-norms of doc weight vectors")
        if replace:
            # normalize in place, row by row, then alias the same array
            for i in xrange(self.doctag_syn0.shape[0]):
                self.doctag_syn0[i, :] /= sqrt((self.doctag_syn0[i, :] ** 2).sum(-1))
            self.doctag_syn0norm = self.doctag_syn0
        else:
            if self.mapfile_path:
                # keep the normalized copy on disk too, matching the raw vectors
                self.doctag_syn0norm = np_memmap(
                    self.mapfile_path + '.doctag_syn0norm', dtype=REAL,
                    mode='w+', shape=self.doctag_syn0.shape)
            else:
                self.doctag_syn0norm = empty(self.doctag_syn0.shape, dtype=REAL)
            np_divide(self.doctag_syn0, sqrt((self.doctag_syn0 ** 2).sum(-1))[..., newaxis], self.doctag_syn0norm)
def most_similar(self, positive=None, negative=None, topn=10, clip_start=0, clip_end=None, indexer=None):
    """
    Find the top-N most similar docvecs known from training. Positive docs contribute
    positively towards the similarity, negative docs negatively.
    This method computes cosine similarity between a simple mean of the projection
    weight vectors of the given docs. Docs may be specified as vectors, integer indexes
    of trained docvecs, or if the documents were originally presented with string tags,
    by the corresponding tags.
    The 'clip_start' and 'clip_end' allow limiting results to a particular contiguous
    range of the underlying doctag_syn0norm vectors. (This may be useful if the ordering
    there was chosen to be significant, such as more popular tag IDs in lower indexes.)
    """
    if positive is None:
        positive = []
    if negative is None:
        negative = []
    self.init_sims()
    clip_end = clip_end or len(self.doctag_syn0norm)
    if isinstance(positive, string_types + integer_types + (integer,)) and not negative:
        # allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
        positive = [positive]
    # add weights for each doc, if not already present; default to 1.0 for positive and -1.0 for negative docs
    positive = [
        (doc, 1.0) if isinstance(doc, string_types + integer_types + (ndarray, integer))
        else doc for doc in positive
    ]
    negative = [
        (doc, -1.0) if isinstance(doc, string_types + integer_types + (ndarray, integer))
        else doc for doc in negative
    ]
    # compute the weighted average of all docs
    all_docs, mean = set(), []
    for doc, weight in positive + negative:
        if isinstance(doc, ndarray):
            # raw vector input: use as-is; cannot be excluded from results below
            mean.append(weight * doc)
        elif doc in self.doctags or doc < self.count:
            mean.append(weight * self.doctag_syn0norm[self._int_index(doc)])
            all_docs.add(self._int_index(doc))
        else:
            raise KeyError("doc '%s' not in trained set" % doc)
    if not mean:
        raise ValueError("cannot compute similarity with no input")
    mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
    if indexer is not None:
        # delegate to an approximate-NN indexer (e.g. Annoy) when provided
        return indexer.most_similar(mean, topn)
    dists = dot(self.doctag_syn0norm[clip_start:clip_end], mean)
    if not topn:
        # topn=0/None: return the full (clipped) similarity array instead of a ranking
        return dists
    # over-fetch by len(all_docs) so input docs can be dropped without shrinking the result
    best = matutils.argsort(dists, topn=topn + len(all_docs), reverse=True)
    # ignore (don't return) docs from the input
    result = [
        (self.index_to_doctag(sim + clip_start), float(dists[sim]))
        for sim in best
        if (sim + clip_start) not in all_docs
    ]
    return result[:topn]
def doesnt_match(self, docs):
    """
    Which doc from the given list doesn't go with the others?

    Docs may be int indexes or string tags from the trained set; unknown
    entries are silently dropped. Returns the doc least similar to the
    mean of all the (normalized) doc vectors.
    (TODO: Accept vectors of out-of-training-set docs, as if from inference.)

    Raises ValueError if no known docs remain after filtering.
    """
    self.init_sims()
    docs = [doc for doc in docs if doc in self.doctags or 0 <= doc < self.count]  # filter out unknowns
    logger.debug("using docs %s", docs)
    if not docs:
        raise ValueError("cannot select a doc from an empty list")
    # Pass a list (not a generator) to vstack: generator input is deprecated
    # since NumPy 1.16 and rejected by later releases.
    vectors = vstack([self.doctag_syn0norm[self._int_index(doc)] for doc in docs]).astype(REAL)
    mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL)
    dists = dot(vectors, mean)
    # smallest cosine similarity to the mean = the odd one out
    return sorted(zip(dists, docs))[0][1]
def similarity(self, d1, d2):
    """
    Compute cosine similarity between two docvecs in the trained set, specified by int index or
    string tag. (TODO: Accept vectors of out-of-training-set docs, as if from inference.)
    """
    unit1 = matutils.unitvec(self[d1])
    unit2 = matutils.unitvec(self[d2])
    return dot(unit1, unit2)
def n_similarity(self, ds1, ds2):
    """
    Compute cosine similarity between two sets of docvecs from the trained set, specified by int
    index or string tag. (TODO: Accept vectors of out-of-training-set docs, as if from inference.)
    """
    mean1 = array([self[doc] for doc in ds1]).mean(axis=0)
    mean2 = array([self[doc] for doc in ds2]).mean(axis=0)
    return dot(matutils.unitvec(mean1), matutils.unitvec(mean2))
def similarity_unseen_docs(self, model, doc_words1, doc_words2, alpha=0.1, min_alpha=0.0001, steps=5):
    """
    Compute cosine similarity between two post-bulk out of training documents.
    Document should be a list of (word) tokens.
    """
    vec_a = model.infer_vector(doc_words=doc_words1, alpha=alpha, min_alpha=min_alpha, steps=steps)
    vec_b = model.infer_vector(doc_words=doc_words2, alpha=alpha, min_alpha=min_alpha, steps=steps)
    return dot(matutils.unitvec(vec_a), matutils.unitvec(vec_b))
class Doctag(namedtuple('Doctag', 'offset, word_count, doc_count')):
    """Per-tag bookkeeping for a string document tag found during the initial
    vocabulary scan — the document-vector analogue of a Vocab entry.

    Not used at all when every presented tag is a plain int.
    `offset` is the true index into doctags_syn0/doctags_syn0_lockf only when
    no raw-int tags were used; otherwise string-tag vectors start at index
    (max_rawint + 1) and the true index is (max_rawint + 1 + offset). See
    DocvecsArray.index_to_doctag().
    """
    __slots__ = ()

    def repeat(self, word_count):
        """Return a copy credited with `word_count` more words and one more document."""
        return self._replace(
            word_count=self.word_count + word_count,
            doc_count=self.doc_count + 1,
        )
class Doc2Vec(Word2Vec):
    """Class for training, using and evaluating neural networks described in http://arxiv.org/pdf/1405.4053v2.pdf"""

    def __init__(self, documents=None, dm_mean=None, dm=1, dbow_words=0, dm_concat=0, dm_tag_count=1,
                 docvecs=None, docvecs_mapfile=None, comment=None, trim_rule=None, **kwargs):
        """
        Initialize the model from an iterable of `documents`. Each document is a
        TaggedDocument object that will be used for training.
        The `documents` iterable can be simply a list of TaggedDocument elements, but for larger corpora,
        consider an iterable that streams the documents directly from disk/network.
        If you don't supply `documents`, the model is left uninitialized -- use if
        you plan to initialize it in some other way.
        `dm` defines the training algorithm. By default (`dm=1`), 'distributed memory' (PV-DM) is used.
        Otherwise, `distributed bag of words` (PV-DBOW) is employed.
        `size` is the dimensionality of the feature vectors.
        `window` is the maximum distance between the predicted word and context words used for prediction
        within a document.
        `alpha` is the initial learning rate (will linearly drop to `min_alpha` as training progresses).
        `seed` = for the random number generator.
        Note that for a fully deterministically-reproducible run, you must also limit the model to
        a single worker thread, to eliminate ordering jitter from OS thread scheduling. (In Python
        3, reproducibility between interpreter launches also requires use of the PYTHONHASHSEED
        environment variable to control hash randomization.)
        `min_count` = ignore all words with total frequency lower than this.
        `max_vocab_size` = limit RAM during vocabulary building; if there are more unique
        words than this, then prune the infrequent ones. Every 10 million word types
        need about 1GB of RAM. Set to `None` for no limit (default).
        `sample` = threshold for configuring which higher-frequency words are randomly downsampled;
        default is 1e-3, values of 1e-5 (or lower) may also be useful, set to 0.0 to disable downsampling.
        `workers` = use this many worker threads to train the model (=faster training with multicore machines).
        `iter` = number of iterations (epochs) over the corpus. The default inherited from Word2Vec is 5,
        but values of 10 or 20 are common in published 'Paragraph Vector' experiments.
        `hs` = if 1, hierarchical softmax will be used for model training.
        If set to 0 (default), and `negative` is non-zero, negative sampling will be used.
        `negative` = if > 0, negative sampling will be used, the int for negative
        specifies how many "noise words" should be drawn (usually between 5-20).
        Default is 5. If set to 0, no negative sampling is used.
        `dm_mean` = if 0 (default), use the sum of the context word vectors. If 1, use the mean.
        Only applies when dm is used in non-concatenative mode.
        `dm_concat` = if 1, use concatenation of context vectors rather than sum/average;
        default is 0 (off). Note concatenation results in a much-larger model, as the input
        is no longer the size of one (sampled or arithmetically combined) word vector, but the
        size of the tag(s) and all words in the context strung together.
        `dm_tag_count` = expected constant number of document tags per document, when using
        dm_concat mode; default is 1.
        `dbow_words` if set to 1 trains word-vectors (in skip-gram fashion) simultaneous with DBOW
        doc-vector training; default is 0 (faster training of doc-vectors only).
        `trim_rule` = vocabulary trimming rule, specifies whether certain words should remain
        in the vocabulary, be trimmed away, or handled using the default (discard if word count < min_count).
        Can be None (min_count will be used), or a callable that accepts parameters (word, count, min_count) and
        returns either util.RULE_DISCARD, util.RULE_KEEP or util.RULE_DEFAULT.
        Note: The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as part
        of the model.
        """
        if 'sentences' in kwargs:
            raise DeprecationWarning(
                "Parameter 'sentences' was renamed to 'documents', and will be removed in 4.0.0, "
                "use 'documents' instead."
            )
        # map the dm flag onto Word2Vec's sg flag: dm=1 (PV-DM) -> sg=0, dm=0 (PV-DBOW) -> sg=1
        super(Doc2Vec, self).__init__(
            sg=(1 + dm) % 2,
            null_word=dm_concat,
            **kwargs)
        self.load = call_on_class_only
        if dm_mean is not None:
            self.cbow_mean = dm_mean
        self.dbow_words = dbow_words
        self.dm_concat = dm_concat
        self.dm_tag_count = dm_tag_count
        if self.dm and self.dm_concat:
            # concatenative PV-DM: hidden layer spans tag(s) plus the full context window
            self.layer1_size = (self.dm_tag_count + (2 * self.window)) * self.vector_size
        self.docvecs = docvecs or DocvecsArray(docvecs_mapfile)
        self.comment = comment
        if documents is not None:
            self.build_vocab(documents, trim_rule=trim_rule)
            self.train(documents, total_examples=self.corpus_count, epochs=self.iter)

    @property
    def dm(self):
        """True when training uses PV-DM (distributed memory)."""
        return not self.sg  # opposite of SG

    @property
    def dbow(self):
        """True when training uses PV-DBOW (distributed bag of words)."""
        return self.sg  # same as SG

    def clear_sims(self):
        """Drop cached normalized vectors for both words and doctags."""
        super(Doc2Vec, self).clear_sims()
        self.docvecs.clear_sims()

    def reset_weights(self):
        """Reset word weights (via Word2Vec) and doctag weights together."""
        if self.dm and self.dm_concat:
            # expand l1 size to match concatenated tags+words length
            self.layer1_size = (self.dm_tag_count + (2 * self.window)) * self.vector_size
            logger.info("using concatenative %d-dimensional layer1", self.layer1_size)
        super(Doc2Vec, self).reset_weights()
        self.docvecs.reset_weights(self)

    def reset_from(self, other_model):
        """Reuse shareable structures from other_model."""
        self.docvecs.borrow_from(other_model.docvecs)
        super(Doc2Vec, self).reset_from(other_model)

    def scan_vocab(self, documents, progress_per=10000, trim_rule=None, update=False):
        """Single pass over `documents`: count words, note doctags, size structures."""
        logger.info("collecting all words and their counts")
        document_no = -1
        total_words = 0
        min_reduce = 1
        interval_start = default_timer() - 0.00001  # guard against next sample being identical
        interval_count = 0
        checked_string_types = 0
        vocab = defaultdict(int)
        for document_no, document in enumerate(documents):
            if not checked_string_types:
                # warn once if 'words' looks like a plain string instead of a token list
                if isinstance(document.words, string_types):
                    logger.warning(
                        "Each 'words' should be a list of words (usually unicode strings). "
                        "First 'words' here is instead plain %s.",
                        type(document.words)
                    )
                checked_string_types += 1
            if document_no % progress_per == 0:
                interval_rate = (total_words - interval_count) / (default_timer() - interval_start)
                logger.info(
                    "PROGRESS: at example #%i, processed %i words (%i/s), %i word types, %i tags",
                    document_no, total_words, interval_rate, len(vocab), len(self.docvecs)
                )
                interval_start = default_timer()
                interval_count = total_words
            document_length = len(document.words)
            for tag in document.tags:
                self.docvecs.note_doctag(tag, document_no, document_length)
            for word in document.words:
                vocab[word] += 1
            total_words += len(document.words)
            if self.max_vocab_size and len(vocab) > self.max_vocab_size:
                # over budget: prune rare words, raising the threshold each time
                utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)
                min_reduce += 1
        logger.info(
            "collected %i word types and %i unique tags from a corpus of %i examples and %i words",
            len(vocab), len(self.docvecs), document_no + 1, total_words
        )
        self.corpus_count = document_no + 1
        self.raw_vocab = vocab

    def _do_train_job(self, job, alpha, inits):
        """Train on one batch of documents; return (effective word count, raw word count)."""
        work, neu1 = inits
        tally = 0
        for doc in job:
            indexed_doctags = self.docvecs.indexed_doctags(doc.tags)
            doctag_indexes, doctag_vectors, doctag_locks, ignored = indexed_doctags
            # dispatch to the C/cython routine matching the configured mode
            if self.sg:
                tally += train_document_dbow(
                    self, doc.words, doctag_indexes, alpha, work, train_words=self.dbow_words,
                    doctag_vectors=doctag_vectors, doctag_locks=doctag_locks
                )
            elif self.dm_concat:
                tally += train_document_dm_concat(
                    self, doc.words, doctag_indexes, alpha, work, neu1,
                    doctag_vectors=doctag_vectors, doctag_locks=doctag_locks
                )
            else:
                tally += train_document_dm(
                    self, doc.words, doctag_indexes, alpha, work, neu1,
                    doctag_vectors=doctag_vectors, doctag_locks=doctag_locks
                )
            self.docvecs.trained_item(indexed_doctags)
        return tally, self._raw_word_count(job)

    def _raw_word_count(self, job):
        """Return the number of words in a given job."""
        return sum(len(sentence.words) for sentence in job)

    def infer_vector(self, doc_words, alpha=0.1, min_alpha=0.0001, steps=5):
        """
        Infer a vector for given post-bulk training document.
        Document should be a list of (word) tokens.
        """
        # a fresh single-row doctag array, seeded deterministically from the words
        doctag_vectors = empty((1, self.vector_size), dtype=REAL)
        doctag_vectors[0] = self.seeded_vector(' '.join(doc_words))
        doctag_locks = ones(1, dtype=REAL)
        doctag_indexes = [0]
        work = zeros(self.layer1_size, dtype=REAL)
        if not self.sg:
            neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
        for i in range(steps):
            # learn_words/learn_hidden are off: only the new doc vector is updated
            if self.sg:
                train_document_dbow(
                    self, doc_words, doctag_indexes, alpha, work,
                    learn_words=False, learn_hidden=False, doctag_vectors=doctag_vectors, doctag_locks=doctag_locks
                )
            elif self.dm_concat:
                train_document_dm_concat(
                    self, doc_words, doctag_indexes, alpha, work, neu1,
                    learn_words=False, learn_hidden=False, doctag_vectors=doctag_vectors, doctag_locks=doctag_locks
                )
            else:
                train_document_dm(
                    self, doc_words, doctag_indexes, alpha, work, neu1,
                    learn_words=False, learn_hidden=False, doctag_vectors=doctag_vectors, doctag_locks=doctag_locks
                )
            # decay the learning rate towards min_alpha over the remaining steps
            alpha = ((alpha - min_alpha) / (steps - i)) + min_alpha
        return doctag_vectors[0]

    def estimate_memory(self, vocab_size=None, report=None):
        """Estimate required memory for a model using current settings."""
        report = report or {}
        report['doctag_lookup'] = self.docvecs.estimated_lookup_memory()
        report['doctag_syn0'] = self.docvecs.count * self.vector_size * dtype(REAL).itemsize
        return super(Doc2Vec, self).estimate_memory(vocab_size, report=report)

    def __str__(self):
        """Abbreviated name reflecting major configuration parameters."""
        segments = []
        if self.comment:
            segments.append('"%s"' % self.comment)
        if self.sg:
            if self.dbow_words:
                segments.append('dbow+w')  # also training words
            else:
                segments.append('dbow')  # PV-DBOW (skip-gram-style)
        else:  # PV-DM...
            if self.dm_concat:
                segments.append('dm/c')  # ...with concatenative context layer
            else:
                if self.cbow_mean:
                    segments.append('dm/m')
                else:
                    segments.append('dm/s')
        segments.append('d%d' % self.vector_size)  # dimensions
        if self.negative:
            segments.append('n%d' % self.negative)  # negative samples
        if self.hs:
            segments.append('hs')
        if not self.sg or (self.sg and self.dbow_words):
            segments.append('w%d' % self.window)  # window size, when relevant
        if self.min_count > 1:
            segments.append('mc%d' % self.min_count)
        if self.sample > 0:
            segments.append('s%g' % self.sample)
        if self.workers > 1:
            segments.append('t%d' % self.workers)
        return '%s(%s)' % (self.__class__.__name__, ','.join(segments))

    def delete_temporary_training_data(self, keep_doctags_vectors=True, keep_inference=True):
        """
        Discard parameters that are used in training and score. Use if you're sure you're done training a model.
        Set `keep_doctags_vectors` to False if you don't want to save doctags vectors,
        in this case you can't use docvecs's most_similar, similarity etc. methods.
        Set `keep_inference` to False if you don't want to store parameters that are used for the
        infer_vector method.
        """
        if not keep_inference:
            self._minimize_model(False, False, False)
        if self.docvecs and hasattr(self.docvecs, 'doctag_syn0') and not keep_doctags_vectors:
            del self.docvecs.doctag_syn0
        if self.docvecs and hasattr(self.docvecs, 'doctag_syn0_lockf'):
            del self.docvecs.doctag_syn0_lockf

    def save_word2vec_format(self, fname, doctag_vec=False, word_vec=True, prefix='*dt_', fvocab=None, binary=False):
        """
        Store the input-hidden weight matrix.
        `fname` is the file used to save the vectors in
        `doctag_vec` is an optional boolean indicating whether to store document vectors
        `word_vec` is an optional boolean indicating whether to store word vectors
        (if both doctag_vec and word_vec are True, then both vectors are stored in the same file)
        `prefix` to uniquely identify doctags from word vocab, and avoid collision
        in case of repeated string in doctag and word vocab
        `fvocab` is an optional file used to save the vocabulary
        `binary` is an optional boolean indicating whether the data is to be saved
        in binary word2vec format (default: False)
        """
        total_vec = len(self.wv.vocab) + len(self.docvecs)
        # save word vectors
        if word_vec:
            if not doctag_vec:
                total_vec = len(self.wv.vocab)
            KeyedVectors.save_word2vec_format(self.wv, fname, fvocab, binary, total_vec)
        # save document vectors
        if doctag_vec:
            # append to the same file ('ab') when word vectors were written first
            with utils.smart_open(fname, 'ab') as fout:
                if not word_vec:
                    total_vec = len(self.docvecs)
                    logger.info("storing %sx%s projection weights into %s", total_vec, self.vector_size, fname)
                    fout.write(utils.to_utf8("%s %s\n" % (total_vec, self.vector_size)))
                # store as in input order
                for i in range(len(self.docvecs)):
                    doctag = u"%s%s" % (prefix, self.docvecs.index_to_doctag(i))
                    row = self.docvecs.doctag_syn0[i]
                    if binary:
                        fout.write(utils.to_utf8(doctag) + b" " + row.tostring())
                    else:
                        fout.write(utils.to_utf8("%s %s\n" % (doctag, ' '.join("%f" % val for val in row))))
class TaggedBrownCorpus(object):
    """Iterate over the Brown corpus files (part of NLTK data) found in a directory,
    yielding each document as a TaggedDocument tagged with '<path>_SENT_<line-no>'."""

    def __init__(self, dirname):
        self.dirname = dirname

    def __iter__(self):
        for entry in os.listdir(self.dirname):
            path = os.path.join(self.dirname, entry)
            if not os.path.isfile(path):
                continue
            for item_no, raw_line in enumerate(utils.smart_open(path)):
                # each file line is a single document in the Brown corpus
                # each token is WORD/POS_TAG
                text = utils.to_unicode(raw_line)
                token_tags = [t.split('/') for t in text.split() if len(t.split('/')) == 2]
                # ignore words with non-alphabetic tags like ",", "!" etc (punctuation, weird stuff)
                words = ["%s/%s" % (token.lower(), tag[:2]) for token, tag in token_tags if tag[:2].isalpha()]
                if not words:  # don't bother sending out empty documents
                    continue
                yield TaggedDocument(words, ['%s_SENT_%s' % (path, item_no)])
class TaggedLineDocument(object):
    """Simple format: one document = one line = one TaggedDocument object.
    Words are expected to be already preprocessed and separated by whitespace;
    tags are constructed automatically from the document line number."""

    def __init__(self, source):
        """
        `source` can be either a string (filename) or a file object.
        Example::
            documents = TaggedLineDocument('myfile.txt')
        Or for compressed files::
            documents = TaggedLineDocument('compressed_text.txt.bz2')
            documents = TaggedLineDocument('compressed_text.txt.gz')
        """
        self.source = source

    def __iter__(self):
        """Yield one TaggedDocument per line, tagged with its line number."""
        try:
            # file-like objects support seek(); rewind so repeated iteration works
            self.source.seek(0)
            for item_no, raw in enumerate(self.source):
                yield TaggedDocument(utils.to_unicode(raw).split(), [item_no])
        except AttributeError:
            # no seek(): treat `source` as a filename and open it ourselves
            with utils.smart_open(self.source) as fin:
                for item_no, raw in enumerate(fin):
                    yield TaggedDocument(utils.to_unicode(raw).split(), [item_no])
| 45,785 | 43.756598 | 118 | py |
poincare_glove | poincare_glove-master/gensim/models/deprecated/keyedvectors.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Warnings
--------
.. deprecated:: 3.3.0
Use :mod:`gensim.models.keyedvectors` instead.
Word vector storage and similarity look-ups.
Common code independent of the way the vectors are trained (Word2Vec, FastText, WordRank, VarEmbed etc.)
The word vectors are considered read-only in this class.
Initialize the vectors by training e.g. Word2Vec::
>>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)
>>> word_vectors = model.wv
Persist the word vectors to disk with::
>>> word_vectors.save(fname)
>>> word_vectors = KeyedVectors.load(fname)
The vectors can also be instantiated from an existing file on disk
in the original Google's word2vec C format as a KeyedVectors instance::
>>> from gensim.models.keyedvectors import KeyedVectors
>>> word_vectors = KeyedVectors.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
>>> word_vectors = KeyedVectors.load_word2vec_format('/tmp/vectors.bin', binary=True) # C binary format
You can perform various syntactic/semantic NLP word tasks with the vectors. Some of them
are already built-in::
>>> word_vectors.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
>>> word_vectors.most_similar_cosmul(positive=['woman', 'king'], negative=['man'])
[('queen', 0.71382287), ...]
>>> word_vectors.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
>>> word_vectors.similarity('woman', 'man')
0.73723527
Correlation with human opinion on word similarity::
>>> word_vectors.evaluate_word_pairs(os.path.join(module_path, 'test_data','wordsim353.tsv'))
0.51, 0.62, 0.13
And on analogies::
>>> word_vectors.accuracy(os.path.join(module_path, 'test_data', 'questions-words.txt'))
and so on.
"""
from __future__ import division # py3 "true division"
import logging
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty # noqa:F401
# If pyemd C extension is available, import it.
# If pyemd is attempted to be used, but isn't installed, ImportError will be raised in wmdistance
try:
from pyemd import emd
PYEMD_EXT = True
except ImportError:
PYEMD_EXT = False
from numpy import dot, zeros, dtype, float32 as REAL,\
double, array, vstack, fromstring, sqrt, newaxis,\
ndarray, sum as np_sum, prod, ascontiguousarray,\
argmax
import numpy as np
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from gensim.corpora.dictionary import Dictionary
from six import string_types, iteritems
from six.moves import xrange
from scipy import stats
logger = logging.getLogger(__name__)
class Vocab(object):
    """
    A single vocabulary item, used internally for collecting per-word frequency/sampling info,
    and for constructing binary trees (incl. both word leaves and inner nodes).
    """

    def __init__(self, **kwargs):
        self.count = 0  # default; any kwarg (including count) may override
        self.__dict__.update(kwargs)

    def __lt__(self, other):  # used for sorting in a priority queue
        return self.count < other.count

    def __str__(self):
        # show only public attributes, sorted by name, as name:repr pairs
        public = sorted(k for k in self.__dict__ if not k.startswith('_'))
        vals = ['%s:%r' % (k, self.__dict__[k]) for k in public]
        return "%s(%s)" % (self.__class__.__name__, ', '.join(vals))
class KeyedVectorsBase(utils.SaveLoad):
"""
Base class to contain vectors and vocab for any set of vectors which are each associated with a key.
"""
def __init__(self):
    """Initialize empty vector storage and vocabulary."""
    self.syn0 = []  # vector matrix; becomes a numpy array once vectors are loaded
    self.vocab = {}  # word -> Vocab entry (holds .index into syn0, .count)
    self.index2word = []  # row index -> word, inverse of vocab[word].index
    self.vector_size = None  # dimensionality; set when vectors are loaded
def save_word2vec_format(self, fname, fvocab=None, binary=False, total_vec=None):
    """
    Store the input-hidden weight matrix in the same format used by the original
    C word2vec-tool, for compatibility.
    `fname` is the file used to save the vectors in
    `fvocab` is an optional file used to save the vocabulary
    `binary` is an optional boolean indicating whether the data is to be saved
    in binary word2vec format (default: False)
    `total_vec` is an optional parameter to explicitly specify total no. of vectors
    (in case word vectors are appended with document vectors afterwards)
    """
    if total_vec is None:
        total_vec = len(self.vocab)
    vector_size = self.syn0.shape[1]
    if fvocab is not None:
        logger.info("storing vocabulary in %s", fvocab)
        with utils.smart_open(fvocab, 'wb') as vout:
            # vocabulary file lists "word count" pairs, most frequent first
            for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count):
                vout.write(utils.to_utf8("%s %s\n" % (word, vocab.count)))
    logger.info("storing %sx%s projection weights into %s", total_vec, vector_size, fname)
    assert (len(self.vocab), vector_size) == self.syn0.shape
    with utils.smart_open(fname, 'wb') as fout:
        # header line: "<vector count> <vector size>"
        fout.write(utils.to_utf8("%s %s\n" % (total_vec, vector_size)))
        # store in sorted order: most frequent words at the top
        for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count):
            row = self.syn0[vocab.index]
            if binary:
                fout.write(utils.to_utf8(word) + b" " + row.tostring())
            else:
                fout.write(utils.to_utf8("%s %s\n" % (word, ' '.join("%f" % val for val in row))))
@classmethod
def load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
                         limit=None, datatype=REAL):
    """
    Load the input-hidden weight matrix from the original C word2vec-tool format.
    Note that the information stored in the file is incomplete (the binary tree is missing),
    so while you can query for word similarity etc., you cannot continue training
    with a model loaded this way.
    `binary` is a boolean indicating whether the data is in binary word2vec format.
    `norm_only` is a boolean indicating whether to only store normalised word2vec vectors in memory.
    Word counts are read from `fvocab` filename, if set (this is the file generated
    by `-save-vocab` flag of the original C tool).
    If you trained the C model using non-utf8 encoding for words, specify that
    encoding in `encoding`.
    `unicode_errors`, default 'strict', is a string suitable to be passed as the `errors`
    argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source
    file may include word tokens truncated in the middle of a multibyte unicode character
    (as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.
    `limit` sets a maximum number of word-vectors to read from the file. The default,
    None, means read all.
    `datatype` (experimental) can coerce dimensions to a non-default float type (such
    as np.float16) to save memory. (Such types may result in much slower bulk operations
    or incompatibility with optimized routines.)
    """
    counts = None
    if fvocab is not None:
        logger.info("loading word counts from %s", fvocab)
        counts = {}
        with utils.smart_open(fvocab) as fin:
            for line in fin:
                word, count = utils.to_unicode(line).strip().split()
                counts[word] = int(count)
    logger.info("loading projection weights from %s", fname)
    with utils.smart_open(fname) as fin:
        # first line is the header: "<vocab size> <vector size>"
        header = utils.to_unicode(fin.readline(), encoding=encoding)
        vocab_size, vector_size = (int(x) for x in header.split())  # throws for invalid file format
        if limit:
            vocab_size = min(vocab_size, limit)
        result = cls()
        result.vector_size = vector_size
        result.syn0 = zeros((vocab_size, vector_size), dtype=datatype)

        def add_word(word, weights):
            # register one (word, vector) pair; duplicates keep only the first occurrence
            word_id = len(result.vocab)
            if word in result.vocab:
                logger.warning("duplicate word '%s' in %s, ignoring all but first", word, fname)
                return
            if counts is None:
                # most common scenario: no vocab file given. just make up some bogus counts, in descending order
                result.vocab[word] = Vocab(index=word_id, count=vocab_size - word_id)
            elif word in counts:
                # use count from the vocab file
                result.vocab[word] = Vocab(index=word_id, count=counts[word])
            else:
                # vocab file given, but word is missing -- set count to None (TODO: or raise?)
                logger.warning("vocabulary file is incomplete: '%s' is missing", word)
                result.vocab[word] = Vocab(index=word_id, count=None)
            result.syn0[word_id] = weights
            result.index2word.append(word)

        if binary:
            binary_len = dtype(REAL).itemsize * vector_size
            for _ in xrange(vocab_size):
                # mixed text and binary: read text first, then binary
                word = []
                while True:
                    ch = fin.read(1)
                    if ch == b' ':
                        break
                    if ch == b'':
                        raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
                    if ch != b'\n':  # ignore newlines in front of words (some binary files have)
                        word.append(ch)
                word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)
                weights = fromstring(fin.read(binary_len), dtype=REAL)
                add_word(word, weights)
        else:
            for line_no in xrange(vocab_size):
                line = fin.readline()
                if line == b'':
                    raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
                parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
                if len(parts) != vector_size + 1:
                    raise ValueError("invalid vector on line %s (is this really the text format?)" % line_no)
                word, weights = parts[0], [REAL(x) for x in parts[1:]]
                add_word(word, weights)
    if result.syn0.shape[0] != len(result.vocab):
        # duplicates were skipped: drop the unused trailing rows
        logger.info(
            "duplicate words detected, shrinking matrix size from %i to %i",
            result.syn0.shape[0], len(result.vocab)
        )
        result.syn0 = ascontiguousarray(result.syn0[: len(result.vocab)])
    assert (len(result.vocab), vector_size) == result.syn0.shape
    logger.info("loaded %s matrix from %s", result.syn0.shape, fname)
    return result
def similarity(self, w1, w2):
    """
    Compute similarity between vectors of two input words.

    Abstract hook: to be implemented by child class.
    """
    raise NotImplementedError

def distance(self, w1, w2):
    """
    Compute distance between vectors of two input words.

    Abstract hook: to be implemented by child class.
    """
    raise NotImplementedError

def distances(self, word_or_vector, other_words=()):
    """
    Compute distances from given word or vector to all words in `other_words`.
    If `other_words` is empty, return distance between `word_or_vectors` and all words in vocab.

    Abstract hook: to be implemented by child class.
    """
    raise NotImplementedError
def word_vec(self, word):
    """
    Return the embedding of a single `word` as a 1D numpy array.

    The returned array is marked read-only, since it is a view into the
    shared weight matrix.

    Raises KeyError when `word` is absent from the vocabulary.

    Example::

      >>> trained_model.word_vec('office')
      array([ -1.40128313e-02, ...])

    """
    if word not in self.vocab:
        raise KeyError("word '%s' not in vocabulary" % word)
    vector = self.syn0[self.vocab[word].index]
    # guard against callers mutating the shared matrix row in place
    vector.setflags(write=False)
    return vector
def __getitem__(self, words):
    """
    Accept a single word or a list of words as input.

    If a single word: returns the word's representations in vector space, as
    a 1D numpy array.

    Multiple words: return the words' representations in vector space, as a
    2d numpy array: #words x #vector_size. Matrix rows are in the same order
    as in input.

    Example::

      >>> trained_model['office']
      array([ -1.40128313e-02, ...])

      >>> trained_model[['office', 'products']]
      array([ -1.40128313e-02, ...]
            [ -1.70425311e-03, ...]
             ...)

    """
    if isinstance(words, string_types):
        # allow calls like trained_model['office'], as a shorthand for trained_model[['office']]
        return self.word_vec(words)

    # list input: stack the per-word vectors into one 2D matrix
    return vstack([self.word_vec(word) for word in words])

def __contains__(self, word):
    # membership test delegates to the vocabulary mapping
    return word in self.vocab
def most_similar_to_given(self, w1, word_list):
    """Return the word from `word_list` most similar to `w1`.

    Args:
        w1 (str): a word
        word_list (list): list of words containing a word most similar to w1

    Returns:
        the word in word_list with the highest similarity to w1

    Raises:
        KeyError: If w1 or any word in word_list is not in the vocabulary

    Example::

      >>> trained_model.most_similar_to_given('music', ['water', 'sound', 'backpack', 'mouse'])
      'sound'

      >>> trained_model.most_similar_to_given('snake', ['food', 'pencil', 'animal', 'phone'])
      'animal'

    """
    # max() returns the first candidate on ties, matching argmax semantics
    return max(word_list, key=lambda other: self.similarity(w1, other))
def words_closer_than(self, w1, w2):
    """
    Returns all words that are closer to `w1` than `w2` is to `w1`.

    Parameters
    ----------
    w1 : str
        Input word.
    w2 : str
        Input word.

    Returns
    -------
    list (str)
        List of words that are closer to `w1` than `w2` is to `w1`.

    Examples
    --------
    >>> model.words_closer_than('carnivore.n.01', 'mammal.n.01')
    ['dog.n.01', 'canine.n.02']

    """
    # distances from w1 to every word in the vocabulary
    all_distances = self.distances(w1)

    w1_index = self.vocab[w1].index
    w2_index = self.vocab[w2].index
    # indices of words strictly closer to w1 than w2 is
    closer_node_indices = np.where(all_distances < all_distances[w2_index])[0]
    # exclude w1 itself (its self-distance is always minimal)
    return [self.index2word[index] for index in closer_node_indices if index != w1_index]
def rank(self, w1, w2):
    """
    Rank of the distance of `w2` from `w1`, in relation to distances of all words from `w1`.

    Parameters
    ----------
    w1 : str
        Input word.
    w2 : str
        Input word.

    Returns
    -------
    int
        Rank of `w2` from `w1` in relation to all other nodes.

    Examples
    --------
    >>> model.rank('mammal.n.01', 'carnivore.n.01')
    3

    """
    closer = self.words_closer_than(w1, w2)
    # w2's rank is one more than the number of strictly-closer words
    return 1 + len(closer)
class EuclideanKeyedVectors(KeyedVectorsBase):
    """
    Class to contain vectors and vocab for the Word2Vec training class and other w2v methods not directly
    involved in training such as most_similar()
    """

    def __init__(self):
        super(EuclideanKeyedVectors, self).__init__()
        # lazily-computed cache of L2-normalized vectors; populated by init_sims()
        self.syn0norm = None

    @property
    def wv(self):
        # backward-compatibility alias: the keyed vectors object is its own `wv`
        return self

    def save(self, *args, **kwargs):
        # don't bother storing the cached normalized vectors
        # (they can be recomputed via init_sims() after loading)
        kwargs['ignore'] = kwargs.get('ignore', ['syn0norm'])
        super(EuclideanKeyedVectors, self).save(*args, **kwargs)
def word_vec(self, word, use_norm=False):
    """
    Return the embedding of `word` as a 1D numpy array.

    When `use_norm` is True, the L2-normalized vector (cached by
    `init_sims`) is returned instead of the raw one.

    The array is marked read-only since it is a view into the shared
    weight matrix. Raises KeyError when `word` is not in the vocabulary.

    Example::

      >>> trained_model['office']
      array([ -1.40128313e-02, ...])

    """
    if word not in self.vocab:
        raise KeyError("word '%s' not in vocabulary" % word)
    matrix = self.syn0norm if use_norm else self.syn0
    vector = matrix[self.vocab[word].index]
    # prevent callers from mutating the shared weight matrix in place
    vector.setflags(write=False)
    return vector
def most_similar(self, positive=None, negative=None, topn=10, restrict_vocab=None, indexer=None):
    """
    Find the top-N most similar words. Positive words contribute positively towards the
    similarity, negative words negatively.

    This method computes cosine similarity between a simple mean of the projection
    weight vectors of the given words and the vectors for each word in the model.
    The method corresponds to the `word-analogy` and `distance` scripts in the original
    word2vec implementation.

    If topn is False, most_similar returns the vector of similarity scores.

    `restrict_vocab` is an optional integer which limits the range of vectors which
    are searched for most-similar values. For example, restrict_vocab=10000 would
    only check the first 10000 word vectors in the vocabulary order. (This may be
    meaningful if you've sorted the vocabulary by descending frequency.)

    Example::

      >>> trained_model.most_similar(positive=['woman', 'king'], negative=['man'])
      [('queen', 0.50882536), ...]

    """
    if positive is None:
        positive = []
    if negative is None:
        negative = []

    # ensure the L2-normalized vectors (syn0norm) are available
    self.init_sims()

    if isinstance(positive, string_types) and not negative:
        # allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
        positive = [positive]

    # add weights for each word, if not already present; default to 1.0 for positive and -1.0 for negative words
    positive = [
        (word, 1.0) if isinstance(word, string_types + (ndarray,)) else word
        for word in positive
    ]
    negative = [
        (word, -1.0) if isinstance(word, string_types + (ndarray,)) else word
        for word in negative
    ]

    # compute the weighted average of all words
    all_words, mean = set(), []
    for word, weight in positive + negative:
        if isinstance(word, ndarray):
            # raw vectors are used as-is (they have no vocab index to exclude later)
            mean.append(weight * word)
        else:
            mean.append(weight * self.word_vec(word, use_norm=True))
            if word in self.vocab:
                all_words.add(self.vocab[word].index)
    if not mean:
        raise ValueError("cannot compute similarity with no input")
    mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)

    if indexer is not None:
        # delegate to an approximate-NN indexer (e.g. Annoy) when provided
        return indexer.most_similar(mean, topn)

    limited = self.syn0norm if restrict_vocab is None else self.syn0norm[:restrict_vocab]
    dists = dot(limited, mean)
    if not topn:
        return dists
    # fetch extra candidates so input words can be dropped and topn still filled
    best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
    # ignore (don't return) words from the input
    result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
    return result[:topn]
def similar_by_word(self, word, topn=10, restrict_vocab=None):
    """
    Find the top-N most similar words.

    If topn is False, similar_by_word returns the vector of similarity scores.

    `restrict_vocab` is an optional integer which limits the range of vectors which
    are searched for most-similar values. For example, restrict_vocab=10000 would
    only check the first 10000 word vectors in the vocabulary order. (This may be
    meaningful if you've sorted the vocabulary by descending frequency.)

    Example::

      >>> trained_model.similar_by_word('graph')
      [('user', 0.9999163150787354), ...]

    """
    # thin convenience wrapper over most_similar with a single positive word
    return self.most_similar(positive=[word], topn=topn, restrict_vocab=restrict_vocab)

def similar_by_vector(self, vector, topn=10, restrict_vocab=None):
    """
    Find the top-N most similar words by vector.

    If topn is False, similar_by_vector returns the vector of similarity scores.

    `restrict_vocab` is an optional integer which limits the range of vectors which
    are searched for most-similar values. For example, restrict_vocab=10000 would
    only check the first 10000 word vectors in the vocabulary order. (This may be
    meaningful if you've sorted the vocabulary by descending frequency.)

    Example::

      >>> trained_model.similar_by_vector([1,2])
      [('survey', 0.9942699074745178), ...]

    """
    # thin convenience wrapper over most_similar with a single positive vector
    return self.most_similar(positive=[vector], topn=topn, restrict_vocab=restrict_vocab)
def wmdistance(self, document1, document2):
    """
    Compute the Word Mover's Distance between two documents. When using this
    code, please consider citing the following papers:

    .. Ofir Pele and Michael Werman, "A linear time histogram metric for improved SIFT matching".
    .. Ofir Pele and Michael Werman, "Fast and robust earth mover's distances".
    .. Matt Kusner et al. "From Word Embeddings To Document Distances".

    Parameters are two documents, each a list of string tokens. Out-of-vocabulary
    tokens are dropped before the distance is computed.

    Note that if one of the documents have no words that exist in the
    Word2Vec vocab, `float('inf')` (i.e. infinity) will be returned.

    This method only works if `pyemd` is installed (can be installed via pip, but requires a C compiler).

    Example:
      >>> # Train word2vec model.
      >>> model = Word2Vec(sentences)

      >>> # Some sentences to test.
      >>> sentence_obama = 'Obama speaks to the media in Illinois'.lower().split()
      >>> sentence_president = 'The president greets the press in Chicago'.lower().split()

      >>> # Remove their stopwords.
      >>> from nltk.corpus import stopwords
      >>> stopwords = nltk.corpus.stopwords.words('english')
      >>> sentence_obama = [w for w in sentence_obama if w not in stopwords]
      >>> sentence_president = [w for w in sentence_president if w not in stopwords]

      >>> # Compute WMD.
      >>> distance = model.wmdistance(sentence_obama, sentence_president)
    """
    if not PYEMD_EXT:
        raise ImportError("Please install pyemd Python package to compute WMD.")

    # Remove out-of-vocabulary words.
    len_pre_oov1 = len(document1)
    len_pre_oov2 = len(document2)
    document1 = [token for token in document1 if token in self]
    document2 = [token for token in document2 if token in self]
    diff1 = len_pre_oov1 - len(document1)
    diff2 = len_pre_oov2 - len(document2)
    if diff1 > 0 or diff2 > 0:
        logger.info('Removed %d and %d OOV words from document 1 and 2 (respectively).', diff1, diff2)

    if len(document1) == 0 or len(document2) == 0:
        # fixed typo in log message: "werein" -> "were in"
        logger.info(
            "At least one of the documents had no words that were in the vocabulary. "
            "Aborting (returning inf)."
        )
        return float('inf')

    dictionary = Dictionary(documents=[document1, document2])
    vocab_len = len(dictionary)

    if vocab_len == 1:
        # Both documents are composed by a single unique token
        return 0.0

    # Sets for faster look-up.
    docset1 = set(document1)
    docset2 = set(document2)

    # Compute distance matrix.
    distance_matrix = zeros((vocab_len, vocab_len), dtype=double)
    for i, t1 in dictionary.items():
        for j, t2 in dictionary.items():
            if t1 not in docset1 or t2 not in docset2:
                continue
            # Compute Euclidean distance between word vectors.
            distance_matrix[i, j] = sqrt(np_sum((self[t1] - self[t2])**2))

    if np_sum(distance_matrix) == 0.0:
        # `emd` gets stuck if the distance matrix contains only zeros.
        logger.info('The distance matrix is all zeros. Aborting (returning inf).')
        return float('inf')

    def nbow(document):
        # normalized bag-of-words vector over the joint dictionary
        d = zeros(vocab_len, dtype=double)
        nbow = dictionary.doc2bow(document)  # Word frequencies.
        doc_len = len(document)
        for idx, freq in nbow:
            d[idx] = freq / float(doc_len)  # Normalized word frequencies.
        return d

    # Compute nBOW representation of documents.
    d1 = nbow(document1)
    d2 = nbow(document2)

    # Compute WMD.
    return emd(d1, d2, distance_matrix)
def most_similar_cosmul(self, positive=None, negative=None, topn=10):
    """
    Find the top-N most similar words, using the multiplicative combination objective
    proposed by Omer Levy and Yoav Goldberg in [4]_. Positive words still contribute
    positively towards the similarity, negative words negatively, but with less
    susceptibility to one large distance dominating the calculation.

    In the common analogy-solving case, of two positive and one negative examples,
    this method is equivalent to the "3CosMul" objective (equation (4)) of Levy and Goldberg.

    Additional positive or negative examples contribute to the numerator or denominator,
    respectively – a potentially sensible but untested extension of the method. (With
    a single positive example, rankings will be the same as in the default most_similar.)

    Example::

      >>> trained_model.most_similar_cosmul(positive=['baghdad', 'england'], negative=['london'])
      [(u'iraq', 0.8488819003105164), ...]

    .. [4] Omer Levy and Yoav Goldberg. Linguistic Regularities in Sparse and Explicit Word Representations, 2014.

    """
    if positive is None:
        positive = []
    if negative is None:
        negative = []

    # ensure the L2-normalized vectors (syn0norm) are available
    self.init_sims()

    if isinstance(positive, string_types) and not negative:
        # allow calls like most_similar_cosmul('dog'), as a shorthand for most_similar_cosmul(['dog'])
        positive = [positive]

    # vocab indices of input words, to exclude them from the results
    all_words = {
        self.vocab[word].index for word in positive + negative
        if not isinstance(word, ndarray) and word in self.vocab
    }

    positive = [
        self.word_vec(word, use_norm=True) if isinstance(word, string_types) else word
        for word in positive
    ]
    negative = [
        self.word_vec(word, use_norm=True) if isinstance(word, string_types) else word
        for word in negative
    ]

    if not positive:
        raise ValueError("cannot compute similarity with no input")

    # equation (4) of Levy & Goldberg "Linguistic Regularities...",
    # with distances shifted to [0,1] per footnote (7)
    pos_dists = [((1 + dot(self.syn0norm, term)) / 2) for term in positive]
    neg_dists = [((1 + dot(self.syn0norm, term)) / 2) for term in negative]
    # small epsilon in the denominator avoids division by zero
    dists = prod(pos_dists, axis=0) / (prod(neg_dists, axis=0) + 0.000001)

    if not topn:
        return dists
    best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
    # ignore (don't return) words from the input
    result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
    return result[:topn]
def doesnt_match(self, words):
    """
    Which word from the given list doesn't go with the others?

    Words missing from the model's vocabulary are ignored (with a warning);
    raises ValueError when none of the given words are in the vocabulary.

    Example::

      >>> trained_model.doesnt_match("breakfast cereal dinner lunch".split())
      'cereal'

    """
    self.init_sims()

    used_words = [word for word in words if word in self]
    if len(used_words) != len(words):
        ignored_words = set(words) - set(used_words)
        logger.warning("vectors for words %s are not present in the model, ignoring these words", ignored_words)
    if not used_words:
        raise ValueError("cannot select a word from an empty list")
    # build the matrix from a list comprehension: numpy.vstack over a bare
    # generator is deprecated and rejected by recent numpy releases
    vectors = vstack([self.word_vec(word, use_norm=True) for word in used_words]).astype(REAL)
    mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL)
    dists = dot(vectors, mean)
    # the word least similar to the mean of all vectors is the odd one out
    return sorted(zip(dists, used_words))[0][1]
@staticmethod
def cosine_similarities(vector_1, vectors_all):
    """
    Return cosine similarities between one vector and a set of other vectors.

    Parameters
    ----------
    vector_1 : numpy.array
        vector from which similarities are to be computed.
        expected shape (dim,)
    vectors_all : numpy.array
        for each row in vectors_all, distance from vector_1 is computed.
        expected shape (num_vectors, dim)

    Returns
    -------
    numpy.array
        Contains cosine distance between vector_1 and each row in vectors_all.
        shape (num_vectors,)

    """
    # scale each dot product by the product of the two vectors' L2 norms
    source_norm = np.linalg.norm(vector_1)
    row_norms = np.linalg.norm(vectors_all, axis=1)
    return np.dot(vectors_all, vector_1) / (source_norm * row_norms)
def distances(self, word_or_vector, other_words=()):
    """
    Compute cosine distances from given word or vector to all words in `other_words`.
    If `other_words` is empty, return distance between `word_or_vectors` and all words in vocab.

    Parameters
    ----------
    word_or_vector : str or numpy.array
        Word or vector from which distances are to be computed.

    other_words : iterable(str) or None
        For each word in `other_words` distance from `word_or_vector` is computed.
        If None or empty, distance of `word_or_vector` from all words in vocab is computed (including itself).

    Returns
    -------
    numpy.array
        Array containing distances to all words in `other_words` from input `word_or_vector`,
        in the same order as `other_words`.

    Notes
    -----
    Raises KeyError if either `word_or_vector` or any word in `other_words` is absent from vocab.

    """
    # resolve a word to its vector; a vector input is used as-is
    if isinstance(word_or_vector, string_types):
        input_vector = self.word_vec(word_or_vector)
    else:
        input_vector = word_or_vector
    # compare against the whole matrix, or only the requested rows
    if not other_words:
        other_vectors = self.syn0
    else:
        other_indices = [self.vocab[word].index for word in other_words]
        other_vectors = self.syn0[other_indices]
    # cosine distance = 1 - cosine similarity
    return 1 - self.cosine_similarities(input_vector, other_vectors)
def distance(self, w1, w2):
    """
    Compute cosine distance between two words (1.0 minus their cosine similarity).

    Example::

      >>> trained_model.distance('woman', 'man')
      0.34

      >>> trained_model.distance('woman', 'woman')
      0.0

    """
    cos_sim = self.similarity(w1, w2)
    return 1 - cos_sim
def similarity(self, w1, w2):
    """
    Compute cosine similarity between two words.

    Example::

      >>> trained_model.similarity('woman', 'man')
      0.73723527

      >>> trained_model.similarity('woman', 'woman')
      1.0

    """
    # dot product of the two unit-length vectors = cosine of the angle
    return dot(matutils.unitvec(self[w1]), matutils.unitvec(self[w2]))

def n_similarity(self, ws1, ws2):
    """
    Compute cosine similarity between two sets of words
    (the similarity between the two sets' mean vectors).

    Example::

      >>> trained_model.n_similarity(['sushi', 'shop'], ['japanese', 'restaurant'])
      0.61540466561049689

      >>> trained_model.n_similarity(['restaurant', 'japanese'], ['japanese', 'restaurant'])
      1.0000000000000004

      >>> trained_model.n_similarity(['sushi'], ['restaurant']) == trained_model.similarity('sushi', 'restaurant')
      True

    """
    if not(len(ws1) and len(ws2)):
        raise ZeroDivisionError('At least one of the passed list is empty.')
    v1 = [self[word] for word in ws1]
    v2 = [self[word] for word in ws2]
    # cosine similarity between the normalized mean vectors of the two sets
    return dot(matutils.unitvec(array(v1).mean(axis=0)), matutils.unitvec(array(v2).mean(axis=0)))
@staticmethod
def log_accuracy(section):
    # log one analogy section's hit rate; silent when the section is empty
    correct, incorrect = len(section['correct']), len(section['incorrect'])
    if correct + incorrect > 0:
        logger.info(
            "%s: %.1f%% (%i/%i)",
            section['section'], 100.0 * correct / (correct + incorrect), correct, correct + incorrect
        )

def accuracy(self, questions, restrict_vocab=30000, most_similar=most_similar, case_insensitive=True):
    """
    Compute accuracy of the model. `questions` is a filename where lines are
    4-tuples of words, split into sections by ": SECTION NAME" lines.
    See questions-words.txt in
    https://storage.googleapis.com/google-code-archive-source/v2/code.google.com/word2vec/source-archive.zip
    for an example.

    The accuracy is reported (=printed to log and returned as a list) for each
    section separately, plus there's one aggregate summary at the end.

    Use `restrict_vocab` to ignore all questions containing a word not in the first `restrict_vocab`
    words (default 30,000). This may be meaningful if you've sorted the vocabulary by descending frequency.
    In case `case_insensitive` is True, the first `restrict_vocab` words are taken first, and then
    case normalization is performed.

    Use `case_insensitive` to convert all words in questions and vocab to their uppercase form before
    evaluating the accuracy (default True). Useful in case of case-mismatch between training tokens
    and question words. In case of multiple case variants of a single word, the vector for the first
    occurrence (also the most frequent if vocabulary is sorted) is taken.

    This method corresponds to the `compute-accuracy` script of the original C word2vec.

    """
    # restrict evaluation to the most frequent words; `reversed` keeps the
    # FIRST (most frequent) case variant when uppercasing collapses duplicates
    ok_vocab = [(w, self.vocab[w]) for w in self.index2word[:restrict_vocab]]
    ok_vocab = {w.upper(): v for w, v in reversed(ok_vocab)} if case_insensitive else dict(ok_vocab)

    sections, section = [], None
    for line_no, line in enumerate(utils.smart_open(questions)):
        # TODO: use level3 BLAS (=evaluate multiple questions at once), for speed
        line = utils.to_unicode(line)
        if line.startswith(': '):
            # a new section starts => store the old section
            if section:
                sections.append(section)
                self.log_accuracy(section)
            section = {'section': line.lstrip(': ').strip(), 'correct': [], 'incorrect': []}
        else:
            if not section:
                raise ValueError("missing section header before line #%i in %s" % (line_no, questions))
            try:
                if case_insensitive:
                    a, b, c, expected = [word.upper() for word in line.split()]
                else:
                    a, b, c, expected = [word for word in line.split()]
            except ValueError:
                logger.info("skipping invalid line #%i in %s", line_no, questions)
                continue
            if a not in ok_vocab or b not in ok_vocab or c not in ok_vocab or expected not in ok_vocab:
                logger.debug("skipping line #%i with OOV words: %s", line_no, line.strip())
                continue
            # temporarily swap in the restricted vocab so most_similar only
            # considers the allowed words; restored right after the call
            original_vocab = self.vocab
            self.vocab = ok_vocab
            ignore = {a, b, c}  # input words to be ignored
            predicted = None
            # find the most likely prediction, ignoring OOV words and input words
            sims = most_similar(self, positive=[b, c], negative=[a], topn=False, restrict_vocab=restrict_vocab)
            self.vocab = original_vocab
            for index in matutils.argsort(sims, reverse=True):
                predicted = self.index2word[index].upper() if case_insensitive else self.index2word[index]
                if predicted in ok_vocab and predicted not in ignore:
                    if predicted != expected:
                        logger.debug("%s: expected %s, predicted %s", line.strip(), expected, predicted)
                    break
            if predicted == expected:
                section['correct'].append((a, b, c, expected))
            else:
                section['incorrect'].append((a, b, c, expected))
    if section:
        # store the last section, too
        sections.append(section)
        self.log_accuracy(section)

    total = {
        'section': 'total',
        'correct': sum((s['correct'] for s in sections), []),
        'incorrect': sum((s['incorrect'] for s in sections), []),
    }
    self.log_accuracy(total)
    sections.append(total)
    return sections
@staticmethod
def log_evaluate_word_pairs(pearson, spearman, oov, pairs):
    # summarize a word-pair evaluation run at INFO level
    logger.info('Pearson correlation coefficient against %s: %.4f', pairs, pearson[0])
    logger.info('Spearman rank-order correlation coefficient against %s: %.4f', pairs, spearman[0])
    logger.info('Pairs with unknown words ratio: %.1f%%', oov)

def evaluate_word_pairs(self, pairs, delimiter='\t', restrict_vocab=300000,
                        case_insensitive=True, dummy4unknown=False):
    """
    Compute correlation of the model with human similarity judgments. `pairs` is a filename of a dataset where
    lines are 3-tuples, each consisting of a word pair and a similarity value, separated by `delimiter`.
    An example dataset is included in Gensim (test/test_data/wordsim353.tsv). More datasets can be found at
    http://technion.ac.il/~ira.leviant/MultilingualVSMdata.html or https://www.cl.cam.ac.uk/~fh295/simlex.html.

    The model is evaluated using Pearson correlation coefficient and Spearman rank-order correlation coefficient
    between the similarities from the dataset and the similarities produced by the model itself.
    The results are printed to log and returned as a triple (pearson, spearman, ratio of pairs with unknown words).

    Use `restrict_vocab` to ignore all word pairs containing a word not in the first `restrict_vocab`
    words (default 300,000). This may be meaningful if you've sorted the vocabulary by descending frequency.
    If `case_insensitive` is True, the first `restrict_vocab` words are taken, and then case normalization
    is performed.

    Use `case_insensitive` to convert all words in the pairs and vocab to their uppercase form before
    evaluating the model (default True). Useful when you expect case-mismatch between training tokens
    and words pairs in the dataset. If there are multiple case variants of a single word, the vector for the first
    occurrence (also the most frequent if vocabulary is sorted) is taken.

    Use `dummy4unknown=True` to produce zero-valued similarities for pairs with out-of-vocabulary words.
    Otherwise (default False), these pairs are skipped entirely.

    """
    # restrict evaluation to the most frequent words; `reversed` keeps the
    # FIRST (most frequent) case variant when uppercasing collapses duplicates
    ok_vocab = [(w, self.vocab[w]) for w in self.index2word[:restrict_vocab]]
    ok_vocab = {w.upper(): v for w, v in reversed(ok_vocab)} if case_insensitive else dict(ok_vocab)

    similarity_gold = []
    similarity_model = []
    oov = 0

    # temporarily swap in the restricted vocab so self.similarity() only
    # sees the allowed words; restored after the loop
    original_vocab = self.vocab
    self.vocab = ok_vocab

    for line_no, line in enumerate(utils.smart_open(pairs)):
        line = utils.to_unicode(line)
        if line.startswith('#'):
            # May be a comment
            continue
        else:
            try:
                if case_insensitive:
                    a, b, sim = [word.upper() for word in line.split(delimiter)]
                else:
                    a, b, sim = [word for word in line.split(delimiter)]
                sim = float(sim)
            except (ValueError, TypeError):
                logger.info('skipping invalid line #%d in %s', line_no, pairs)
                continue
            if a not in ok_vocab or b not in ok_vocab:
                oov += 1
                if dummy4unknown:
                    similarity_model.append(0.0)
                    similarity_gold.append(sim)
                    continue
                else:
                    logger.debug('skipping line #%d with OOV words: %s', line_no, line.strip())
                    continue
            similarity_gold.append(sim)  # Similarity from the dataset
            similarity_model.append(self.similarity(a, b))  # Similarity from the model
    self.vocab = original_vocab

    spearman = stats.spearmanr(similarity_gold, similarity_model)
    pearson = stats.pearsonr(similarity_gold, similarity_model)
    oov_ratio = float(oov) / (len(similarity_gold) + oov) * 100

    logger.debug('Pearson correlation coefficient against %s: %f with p-value %f', pairs, pearson[0], pearson[1])
    logger.debug(
        'Spearman rank-order correlation coefficient against %s: %f with p-value %f',
        pairs, spearman[0], spearman[1]
    )
    logger.debug('Pairs with unknown words: %d', oov)
    self.log_evaluate_word_pairs(pearson, spearman, oov_ratio, pairs)
    return pearson, spearman, oov_ratio
def init_sims(self, replace=False):
    """
    Precompute L2-normalized vectors.

    If `replace` is set, forget the original vectors and only keep the normalized
    ones = saves lots of memory!

    Note that you **cannot continue training** after doing a replace. The model becomes
    effectively read-only = you can call `most_similar`, `similarity` etc., but not `train`.

    """
    # no-op if the normalized cache already exists, unless replacement was requested
    if getattr(self, 'syn0norm', None) is None or replace:
        logger.info("precomputing L2-norms of word weight vectors")
        if replace:
            # normalize row-by-row in place, then alias the cache to syn0 itself
            for i in xrange(self.syn0.shape[0]):
                self.syn0[i, :] /= sqrt((self.syn0[i, :] ** 2).sum(-1))
            self.syn0norm = self.syn0
        else:
            # keep syn0 intact; store the normalized copy separately
            self.syn0norm = (self.syn0 / sqrt((self.syn0 ** 2).sum(-1))[..., newaxis]).astype(REAL)
def get_keras_embedding(self, train_embeddings=False):
    """
    Return a Keras 'Embedding' layer with weights set as the Word2Vec model's learned word embeddings.

    Set `train_embeddings=True` to let the layer's weights be updated during Keras training.
    Raises ImportError when Keras is not installed.
    """
    try:
        # imported lazily so Keras remains an optional dependency
        from keras.layers import Embedding
    except ImportError:
        raise ImportError("Please install Keras to use this function")
    weights = self.syn0

    # set `trainable` as `False` to use the pretrained word embedding
    # No extra mem usage here as `Embedding` layer doesn't create any new matrix for weights
    layer = Embedding(
        input_dim=weights.shape[0], output_dim=weights.shape[1],
        weights=[weights], trainable=train_embeddings
    )
    return layer
# For backward compatibility: the historical public name `KeyedVectors`
# resolves to the Euclidean implementation.
KeyedVectors = EuclideanKeyedVectors
| 43,913 | 39.850233 | 119 | py |
poincare_glove | poincare_glove-master/gensim/models/wrappers/ldavowpalwabbit.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Dave Challis <dave@suicas.net>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Python wrapper for `Vowpal Wabbit's Latent Dirichlet Allocation <https://github.com/JohnLangford/vowpal_wabbit/>`_.
This uses `Matt Hoffman's online algorithm
<http://papers.nips.cc/paper/3902-online-learning-for-latent-dirichlet-allocation.pdf>`_, i.e. the same algorithm
that Gensim's :class:`~gensim.models.ldamodel.LdaModel` is based on.
Installation
------------
Use `official guide <https://github.com/JohnLangford/vowpal_wabbit>`_ or this one ::
git clone https://github.com/JohnLangford/vowpal_wabbit.git
cd vowpal_wabbit
make
make test
sudo make install
Warnings
--------
Currently working and tested with Vowpal Wabbit versions 7.10 to 8.1.1. Vowpal Wabbit's API isn't currently stable,
so this may or may not work with older/newer versions. The aim will be to ensure this wrapper always works with
the latest release of Vowpal Wabbit.
Examples
--------
Train model
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.models.wrappers import LdaVowpalWabbit
>>>
>>> path_to_wv_binary = "/path/to/vw/binary"
>>> model = LdaVowpalWabbit(path_to_wv_binary, corpus=common_corpus, num_topics=20, id2word=common_dictionary)
Update existing model
>>> another_corpus = [[(1, 1), (2, 1)], [(3, 5)]]
>>> model.update(another_corpus)
Get topic probability distributions for a document
>>> document_bow = [(1, 1)]
>>> print(model[document_bow])
Print topics
>>> print(model.print_topics())
Save/load the trained model
>>> from gensim.test.utils import get_tmpfile
>>>
>>> temp_path = get_tmpfile("vw_lda.model")
>>> model.save(temp_path)
>>>
>>> loaded_lda = LdaVowpalWabbit.load(temp_path)
Calculate log-perplexity on given corpus

>>> another_corpus = [[(1, 1), (2, 1)], [(3, 5)]]
>>> print(model.log_perplexity(another_corpus))
Vowpal Wabbit works on files, so this wrapper maintains a temporary directory while it's around,
reading/writing there as necessary.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import shutil
import subprocess
import tempfile
import numpy
from gensim import utils, matutils
from gensim.models.ldamodel import LdaModel
logger = logging.getLogger(__name__)
class LdaVowpalWabbit(utils.SaveLoad):
"""Python wrapper using `Vowpal Wabbit's online LDA <https://github.com/JohnLangford/vowpal_wabbit/>`_.
Communication between Vowpal Wabbit and Python takes place by passing around data files
on disk and calling the 'vw' binary with the subprocess module.
Warnings
--------
This is **only** python wrapper for `Vowpal Wabbit's online LDA <https://github.com/JohnLangford/vowpal_wabbit/>`_,
you need to install original implementation first and pass the path to binary to ``vw_path``.
"""
def __init__(self, vw_path, corpus=None, num_topics=100, id2word=None,
             chunksize=256, passes=1, alpha=0.1, eta=0.1, decay=0.5,
             offset=1, gamma_threshold=0.001, random_seed=None,
             cleanup_files=True, tmp_prefix='tmp'):
    """

    Parameters
    ----------
    vw_path : str
        Path to Vowpal Wabbit's binary.
    corpus : iterable of list of (int, int), optional
        Collection of texts in BoW format. If given, training will start immediately,
        otherwise, you should call :meth:`~gensim.models.wrappers.ldavowpalwabbit.LdaVowpalWabbit.train` or
        :meth:`~gensim.models.wrappers.ldavowpalwabbit.LdaVowpalWabbit.update` manually for training.
    num_topics : int, optional
        Number of requested latent topics to be extracted from the training corpus.
        Corresponds to VW's ``--lda <num_topics>`` argument.
    id2word : :class:`~gensim.corpora.dictionary.Dictionary`, optional
        Mapping from word ids (integers) to words (strings).
    chunksize : int, optional
        Number of documents examined in each batch.
        Corresponds to VW's ``--minibatch <batch_size>`` argument.
    passes : int, optional
        Number of passes over the dataset to use.
        Corresponds to VW's ``--passes <passes>`` argument.
    alpha : float, optional
        Float effecting sparsity of per-document topic weights.
        This is applied symmetrically, and should be set higher to when documents are thought to look more similar.
        Corresponds to VW's ``--lda_alpha <alpha>`` argument.
    eta : float, optional
        Affects the sparsity of topic distributions.
        This is applied symmetrically, and should be set higher when topics
        are thought to look more similar.
        Corresponds to VW's ``--lda_rho <rho>`` argument.
    decay : float, optional
        Learning rate decay, affects how quickly learnt values are forgotten.
        Should be set to a value between 0.5 and 1.0 to guarantee convergence.
        Corresponds to VW's ``--power_t <tau>`` argument.
    offset: int, optional
        Learning offset, set to higher values to slow down learning on early iterations of the algorithm.
        Corresponds to VW's ``--initial_t <tau>`` argument.
    gamma_threshold : float, optional
        Affects when learning loop will be broken out of, higher values will result in earlier loop completion.
        Corresponds to VW's ``--epsilon <eps>`` argument.
    random_seed : int, optional
        Sets random seed when learning.
        Corresponds to VW's ``--random_seed <seed>`` argument.
    cleanup_files : bool, optional
        Whether or not to delete temporary directory and files used by this wrapper.
        Setting to False can be useful for debugging, or for re-using Vowpal Wabbit files elsewhere.
    tmp_prefix : str, optional
        To prefix temporary working directory name.

    """
    # default parameters are taken from Vowpal Wabbit's defaults, and
    # parameter names changed to match Gensim's LdaModel where possible
    self.vw_path = vw_path
    self.id2word = id2word

    if self.id2word is None:
        if corpus is None:
            raise ValueError(
                "at least one of corpus/id2word must be specified, to establish input space dimensionality"
            )
        logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
        self.id2word = utils.dict_from_corpus(corpus)
        self.num_terms = len(self.id2word)
    elif len(self.id2word) > 0:
        self.num_terms = 1 + max(self.id2word.keys())
    else:
        self.num_terms = 0

    if self.num_terms == 0:
        raise ValueError("cannot compute LDA over an empty collection (no terms)")

    # LDA parameters
    self.num_topics = num_topics
    self.chunksize = chunksize
    self.passes = passes
    self.alpha = alpha
    self.eta = eta
    self.gamma_threshold = gamma_threshold
    self.offset = offset
    self.decay = decay
    self.random_seed = random_seed
    # remember the starting offset so train() can reset it for fresh runs
    self._initial_offset = offset

    # temporary files used for Vowpal Wabbit input/output
    self.tmp_dir = None
    self.tmp_prefix = tmp_prefix
    self.cleanup_files = cleanup_files
    self._init_temp_dir(tmp_prefix)

    # used for saving/loading this model's state
    self._model_data = None
    self._topics_data = None

    # cache loaded topics as numpy array
    self._topics = None

    if corpus is not None:
        self.train(corpus)
def train(self, corpus):
"""Clear any existing model state, and train on given `corpus`.
Parameters
----------
corpus : iterable of list of (int, int)
Collection of texts in BoW format.
"""
logger.debug('Training new model from corpus')
# reset any existing offset, model, or topics generated
self.offset = self._initial_offset
self._topics = None
corpus_size = write_corpus_as_vw(corpus, self._corpus_filename)
cmd = self._get_vw_train_command(corpus_size)
_run_vw_command(cmd)
# ensure that future updates of this model use correct offset
self.offset += corpus_size
def update(self, corpus):
"""Update existing model with `corpus`.
Parameters
----------
corpus : iterable of list of (int, int)
Collection of texts in BoW format.
"""
if not os.path.exists(self._model_filename):
return self.train(corpus)
logger.debug('Updating exiting model from corpus')
# reset any existing topics generated
self._topics = None
corpus_size = write_corpus_as_vw(corpus, self._corpus_filename)
cmd = self._get_vw_update_command(corpus_size)
_run_vw_command(cmd)
# ensure that future updates of this model use correct offset
self.offset += corpus_size
def log_perplexity(self, chunk):
"""Get per-word lower bound on log perplexity.
Parameters
----------
chunk : iterable of list of (int, int)
Collection of texts in BoW format.
Returns
-------
bound : float
Per-word lower bound on log perplexity.
"""
vw_data = self._predict(chunk)[1]
corpus_words = sum(cnt for document in chunk for _, cnt in document)
bound = -vw_data['average_loss']
logger.info(
"%.3f per-word bound, %.1f perplexity estimate based on a held-out corpus of %i documents with %i words",
bound, numpy.exp2(-bound), vw_data['corpus_size'], corpus_words
)
return bound
def get_topics(self):
"""Get topics X words matrix.
Returns
-------
numpy.ndarray
`num_topics` x `vocabulary_size` array of floats which represents the learned term topic matrix.
"""
topics = self._get_topics()
return topics / topics.sum(axis=1)[:, None]
def print_topics(self, num_topics=10, num_words=10):
"""Alias for :meth:`~gensim.models.wrappers.dtmmodel.DtmModel.show_topics`.
Parameters
----------
num_topics : int, optional
Number of topics to return, set `-1` to get all topics.
num_words : int, optional
Number of words.
Returns
-------
list of str
Topics as a list of strings
"""
return self.show_topics(num_topics, num_words, log=True)
def show_topics(self, num_topics=10, num_words=10, log=False, formatted=True):
"""Get the `num_words` most probable words for `num_topics` number of topics.
Parameters
----------
num_topics : int, optional
Number of topics to return, set `-1` to get all topics.
num_words : int, optional
Number of words.
log : bool, optional
If True - will write topics with logger.
formatted : bool, optional
If `True` - return the topics as a list of strings, otherwise as lists of (weight, word) pairs.
Returns
-------
list of str
Topics as a list of strings (if formatted=True) **OR**
list of (float, str)
Topics as list of (weight, word) pairs (if formatted=False)
"""
if num_topics < 0 or num_topics >= self.num_topics:
num_topics = self.num_topics
else:
num_topics = min(num_topics, self.num_topics)
chosen_topics = range(num_topics)
shown = []
for i in chosen_topics:
if formatted:
topic = self.print_topic(i, topn=num_words)
else:
topic = self.show_topic(i, topn=num_words)
shown.append(topic)
if log:
logger.info("topic #%i (%.3f): %s", i, self.alpha, topic)
return shown
def print_topic(self, topicid, topn=10):
"""Get text representation of topic.
Parameters
----------
topicid : int
Id of topic.
topn : int, optional
Top number of words in topic.
Returns
-------
str
Topic `topicid` in text representation.
"""
return ' + '.join(['{0:.3f}*{1}'.format(v[0], v[1]) for v in self.show_topic(topicid, topn)])
def show_topic(self, topicid, topn=10):
"""Get `num_words` most probable words for the given `topicid`.
Parameters
----------
topicid : int
Id of topic.
topn : int, optional
Top number of topics that you'll receive.
Returns
-------
list of (str, float)
Sequence of probable words, as a list of `(word, word_probability)` for `topicid` topic.
"""
topics = self._get_topics()
topic = topics[topicid]
bestn = matutils.argsort(topic, topn, reverse=True)
return [(topic[t_id], self.id2word[t_id]) for t_id in bestn]
def save(self, fname, *args, **kwargs):
"""Save model to file.
Parameters
----------
fname : str
Path to output file.
"""
if os.path.exists(self._model_filename):
# Vowpal Wabbit uses its own binary model file, read this into
# variable before serialising this object - keeps all data
# self contained within a single serialised file
logger.debug("Reading model bytes from '%s'", self._model_filename)
with utils.smart_open(self._model_filename, 'rb') as fhandle:
self._model_data = fhandle.read()
if os.path.exists(self._topics_filename):
logger.debug("Reading topic bytes from '%s'", self._topics_filename)
with utils.smart_open(self._topics_filename, 'rb') as fhandle:
self._topics_data = fhandle.read()
if 'ignore' not in kwargs:
kwargs['ignore'] = frozenset(['_topics', 'tmp_dir'])
super(LdaVowpalWabbit, self).save(fname, *args, **kwargs)
@classmethod
def load(cls, fname, *args, **kwargs):
"""Load model from `fname`.
Parameters
----------
fname : str
Path to file with :class:`~gensim.models.wrappers.ldavowpalwabbit.LdaVowpalWabbit`.
"""
lda_vw = super(LdaVowpalWabbit, cls).load(fname, *args, **kwargs)
lda_vw._init_temp_dir(prefix=lda_vw.tmp_prefix)
if lda_vw._model_data:
# Vowpal Wabbit operates on its own binary model file - deserialise
# to file at load time, making it immediately ready for use
logger.debug("Writing model bytes to '%s'", lda_vw._model_filename)
with utils.smart_open(lda_vw._model_filename, 'wb') as fhandle:
fhandle.write(lda_vw._model_data)
lda_vw._model_data = None # no need to keep in memory after this
if lda_vw._topics_data:
logger.debug("Writing topic bytes to '%s'", lda_vw._topics_filename)
with utils.smart_open(lda_vw._topics_filename, 'wb') as fhandle:
fhandle.write(lda_vw._topics_data)
lda_vw._topics_data = None
return lda_vw
def __del__(self):
"""Cleanup the temporary directory used by this wrapper."""
if self.cleanup_files and self.tmp_dir:
logger.debug("Recursively deleting: %s", self.tmp_dir)
shutil.rmtree(self.tmp_dir)
def _init_temp_dir(self, prefix='tmp'):
"""Create a working temporary directory with given prefix.
Parameters
----------
prefix : str
Prefix of the temporary directory.
"""
self.tmp_dir = tempfile.mkdtemp(prefix=prefix)
logger.info('using %s as temp dir', self.tmp_dir)
def _get_vw_predict_command(self, corpus_size):
"""Get list of command line arguments for running prediction.
Parameters
----------
corpus_size : int
Size of the corpus.
"""
cmd = [
self.vw_path,
'--testonly', # don't update model with this data
'--lda_D', str(corpus_size),
'-i', self._model_filename, # load existing binary model
'-d', self._corpus_filename,
'--learning_rate', '0', # possibly not needed, but harmless
'-p', self._predict_filename
]
if self.random_seed is not None:
cmd.extend(['--random_seed', str(self.random_seed)])
return cmd
def _get_vw_train_command(self, corpus_size, update=False):
"""Get list of command line arguments for running model training.
Parameters
----------
corpus_size : int
Size of corpus.
update : bool
Set `True` to further train an existing model.
Returns
-------
list of str
Sequence of all training parameters.
"""
cmd = [
self.vw_path,
'-d', self._corpus_filename,
'--power_t', str(self.decay),
'--initial_t', str(self.offset),
'--minibatch', str(self.chunksize),
'--lda_D', str(corpus_size),
'--passes', str(self.passes),
'--cache_file', self._cache_filename,
'--lda_epsilon', str(self.gamma_threshold),
'--readable_model', self._topics_filename,
'-k', # clear cache
'-f', self._model_filename
]
if update:
cmd.extend(['-i', self._model_filename])
else:
# these params are read from model file if updating
cmd.extend([
'--lda', str(self.num_topics),
'-b', str(_bit_length(self.num_terms)),
'--lda_alpha', str(self.alpha),
'--lda_rho', str(self.eta)
])
if self.random_seed is not None:
cmd.extend(['--random_seed', str(self.random_seed)])
return cmd
def _get_vw_update_command(self, corpus_size):
"""Get list of command line arguments to update a model.
Alias for :meth:`~gensim.models.wrappers.dtmmodel.DtmModel._get_vw_train_command`
Parameters
----------
corpus_size : int
Size of the corpus.
Returns
-------
list of str
Sequence of all training parameters.
"""
return self._get_vw_train_command(corpus_size, update=True)
    def _load_vw_topics(self):
        """Read topics file generated by Vowpal Wabbit, convert to numpy array.

        The file is VW's ``--readable_model`` output: a preamble followed by one
        line per term, ``word_id weight_topic0 weight_topic1 ...``. The parsed,
        row-normalised matrix is cached on ``self._topics``.
        """
        topics = numpy.zeros((self.num_topics, self.num_terms), dtype=numpy.float32)
        # file is opened in binary mode, hence the b'...' comparisons below
        with utils.smart_open(self._topics_filename) as topics_file:
            found_data = False
            for line in topics_file:
                # look for start of data
                if not found_data:
                    # first data row starts with word id 0 and, unlike the
                    # preamble, contains no 'key:value' pairs
                    if line.startswith(b'0 ') and b':' not in line:
                        found_data = True
                    else:
                        continue
                fields = line.split()
                word_id = int(fields[0])
                # output contains entries for 2**b terms, where b was set
                # by the '-b' option, ignore anything past num_terms
                if word_id >= self.num_terms:
                    break
                # one column per term: fields[1:] holds this term's weight in each topic
                topics[:, word_id] = fields[1:]
        # normalise to probability distribution
        self._topics = topics / topics.sum(axis=1, keepdims=True)
def _get_topics(self):
"""Get topics matrix, load from file if necessary."""
if self._topics is None:
self._load_vw_topics()
return self._topics
def _predict(self, chunk):
"""Run given chunk of documents against currently trained model.
Parameters
----------
chunk : iterable of list of (int, int)
Sequence of documents in BoW format.
Returns
-------
predictions : ndarray
Tuple of prediction matrix.
vw_data : dict
Vowpal Wabbit data.
"""
corpus_size = write_corpus_as_vw(chunk, self._corpus_filename)
cmd = self._get_vw_predict_command(corpus_size)
vw_data = _parse_vw_output(_run_vw_command(cmd))
vw_data['corpus_size'] = corpus_size
predictions = numpy.zeros((corpus_size, self.num_topics), dtype=numpy.float32)
with utils.smart_open(self._predict_filename) as fhandle:
for i, line in enumerate(fhandle):
predictions[i, :] = line.split()
predictions = predictions / predictions.sum(axis=1, keepdims=True)
return predictions, vw_data
def __getitem__(self, bow, eps=0.01):
"""Convert document or corpus in BoW format to LDA vectors in BoW format
Parameters
----------
bow : {list of (int, int), iterable of list of (int, int)}
Document or corpus in BoW format.
eps : float
Threshold value (all topics with probability < `eps` will be ignored.
Returns
-------
list of (int, float)
LDA vector for document **OR**
list of list of (int, float)
LDA vectors for corpus.
"""
is_corpus, dummy_corpus = utils.is_corpus(bow)
if not is_corpus:
bow = [bow]
predictions = self._predict(bow)[0]
topics = []
for row in predictions:
row_topics = []
for topic_id, val in enumerate(row):
if val > eps:
row_topics.append((topic_id, val))
topics.append(row_topics)
return topics if is_corpus else topics[0]
def _get_filename(self, name):
"""Get path to given filename in temp directory.
Parameters
----------
name : str
Name of the file.
Returns
-------
str
Path to a file.
"""
return os.path.join(self.tmp_dir, name)
@property
def _model_filename(self):
"""Get path to file to write Vowpal Wabbit model to.
Returns
-------
str
Path to file to write Vowpal Wabbit model to.
"""
return self._get_filename('model.vw')
@property
def _cache_filename(self):
"""Get path to file to write Vowpal Wabbit cache to.
Returns
-------
str
Path to file to write Vowpal Wabbit cache to.
"""
return self._get_filename('cache.vw')
@property
def _corpus_filename(self):
"""Get path to file to write Vowpal Wabbit corpus to.
Returns
-------
str
Path to file to write Vowpal Wabbit corpus to.
"""
return self._get_filename('corpus.vw')
@property
def _topics_filename(self):
"""Get path to file to write Vowpal Wabbit topics to.
Returns
-------
str
Path to file to write Vowpal Wabbit topics to.
"""
return self._get_filename('topics.vw')
@property
def _predict_filename(self):
"""Get path to file to write Vowpal Wabbit predictions to.
Returns
-------
str
Path to file to write Vowpal Wabbit predictions to.
"""
return self._get_filename('predict.vw')
def __str__(self):
"""Get text representation of model."""
fields = ['num_terms', 'num_topics', 'chunksize', 'alpha', 'eta']
kv = ["{0}={1}".format(field, getattr(self, field)) for field in fields]
return "{0}({1})".format(self.__class__.__name__, ', '.join(kv))
def corpus_to_vw(corpus):
    """Yield the documents of `corpus` rendered in Vowpal Wabbit's input format.

    Parameters
    ----------
    corpus : iterable of list of (int, int)
        Collection of texts in BoW format.

    Notes
    -----
    Vowpal Wabbit format ::

        | 4:7 14:1 22:8 6:3
        | 14:22 22:4 0:1 1:3
        | 7:2 8:2

    Yields
    ------
    str
        Corpus in Vowpal Wabbit, line by line.
    """
    for doc in corpus:
        pairs = ["{0}:{1}".format(term_id, freq) for term_id, freq in doc]
        yield ' '.join(['|'] + pairs)
def write_corpus_as_vw(corpus, filename):
    """Serialise `corpus` in Vowpal Wabbit format to `filename`.

    Parameters
    ----------
    corpus : iterable of list of (int, int)
        Collection of texts in BoW format.
    filename : str
        Path to output file.

    Returns
    -------
    int
        Number of lines in `filename`.
    """
    logger.debug("Writing corpus to: %s", filename)
    num_lines = 0
    with utils.smart_open(filename, 'wb') as corpus_file:
        for doc_line in corpus_to_vw(corpus):
            corpus_file.write(doc_line.encode('utf-8') + b'\n')
            num_lines += 1
    return num_lines
def _parse_vw_output(text):
"""Get dict of useful fields from Vowpal Wabbit's output.
Parameters
----------
text : str
Text from vw file.
Returns
-------
dict of (str, float)
Dictionary with field "average_loss", lower bound on mean per-word log-perplexity.
"""
data = {}
for line in text.splitlines():
if line.startswith('average loss'):
data['average_loss'] = float(line.split('=')[1])
break
return data
def _run_vw_command(cmd):
    """Execute given Vowpal Wabbit command, log stdout and stderr.

    Parameters
    ----------
    cmd : list of str
        Given Vowpal Wabbit command to execute.

    Returns
    -------
    str
        Stdout and stderr.

    Raises
    ------
    subprocess.CalledProcessError
        If something goes wrong.
    """
    logger.info("Running Vowpal Wabbit command: %s", ' '.join(cmd))
    # stderr is merged into stdout so a single stream captures everything
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    stdout, _ = process.communicate()
    output = stdout.decode('utf-8')
    logger.debug("Vowpal Wabbit output: %s", output)
    if process.returncode != 0:
        raise subprocess.CalledProcessError(process.returncode, ' '.join(cmd), output=output)
    return output
# if python2.6 support is ever dropped, can change to using int.bit_length()
def _bit_length(num):
"""Get number of bits needed to encode given number.
Parameters
----------
num : int
Number to encode.
Returns
-------
int
Number of bits needed to encode given number.
"""
return len(bin(num).lstrip('-0b'))
def vwmodel2ldamodel(vw_model, iterations=50):
    """Convert :class:`~gensim.models.wrappers.ldavowpalwabbit.LdaVowpalWabbit` to
    :class:`~gensim.models.ldamodel.LdaModel`.

    This works by simply copying the training model weights (alpha, beta...) from a trained vwmodel
    into the gensim model.

    Parameters
    ----------
    vw_model : :class:`~gensim.models.wrappers.ldavowpalwabbit.LdaVowpalWabbit`
        Trained Vowpal Wabbit model.
    iterations : int
        Number of iterations to be used for inference of the new :class:`~gensim.models.ldamodel.LdaModel`.

    Returns
    -------
    :class:`~gensim.models.ldamodel.LdaModel`.
        Gensim native LDA.
    """
    result = LdaModel(
        num_topics=vw_model.num_topics,
        id2word=vw_model.id2word,
        chunksize=vw_model.chunksize,
        passes=vw_model.passes,
        alpha=vw_model.alpha,
        eta=vw_model.eta,
        decay=vw_model.decay,
        offset=vw_model.offset,
        iterations=iterations,
        gamma_threshold=vw_model.gamma_threshold,
        dtype=numpy.float32,
    )
    # transplant the learned topic-word weights into the fresh gensim model
    result.expElogbeta[:] = vw_model._get_topics()
    return result
| 28,176 | 30.482682 | 119 | py |
poincare_glove | poincare_glove-master/gensim/models/wrappers/wordrank.py | # Copyright (C) 2017 Parul Sethi <parul1sethi@gmail.com>
# Copyright (C) 2017 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Python wrapper around `Wordrank <https://bitbucket.org/shihaoji/wordrank/>`_.
Original paper: `"WordRank: Learning Word Embeddings via Robust Ranking " <https://arxiv.org/pdf/1506.02761v3.pdf>`_.
Installation
------------
Use `official guide <https://github.com/shihaoji/wordrank>`_ or this one
* On Linux ::
sudo yum install boost-devel #(on RedHat/Centos)
sudo apt-get install libboost-all-dev #(on Ubuntu)
git clone https://bitbucket.org/shihaoji/wordrank
cd wordrank/
# replace icc to gcc in install.sh
./install.sh
* On MacOS ::
brew install cmake
brew install wget
brew install boost
brew install mercurial
git clone https://bitbucket.org/shihaoji/wordrank
cd wordrank/
# replace icc to gcc in install.sh
./install.sh
Examples
--------
>>> from gensim.models.wrappers import Wordrank
>>>
>>> path_to_wordrank_binary = '/path/to/wordrank/binary'
>>> model = Wordrank.train(path_to_wordrank_binary, corpus_file='text8', out_name='wr_model')
>>>
>>> print model["hello"] # prints vector for given words
Warnings
--------
Note that the wrapper might not work in a docker container for large datasets due to memory limits (caused by MPI).
"""
from __future__ import division
import logging
import os
import copy
import multiprocessing
from gensim import utils
from gensim.models.keyedvectors import KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec
from smart_open import smart_open
from shutil import copyfile, rmtree
logger = logging.getLogger(__name__)
class Wordrank(KeyedVectors):
    """Python wrapper using `Wordrank implementation <https://bitbucket.org/shihaoji/wordrank/>`_
    Communication between Wordrank and Python takes place by working with data
    files on disk and calling the Wordrank binary and glove's helper binaries
    (for preparing training data) with subprocess module.
    Warnings
    --------
    This is **only** python wrapper for `Wordrank implementation <https://bitbucket.org/shihaoji/wordrank/>`_,
    you need to install original implementation first and pass the path to wordrank dir to ``wr_path``.
    """
    @classmethod
    def train(cls, wr_path, corpus_file, out_name, size=100, window=15, symmetric=1, min_count=5, max_vocab_size=0,
              sgd_num=100, lrate=0.001, period=10, iter=90, epsilon=0.75, dump_period=10, reg=0, alpha=100,
              beta=99, loss='hinge', memory=4.0, np=1, cleanup_files=False, sorted_vocab=1, ensemble=0):
        """Train model.
        Parameters
        ----------
        wr_path : str
            Absolute path to the Wordrank directory.
        corpus_file : str
            Path to corpus file, expected space-separated tokens in a each line format.
        out_name : str
            Name of the directory which will be created (in wordrank folder) to save embeddings and training data:
                * ``model_word_current_<iter>.txt`` - Word Embeddings saved after every dump_period.
                * ``model_context_current_<iter>.txt`` - Context Embeddings saved after every dump_period.
                * ``meta/vocab.txt`` - vocab file.
                * ``meta/wiki.toy`` - word-word concurrence values.
        size : int, optional
            Dimensionality of the feature vectors.
        window : int, optional
            Number of context words to the left (and to the right, if `symmetric = 1`).
        symmetric : {0, 1}, optional
            If 1 - using symmetric windows, if 0 - will use only left context words.
        min_count : int, optional
            Ignore all words with total frequency lower than `min_count`.
        max_vocab_size : int, optional
            Upper bound on vocabulary size, i.e. keep the <int> most frequent words. If 0 - no limit.
        sgd_num : int, optional
            Number of SGD taken for each data point.
        lrate : float, optional
            Learning rate (attention: too high diverges, give Nan).
        period : int, optional
            Period of xi variable updates.
        iter : int, optional
            Number of iterations (epochs) over the corpus.
        epsilon : float, optional
            Power scaling value for weighting function.
        dump_period : int, optional
            Period after which embeddings should be dumped.
        reg : int, optional
            Value of regularization parameter.
        alpha : int, optional
            Alpha parameter of gamma distribution.
        beta : int, optional
            Beta parameter of gamma distribution.
        loss : {"logistic", "hinge"}, optional
            Name of the loss function.
        memory : float, optional
            Soft limit for memory consumption, in GB.
        np : int, optional
            Number of process to execute (mpirun option).
        cleanup_files : bool, optional
            If True, delete directory and files used by this wrapper.
        sorted_vocab : {0, 1}, optional
            If 1 - sort the vocabulary by descending frequency before assigning word indexes, otherwise - do nothing.
        ensemble : {0, 1}, optional
            If 1 - use ensemble of word and context vectors.
        """
        # prepare training data (cooccurrence matrix and vocab)
        model_dir = os.path.join(wr_path, out_name)
        meta_dir = os.path.join(model_dir, 'meta')
        os.makedirs(meta_dir)
        logger.info("Dumped data will be stored in '%s'", model_dir)
        copyfile(corpus_file, os.path.join(meta_dir, corpus_file.split('/')[-1]))
        vocab_file = os.path.join(meta_dir, 'vocab.txt')
        temp_vocab_file = os.path.join(meta_dir, 'tempvocab.txt')
        cooccurrence_file = os.path.join(meta_dir, 'cooccurrence')
        cooccurrence_shuf_file = os.path.join(meta_dir, 'wiki.toy')
        meta_file = os.path.join(meta_dir, 'meta')
        # glove's helper binaries: build the vocab, count cooccurrences, then
        # shuffle them; `cut` strips the frequency column from the vocab file
        cmd_vocab_count = [
            os.path.join(wr_path, 'glove', 'vocab_count'),
            '-min-count', str(min_count), '-max-vocab', str(max_vocab_size)
        ]
        cmd_cooccurence_count = [
            os.path.join(wr_path, 'glove', 'cooccur'), '-memory', str(memory),
            '-vocab-file', temp_vocab_file, '-window-size', str(window), '-symmetric', str(symmetric)
        ]
        cmd_shuffle_cooccurences = [os.path.join(wr_path, 'glove', 'shuffle'), '-memory', str(memory)]
        cmd_del_vocab_freq = ['cut', '-d', " ", '-f', '1', temp_vocab_file]
        commands = [cmd_vocab_count, cmd_cooccurence_count, cmd_shuffle_cooccurences]
        input_fnames = [
            os.path.join(meta_dir, os.path.split(corpus_file)[-1]),
            os.path.join(meta_dir, os.path.split(corpus_file)[-1]),
            cooccurrence_file
        ]
        output_fnames = [temp_vocab_file, cooccurrence_file, cooccurrence_shuf_file]
        logger.info("Prepare training data (%s) using glove code", ", ".join(input_fnames))
        # each helper reads from stdin and writes to stdout, so pipe file to file
        for command, input_fname, output_fname in zip(commands, input_fnames, output_fnames):
            with smart_open(input_fname, 'rb') as r:
                with smart_open(output_fname, 'wb') as w:
                    utils.check_output(w, args=command, stdin=r)
        logger.info("Deleting frequencies from vocab file")
        with smart_open(vocab_file, 'wb') as w:
            utils.check_output(w, args=cmd_del_vocab_freq)
        with smart_open(vocab_file, 'rb') as f:
            numwords = sum(1 for _ in f)
        with smart_open(cooccurrence_shuf_file, 'rb') as f:
            numlines = sum(1 for _ in f)
        # the meta file records matrix/vocab dimensions and file names for the
        # wordrank binary
        with smart_open(meta_file, 'wb') as f:
            meta_info = "{0} {1}\n{2} {3}\n{4} {5}".format(
                numwords, numwords, numlines, cooccurrence_shuf_file.split('/')[-1],
                numwords, vocab_file.split('/')[-1]
            )
            f.write(meta_info.encode('utf-8'))
        # wordrank only dumps embeddings every `dump_period` iterations; nudge
        # `iter` so the final iteration coincides with a dump, or warn otherwise
        if iter % dump_period == 0:
            iter += 1
        else:
            logger.warning(
                "Resultant embedding will be from %d iterations rather than the input %d iterations, "
                "as wordrank dumps the embedding only at dump_period intervals. "
                "Input an appropriate combination of parameters (iter, dump_period) "
                "such that \"iter mod dump_period\" is zero.",
                iter - (iter % dump_period), iter
            )
        wr_args = {
            'path': meta_dir,
            'nthread': multiprocessing.cpu_count(),
            'sgd_num': sgd_num,
            'lrate': lrate,
            'period': period,
            'iter': iter,
            'epsilon': epsilon,
            'dump_prefix': 'model',
            'dump_period': dump_period,
            'dim': size,
            'reg': reg,
            'alpha': alpha,
            'beta': beta,
            'loss': loss
        }
        # run wordrank executable with wr_args
        cmd = ['mpirun', '-np', str(np), os.path.join(wr_path, 'wordrank')]
        for option, value in wr_args.items():
            cmd.append('--%s' % option)
            cmd.append(str(value))
        logger.info("Running wordrank binary")
        utils.check_output(args=cmd)
        # use embeddings from max. iteration's dump
        max_iter_dump = iter - (iter % dump_period)
        os.rename('model_word_%d.txt' % max_iter_dump, os.path.join(model_dir, 'wordrank.words'))
        os.rename('model_context_%d.txt' % max_iter_dump, os.path.join(model_dir, 'wordrank.contexts'))
        model = cls.load_wordrank_model(
            os.path.join(model_dir, 'wordrank.words'), vocab_file,
            os.path.join(model_dir, 'wordrank.contexts'), sorted_vocab, ensemble
        )
        if cleanup_files:
            rmtree(model_dir)
        return model
    @classmethod
    def load_wordrank_model(cls, model_file, vocab_file=None, context_file=None, sorted_vocab=1, ensemble=1):
        """Load model from `model_file`.
        Parameters
        ----------
        model_file : str
            Path to model in GloVe format.
        vocab_file : str, optional
            Path to file with vocabulary.
        context_file : str, optional
            Path to file with context-embedding in word2vec_format.
        sorted_vocab : {0, 1}, optional
            If 1 - sort the vocabulary by descending frequency before assigning word indexes, otherwise - do nothing.
        ensemble : {0, 1}, optional
            If 1 - use ensemble of word and context vectors.
        """
        glove2word2vec(model_file, model_file + '.w2vformat')
        model = cls.load_word2vec_format('%s.w2vformat' % model_file)
        if ensemble and context_file:
            model.ensemble_embedding(model_file, context_file)
        if sorted_vocab and vocab_file:
            model.sort_embeddings(vocab_file)
        return model
    def sort_embeddings(self, vocab_file):
        """Sort embeddings according to word frequency.
        Parameters
        ----------
        vocab_file : str
            Path to file with vocabulary.
        """
        counts = {}
        vocab_size = len(self.vocab)
        prev_syn0 = copy.deepcopy(self.syn0)
        prev_vocab = copy.deepcopy(self.vocab)
        self.index2word = []
        # sort embeddings using frequency sorted vocab file in wordrank
        with utils.smart_open(vocab_file) as fin:
            for index, line in enumerate(fin):
                # vocab file lists words by descending frequency, so the line
                # number determines the (synthetic) count
                word, count = utils.to_unicode(line).strip(), vocab_size - index
                # store word with it's count in a dict
                counts[word] = int(count)
                # build new index2word with frequency sorted words
                self.index2word.append(word)
        assert len(self.index2word) == vocab_size, 'mismatch between vocab sizes'
        for word_id, word in enumerate(self.index2word):
            self.syn0[word_id] = prev_syn0[prev_vocab[word].index]
            self.vocab[word].index = word_id
            self.vocab[word].count = counts[word]
    def ensemble_embedding(self, word_embedding, context_embedding):
        """Replace current syn0 with the sum of context and word embeddings.
        Parameters
        ----------
        word_embedding : str
            Path to word embeddings in GloVe format.
        context_embedding : str
            Path to context embeddings in word2vec_format.
        Returns
        -------
        numpy.ndarray
            Matrix with new embeddings.
        """
        glove2word2vec(context_embedding, context_embedding + '.w2vformat')
        w_emb = KeyedVectors.load_word2vec_format('%s.w2vformat' % word_embedding)
        c_emb = KeyedVectors.load_word2vec_format('%s.w2vformat' % context_embedding)
        # compare vocab words using keys of dict vocab
        assert set(w_emb.vocab) == set(c_emb.vocab), 'Vocabs are not same for both embeddings'
        # sort context embedding to have words in same order as word embedding
        prev_c_emb = copy.deepcopy(c_emb.syn0)
        for word_id, word in enumerate(w_emb.index2word):
            c_emb.syn0[word_id] = prev_c_emb[c_emb.vocab[word].index]
        # add vectors of the two embeddings
        new_emb = w_emb.syn0 + c_emb.syn0
        self.syn0 = new_emb
        return new_emb
| 13,370 | 39.518182 | 117 | py |
poincare_glove | poincare_glove-master/gensim/models/wrappers/ldamallet.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Python wrapper for `Latent Dirichlet Allocation (LDA) <https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation>`_
from `MALLET, the Java topic modelling toolkit <http://mallet.cs.umass.edu/>`_
This module allows both LDA model estimation from a training corpus and inference of topic distribution on new,
unseen documents, using an (optimized version of) collapsed gibbs sampling from MALLET.
Notes
-----
MALLET's LDA training requires :math:O(#corpus_words) of memory, keeping the entire corpus in RAM.
If you find yourself running out of memory, either decrease the `workers` constructor parameter,
or use :class:`gensim.models.ldamodel.LdaModel` or :class:`gensim.models.ldamulticore.LdaMulticore`
which needs only :math:`O(1)` memory.
The wrapped model can NOT be updated with new documents for online training -- use
:class:`~gensim.models.ldamodel.LdaModel` or :class:`~gensim.models.ldamulticore.LdaMulticore` for that.
Installation
------------
Use `official guide <http://mallet.cs.umass.edu/download.php>`_ or this one ::
sudo apt-get install default-jdk
sudo apt-get install ant
git clone git@github.com:mimno/Mallet.git
cd Mallet/
ant
Examples
--------
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.models.wrappers import LdaMallet
>>>
>>> path_to_mallet_binary = "/path/to/mallet/binary"
>>> model = LdaMallet(path_to_mallet_binary, corpus=common_corpus, num_topics=20, id2word=common_dictionary)
>>> vector = model[common_corpus[0]] # LDA topics of a documents
"""
import logging
import os
import random
import warnings
import tempfile
import xml.etree.ElementTree as et
import zipfile
import numpy
from smart_open import smart_open
from gensim import utils, matutils
from gensim.models import basemodel
from gensim.models.ldamodel import LdaModel
from gensim.utils import check_output, revdict
logger = logging.getLogger(__name__)
class LdaMallet(utils.SaveLoad, basemodel.BaseTopicModel):
"""Python wrapper for LDA using `MALLET <http://mallet.cs.umass.edu/>`_.
Communication between MALLET and Python takes place by passing around data files on disk
and calling Java with subprocess.call().
Warnings
--------
This is **only** python wrapper for `MALLET LDA <http://mallet.cs.umass.edu/>`_,
you need to install original implementation first and pass the path to binary to ``mallet_path``.
"""
def __init__(self, mallet_path, corpus=None, num_topics=100, alpha=50, id2word=None, workers=4, prefix=None,
optimize_interval=0, iterations=1000, topic_threshold=0.0):
"""
Parameters
----------
mallet_path : str
Path to the mallet binary, e.g. `/home/username/mallet-2.0.7/bin/mallet`.
corpus : iterable of iterable of (int, int), optional
Collection of texts in BoW format.
num_topics : int, optional
Number of topics.
alpha : int, optional
Alpha parameter of LDA.
id2word : :class:`~gensim.corpora.dictionary.Dictionary`, optional
Mapping between tokens ids and words from corpus, if not specified - will be inferred from `corpus`.
workers : int, optional
Number of threads that will be used for training.
prefix : str, optional
Prefix for produced temporary files.
optimize_interval : int, optional
Optimize hyperparameters every `optimize_interval` iterations
(sometimes leads to Java exception 0 to switch off hyperparameter optimization).
iterations : int, optional
Number of training iterations.
topic_threshold : float, optional
Threshold of the probability above which we consider a topic.
"""
self.mallet_path = mallet_path
self.id2word = id2word
if self.id2word is None:
logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
else:
self.num_terms = 0 if not self.id2word else 1 + max(self.id2word.keys())
if self.num_terms == 0:
raise ValueError("cannot compute LDA over an empty collection (no terms)")
self.num_topics = num_topics
self.topic_threshold = topic_threshold
self.alpha = alpha
if prefix is None:
rand_prefix = hex(random.randint(0, 0xffffff))[2:] + '_'
prefix = os.path.join(tempfile.gettempdir(), rand_prefix)
self.prefix = prefix
self.workers = workers
self.optimize_interval = optimize_interval
self.iterations = iterations
if corpus is not None:
self.train(corpus)
def finferencer(self):
"""Get path to inferencer.mallet file.
Returns
-------
str
Path to inferencer.mallet file.
"""
return self.prefix + 'inferencer.mallet'
def ftopickeys(self):
"""Get path to topic keys text file.
Returns
-------
str
Path to topic keys text file.
"""
return self.prefix + 'topickeys.txt'
def fstate(self):
"""Get path to temporary file.
Returns
-------
str
Path to file.
"""
return self.prefix + 'state.mallet.gz'
def fdoctopics(self):
"""Get path to document topic text file.
Returns
-------
str
Path to document topic text file.
"""
return self.prefix + 'doctopics.txt'
def fcorpustxt(self):
"""Get path to corpus text file.
Returns
-------
str
Path to corpus text file.
"""
return self.prefix + 'corpus.txt'
def fcorpusmallet(self):
"""Get path to corpus.mallet file.
Returns
-------
str
Path to corpus.mallet file.
"""
return self.prefix + 'corpus.mallet'
def fwordweights(self):
"""Get path to word weight file.
Returns
-------
str
Path to word weight file.
"""
return self.prefix + 'wordweights.txt'
def corpus2mallet(self, corpus, file_like):
"""Convert `corpus` to Mallet format and write it to `file_like` descriptor.
Format ::
document id[SPACE]label (not used)[SPACE]whitespace delimited utf8-encoded tokens[NEWLINE]
Parameters
----------
corpus : iterable of iterable of (int, int)
Collection of texts in BoW format.
file_like : file-like object
Opened file.
"""
for docno, doc in enumerate(corpus):
if self.id2word:
tokens = sum(([self.id2word[tokenid]] * int(cnt) for tokenid, cnt in doc), [])
else:
tokens = sum(([str(tokenid)] * int(cnt) for tokenid, cnt in doc), [])
file_like.write(utils.to_utf8("%s 0 %s\n" % (docno, ' '.join(tokens))))
    def convert_input(self, corpus, infer=False, serialize_corpus=True):
        """Convert corpus to Mallet format and save it to a temporary text file.

        Serializes `corpus` to a plain-text file, then shells out to MALLET's
        ``import-file`` command to build its internal binary representation.

        Parameters
        ----------
        corpus : iterable of iterable of (int, int)
            Collection of texts in BoW format.
        infer : bool, optional
            If True - import into a separate ``.infer`` file, reusing the pipe of
            the already-imported training corpus so the vocabularies match.
        serialize_corpus : bool, optional
            If True - write `corpus` to the temporary text file first; set to False
            when that file has already been written.
        """
        if serialize_corpus:
            logger.info("serializing temporary corpus to %s", self.fcorpustxt())
            with smart_open(self.fcorpustxt(), 'wb') as fout:
                self.corpus2mallet(corpus, fout)
        # convert the text file above into MALLET's internal format
        cmd = \
            self.mallet_path + \
            " import-file --preserve-case --keep-sequence " \
            "--remove-stopwords --token-regex \"\S+\" --input %s --output %s"
        if infer:
            cmd += ' --use-pipe-from ' + self.fcorpusmallet()
            cmd = cmd % (self.fcorpustxt(), self.fcorpusmallet() + '.infer')
        else:
            cmd = cmd % (self.fcorpustxt(), self.fcorpusmallet())
        logger.info("converting temporary corpus to MALLET format with %s", cmd)
        check_output(args=cmd, shell=True)
    def train(self, corpus):
        """Train Mallet LDA.

        Converts `corpus`, runs MALLET's ``train-topics`` command on it, then
        loads the resulting word-topic counts back into memory.

        Parameters
        ----------
        corpus : iterable of iterable of (int, int)
            Corpus in BoW format
        """
        self.convert_input(corpus, infer=False)
        cmd = self.mallet_path + ' train-topics --input %s --num-topics %s --alpha %s --optimize-interval %s '\
            '--num-threads %s --output-state %s --output-doc-topics %s --output-topic-keys %s '\
            '--num-iterations %s --inferencer-filename %s --doc-topics-threshold %s'
        cmd = cmd % (
            self.fcorpusmallet(), self.num_topics, self.alpha, self.optimize_interval,
            self.workers, self.fstate(), self.fdoctopics(), self.ftopickeys(), self.iterations,
            self.finferencer(), self.topic_threshold
        )
        # NOTE "--keep-sequence-bigrams" / "--use-ngrams true" poorer results + runs out of memory
        logger.info("training MALLET LDA with %s", cmd)
        check_output(args=cmd, shell=True)
        # Parse the state file written above into the words-x-topics count matrix.
        self.word_topics = self.load_word_topics()
        # NOTE - we are still keeping the wordtopics variable to not break backward compatibility.
        # word_topics has replaced wordtopics throughout the code;
        # wordtopics just stores the values of word_topics when train is called.
        self.wordtopics = self.word_topics
    def __getitem__(self, bow, iterations=100):
        """Get vector for document(s).

        Runs MALLET's ``infer-topics`` on the already-trained inferencer, so
        :meth:`train` must have been called first.

        Parameters
        ----------
        bow : {list of (int, int), iterable of list of (int, int)}
            Document (or corpus) in BoW format.
        iterations : int, optional
            Number of iterations that will be used for inferring.

        Returns
        -------
        list of (int, float)
            LDA vector for document as sequence of (topic_id, topic_probability) **OR**
        list of list of (int, float)
            LDA vectors for corpus in same format.
        """
        is_corpus, corpus = utils.is_corpus(bow)
        if not is_corpus:
            # query is a single document => make a corpus out of it
            bow = [bow]
        self.convert_input(bow, infer=True)
        cmd = \
            self.mallet_path + ' infer-topics --input %s --inferencer %s ' \
            '--output-doc-topics %s --num-iterations %s --doc-topics-threshold %s'
        cmd = cmd % (
            self.fcorpusmallet() + '.infer', self.finferencer(),
            self.fdoctopics() + '.infer', iterations, self.topic_threshold
        )
        logger.info("inferring topics with MALLET LDA '%s'", cmd)
        check_output(args=cmd, shell=True)
        # Materialize the generator so single-document queries can be unwrapped.
        result = list(self.read_doctopics(self.fdoctopics() + '.infer'))
        return result if is_corpus else result[0]
    def load_word_topics(self):
        """Load words X topics matrix from :meth:`gensim.models.wrappers.ldamallet.LdaMallet.fstate` file.

        Also refreshes ``self.alpha`` from the hyperparameters recorded in the
        state file's second line.

        Returns
        -------
        numpy.ndarray
            Matrix words X topics (raw token-assignment counts, not normalized).
        """
        logger.info("loading assigned topics from %s", self.fstate())
        word_topics = numpy.zeros((self.num_topics, self.num_terms), dtype=numpy.float64)
        if hasattr(self.id2word, 'token2id'):
            word2id = self.id2word.token2id
        else:
            word2id = revdict(self.id2word)
        with utils.smart_open(self.fstate()) as fin:
            _ = next(fin)  # header
            # Second line holds "#alpha : a0 a1 ..." -> skip the two label columns.
            self.alpha = numpy.array([float(val) for val in next(fin).split()[2:]])
            assert len(self.alpha) == self.num_topics, "mismatch between MALLET vs. requested topics"
            _ = next(fin)  # noqa:F841 beta
            # Each remaining line is one token assignment: count it for its topic.
            for lineno, line in enumerate(fin):
                line = utils.to_unicode(line)
                doc, source, pos, typeindex, token, topic = line.split(" ")
                if token not in word2id:
                    continue
                tokenid = word2id[token]
                word_topics[int(topic), tokenid] += 1.0
        return word_topics
def load_document_topics(self):
"""Load document topics from :meth:`gensim.models.wrappers.ldamallet.LdaMallet.fdoctopics` file.
Shortcut for :meth:`gensim.models.wrappers.ldamallet.LdaMallet.read_doctopics`.
Returns
-------
iterator of list of (int, float)
Sequence of LDA vectors for documents.
"""
return self.read_doctopics(self.fdoctopics())
def get_topics(self):
"""Get topics X words matrix.
Returns
-------
numpy.ndarray
Topics X words matrix, shape `num_topics` x `vocabulary_size`.
"""
topics = self.word_topics
return topics / topics.sum(axis=1)[:, None]
    def show_topics(self, num_topics=10, num_words=10, log=False, formatted=True):
        """Get the `num_words` most probable words for `num_topics` number of topics.

        Parameters
        ----------
        num_topics : int, optional
            Number of topics to return, set `-1` to get all topics.
        num_words : int, optional
            Number of words.
        log : bool, optional
            If True - write topic with logging too, used for debug proposes.
        formatted : bool, optional
            If `True` - return the topics as a list of strings, otherwise as lists of (weight, word) pairs.

        Returns
        -------
        list of str
            Topics as a list of strings (if formatted=True) **OR**
        list of (float, str)
            Topics as list of (weight, word) pairs (if formatted=False)
        """
        if num_topics < 0 or num_topics >= self.num_topics:
            num_topics = self.num_topics
            chosen_topics = range(num_topics)
        else:
            num_topics = min(num_topics, self.num_topics)
            # add a little random jitter, to randomize results around the same alpha
            sort_alpha = self.alpha + 0.0001 * numpy.random.rand(len(self.alpha))
            sorted_topics = list(matutils.argsort(sort_alpha))
            # Pick half of the requested topics from each end of the alpha ordering
            # (smallest and largest alpha); NOTE(review): with odd `num_topics` the
            # two slices overlap by construction — presumably intentional, confirm.
            chosen_topics = sorted_topics[: num_topics // 2] + sorted_topics[-num_topics // 2:]
        shown = []
        for i in chosen_topics:
            if formatted:
                topic = self.print_topic(i, topn=num_words)
            else:
                topic = self.show_topic(i, topn=num_words)
            shown.append((i, topic))
            if log:
                logger.info("topic #%i (%.3f): %s", i, self.alpha[i], topic)
        return shown
def show_topic(self, topicid, topn=10, num_words=None):
"""Get `num_words` most probable words for the given `topicid`.
Parameters
----------
topicid : int
Id of topic.
topn : int, optional
Top number of topics that you'll receive.
num_words : int, optional
DEPRECATED PARAMETER, use `topn` instead.
Returns
-------
list of (str, float)
Sequence of probable words, as a list of `(word, word_probability)` for `topicid` topic.
"""
if num_words is not None: # deprecated num_words is used
warnings.warn("The parameter `num_words` is deprecated, will be removed in 4.0.0, use `topn` instead.")
topn = num_words
if self.word_topics is None:
logger.warning("Run train or load_word_topics before showing topics.")
topic = self.word_topics[topicid]
topic = topic / topic.sum() # normalize to probability dist
bestn = matutils.argsort(topic, topn, reverse=True)
beststr = [(self.id2word[idx], topic[idx]) for idx in bestn]
return beststr
def get_version(self, direc_path):
""""Get the version of Mallet.
Parameters
----------
direc_path : str
Path to mallet archive.
Returns
-------
str
Version of mallet.
"""
try:
archive = zipfile.ZipFile(direc_path, 'r')
if u'cc/mallet/regression/' not in archive.namelist():
return '2.0.7'
else:
return '2.0.8RC3'
except Exception:
xml_path = direc_path.split("bin")[0]
try:
doc = et.parse(xml_path + "pom.xml").getroot()
namespace = doc.tag[:doc.tag.index('}') + 1]
return doc.find(namespace + 'version').text.split("-")[0]
except Exception:
return "Can't parse pom.xml version file"
    def read_doctopics(self, fname, eps=1e-6, renorm=True):
        """Get document topic vectors from MALLET's "doc-topics" format, as sparse gensim vectors.

        Parameters
        ----------
        fname : str
            Path to input file with document topics.
        eps : float, optional
            Threshold for probabilities.
        renorm : bool, optional
            If True - explicitly re-normalize distribution.

        Raises
        ------
        RuntimeError
            If any line in invalid format.

        Yields
        ------
        list of (int, float)
            LDA vectors for document.
        """
        # The on-disk layout differs by MALLET release, so sniff the version once.
        mallet_version = self.get_version(self.mallet_path)
        with utils.smart_open(fname) as fin:
            for lineno, line in enumerate(fin):
                if lineno == 0 and line.startswith(b"#doc "):
                    continue  # skip the header line if it exists
                parts = line.split()[2:]  # skip "doc" and "source" columns
                # the MALLET doctopic format changed in 2.0.8 to exclude the id,
                # this handles the file differently dependent on the pattern
                if len(parts) == 2 * self.num_topics:
                    # Interleaved "<topic_id> <weight>" pairs: consume two at a time.
                    doc = [
                        (int(id_), float(weight)) for id_, weight in zip(*[iter(parts)] * 2)
                        if abs(float(weight)) > eps
                    ]
                elif len(parts) == self.num_topics and mallet_version != '2.0.7':
                    # Dense weights only: position in the line is the topic id.
                    doc = [(id_, float(weight)) for id_, weight in enumerate(parts) if abs(float(weight)) > eps]
                else:
                    if mallet_version == "2.0.7":
                        """
                        1   1  0  1.0780612802674239  30.005575655428533364  2  0.005575655428533364
                        2   2  0  0.9184413079632608  40.009062076892971008  3  0.009062076892971008
                        In the above example there is a mix of the above if and elif statement.
                        There are neither `2*num_topics` nor `num_topics` elements.
                        It has 2 formats 40.009062076892971008 and 0 1.0780612802674239
                        which cannot be handled by above if elif.
                        Also, there are some topics are missing(meaning that the topic is not there)
                        which is another reason why the above if elif fails even when the `mallet`
                        produces the right results
                        """
                        count = 0
                        doc = []
                        if len(parts) > 0:
                            while count < len(parts):
                                """
                                if section is to deal with formats of type 2 0.034
                                so if count reaches index of 2 and since int(2) == float(2) so if block is executed
                                now there is one extra element afer 2, so count + 1 access should not give an error
                                else section handles formats of type 20.034
                                now count is there on index of 20.034 since float(20.034) != int(20.034) so else block
                                is executed
                                """
                                if float(parts[count]) == int(parts[count]):
                                    if float(parts[count + 1]) > eps:
                                        doc.append((int(parts[count]), float(parts[count + 1])))
                                    count += 2
                                else:
                                    if float(parts[count]) - int(parts[count]) > eps:
                                        doc.append((int(parts[count]) % 10, float(parts[count]) - int(parts[count])))
                                    count += 1
                    else:
                        raise RuntimeError("invalid doc topics format at line %i in %s" % (lineno + 1, fname))
                if renorm:
                    # explicitly normalize weights to sum up to 1.0, just to be sure...
                    total_weight = float(sum([weight for _, weight in doc]))
                    if total_weight:
                        doc = [(id_, float(weight) / total_weight) for id_, weight in doc]
                yield doc
    def malletmodel2ldamodel(mallet_model, gamma_threshold=0.001, iterations=50):
        """Convert :class:`~gensim.models.wrappers.ldamallet.LdaMallet` to :class:`~gensim.models.ldamodel.LdaModel`.

        This works by copying the training model weights (alpha, beta...) from a trained mallet model into the gensim model.

        Parameters
        ----------
        mallet_model : :class:`~gensim.models.wrappers.ldamallet.LdaMallet`
            Trained Mallet model
        gamma_threshold : float, optional
            To be used for inference in the new LdaModel.
        iterations : int, optional
            Number of iterations to be used for inference in the new LdaModel.

        Returns
        -------
        :class:`~gensim.models.ldamodel.LdaModel`
            Gensim native LDA.
        """
        model_gensim = LdaModel(
            id2word=mallet_model.id2word, num_topics=mallet_model.num_topics,
            alpha=mallet_model.alpha, iterations=iterations,
            gamma_threshold=gamma_threshold,
            dtype=numpy.float64  # don't lose precision when converting from MALLET
        )
        # NOTE(review): raw word-topic counts are copied directly into expElogbeta;
        # presumably downstream code expects (exp of) expectation values here — confirm.
        model_gensim.expElogbeta[:] = mallet_model.wordtopics
        return model_gensim
| 22,613 | 37.134907 | 120 | py |
poincare_glove | poincare_glove-master/gensim/models/wrappers/varembed.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Anmol Gulati <anmol01gulati@gmail.com>
# Copyright (C) 2017 Radim Rehurek <radimrehurek@seznam.cz>
"""Python wrapper around `Varembed model <https://github.com/rguthrie3/MorphologicalPriorsForWordEmbeddings>`_.
Original paper:`"Morphological Priors for Probabilistic Neural Word Embeddings" <http://arxiv.org/pdf/1608.01056.pdf>`_.
Notes
-----
* This module allows ability to obtain word vectors for out-of-vocabulary words, for the Varembed model.
* The wrapped model can not be updated with new documents for online training.
"""
import logging
import numpy as np
from gensim import utils
from gensim.models.keyedvectors import KeyedVectors
from gensim.models.word2vec import Vocab
logger = logging.getLogger(__name__)
class VarEmbed(KeyedVectors):
    """Python wrapper using `Varembed <https://github.com/rguthrie3/MorphologicalPriorsForWordEmbeddings>`_.

    Warnings
    --------
    This is **only** python wrapper for `Varembed <https://github.com/rguthrie3/MorphologicalPriorsForWordEmbeddings>`_,
    this allows to load pre-trained models only.
    """
    def __init__(self):
        # Both sizes are filled in by `load_word_embeddings`.
        self.vector_size = 0
        self.vocab_size = 0

    @classmethod
    def load_varembed_format(cls, vectors, morfessor_model=None):
        """Load the word vectors into matrix from the varembed output vector files.

        Parameters
        ----------
        vectors : dict
            Pickle file containing the word vectors.
        morfessor_model : str, optional
            Path to the trained morfessor model.

        Returns
        -------
        :class:`~gensim.models.wrappers.varembed.VarEmbed`
            Ready to use instance.
        """
        result = cls()
        if vectors is None:
            raise Exception("Please provide vectors binary to load varembed model")
        d = utils.unpickle(vectors)
        word_to_ix = d['word_to_ix']
        morpho_to_ix = d['morpho_to_ix']
        word_embeddings = d['word_embeddings']
        morpho_embeddings = d['morpheme_embeddings']
        result.load_word_embeddings(word_embeddings, word_to_ix)
        if morfessor_model:
            try:
                import morfessor
                morfessor_model = morfessor.MorfessorIO().read_binary_model_file(morfessor_model)
                result.add_morphemes_to_embeddings(morfessor_model, morpho_embeddings, morpho_to_ix)
            except ImportError:
                # Morfessor Package not found.
                logger.error('Could not import morfessor. Not using morpheme embeddings')
                raise ImportError('Could not import morfessor.')
        logger.info('Loaded varembed model vectors from %s', vectors)
        return result

    def load_word_embeddings(self, word_embeddings, word_to_ix):
        """Loads the word embeddings.

        Parameters
        ----------
        word_embeddings : numpy.ndarray
            Matrix with word-embeddings.
        word_to_ix : dict of (str, int)
            Mapping word to index.
        """
        logger.info("Loading the vocabulary")
        self.vocab = {}
        self.index2word = []
        counts = {}
        for word in word_to_ix:
            counts[word] = counts.get(word, 0) + 1
        self.vocab_size = len(counts)
        self.vector_size = word_embeddings.shape[1]
        self.syn0 = np.zeros((self.vocab_size, self.vector_size))
        self.index2word = [None] * self.vocab_size
        # BUGFIX: the original logged len(self.vocab) *before* the vocab dict was
        # populated, so it always reported 0 words; report the real size instead.
        logger.info("Corpus has %i words", self.vocab_size)
        for word_id, word in enumerate(counts):
            self.vocab[word] = Vocab(index=word_id, count=counts[word])
            self.syn0[word_id] = word_embeddings[word_to_ix[word]]
            self.index2word[word_id] = word
        assert((len(self.vocab), self.vector_size) == self.syn0.shape)
        logger.info("Loaded matrix of %d size and %d dimensions", self.vocab_size, self.vector_size)

    def add_morphemes_to_embeddings(self, morfessor_model, morpho_embeddings, morpho_to_ix):
        """Include morpheme embeddings into vectors.

        Adds, for each vocabulary word, the sum of the embeddings of its
        morfessor-derived morphemes to the word's vector in place.

        Parameters
        ----------
        morfessor_model : :class:`morfessor.baseline.BaselineModel`
            Morfessor model.
        morpho_embeddings : dict
            Pickle file containing morpheme embeddings.
        morpho_to_ix : dict
            Mapping morpheme to index.
        """
        for word in self.vocab:
            # Unknown morphemes fall back to index -1 (the last embedding row).
            morpheme_embedding = np.array(
                [
                    morpho_embeddings[morpho_to_ix.get(m, -1)]
                    for m in morfessor_model.viterbi_segment(word)[0]
                ]
            ).sum(axis=0)
            self.syn0[self.vocab[word].index] += morpheme_embedding
        logger.info("Added morphemes to word vectors")
| 4,782 | 35.792308 | 120 | py |
poincare_glove | poincare_glove-master/gensim/models/wrappers/fasttext.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Jayant Jain <jayantjain1992@gmail.com>
# Copyright (C) 2017 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Warnings
--------
.. deprecated:: 3.2.0
Use :mod:`gensim.models.fasttext` instead.
Python wrapper around word representation learning from FastText, a library for efficient learning
of word representations and sentence classification [1].
This module allows training a word embedding from a training corpus with the additional ability
to obtain word vectors for out-of-vocabulary words, using the fastText C implementation.
The wrapped model can NOT be updated with new documents for online training -- use gensim's
`Word2Vec` for that.
Example:
>>> from gensim.models.wrappers import FastText
>>> model = FastText.train('/Users/kofola/fastText/fasttext', corpus_file='text8')
>>> print model['forests'] # prints vector for given out-of-vocabulary word
.. [1] https://github.com/facebookresearch/fastText#enriching-word-vectors-with-subword-information
"""
from gensim.models.deprecated.fasttext_wrapper import FastText, FastTextKeyedVectors # noqa:F401
from gensim.models.deprecated.fasttext_wrapper import ft_hash, compute_ngrams # noqa:F401
| 1,296 | 32.25641 | 99 | py |
poincare_glove | poincare_glove-master/gensim/models/wrappers/__init__.py | """
This package contains wrappers for other topic modeling programs.
"""
from .ldamallet import LdaMallet # noqa:F401
from .dtmmodel import DtmModel # noqa:F401
from .ldavowpalwabbit import LdaVowpalWabbit # noqa:F401
from .fasttext import FastText # noqa:F401
from .wordrank import Wordrank # noqa:F401
from .varembed import VarEmbed # noqa:F401
| 355 | 31.363636 | 65 | py |
poincare_glove | poincare_glove-master/gensim/models/wrappers/dtmmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Artyom Topchyan <artyom.topchyan@live.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
# Based on Copyright (C) 2014 Radim Rehurek <radimrehurek@seznam.cz>
"""Python wrapper for `Dynamic Topic Models (DTM) <http://www.cs.columbia.edu/~blei/papers/BleiLafferty2006a.pdf>`_
and the `Document Influence Model (DIM) <http://www.cs.columbia.edu/~blei/papers/GerrishBlei2010.pdf>`_.
Installation
------------
You have 2 ways, how to make binaries:
#. Use precompiled binaries for your OS version from `/magsilva/dtm/ <https://github.com/magsilva/dtm/tree/master/bin>`_
#. Compile binaries manually from `/blei-lab/dtm <https://github.com/blei-lab/dtm.git>`_
(original instruction available in https://github.com/blei-lab/dtm/blob/master/README.md), or use this ::
git clone https://github.com/blei-lab/dtm.git
sudo apt-get install libgsl0-dev
cd dtm/dtm
make
Examples
--------
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.models.wrappers import DtmModel
>>>
>>> path_to_dtm_binary = "/path/to/dtm/binary"
>>> model = DtmModel(
... path_to_dtm_binary, corpus=common_corpus, id2word=common_dictionary,
... time_slices=[1] * len(common_corpus)
... )
"""
import logging
import random
import warnings
import tempfile
import os
from subprocess import PIPE
import numpy as np
from gensim import utils, corpora, matutils
from gensim.utils import check_output
logger = logging.getLogger(__name__)
class DtmModel(utils.SaveLoad):
"""Python wrapper using `DTM implementation <https://github.com/magsilva/dtm/tree/master/bin>`_.
Communication between DTM and Python takes place by passing around data files on disk and executing
the DTM binary as a subprocess.
Warnings
--------
This is **only** python wrapper for `DTM implementation <https://github.com/magsilva/dtm/tree/master/bin>`_,
you need to install original implementation first and pass the path to binary to ``dtm_path``.
"""
def __init__(self, dtm_path, corpus=None, time_slices=None, mode='fit', model='dtm', num_topics=100,
id2word=None, prefix=None, lda_sequence_min_iter=6, lda_sequence_max_iter=20, lda_max_em_iter=10,
alpha=0.01, top_chain_var=0.005, rng_seed=0, initialize_lda=True):
"""
Parameters
----------
dtm_path : str
Path to the dtm binary, e.g. `/home/username/dtm/dtm/main`.
corpus : iterable of iterable of (int, int)
Collection of texts in BoW format.
time_slices : list of int
Sequence of timestamps.
mode : {'fit', 'time'}, optional
Controls the mode of the mode: 'fit' is for training, 'time' for analyzing documents through time
according to a DTM, basically a held out set.
model : {'fixed', 'dtm'}, optional
Control model that will be runned: 'fixed' is for DIM and 'dtm' for DTM.
num_topics : int, optional
Number of topics.
id2word : :class:`~gensim.corpora.dictionary.Dictionary`, optional
Mapping between tokens ids and words from corpus, if not specified - will be inferred from `corpus`.
prefix : str, optional
Prefix for produced temporary files.
lda_sequence_min_iter : int, optional
Min iteration of LDA.
lda_sequence_max_iter : int, optional
Max iteration of LDA.
lda_max_em_iter : int, optional
Max em optimization iterations in LDA.
alpha : int, optional
Hyperparameter that affects sparsity of the document-topics for the LDA models in each timeslice.
top_chain_var : int, optional
Hyperparameter that affects.
rng_seed : int, optional
Random seed.
initialize_lda : bool, optional
If True - initialize DTM with LDA.
"""
if not os.path.isfile(dtm_path):
raise ValueError("dtm_path must point to the binary file, not to a folder")
self.dtm_path = dtm_path
self.id2word = id2word
if self.id2word is None:
logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
else:
self.num_terms = 0 if not self.id2word else 1 + max(self.id2word.keys())
if self.num_terms == 0:
raise ValueError("cannot compute DTM over an empty collection (no terms)")
self.num_topics = num_topics
try:
lencorpus = len(corpus)
except TypeError:
logger.warning("input corpus stream has no len(); counting documents")
lencorpus = sum(1 for _ in corpus)
if lencorpus == 0:
raise ValueError("cannot compute DTM over an empty corpus")
if model == "fixed" and any(not text for text in corpus):
raise ValueError("""There is a text without words in the input corpus.
This breaks method='fixed' (The DIM model).""")
if lencorpus != sum(time_slices):
raise ValueError(
"mismatched timeslices %{slices} for corpus of len {clen}"
.format(slices=sum(time_slices), clen=lencorpus)
)
self.lencorpus = lencorpus
if prefix is None:
rand_prefix = hex(random.randint(0, 0xffffff))[2:] + '_'
prefix = os.path.join(tempfile.gettempdir(), rand_prefix)
self.prefix = prefix
self.time_slices = time_slices
self.lda_sequence_min_iter = int(lda_sequence_min_iter)
self.lda_sequence_max_iter = int(lda_sequence_max_iter)
self.lda_max_em_iter = int(lda_max_em_iter)
self.alpha = alpha
self.top_chain_var = top_chain_var
self.rng_seed = rng_seed
self.initialize_lda = str(initialize_lda).lower()
self.lambda_ = None
self.obs_ = None
self.lhood_ = None
self.gamma_ = None
self.init_alpha = None
self.init_beta = None
self.init_ss = None
self.em_steps = []
self.influences_time = []
if corpus is not None:
self.train(corpus, time_slices, mode, model)
def fout_liklihoods(self):
"""Get path to temporary lhood data file.
Returns
-------
str
Path to lhood data file.
"""
return self.prefix + 'train_out/lda-seq/' + 'lhoods.dat'
def fout_gamma(self):
"""Get path to temporary gamma data file.
Returns
-------
str
Path to gamma data file.
"""
return self.prefix + 'train_out/lda-seq/' + 'gam.dat'
def fout_prob(self):
"""Get template of path to temporary file.
Returns
-------
str
Path to file.
"""
return self.prefix + 'train_out/lda-seq/' + 'topic-{i}-var-e-log-prob.dat'
def fout_observations(self):
"""Get template of path to temporary file.
Returns
-------
str
Path to file.
"""
return self.prefix + 'train_out/lda-seq/' + 'topic-{i}-var-obs.dat'
def fout_influence(self):
"""Get template of path to temporary file.
Returns
-------
str
Path to file.
"""
return self.prefix + 'train_out/lda-seq/' + 'influence_time-{i}'
def foutname(self):
"""Get path to temporary file.
Returns
-------
str
Path to file.
"""
return self.prefix + 'train_out'
def fem_steps(self):
"""Get path to temporary em_step data file.
Returns
-------
str
Path to em_step data file.
"""
return self.prefix + 'train_out/' + 'em_log.dat'
def finit_alpha(self):
"""Get path to initially trained lda alpha file.
Returns
-------
str
Path to initially trained lda alpha file.
"""
return self.prefix + 'train_out/' + 'initial-lda.alpha'
def finit_beta(self):
"""Get path to initially trained lda beta file.
Returns
-------
str
Path to initially trained lda beta file.
"""
return self.prefix + 'train_out/' + 'initial-lda.beta'
def flda_ss(self):
"""Get path to initial lda binary file.
Returns
-------
str
Path to initial lda binary file.
"""
return self.prefix + 'train_out/' + 'initial-lda-ss.dat'
def fcorpustxt(self):
"""Get path to temporary file.
Returns
-------
str
Path to multiple train binary file.
"""
return self.prefix + 'train-mult.dat'
def fcorpus(self):
"""Get path to corpus file.
Returns
-------
str
Path to corpus file.
"""
return self.prefix + 'train'
def ftimeslices(self):
"""Get path to time slices binary file.
Returns
-------
str
Path to time slices binary file.
"""
return self.prefix + 'train-seq.dat'
    def convert_input(self, corpus, time_slices):
        """Convert corpus into LDA-C format by :class:`~gensim.corpora.bleicorpus.BleiCorpus` and save to temp file.

        Also writes the time-slice counts file at the path produced by
        :meth:`~gensim.models.wrappers.dtmmodel.DtmModel.ftimeslices`.

        Parameters
        ----------
        corpus : iterable of iterable of (int, float)
            Corpus in BoW format.
        time_slices : list of int
            Sequence of timestamps.
        """
        logger.info("serializing temporary corpus to %s", self.fcorpustxt())
        # write out the corpus in a file format that DTM understands:
        corpora.BleiCorpus.save_corpus(self.fcorpustxt(), corpus)
        with utils.smart_open(self.ftimeslices(), 'wb') as fout:
            # NOTE(review): the header count uses self.time_slices while the body
            # iterates the `time_slices` argument — presumably always the same
            # object in practice; confirm before relying on a mismatch.
            fout.write(utils.to_utf8(str(len(self.time_slices)) + "\n"))
            for sl in time_slices:
                fout.write(utils.to_utf8(str(sl) + "\n"))
    def train(self, corpus, time_slices, mode, model):
        """Train DTM model.

        Serializes the corpus, invokes the DTM binary, then loads all of its
        output files (likelihoods, gamma, per-topic lambda/obs, influences)
        back into numpy arrays on this instance.

        Parameters
        ----------
        corpus : iterable of iterable of (int, int)
            Collection of texts in BoW format.
        time_slices : list of int
            Sequence of timestamps.
        mode : {'fit', 'time'}, optional
            Controls the mode of the mode: 'fit' is for training, 'time' for analyzing documents through time
            according to a DTM, basically a held out set.
        model : {'fixed', 'dtm'}, optional
            Control model that will be runned: 'fixed' is for DIM and 'dtm' for DTM.
        """
        self.convert_input(corpus, time_slices)
        arguments = \
            "--ntopics={p0} --model={mofrl} --mode={p1} --initialize_lda={p2} --corpus_prefix={p3} " \
            "--outname={p4} --alpha={p5}".format(
                p0=self.num_topics, mofrl=model, p1=mode, p2=self.initialize_lda,
                p3=self.fcorpus(), p4=self.foutname(), p5=self.alpha
            )
        params = \
            "--lda_max_em_iter={p0} --lda_sequence_min_iter={p1} --lda_sequence_max_iter={p2} " \
            "--top_chain_var={p3} --rng_seed={p4} ".format(
                p0=self.lda_max_em_iter, p1=self.lda_sequence_min_iter, p2=self.lda_sequence_max_iter,
                p3=self.top_chain_var, p4=self.rng_seed
            )
        arguments = arguments + " " + params
        logger.info("training DTM with args %s", arguments)
        cmd = [self.dtm_path] + arguments.split()
        logger.info("Running command %s", cmd)
        check_output(args=cmd, stderr=PIPE)
        self.em_steps = np.loadtxt(self.fem_steps())
        self.init_ss = np.loadtxt(self.flda_ss())
        # NOTE(review): self.initialize_lda is the *string* 'true'/'false', so this
        # branch is always taken — presumably harmless because the binary always
        # writes these files; confirm before changing.
        if self.initialize_lda:
            self.init_alpha = np.loadtxt(self.finit_alpha())
            self.init_beta = np.loadtxt(self.finit_beta())
        self.lhood_ = np.loadtxt(self.fout_liklihoods())
        # document-topic proportions
        self.gamma_ = np.loadtxt(self.fout_gamma())
        # cast to correct shape: gamma[5, 10] is the proportion of the 10th topic
        # in doc 5
        self.gamma_.shape = (self.lencorpus, self.num_topics)
        # normalize proportions
        self.gamma_ /= self.gamma_.sum(axis=1)[:, np.newaxis]
        self.lambda_ = np.zeros((self.num_topics, self.num_terms * len(self.time_slices)))
        self.obs_ = np.zeros((self.num_topics, self.num_terms * len(self.time_slices)))
        # one lambda/obs file per topic, numbered with a zero-padded suffix
        for t in range(self.num_topics):
            topic = "%03d" % t
            self.lambda_[t, :] = np.loadtxt(self.fout_prob().format(i=topic))
            self.obs_[t, :] = np.loadtxt(self.fout_observations().format(i=topic))
        # cast to correct shape: lambda[5, 10, 0] is the proportion of the 10th
        # topic in doc 5 at time 0
        self.lambda_.shape = (self.num_topics, self.num_terms, len(self.time_slices))
        self.obs_.shape = (self.num_topics, self.num_terms, len(self.time_slices))
        # extract document influence on topics for each time slice
        # influences_time[0] , influences at time 0
        if model == 'fixed':
            for k, t in enumerate(self.time_slices):
                stamp = "%03d" % k
                influence = np.loadtxt(self.fout_influence().format(i=stamp))
                influence.shape = (t, self.num_topics)
                # influence[2,5] influence of document 2 on topic 5
                self.influences_time.append(influence)
def print_topics(self, num_topics=10, times=5, num_words=10):
"""Alias for :meth:`~gensim.models.wrappers.dtmmodel.DtmModel.show_topics`.
Parameters
----------
num_topics : int, optional
Number of topics to return, set `-1` to get all topics.
times : int, optional
Number of times.
num_words : int, optional
Number of words.
Returns
-------
list of str
Topics as a list of strings
"""
return self.show_topics(num_topics, times, num_words, log=True)
def show_topics(self, num_topics=10, times=5, num_words=10, log=False, formatted=True):
"""Get the `num_words` most probable words for `num_topics` number of topics at 'times' time slices.
Parameters
----------
num_topics : int, optional
Number of topics to return, set `-1` to get all topics.
times : int, optional
Number of times.
num_words : int, optional
Number of words.
log : bool, optional
THIS PARAMETER WILL BE IGNORED.
formatted : bool, optional
If `True` - return the topics as a list of strings, otherwise as lists of (weight, word) pairs.
Returns
-------
list of str
Topics as a list of strings (if formatted=True) **OR**
list of (float, str)
Topics as list of (weight, word) pairs (if formatted=False)
"""
if num_topics < 0 or num_topics >= self.num_topics:
num_topics = self.num_topics
chosen_topics = range(num_topics)
else:
num_topics = min(num_topics, self.num_topics)
chosen_topics = range(num_topics)
if times < 0 or times >= len(self.time_slices):
times = len(self.time_slices)
chosen_times = range(times)
else:
times = min(times, len(self.time_slices))
chosen_times = range(times)
shown = []
for time in chosen_times:
for i in chosen_topics:
if formatted:
topic = self.print_topic(i, time, num_words=num_words)
else:
topic = self.show_topic(i, time, num_words=num_words)
shown.append(topic)
return shown
def show_topic(self, topicid, time, topn=50, num_words=None):
"""Get `num_words` most probable words for the given `topicid`.
Parameters
----------
topicid : int
Id of topic.
time : int
Timestamp.
topn : int, optional
Top number of topics that you'll receive.
num_words : int, optional
DEPRECATED PARAMETER, use `topn` instead.
Returns
-------
list of (float, str)
Sequence of probable words, as a list of `(word_probability, word)`.
"""
if num_words is not None: # deprecated num_words is used
warnings.warn("The parameter `num_words` is deprecated, will be removed in 4.0.0, use `topn` instead.")
topn = num_words
topics = self.lambda_[:, :, time]
topic = topics[topicid]
# likelihood to probability
topic = np.exp(topic)
# normalize to probability dist
topic = topic / topic.sum()
# sort according to prob
bestn = matutils.argsort(topic, topn, reverse=True)
beststr = [(topic[idx], self.id2word[idx]) for idx in bestn]
return beststr
def print_topic(self, topicid, time, topn=10, num_words=None):
    """Get the given topic, formatted as a string.

    Parameters
    ----------
    topicid : int
        Id of the topic.
    time : int
        Timestamp (time-slice index).
    topn : int, optional
        Number of top words to include.
    num_words : int, optional
        DEPRECATED PARAMETER, use `topn` instead.

    Returns
    -------
    str
        The topic in string form, like '0.132*someword + 0.412*otherword + ...'.

    """
    if num_words is not None:  # deprecated alias for `topn`
        warnings.warn("The parameter `num_words` is deprecated, will be removed in 4.0.0, use `topn` instead.")
        topn = num_words

    terms = ('%.3f*%s' % pair for pair in self.show_topic(topicid, time, topn))
    return ' + '.join(terms)
def dtm_vis(self, corpus, time):
    """Get data in the format required by pyLDAvis for one DTM time-slice.

    Parameters
    ----------
    corpus : iterable of iterable of (int, float)
        Collection of texts in BoW format.
    time : int
        Time-slice index.

    Returns
    -------
    doc_topic : numpy.ndarray
        Document-topic proportions.
    topic_term : numpy.ndarray
        Topic-term proportions, scaled for pyLDAvis.
    doc_lengths : list of int
        Length of each document in `corpus`.
    term_frequency : numpy.ndarray
        Total frequency of each vocabulary word across `corpus`.
    vocab : list of str
        Vocabulary words, in id order.

    """
    # Exponentiate the log-likelihoods ONCE (the original recomputed the
    # exp twice), then normalize over the whole matrix and scale by the
    # number of topics, as pyLDAvis expects.
    exp_lambda = np.exp(self.lambda_[:, :, time])
    topic_term = exp_lambda / exp_lambda.sum() * self.num_topics
    doc_topic = self.gamma_

    doc_lengths = [len(doc) for doc in corpus]

    # Accumulate corpus-wide frequency for every word id.
    term_frequency = np.zeros(len(self.id2word))
    for doc in corpus:
        for word_id, freq in doc:
            term_frequency[word_id] += freq

    vocab = [self.id2word[i] for i in range(len(self.id2word))]
    # These should be passed to `pyLDAvis.prepare` to visualise one
    # time-slice of DTM topics.
    return doc_topic, topic_term, doc_lengths, term_frequency, vocab
def dtm_coherence(self, time, num_words=20):
    """Get all topics of one time-slice, as bare word lists (no probabilities),
    suitable for "u_mass" or "c_v" coherence computation.

    Parameters
    ----------
    time : int
        Timestamp (time-slice index).
    num_words : int
        Number of words per topic.

    Returns
    -------
    list of list of str
        One list of words per topic, for the given time-slice.

    """
    # Drop the probability component of each (prob, word) pair.
    return [
        [word for _, word in self.show_topic(topicid=topic_no, time=time, num_words=num_words)]
        for topic_no in range(self.num_topics)
    ]
| 21,044 | 33.5 | 120 | py |
poincare_glove | poincare_glove-master/gensim/parsing/preprocessing.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module contains methods for parsing and preprocessing strings. Let's consider the most noticeable:
* :func:`~gensim.parsing.preprocessing.remove_stopwords` - remove all stopwords from string
* :func:`~gensim.parsing.preprocessing.preprocess_string` - preprocess string (in default NLP meaning)
Examples:
---------
>>> from gensim.parsing.preprocessing import remove_stopwords
>>> remove_stopwords("Better late than never, but better never late.")
u'Better late never, better late.'
>>>
>>> preprocess_string("<i>Hel 9lo</i> <b>Wo9 rld</b>! Th3 weather_is really g00d today, isn't it?")
[u'hel', u'rld', u'weather', u'todai', u'isn']
Data:
-----
.. data:: STOPWORDS - Set of stopwords from Stone, Denis, Kwantes (2010).
.. data:: RE_PUNCT - Regexp for search an punctuation.
.. data:: RE_TAGS - Regexp for search an tags.
.. data:: RE_NUMERIC - Regexp for search an numbers.
.. data:: RE_NONALPHA - Regexp for search an non-alphabetic character.
.. data:: RE_AL_NUM - Regexp for search a position between letters and digits.
.. data:: RE_NUM_AL - Regexp for search a position between digits and letters .
.. data:: RE_WHITESPACE - Regexp for search space characters.
.. data:: DEFAULT_FILTERS - List of function for string preprocessing.
"""
import re
import string
import glob
from gensim import utils
from gensim.parsing.porter import PorterStemmer
STOPWORDS = frozenset([
'all', 'six', 'just', 'less', 'being', 'indeed', 'over', 'move', 'anyway', 'four', 'not', 'own', 'through',
'using', 'fify', 'where', 'mill', 'only', 'find', 'before', 'one', 'whose', 'system', 'how', 'somewhere',
'much', 'thick', 'show', 'had', 'enough', 'should', 'to', 'must', 'whom', 'seeming', 'yourselves', 'under',
'ours', 'two', 'has', 'might', 'thereafter', 'latterly', 'do', 'them', 'his', 'around', 'than', 'get', 'very',
'de', 'none', 'cannot', 'every', 'un', 'they', 'front', 'during', 'thus', 'now', 'him', 'nor', 'name', 'regarding',
'several', 'hereafter', 'did', 'always', 'who', 'didn', 'whither', 'this', 'someone', 'either', 'each', 'become',
'thereupon', 'sometime', 'side', 'towards', 'therein', 'twelve', 'because', 'often', 'ten', 'our', 'doing', 'km',
'eg', 'some', 'back', 'used', 'up', 'go', 'namely', 'computer', 'are', 'further', 'beyond', 'ourselves', 'yet',
'out', 'even', 'will', 'what', 'still', 'for', 'bottom', 'mine', 'since', 'please', 'forty', 'per', 'its',
'everything', 'behind', 'does', 'various', 'above', 'between', 'it', 'neither', 'seemed', 'ever', 'across', 'she',
'somehow', 'be', 'we', 'full', 'never', 'sixty', 'however', 'here', 'otherwise', 'were', 'whereupon', 'nowhere',
'although', 'found', 'alone', 're', 'along', 'quite', 'fifteen', 'by', 'both', 'about', 'last', 'would',
'anything', 'via', 'many', 'could', 'thence', 'put', 'against', 'keep', 'etc', 'amount', 'became', 'ltd', 'hence',
'onto', 'or', 'con', 'among', 'already', 'co', 'afterwards', 'formerly', 'within', 'seems', 'into', 'others',
'while', 'whatever', 'except', 'down', 'hers', 'everyone', 'done', 'least', 'another', 'whoever', 'moreover',
'couldnt', 'throughout', 'anyhow', 'yourself', 'three', 'from', 'her', 'few', 'together', 'top', 'there', 'due',
'been', 'next', 'anyone', 'eleven', 'cry', 'call', 'therefore', 'interest', 'then', 'thru', 'themselves',
'hundred', 'really', 'sincere', 'empty', 'more', 'himself', 'elsewhere', 'mostly', 'on', 'fire', 'am', 'becoming',
'hereby', 'amongst', 'else', 'part', 'everywhere', 'too', 'kg', 'herself', 'former', 'those', 'he', 'me', 'myself',
'made', 'twenty', 'these', 'was', 'bill', 'cant', 'us', 'until', 'besides', 'nevertheless', 'below', 'anywhere',
'nine', 'can', 'whether', 'of', 'your', 'toward', 'my', 'say', 'something', 'and', 'whereafter', 'whenever',
'give', 'almost', 'wherever', 'is', 'describe', 'beforehand', 'herein', 'doesn', 'an', 'as', 'itself', 'at',
'have', 'in', 'seem', 'whence', 'ie', 'any', 'fill', 'again', 'hasnt', 'inc', 'thereby', 'thin', 'no', 'perhaps',
'latter', 'meanwhile', 'when', 'detail', 'same', 'wherein', 'beside', 'also', 'that', 'other', 'take', 'which',
'becomes', 'you', 'if', 'nobody', 'unless', 'whereas', 'see', 'though', 'may', 'after', 'upon', 'most', 'hereupon',
'eight', 'but', 'serious', 'nothing', 'such', 'why', 'off', 'a', 'don', 'whereby', 'third', 'i', 'whole', 'noone',
'sometimes', 'well', 'amoungst', 'yours', 'their', 'rather', 'without', 'so', 'five', 'the', 'first', 'with',
'make', 'once'
])
# Precompiled patterns used by the strip_* helpers below (all unicode-aware).
RE_PUNCT = re.compile(r'([%s])+' % re.escape(string.punctuation), re.UNICODE)  # runs of punctuation
RE_TAGS = re.compile(r"<([^>]+)>", re.UNICODE)  # SGML/HTML-style tags
RE_NUMERIC = re.compile(r"[0-9]+", re.UNICODE)  # runs of digits
RE_NONALPHA = re.compile(r"\W", re.UNICODE)  # any non-word character
RE_AL_NUM = re.compile(r"([a-z]+)([0-9]+)", flags=re.UNICODE)  # letter->digit boundary
RE_NUM_AL = re.compile(r"([0-9]+)([a-z]+)", flags=re.UNICODE)  # digit->letter boundary
RE_WHITESPACE = re.compile(r"(\s)+", re.UNICODE)  # runs of whitespace
def remove_stopwords(s):
    """Remove :const:`~gensim.parsing.preprocessing.STOPWORDS` from `s`.

    Parameters
    ----------
    s : str

    Returns
    -------
    str
        Unicode string with all stopword tokens removed.

    Examples
    --------
    >>> from gensim.parsing.preprocessing import remove_stopwords
    >>> remove_stopwords("Better late than never, but better never late.")
    u'Better late never, better late.'

    """
    text = utils.to_unicode(s)
    kept = [token for token in text.split() if token not in STOPWORDS]
    return " ".join(kept)
def strip_punctuation(s):
    """Replace punctuation characters in `s` with spaces,
    using :const:`~gensim.parsing.preprocessing.RE_PUNCT`.

    Parameters
    ----------
    s : str

    Returns
    -------
    str
        Unicode string with punctuation replaced by single spaces.

    Examples
    --------
    >>> from gensim.parsing.preprocessing import strip_punctuation
    >>> strip_punctuation("A semicolon is a stronger break than a comma, but not as much as a full stop!")
    u'A semicolon is a stronger break than a comma but not as much as a full stop '

    """
    return RE_PUNCT.sub(" ", utils.to_unicode(s))


# Historical alias, kept for backward compatibility.
strip_punctuation2 = strip_punctuation
def strip_tags(s):
    """Remove HTML/SGML-style tags from `s`,
    using :const:`~gensim.parsing.preprocessing.RE_TAGS`.

    Parameters
    ----------
    s : str

    Returns
    -------
    str
        Unicode string without tags.

    Examples
    --------
    >>> from gensim.parsing.preprocessing import strip_tags
    >>> strip_tags("<i>Hello</i> <b>World</b>!")
    u'Hello World!'

    """
    return RE_TAGS.sub("", utils.to_unicode(s))
def strip_short(s, minsize=3):
    """Remove words shorter than `minsize` characters from `s`.

    Parameters
    ----------
    s : str
    minsize : int, optional
        Minimum word length to keep.

    Returns
    -------
    str
        Unicode string without short words.

    Examples
    --------
    >>> from gensim.parsing.preprocessing import strip_short
    >>> strip_short("salut les amis du 59")
    u'salut les amis'
    >>> strip_short("one two three four five six seven eight nine ten", minsize=5)
    u'three seven eight'

    """
    tokens = utils.to_unicode(s).split()
    return " ".join(token for token in tokens if len(token) >= minsize)
def strip_numeric(s):
    """Remove digits from `s`, using :const:`~gensim.parsing.preprocessing.RE_NUMERIC`.

    Parameters
    ----------
    s : str

    Returns
    -------
    str
        Unicode string without digits.

    Examples
    --------
    >>> from gensim.parsing.preprocessing import strip_numeric
    >>> strip_numeric("0text24gensim365test")
    u'textgensimtest'

    """
    return RE_NUMERIC.sub("", utils.to_unicode(s))
def strip_non_alphanum(s):
    """Replace non-word characters in `s` with spaces,
    using :const:`~gensim.parsing.preprocessing.RE_NONALPHA`.

    Word characters are alphanumerics and underscore.

    Parameters
    ----------
    s : str

    Returns
    -------
    str
        Unicode string containing word characters and spaces only.

    Examples
    --------
    >>> from gensim.parsing.preprocessing import strip_non_alphanum
    >>> strip_non_alphanum("if-you#can%read$this&then@this#method^works")
    u'if you can read this then this method works'

    """
    return RE_NONALPHA.sub(" ", utils.to_unicode(s))
def strip_multiple_whitespaces(s):
    r"""Collapse runs of whitespace (spaces, tabs, line breaks) in `s` into a
    single space, using :const:`~gensim.parsing.preprocessing.RE_WHITESPACE`.

    Parameters
    ----------
    s : str

    Returns
    -------
    str
        Unicode string without repeated whitespace characters.

    Examples
    --------
    >>> from gensim.parsing.preprocessing import strip_multiple_whitespaces
    >>> strip_multiple_whitespaces("salut" + '\r' + " les" + '\n' + " loulous!")
    u'salut les loulous!'

    """
    return RE_WHITESPACE.sub(" ", utils.to_unicode(s))
def split_alphanum(s):
    """Insert spaces between adjacent digit and letter runs in `s`, using
    :const:`~gensim.parsing.preprocessing.RE_AL_NUM` and
    :const:`~gensim.parsing.preprocessing.RE_NUM_AL`.

    Parameters
    ----------
    s : str

    Returns
    -------
    str
        Unicode string with spaces between digits and letters.

    Examples
    --------
    >>> from gensim.parsing.preprocessing import split_alphanum
    >>> split_alphanum("24.0hours7 days365 a1b2c3")
    u'24.0 hours 7 days 365 a 1 b 2 c 3'

    """
    # First split letter->digit boundaries, then digit->letter boundaries.
    text = RE_AL_NUM.sub(r"\1 \2", utils.to_unicode(s))
    return RE_NUM_AL.sub(r"\1 \2", text)
def stem_text(text):
    """Lowercase `text` and Porter-stem every word in it.

    Parameters
    ----------
    text : str

    Returns
    -------
    str
        Unicode, lowercased, porter-stemmed version of `text`.

    Examples
    --------
    >>> from gensim.parsing.preprocessing import stem_text
    >>> stem_text("While it is quite useful to be able to search a large collection of documents almost instantly.")
    u'while it is quit us to be abl to search a larg collect of document almost instantly.'

    """
    stemmer = PorterStemmer()
    return ' '.join(stemmer.stem(token) for token in utils.to_unicode(text).split())


# Historical alias, kept for backward compatibility.
stem = stem_text
# Default preprocessing pipeline applied by :func:`preprocess_string`, in order.
DEFAULT_FILTERS = [
    lambda x: x.lower(), strip_tags, strip_punctuation,
    strip_multiple_whitespaces, strip_numeric,
    remove_stopwords, strip_short, stem_text
]
def preprocess_string(s, filters=DEFAULT_FILTERS):
    """Apply each filter in `filters` to `s`, in order, then split on whitespace.

    The default pipeline is :const:`~gensim.parsing.preprocessing.DEFAULT_FILTERS`:
    lowercasing, :func:`~gensim.parsing.preprocessing.strip_tags`,
    :func:`~gensim.parsing.preprocessing.strip_punctuation`,
    :func:`~gensim.parsing.preprocessing.strip_multiple_whitespaces`,
    :func:`~gensim.parsing.preprocessing.strip_numeric`,
    :func:`~gensim.parsing.preprocessing.remove_stopwords`,
    :func:`~gensim.parsing.preprocessing.strip_short`,
    :func:`~gensim.parsing.preprocessing.stem_text`.

    Parameters
    ----------
    s : str
    filters : list of callable, optional
        Each takes and returns a string.

    Returns
    -------
    list of str
        Processed tokens.

    Examples
    --------
    >>> from gensim.parsing.preprocessing import preprocess_string
    >>> preprocess_string("<i>Hel 9lo</i> <b>Wo9 rld</b>! Th3 weather_is really g00d today, isn't it?")
    [u'hel', u'rld', u'weather', u'todai', u'isn']
    >>> CUSTOM_FILTERS = [lambda x: x.lower(), strip_tags, strip_punctuation]
    >>> preprocess_string("<i>Hel 9lo</i> <b>Wo9 rld</b>!", CUSTOM_FILTERS)
    [u'hel', u'9lo', u'wo9', u'rld']

    """
    result = utils.to_unicode(s)
    for transform in filters:
        result = transform(result)
    return result.split()
def preprocess_documents(docs):
    """Apply :const:`~gensim.parsing.preprocessing.DEFAULT_FILTERS` to each document.

    Parameters
    ----------
    docs : list of str

    Returns
    -------
    list of list of str
        One token list per input document.

    Examples
    --------
    >>> from gensim.parsing.preprocessing import preprocess_documents
    >>> preprocess_documents(["<i>Hel 9lo</i> <b>Wo9 rld</b>!", "Th3 weather_is really g00d today, isn't it?"])
    [[u'hel', u'rld'], [u'weather', u'todai', u'isn']]

    """
    return list(map(preprocess_string, docs))
def read_file(path):
    """Read and return the entire raw contents of the file at `path`."""
    with utils.smart_open(path) as handle:
        return handle.read()
def read_files(pattern):
    """Read the contents of every file whose name matches glob `pattern`."""
    return [read_file(match) for match in glob.glob(pattern)]
| 12,675 | 31.839378 | 119 | py |
poincare_glove | poincare_glove-master/gensim/parsing/porter.py | #!/usr/bin/env python
"""Porter Stemming Algorithm
This is the Porter stemming algorithm, ported to Python from the
version coded up in ANSI C by the author. It may be be regarded
as canonical, in that it follows the algorithm presented in [1]_, see also [2]_
Author - Vivake Gupta (v@nano.com), optimizations and cleanup of the code by Lars Buitinck.
Examples:
---------
>>> from gensim.parsing.porter import PorterStemmer
>>>
>>> p = PorterStemmer()
>>> p.stem("apple")
'appl'
>>>
>>> p.stem_sentence("Cats and ponies have meeting")
'cat and poni have meet'
>>>
>>> p.stem_documents(["Cats and ponies", "have meeting"])
['cat and poni', 'have meet']
.. [1] Porter, 1980, An algorithm for suffix stripping, http://www.cs.odu.edu/~jbollen/IR04/readings/readings5.pdf
.. [2] http://www.tartarus.org/~martin/PorterStemmer
"""
from six.moves import xrange
class PorterStemmer(object):
    """Porter stemming algorithm (Porter, 1980), ported from the author's ANSI C
    version. See http://www.tartarus.org/~martin/PorterStemmer

    Attributes
    ----------
    b : str
        Buffer holding the word being stemmed; the relevant letters are
        b[0] ... b[k].
    k : int
        Index of the last letter of the (progressively shortened) stem.
    j : int
        General offset into the buffer; set by :meth:`_ends` to the position
        just before a matched suffix.

    Examples
    --------
    >>> p = PorterStemmer()
    >>> p.stem("apple")
    'appl'
    >>> p.stem_sentence("Cats and ponies have meeting")
    'cat and poni have meet'
    >>> p.stem_documents(["Cats and ponies", "have meeting"])
    ['cat and poni', 'have meet']

    """

    def __init__(self):
        self.b = ""  # buffer for word to be stemmed
        self.k = 0
        self.j = 0  # j is a general offset into the string

    def _cons(self, i):
        """Return True if b[i] is a consonant.

        'y' counts as a consonant only at position 0 or right after a vowel.
        """
        ch = self.b[i]
        if ch in "aeiou":
            return False
        if ch == 'y':
            return i == 0 or not self._cons(i - 1)
        return True

    def _m(self):
        """Count consonant sequences between positions 0 and j.

        With c a consonant sequence, v a vowel sequence and <..> optional:

            <c><v>       gives 0
            <c>vc<v>     gives 1
            <c>vcvc<v>   gives 2
            <c>vcvcvc<v> gives 3
        """
        i = 0
        while True:
            if i > self.j:
                return 0
            if not self._cons(i):
                break
            i += 1
        i += 1
        n = 0
        while True:
            while True:
                if i > self.j:
                    return n
                if self._cons(i):
                    break
                i += 1
            i += 1
            n += 1
            while True:
                if i > self.j:
                    return n
                if not self._cons(i):
                    break
                i += 1
            i += 1

    def _vowelinstem(self):
        """Return True if b[0 : j + 1] contains at least one vowel."""
        # py3 builtin `range` replaces the former six.moves.xrange shim.
        return not all(self._cons(i) for i in range(self.j + 1))

    def _doublec(self, j):
        """Return True if b[j - 1 : j + 1] is a double consonant."""
        return j > 0 and self.b[j] == self.b[j - 1] and self._cons(j)

    def _cvc(self, i):
        """Return True if b[i - 2 : i + 1] is consonant-vowel-consonant and the
        final consonant is not 'w', 'x' or 'y'.

        Used when restoring a trailing 'e' on short words, e.g. cav(e),
        lov(e), hop(e), crim(e) -- but snow, box, tray.
        """
        if i < 2 or not self._cons(i) or self._cons(i - 1) or not self._cons(i - 2):
            return False
        return self.b[i] not in "wxy"

    def _ends(self, s):
        """Return True if b[: k + 1] ends with `s`; on success set j to the
        position just before the suffix."""
        if s[-1] != self.b[self.k]:  # tiny speed-up
            return False
        length = len(s)
        if length > (self.k + 1):
            return False
        if self.b[self.k - length + 1:self.k + 1] != s:
            return False
        self.j = self.k - length
        return True

    def _setto(self, s):
        """Append `s` to the stem b[: j + 1], adjusting k."""
        self.b = self.b[:self.j + 1] + s
        self.k = len(self.b) - 1

    def _r(self, s):
        """Replace the matched suffix with `s`, but only when _m() > 0."""
        if self._m() > 0:
            self._setto(s)

    def _step1ab(self):
        """Get rid of plurals and -ed or -ing, e.g.

            caresses -> caress,  ponies -> poni,  ties -> ti,
            caress -> caress,    cats -> cat,     feed -> feed,
            agreed -> agree,     disabled -> disable,
            matting -> mat,      mating -> mate,  meeting -> meet,
            milling -> mill,     messing -> mess, meetings -> meet
        """
        if self.b[self.k] == 's':
            if self._ends("sses"):
                self.k -= 2
            elif self._ends("ies"):
                self._setto("i")
            elif self.b[self.k - 1] != 's':
                self.k -= 1
        if self._ends("eed"):
            if self._m() > 0:
                self.k -= 1
        elif (self._ends("ed") or self._ends("ing")) and self._vowelinstem():
            self.k = self.j
            if self._ends("at"):
                self._setto("ate")
            elif self._ends("bl"):
                self._setto("ble")
            elif self._ends("iz"):
                self._setto("ize")
            elif self._doublec(self.k):
                if self.b[self.k - 1] not in "lsz":
                    self.k -= 1
            elif self._m() == 1 and self._cvc(self.k):
                self._setto("e")

    def _step1c(self):
        """Turn terminal 'y' into 'i' when there is another vowel in the stem."""
        if self._ends("y") and self._vowelinstem():
            self.b = self.b[:self.k] + 'i'

    def _step2(self):
        """Map double suffixes to single ones, e.g. -ization ( = -ize + -ation)
        maps to -ize. The string before the suffix must give _m() > 0."""
        ch = self.b[self.k - 1]
        if ch == 'a':
            if self._ends("ational"):
                self._r("ate")
            elif self._ends("tional"):
                self._r("tion")
        elif ch == 'c':
            if self._ends("enci"):
                self._r("ence")
            elif self._ends("anci"):
                self._r("ance")
        elif ch == 'e':
            if self._ends("izer"):
                self._r("ize")
        elif ch == 'l':
            if self._ends("bli"):
                self._r("ble")  # --DEPARTURE--
            # To match the published algorithm, replace this phrase with
            # if self._ends("abli"): self._r("able")
            elif self._ends("alli"):
                self._r("al")
            elif self._ends("entli"):
                self._r("ent")
            elif self._ends("eli"):
                self._r("e")
            elif self._ends("ousli"):
                self._r("ous")
        elif ch == 'o':
            if self._ends("ization"):
                self._r("ize")
            elif self._ends("ation"):
                self._r("ate")
            elif self._ends("ator"):
                self._r("ate")
        elif ch == 's':
            if self._ends("alism"):
                self._r("al")
            elif self._ends("iveness"):
                self._r("ive")
            elif self._ends("fulness"):
                self._r("ful")
            elif self._ends("ousness"):
                self._r("ous")
        elif ch == 't':
            if self._ends("aliti"):
                self._r("al")
            elif self._ends("iviti"):
                self._r("ive")
            elif self._ends("biliti"):
                self._r("ble")
        elif ch == 'g':  # --DEPARTURE--
            if self._ends("logi"):
                self._r("log")
            # To match the published algorithm, delete this phrase

    def _step3(self):
        """Deal with -ic-, -full, -ness etc. Similar strategy to _step2."""
        ch = self.b[self.k]
        if ch == 'e':
            if self._ends("icate"):
                self._r("ic")
            elif self._ends("ative"):
                self._r("")
            elif self._ends("alize"):
                self._r("al")
        elif ch == 'i':
            if self._ends("iciti"):
                self._r("ic")
        elif ch == 'l':
            if self._ends("ical"):
                self._r("ic")
            elif self._ends("ful"):
                self._r("")
        elif ch == 's':
            if self._ends("ness"):
                self._r("")

    def _step4(self):
        """Take off -ant, -ence etc., in context <c>vcvc<v>."""
        ch = self.b[self.k - 1]
        if ch == 'a':
            if not self._ends("al"):
                return
        elif ch == 'c':
            if not self._ends("ance") and not self._ends("ence"):
                return
        elif ch == 'e':
            if not self._ends("er"):
                return
        elif ch == 'i':
            if not self._ends("ic"):
                return
        elif ch == 'l':
            if not self._ends("able") and not self._ends("ible"):
                return
        elif ch == 'n':
            if self._ends("ant"):
                pass
            elif self._ends("ement"):
                pass
            elif self._ends("ment"):
                pass
            elif self._ends("ent"):
                pass
            else:
                return
        elif ch == 'o':
            if self._ends("ion") and self.b[self.j] in "st":
                pass
            elif self._ends("ou"):
                pass
            # takes care of -ous
            else:
                return
        elif ch == 's':
            if not self._ends("ism"):
                return
        elif ch == 't':
            if not self._ends("ate") and not self._ends("iti"):
                return
        elif ch == 'u':
            if not self._ends("ous"):
                return
        elif ch == 'v':
            if not self._ends("ive"):
                return
        elif ch == 'z':
            if not self._ends("ize"):
                return
        else:
            return
        if self._m() > 1:
            self.k = self.j

    def _step5(self):
        """Remove a final -e if _m() > 1, and change -ll to -l if _m() > 1."""
        k = self.j = self.k
        if self.b[k] == 'e':
            a = self._m()
            if a > 1 or (a == 1 and not self._cvc(k - 1)):
                self.k -= 1
        if self.b[self.k] == 'l' and self._doublec(self.k) and self._m() > 1:
            self.k -= 1

    def stem(self, w):
        """Stem the word `w` and return the stem.

        Parameters
        ----------
        w : str

        Returns
        -------
        str
            Stemmed version of `w`, e.g. ``stem("ponies") == 'poni'``.

        """
        w = w.lower()
        k = len(w) - 1
        if k <= 1:
            return w  # --DEPARTURE--
        # With this line, strings of length 1 or 2 don't go through the
        # stemming process, although no mention is made of this in the
        # published algorithm. Remove the line to match the published
        # algorithm.
        self.b = w
        self.k = k

        self._step1ab()
        self._step1c()
        self._step2()
        self._step3()
        self._step4()
        self._step5()
        return self.b[:self.k + 1]

    def stem_sentence(self, txt):
        """Stem every whitespace-separated word in `txt` and rejoin with spaces."""
        return " ".join(self.stem(x) for x in txt.split())

    def stem_documents(self, docs):
        """Apply :meth:`stem_sentence` to each document (string) in `docs`."""
        return [self.stem_sentence(x) for x in docs]
if __name__ == '__main__':
import sys
p = PorterStemmer()
for f in sys.argv[1:]:
with open(f) as infile:
for line in infile:
print(p.stem_sentence(line))
| 15,333 | 25.483592 | 120 | py |
poincare_glove | poincare_glove-master/gensim/parsing/__init__.py | """This package contains functions to preprocess raw text"""
from .porter import PorterStemmer # noqa:F401
from .preprocessing import (remove_stopwords, strip_punctuation, strip_punctuation2, # noqa:F401
strip_tags, strip_short, strip_numeric,
strip_non_alphanum, strip_multiple_whitespaces,
split_alphanum, stem_text, preprocess_string,
preprocess_documents, read_file, read_files)
| 498 | 54.444444 | 97 | py |
poincare_glove | poincare_glove-master/gensim/viz/poincare.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Jayant Jain <jayantjain1992@gmail.com>
# Copyright (C) 2017 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Utilities for creating 2-D visualizations of Poincare models and Poincare distance heatmaps.
"""
import logging
from collections import Counter
import numpy as np
import plotly.graph_objs as go
from gensim.models.poincare import PoincareKeyedVectors
logger = logging.getLogger(__name__)
def poincare_2d_visualization(model, tree, figure_title, num_nodes=50, show_node_labels=()):
    """Create a 2-d plot of the nodes and edges of a 2-d poincare embedding.

    Parameters
    ----------
    model : :class:`~gensim.models.poincare.PoincareModel`
        The model to visualize, model size must be 2.
    tree : set
        Set of tuples containing the direct edges present in the original dataset.
    figure_title : str
        Title of the plotted figure.
    num_nodes : int or None
        Number of nodes for which edges are to be plotted.
        If `None`, all edges are plotted.
        Helpful to limit this in case the data is too large to avoid a messy plot.
    show_node_labels : iterable
        Iterable of nodes for which to show labels by default.

    Returns
    -------
    :class:`plotly.graph_objs.Figure`
        Plotly figure that contains plot.

    """
    vectors = model.kv.syn0
    if vectors.shape[1] != 2:
        raise ValueError('Can only plot 2-D vectors')

    node_labels = model.kv.index2word
    nodes_x = list(vectors[:, 0])
    nodes_y = list(vectors[:, 1])
    nodes = go.Scatter(
        x=nodes_x, y=nodes_y,
        mode='markers',
        marker=dict(color='rgb(30, 100, 200)'),
        text=node_labels,
        # plotly expects '<vertical> <horizontal>' here; bare 'bottom' is invalid.
        textposition='bottom center'
    )

    nodes_x, nodes_y, node_labels = [], [], []
    for node in show_node_labels:
        vector = model.kv[node]
        nodes_x.append(vector[0])
        nodes_y.append(vector[1])
        node_labels.append(node)
    nodes_with_labels = go.Scatter(
        x=nodes_x, y=nodes_y,
        mode='markers+text',
        marker=dict(color='rgb(200, 100, 200)'),
        text=node_labels,
        textposition='bottom center'
    )

    node_out_degrees = Counter(hypernym_pair[1] for hypernym_pair in tree)
    if num_nodes is None:
        chosen_nodes = list(node_out_degrees.keys())
    else:
        chosen_nodes = list(sorted(node_out_degrees.keys(), key=lambda k: -node_out_degrees[k]))[:num_nodes]
    # Set for O(1) membership tests in the edge loop below.
    chosen_node_set = set(chosen_nodes)

    edges_x = []
    edges_y = []
    for u, v in tree:
        if not (u in chosen_node_set or v in chosen_node_set):
            continue
        vector_u = model.kv[u]
        vector_v = model.kv[v]
        edges_x += [vector_u[0], vector_v[0], None]
        edges_y += [vector_u[1], vector_v[1], None]
    edges = go.Scatter(
        # 'lines' (not 'line') is the valid Scatter mode, and hoverinfo takes
        # the flag string 'none' -- the boolean False is rejected by plotly.
        x=edges_x, y=edges_y, mode="lines", hoverinfo='none',
        line=dict(color='rgb(50,50,50)', width=1))

    layout = go.Layout(
        title=figure_title, showlegend=False, hovermode='closest', width=800, height=800)
    return go.Figure(data=[edges, nodes, nodes_with_labels], layout=layout)
def poincare_distance_heatmap(origin_point, x_range=(-1.0, 1.0), y_range=(-1.0, 1.0), num_points=100):
    """Create a heatmap of Poincare distances from `origin_point` for each point (x, y),
    where x and y lie in `x_range` and `y_range` respectively, with `num_points` points
    chosen uniformly in both ranges.

    Parameters
    ----------
    origin_point : tuple (int, int)
        (x, y) from which distances are to be measured and plotted.
    x_range : tuple (int, int)
        Range for x-axis from which to choose `num_points` points.
    y_range : tuple (int, int)
        Range for y-axis from which to choose `num_points` points.
    num_points : int
        Number of points to choose from `x_range` and `y_range`.

    Notes
    -----
    Points outside the unit circle are ignored, since the Poincare distance is defined
    only for points inside the circle boundaries (exclusive of the boundary).

    Returns
    -------
    :class:`plotly.graph_objs.Figure`
        Plotly figure that contains plot

    """
    epsilon = 1e-8  # Can't choose (-1.0, -1.0) or (1.0, 1.0), distance undefined
    x_range, y_range = list(x_range), list(y_range)
    if x_range[0] == -1.0 and y_range[0] == -1.0:
        x_range[0] += epsilon
        y_range[0] += epsilon
    # Bug fix: nudge the *upper* corner (index 1) -- the original re-checked
    # index 0, leaving the (1.0, 1.0) corner unhandled.
    if x_range[1] == 1.0 and y_range[1] == 1.0:
        x_range[1] -= epsilon
        y_range[1] -= epsilon

    x_axis_values = np.linspace(x_range[0], x_range[1], num=num_points)
    # Bug fix: the y axis must span `y_range` (the original reused `x_range`).
    y_axis_values = np.linspace(y_range[0], y_range[1], num=num_points)
    x, y = np.meshgrid(x_axis_values, y_axis_values)
    all_points = np.dstack((x, y)).swapaxes(1, 2).swapaxes(0, 1).reshape(2, num_points ** 2).T
    # Keep only points strictly inside the unit circle.
    norms = np.linalg.norm(all_points, axis=1)
    all_points = all_points[norms < 1]

    origin_point = np.array(origin_point)
    all_distances = PoincareKeyedVectors.poincare_dists(origin_point, all_points)

    distances = go.Scatter(
        x=all_points[:, 0],
        y=all_points[:, 1],
        mode='markers',
        marker=dict(
            size=9,  # plotly expects a number, not the string '9'
            color=all_distances,
            colorscale='Viridis',
            showscale=True,
            colorbar=go.ColorBar(
                title='Poincare Distance'
            ),
        ),
        text=[
            'Distance from (%.2f, %.2f): %.2f' % (origin_point[0], origin_point[1], d)
            for d in all_distances],
        name='',  # To avoid the default 'trace 0'
    )

    origin = go.Scatter(
        x=[origin_point[0]],
        y=[origin_point[1]],
        name='Distance from (%.2f, %.2f)' % (origin_point[0], origin_point[1]),
        mode='markers+text',
        marker=dict(
            size=10,
            color='rgb(200, 50, 50)'
        )
    )

    layout = go.Layout(
        width=900,
        height=800,
        showlegend=False,
        title='Poincare Distances from (%.2f, %.2f)' % (origin_point[0], origin_point[1]),
        hovermode='closest',
    )

    return go.Figure(data=[distances, origin], layout=layout)
| 6,152 | 31.903743 | 120 | py |
poincare_glove | poincare_glove-master/gensim/viz/__init__.py | """
This package contains functions to visualize different models from `gensim.models`.
"""
| 92 | 22.25 | 83 | py |
poincare_glove | poincare_glove-master/gensim/topic_coherence/direct_confirmation_measure.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module contains functions to compute direct confirmation on a pair of words or word subsets."""
import logging
import numpy as np
logger = logging.getLogger(__name__)
# Should be small. Value as suggested in paper http://svn.aksw.org/papers/2015/WSDM_Topic_Evaluation/public.pdf
EPSILON = 1e-12
def log_conditional_probability(segmented_topics, accumulator, with_std=False, with_support=False):
    """Compute the log-conditional-probability confirmation measure (used e.g. by `U_mass`).

    Each segmented pair is scored as
    :math:`m_{lc}(S_i) = log \\frac{P(W', W^{*}) + \epsilon}{P(W^{*})}`.

    Parameters
    ----------
    segmented_topics : list of lists of (int, int)
        Segmented topics, e.g. output of :func:`~gensim.topic_coherence.segmentation.s_one_pre`
        or :func:`~gensim.topic_coherence.segmentation.s_one_one`.
    accumulator : :class:`~gensim.topic_coherence.text_analysis.InvertedIndexAccumulator`
        Word occurrence accumulator from :mod:`gensim.topic_coherence.probability_estimation`.
    with_std : bool, optional
        Also report the standard deviation across each topic's segment measures.
    with_support : bool, optional
        Also report how many pairwise comparisons backed each topic's score.

    Returns
    -------
    list of float
        One aggregated log-conditional-probability per topic (tuples when
        `with_std`/`with_support` are requested).
    """
    total_docs = float(accumulator.num_docs)

    def _pair_measure(w_prime, w_star):
        # Smoothed log[P(W', W*) / P(W*)]; words unknown to the accumulator contribute 0.0.
        try:
            w_star_docs = accumulator[w_star]
            joint_docs = accumulator[w_prime, w_star]
        except KeyError:
            return 0.0
        return np.log(((joint_docs / total_docs) + EPSILON) / (w_star_docs / total_docs))

    return [
        aggregate_segment_sims(
            [_pair_measure(w_prime, w_star) for w_prime, w_star in topic_segments],
            with_std, with_support)
        for topic_segments in segmented_topics
    ]
def aggregate_segment_sims(segment_sims, with_std, with_support):
    """Aggregate the pairwise segment similarities of a single topic.

    Parameters
    ----------
    segment_sims : iterable of float
        Similarity values to aggregate.
    with_std : bool
        Include the standard deviation of `segment_sims` in the result.
    with_support : bool
        Include the number of aggregated values (the "support") in the result.

    Returns
    -------
    (float[, float[, int]])
        The mean alone when neither flag is set, otherwise a tuple
        (mean[, std[, support]]).

    Examples
    ---------
    >>> from gensim.topic_coherence import direct_confirmation_measure
    >>>
    >>> segment_sims = [0.2, 0.5, 1., 0.05]
    >>> direct_confirmation_measure.aggregate_segment_sims(segment_sims, True, True)
    (0.4375, 0.36293077852394939, 4)
    >>> direct_confirmation_measure.aggregate_segment_sims(segment_sims, False, False)
    0.4375
    """
    mean_sim = np.mean(segment_sims)
    if not with_std and not with_support:
        # Plain mean: return the scalar itself, not a 1-tuple.
        return mean_sim
    aggregated = [mean_sim]
    if with_std:
        aggregated.append(np.std(segment_sims))
    if with_support:
        aggregated.append(len(segment_sims))
    return tuple(aggregated)
def log_ratio_measure(segmented_topics, accumulator, normalize=False, with_std=False, with_support=False):
    """Compute the log-ratio confirmation measure (PMI), or NPMI when `normalize=True`.

    PMI (used by coherence measures such as `c_v`):
    :math:`m_{lr}(S_i) = log \\frac{P(W', W^{*}) + \epsilon}{P(W') * P(W^{*})}`

    NPMI (normalized variant):
    :math:`m_{nlr}(S_i) = \\frac{m_{lr}(S_i)}{-log(P(W', W^{*}) + \epsilon)}`

    Parameters
    ----------
    segmented_topics : list of lists of (int, int)
        Segmented topics, e.g. output of :func:`~gensim.topic_coherence.segmentation.s_one_pre`
        or :func:`~gensim.topic_coherence.segmentation.s_one_one`.
    accumulator : :class:`~gensim.topic_coherence.text_analysis.InvertedIndexAccumulator`
        Word occurrence accumulator from :mod:`gensim.topic_coherence.probability_estimation`.
    normalize : bool, optional
        Compute NPMI instead of plain PMI.
    with_std : bool, optional
        Also report the standard deviation across each topic's segment measures.
    with_support : bool, optional
        Also report how many pairwise comparisons backed each topic's score.

    Returns
    -------
    list of float
        One aggregated (N)PMI value per topic.
    """
    total_docs = float(accumulator.num_docs)

    def _pmi(w_prime, w_star):
        # log[(P(W', W*) + eps) / (P(W') * P(W*))]
        joint = (accumulator[w_prime, w_star] / total_docs) + EPSILON
        independent = (accumulator[w_prime] / total_docs) * (accumulator[w_star] / total_docs)
        return np.log(joint / independent)

    def _npmi(w_prime, w_star):
        # PMI divided by the negative smoothed log joint probability.
        pmi = log_ratio_measure([[(w_prime, w_star)]], accumulator)[0]
        joint_prob = accumulator[w_prime, w_star] / total_docs
        return pmi / (-np.log(joint_prob + EPSILON))

    pair_measure = _npmi if normalize else _pmi
    topic_coherences = []
    for topic_segments in segmented_topics:
        segment_measures = [pair_measure(w_prime, w_star) for w_prime, w_star in topic_segments]
        topic_coherences.append(aggregate_segment_sims(segment_measures, with_std, with_support))
    return topic_coherences
| 7,953 | 38.376238 | 118 | py |
poincare_glove | poincare_glove-master/gensim/topic_coherence/aggregation.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module contains functions to perform aggregation on a list of values obtained from the confirmation measure."""
import logging
import numpy as np
logger = logging.getLogger(__name__)
def arithmetic_mean(confirmed_measures):
    """Aggregate confirmation-measure values into their arithmetic mean.

    Parameters
    ----------
    confirmed_measures : list of float
        Confirmation measure computed for each set of the segmented topics.

    Returns
    -------
    `numpy.float`
        Arithmetic mean of `confirmed_measures`.

    Examples
    --------
    >>> from gensim.topic_coherence.aggregation import arithmetic_mean
    >>> arithmetic_mean([1.1, 2.2, 3.3, 4.4])
    2.75
    """
    # np.mean returns a numpy scalar, matching the documented `numpy.float` type.
    return np.mean(confirmed_measures)
| 1,014 | 25.710526 | 119 | py |
poincare_glove | poincare_glove-master/gensim/topic_coherence/text_analysis.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module contains classes for analyzing the texts of a corpus to accumulate
statistical information about word occurrences."""
import itertools
import logging
import multiprocessing as mp
import sys
from collections import Counter
import numpy as np
import scipy.sparse as sps
from six import iteritems, string_types
from gensim import utils
from gensim.models.word2vec import Word2Vec
logger = logging.getLogger(__name__)
def _ids_to_words(ids, dictionary):
"""Convert an iterable of ids to their corresponding words using a dictionary.
Abstract away the differences between the HashDictionary and the standard one.
Parameters
----------
ids: dict
Dictionary of ids and their words.
dictionary: :class:`~gensim.corpora.dictionary.Dictionary`
Input gensim dictionary
Returns
-------
set
Corresponding words.
Examples
--------
>>> from gensim.corpora.dictionary import Dictionary
>>> from gensim.topic_coherence import text_analysis
>>>
>>> dictionary = Dictionary()
>>> ids = {1: 'fake', 4: 'cats'}
>>> dictionary.id2token = {1: 'fake', 2: 'tokens', 3: 'rabbids', 4: 'cats'}
>>>
>>> text_analysis._ids_to_words(ids, dictionary)
set(['cats', 'fake'])
"""
if not dictionary.id2token: # may not be initialized in the standard gensim.corpora.Dictionary
setattr(dictionary, 'id2token', {v: k for k, v in dictionary.token2id.items()})
top_words = set()
for word_id in ids:
word = dictionary.id2token[word_id]
if isinstance(word, set):
top_words = top_words.union(word)
else:
top_words.add(word)
return top_words
class BaseAnalyzer(object):
    """Base class for corpus and text analyzers.

    Attributes
    ----------
    relevant_ids : dict
        Word ids for which statistics are accumulated.
    _vocab_size : int
        Number of relevant ids.
    id2contiguous : dict
        Maps each relevant word id onto a dense index in [0, _vocab_size).
    log_every : int
        Emit a progress log line every `log_every` accumulated documents.
    _num_docs : int
        Number of documents accumulated so far.
    """

    def __init__(self, relevant_ids):
        """
        Parameters
        ----------
        relevant_ids : dict
            Word ids to track.

        Examples
        --------
        >>> from gensim.topic_coherence import text_analysis
        >>> ids = {1: 'fake', 4: 'cats'}
        >>> base = text_analysis.BaseAnalyzer(ids)
        >>> print base.relevant_ids, base._vocab_size, base.id2contiguous, base.log_every, base._num_docs
        {1: 'fake', 4: 'cats'} 2 {1: 0, 4: 1} 1000 0
        """
        self.relevant_ids = relevant_ids
        self._vocab_size = len(relevant_ids)
        # The relevant ids may be sparse; compact them onto a dense 0..N-1 range.
        self.id2contiguous = dict(zip(relevant_ids, range(self._vocab_size)))
        self.log_every = 1000
        self._num_docs = 0

    @property
    def num_docs(self):
        return self._num_docs

    @num_docs.setter
    def num_docs(self, num):
        self._num_docs = num
        if num % self.log_every == 0:
            logger.info(
                "%s accumulated stats from %d documents",
                self.__class__.__name__, num)

    def analyze_text(self, text, doc_num=None):
        raise NotImplementedError("Base classes should implement analyze_text.")

    def __getitem__(self, word_or_words):
        # A scalar key yields an occurrence count; a pair yields a co-occurrence count.
        if isinstance(word_or_words, string_types) or not hasattr(word_or_words, '__iter__'):
            return self.get_occurrences(word_or_words)
        return self.get_co_occurrences(*word_or_words)

    def get_occurrences(self, word_id):
        """Return number of docs the word occurs in, once `accumulate` has been called."""
        return self._get_occurrences(self.id2contiguous[word_id])

    def _get_occurrences(self, word_id):
        raise NotImplementedError("Base classes should implement occurrences")

    def get_co_occurrences(self, word_id1, word_id2):
        """Return number of docs the words co-occur in, once `accumulate` has been called."""
        return self._get_co_occurrences(self.id2contiguous[word_id1], self.id2contiguous[word_id2])

    def _get_co_occurrences(self, word_id1, word_id2):
        raise NotImplementedError("Base classes should implement co_occurrences")
class UsesDictionary(BaseAnalyzer):
    """A BaseAnalyzer that holds a Dictionary and can therefore translate tokens to ids.

    The plain BaseAnalyzer only understands word ids; this subclass also accepts
    tokens, resolving them through the dictionary's token2id mapping.

    Attributes
    ----------
    relevant_words : set
        Words whose occurrences should be accumulated.
    dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
        Dictionary providing the token <-> id mappings.
    token2id : dict
        Shortcut to `dictionary.token2id`.
    """

    def __init__(self, relevant_ids, dictionary):
        """
        Parameters
        ----------
        relevant_ids : dict
            Ids whose statistics should be accumulated.
        dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
            Dictionary with mappings for `relevant_ids`.

        Examples
        --------
        >>> from gensim.topic_coherence import text_analysis
        >>> from gensim.corpora.dictionary import Dictionary
        >>>
        >>> ids = {1: 'foo', 2: 'bar'}
        >>> dictionary = Dictionary([['foo','bar','baz'], ['foo','bar','bar','baz']])
        >>> udict = text_analysis.UsesDictionary(ids, dictionary)
        >>>
        >>> print udict.relevant_words
        set([u'foo', u'baz'])
        """
        super(UsesDictionary, self).__init__(relevant_ids)
        self.relevant_words = _ids_to_words(self.relevant_ids, dictionary)
        self.dictionary = dictionary
        self.token2id = dictionary.token2id

    def get_occurrences(self, word):
        """Return number of docs the word occurs in, once `accumulate` has been called."""
        return self._get_occurrences(self._word2_contiguous_id(word))

    def _word2_contiguous_id(self, word):
        # `word` may be a token or already a raw word id; tokens are resolved first.
        return self.id2contiguous[self.token2id.get(word, word)]

    def get_co_occurrences(self, word1, word2):
        """Return number of docs the words co-occur in, once `accumulate` has been called."""
        return self._get_co_occurrences(
            self._word2_contiguous_id(word1), self._word2_contiguous_id(word2))
class InvertedIndexBased(BaseAnalyzer):
    """Analyzer that accumulates stats in an inverted index (word -> set of doc ids)."""

    def __init__(self, *args):
        """
        Parameters
        ----------
        args : dict
            Look at :class:`~gensim.topic_coherence.text_analysis.BaseAnalyzer`.

        Examples
        --------
        >>> from gensim.topic_coherence import text_analysis
        >>>
        >>> ids = {1: 'fake', 4: 'cats'}
        >>> ininb = text_analysis.InvertedIndexBased(ids)
        >>>
        >>> print ininb._inverted_index
        [set([]) set([])]
        """
        super(InvertedIndexBased, self).__init__(*args)
        # One document-id set per relevant word, indexed by contiguous id.
        self._inverted_index = np.array([set() for _ in range(self._vocab_size)])

    def _get_occurrences(self, word_id):
        return len(self._inverted_index[word_id])

    def _get_co_occurrences(self, word_id1, word_id2):
        postings1 = self._inverted_index[word_id1]
        postings2 = self._inverted_index[word_id2]
        return len(postings1 & postings2)

    def index_to_dict(self):
        """Return the inverted index as a plain dict keyed by original word id."""
        contiguous2id = {n: word_id for word_id, n in iteritems(self.id2contiguous)}
        return {contiguous2id[n]: doc_ids for n, doc_ids in enumerate(self._inverted_index)}
class CorpusAccumulator(InvertedIndexBased):
    """Gather word occurrence stats from a corpus by iterating over its BoW representation."""

    def analyze_text(self, text, doc_num=None):
        """Record the current document number for every relevant word in the BoW `text`."""
        ids_in_doc = frozenset(bow_entry[0] for bow_entry in text)
        for word_id in self.relevant_ids.intersection(ids_in_doc):
            self._inverted_index[self.id2contiguous[word_id]].add(self._num_docs)

    def accumulate(self, corpus):
        """Analyze every document of `corpus`, numbering documents as they arrive."""
        for document in corpus:
            self.analyze_text(document)
            self.num_docs += 1
        return self
class WindowedTextsAnalyzer(UsesDictionary):
    """Gather some stats about relevant terms of a corpus by iterating over windows of texts."""

    def __init__(self, relevant_ids, dictionary):
        """
        Parameters
        ----------
        relevant_ids : set of int
            Relevant word ids.
        dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
            Dictionary instance with mappings for the relevant_ids.
        """
        super(WindowedTextsAnalyzer, self).__init__(relevant_ids, dictionary)
        # Sentinel id used by _iter_texts to stand in for irrelevant tokens.
        self._none_token = self._vocab_size

    def accumulate(self, texts, window_size):
        """Slide a window of `window_size` over each relevant text and analyze every window."""
        for doc_num, virtual_document in utils.iter_windows(
                self._iter_texts(texts), window_size,
                ignore_below_size=False, include_doc_num=True):
            self.analyze_text(virtual_document, doc_num)
            self.num_docs += 1
        return self

    def _iter_texts(self, texts):
        # uint16 suffices unless the vocabulary (plus the none token) overflows it.
        dtype = np.uint16 if np.iinfo(np.uint16).max >= self._vocab_size else np.uint32
        for text in texts:
            if not self.text_is_relevant(text):
                continue
            yield np.array([
                self.id2contiguous[self.token2id[token]] if token in self.relevant_words
                else self._none_token
                for token in text], dtype=dtype)

    def text_is_relevant(self, text):
        """Check if the text has any relevant words."""
        return any(word in self.relevant_words for word in text)
class InvertedIndexAccumulator(WindowedTextsAnalyzer, InvertedIndexBased):
    """Build an inverted index from a sequence of corpus texts."""

    def analyze_text(self, window, doc_num=None):
        """Add the current document counter to the postings of every relevant word in `window`.

        Parameters
        ----------
        window : iterable of int
            Contiguous word ids produced by `_iter_texts`; the none token marks
            irrelevant words and must be skipped.
        doc_num : int, optional
            Unused here; the recorded doc id is the accumulator's `_num_docs` counter.
        """
        for word_id in window:
            # Compare by value, not identity: `window` comes from a numpy array, so its
            # elements are numpy integer scalars and an `is not` check against the
            # plain-int none token always evaluated True, letting the sentinel id
            # index one past the end of the inverted index.
            if word_id != self._none_token:
                self._inverted_index[word_id].add(self._num_docs)
class WordOccurrenceAccumulator(WindowedTextsAnalyzer):
    """Accumulate word occurrences and co-occurrences from a sequence of corpus texts."""

    def __init__(self, *args):
        super(WordOccurrenceAccumulator, self).__init__(*args)
        # Document frequency per relevant word, indexed by contiguous id.
        self._occurrences = np.zeros(self._vocab_size, dtype='uint32')
        # Pairwise co-occurrence counts; kept upper-triangular until _symmetrize().
        self._co_occurrences = sps.lil_matrix((self._vocab_size, self._vocab_size), dtype='uint32')
        # Boolean presence mask for the words of the current window.
        self._uniq_words = np.zeros((self._vocab_size + 1,), dtype=bool)  # add 1 for none token
        # Buffers (id1, id2) -> count pairs before flushing into the sparse matrix.
        self._counter = Counter()

    def __str__(self):
        return self.__class__.__name__

    def accumulate(self, texts, window_size):
        """Accumulate stats from `texts`, then symmetrize the co-occurrence matrix."""
        self._co_occurrences = self._co_occurrences.tolil()
        self.partial_accumulate(texts, window_size)
        self._symmetrize()
        return self

    def partial_accumulate(self, texts, window_size):
        """Meant to be called several times to accumulate partial results.

        Notes
        -----
        The final accumulation should be performed with the `accumulate` method as opposed to this one.
        This method does not ensure the co-occurrence matrix is in lil format and does not
        symmetrize it after accumulation.
        """
        self._current_doc_num = -1
        self._token_at_edge = None
        self._counter.clear()
        super(WordOccurrenceAccumulator, self).accumulate(texts, window_size)
        # Flush the buffered pair counts into the sparse matrix in one pass.
        for combo, count in iteritems(self._counter):
            self._co_occurrences[combo] += count
        return self

    def analyze_text(self, window, doc_num=None):
        """Update occurrence and co-occurrence counts from one window of word ids."""
        self._slide_window(window, doc_num)
        mask = self._uniq_words[:-1]  # to exclude none token
        if mask.any():
            self._occurrences[mask] += 1
            # combinations() over the sorted nonzero indices yields each (i, j), i < j, once.
            self._counter.update(itertools.combinations(np.nonzero(mask)[0], 2))

    def _slide_window(self, window, doc_num):
        # A new document rebuilds the presence mask from scratch; within the same
        # document only the words entering/leaving the window are updated.
        if doc_num != self._current_doc_num:
            self._uniq_words[:] = False
            self._uniq_words[np.unique(window)] = True
            self._current_doc_num = doc_num
        else:
            # NOTE(review): the word sliding out of the window is unconditionally
            # cleared; if the same word still occurs elsewhere in the window it is
            # wrongly marked absent -- confirm whether this approximation is intended.
            self._uniq_words[self._token_at_edge] = False
            self._uniq_words[window[-1]] = True
        self._token_at_edge = window[0]

    def _symmetrize(self):
        """Word pairs may have been encountered in (i, j) and (j, i) order.

        Notes
        -----
        Rather than enforcing a particular ordering during the update process,
        we choose to symmetrize the co-occurrence matrix after accumulation has completed.
        """
        co_occ = self._co_occurrences
        co_occ.setdiag(self._occurrences)  # diagonal should be equal to occurrence counts
        self._co_occurrences = \
            co_occ + co_occ.T - sps.diags(co_occ.diagonal(), offsets=0, dtype='uint32')

    def _get_occurrences(self, word_id):
        return self._occurrences[word_id]

    def _get_co_occurrences(self, word_id1, word_id2):
        return self._co_occurrences[word_id1, word_id2]

    def merge(self, other):
        """Add another accumulator's counts into this one in place."""
        self._occurrences += other._occurrences
        self._co_occurrences += other._co_occurrences
        self._num_docs += other._num_docs
class PatchedWordOccurrenceAccumulator(WordOccurrenceAccumulator):
    """Monkey patched for multiprocessing worker usage, to move some of the logic to the master process."""

    def _iter_texts(self, texts):
        # The master process already runs _iter_texts before batching (see
        # ParallelWordOccurrenceAccumulator.yield_batches), so workers must
        # consume the batches unchanged.
        return texts  # master process will handle this
class ParallelWordOccurrenceAccumulator(WindowedTextsAnalyzer):
    """Accumulate word occurrences in parallel.

    Attributes
    ----------
    processes : int
        Number of processes to use; must be at least two.
    args :
        Should include `relevant_ids` and `dictionary` (see :class:`~UsesDictionary.__init__`).
    kwargs :
        Can include `batch_size`, which is the number of docs to send to a worker at a time.
        If not included, it defaults to 64.
    """

    def __init__(self, processes, *args, **kwargs):
        super(ParallelWordOccurrenceAccumulator, self).__init__(*args)
        if processes < 2:
            raise ValueError(
                "Must have at least 2 processes to run in parallel; got %d" % processes)
        self.processes = processes
        self.batch_size = kwargs.get('batch_size', 64)

    def __str__(self):
        return "%s(processes=%s, batch_size=%s)" % (
            self.__class__.__name__, self.processes, self.batch_size)

    def accumulate(self, texts, window_size):
        """Fan `texts` out to worker processes, then merge their partial accumulators."""
        workers, input_q, output_q = self.start_workers(window_size)
        try:
            self.queue_all_texts(input_q, texts, window_size)
            interrupted = False
        except KeyboardInterrupt:
            # `Logger.warn` is a deprecated alias; use `warning`.
            logger.warning("stats accumulation interrupted; <= %d documents processed", self._num_docs)
            interrupted = True
        accumulators = self.terminate_workers(input_q, output_q, workers, interrupted)
        return self.merge_accumulators(accumulators)

    def start_workers(self, window_size):
        """Set up an input and output queue and start processes for each worker.

        Notes
        -----
        The input queue is used to transmit batches of documents to the workers.
        The output queue is used by workers to transmit the WordOccurrenceAccumulator instances.

        Parameters
        ----------
        window_size : int
            Size of the sliding window each worker applies to its texts.

        Returns
        -------
        (list of lists)
            Tuple of (list of workers, input queue, output queue).
        """
        input_q = mp.Queue(maxsize=self.processes)
        output_q = mp.Queue()
        workers = []
        for _ in range(self.processes):
            accumulator = PatchedWordOccurrenceAccumulator(self.relevant_ids, self.dictionary)
            worker = AccumulatingWorker(input_q, output_q, accumulator, window_size)
            worker.start()
            workers.append(worker)
        return workers, input_q, output_q

    def yield_batches(self, texts):
        """Return a generator over the given texts that yields batches of `batch_size` texts at a time."""
        batch = []
        for text in self._iter_texts(texts):
            batch.append(text)
            if len(batch) == self.batch_size:
                yield batch
                batch = []
        if batch:
            # Flush the final, possibly smaller, batch.
            yield batch

    def queue_all_texts(self, q, texts, window_size):
        """Sequentially place batches of texts on the given queue until `texts` is consumed.

        The texts are filtered so that only those with at least one relevant token are queued.
        """
        for batch_num, batch in enumerate(self.yield_batches(texts)):
            q.put(batch, block=True)
            before = self._num_docs / self.log_every
            # Count virtual documents: each text of length L yields L - window_size + 1 windows.
            self._num_docs += sum(len(doc) - window_size + 1 for doc in batch)
            if before < (self._num_docs / self.log_every):
                logger.info(
                    "%d batches submitted to accumulate stats from %d documents (%d virtual)",
                    (batch_num + 1), (batch_num + 1) * self.batch_size, self._num_docs)

    def terminate_workers(self, input_q, output_q, workers, interrupted=False):
        """Wait until all workers have transmitted their WordOccurrenceAccumulator instances, then terminate each.

        Warnings
        --------
        We do not use join here because it has been shown to have some issues
        in Python 2.7 (and even in later versions). This method also closes both the input and output queue.
        If `interrupted` is False (normal execution), a None value is placed on the input queue for
        each worker. The workers are looking for this sentinel value and interpret it as a signal to
        terminate themselves. If `interrupted` is True, a KeyboardInterrupt occurred. The workers are
        programmed to recover from this and continue on to transmit their results before terminating.
        So in this instance, the sentinel values are not queued, but the rest of the execution
        continues as usual.
        """
        if not interrupted:
            for _ in workers:
                input_q.put(None, block=True)
        accumulators = []
        while len(accumulators) != len(workers):
            accumulators.append(output_q.get())
        logger.info("%d accumulators retrieved from output queue", len(accumulators))
        for worker in workers:
            if worker.is_alive():
                worker.terminate()
        input_q.close()
        output_q.close()
        return accumulators

    def merge_accumulators(self, accumulators):
        """Merge the list of accumulators into a single `WordOccurrenceAccumulator` with all
        occurrence and co-occurrence counts, and a `num_docs` that reflects the total observed
        by all the individual accumulators.
        """
        accumulator = WordOccurrenceAccumulator(self.relevant_ids, self.dictionary)
        for other_accumulator in accumulators:
            accumulator.merge(other_accumulator)
        # Workers do partial accumulation, so none of the co-occurrence matrices are symmetrized.
        # This is by design, to avoid unnecessary matrix additions/conversions during accumulation.
        accumulator._symmetrize()
        logger.info("accumulated word occurrence stats for %d virtual documents", accumulator.num_docs)
        return accumulator
class AccumulatingWorker(mp.Process):
    """Accumulate stats from texts fed in from queue."""

    def __init__(self, input_q, output_q, accumulator, window_size):
        super(AccumulatingWorker, self).__init__()
        self.input_q = input_q
        self.output_q = output_q
        self.accumulator = accumulator
        self.accumulator.log_every = sys.maxsize  # avoid logging in workers
        self.window_size = window_size

    def run(self):
        """Consume batches until a sentinel or interrupt, always replying to the master."""
        try:
            self._run()
        except KeyboardInterrupt:
            logger.info(
                "%s interrupted after processing %d documents",
                self.__class__.__name__, self.accumulator.num_docs)
        except Exception:
            logger.exception("worker encountered unexpected exception")
        finally:
            # Always send the accumulator back, even on error, so the master
            # (which waits for one reply per worker) does not block forever.
            self.reply_to_master()

    def _run(self):
        # Main worker loop: pull document batches off the input queue and
        # accumulate them until the None sentinel arrives.
        batch_num = -1
        n_docs = 0
        while True:
            batch_num += 1
            docs = self.input_q.get(block=True)
            if docs is None:  # sentinel value
                logger.debug("observed sentinel value; terminating")
                break
            self.accumulator.partial_accumulate(docs, self.window_size)
            n_docs += len(docs)
            logger.debug(
                "completed batch %d; %d documents processed (%d virtual)",
                batch_num, n_docs, self.accumulator.num_docs)
        logger.debug(
            "finished all batches; %d documents processed (%d virtual)",
            n_docs, self.accumulator.num_docs)

    def reply_to_master(self):
        """Send this worker's accumulator to the master via the output queue."""
        logger.info("serializing accumulator to return to master...")
        self.output_q.put(self.accumulator, block=False)
        logger.info("accumulator serialized")
class WordVectorsAccumulator(UsesDictionary):
    """Accumulate context vectors for words using word vector embeddings.

    Attributes
    ----------
    model: Word2Vec (:class:`~gensim.models.keyedvectors.KeyedVectors`)
        If None, a new Word2Vec model is trained on the given text corpus. Otherwise,
        it should be a pre-trained Word2Vec context vectors.
    model_kwargs:
        if model is None, these keyword arguments will be passed through to the Word2Vec constructor.
    """

    def __init__(self, relevant_ids, dictionary, model=None, **model_kwargs):
        super(WordVectorsAccumulator, self).__init__(relevant_ids, dictionary)
        self.model = model
        self.model_kwargs = model_kwargs

    def not_in_vocab(self, words):
        """Return the subset of `words` missing from the embedding vocabulary."""
        uniq_words = set(utils.flatten(words))
        return set(word for word in uniq_words if word not in self.model.vocab)

    def get_occurrences(self, word):
        """Return number of docs the word occurs in, once `accumulate` has been called."""
        try:
            self.token2id[word]  # is this a token or an id?
        except KeyError:
            word = self.dictionary.id2token[word]
        return self.model.vocab[word].count

    def get_co_occurrences(self, word1, word2):
        """Return number of docs the words co-occur in, once `accumulate` has been called."""
        raise NotImplementedError("Word2Vec model does not support co-occurrence counting")

    def accumulate(self, texts, window_size):
        """Train a Word2Vec model on `texts`, unless a pre-trained model was supplied."""
        if self.model is not None:
            logger.debug("model is already trained; no accumulation necessary")
            return self

        kwargs = self.model_kwargs.copy()
        if window_size is not None:
            kwargs['window'] = window_size
        kwargs['min_count'] = kwargs.get('min_count', 1)
        kwargs['sg'] = kwargs.get('sg', 1)
        # Fixed typo: this previously read `kwargs.get('hw', 0)`, silently discarding
        # a user-supplied `hs` override and always disabling hierarchical softmax.
        kwargs['hs'] = kwargs.get('hs', 0)

        self.model = Word2Vec(**kwargs)
        self.model.build_vocab(texts)
        self.model.train(texts, total_examples=self.model.corpus_count, epochs=self.model.iter)
        self.model = self.model.wv  # retain KeyedVectors
        return self

    def ids_similarity(self, ids1, ids2):
        """Similarity (via the model's `n_similarity`) between the in-vocabulary words
        corresponding to two id collections."""
        words1 = self._words_with_embeddings(ids1)
        words2 = self._words_with_embeddings(ids2)
        return self.model.n_similarity(words1, words2)

    def _words_with_embeddings(self, ids):
        # Accept a scalar id as well as an iterable of ids.
        if not hasattr(ids, '__iter__'):
            ids = [ids]
        words = [self.dictionary.id2token[word_id] for word_id in ids]
        return [word for word in words if word in self.model.vocab]
| 24,297 | 35.871017 | 114 | py |
poincare_glove | poincare_glove-master/gensim/topic_coherence/indirect_confirmation_measure.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
r"""This module contains functions to compute confirmation on a pair of words or word subsets.
Notes
-----
The advantage of indirect confirmation measure is that it computes similarity of words in :math:`W'` and
:math:`W^{*}` with respect to direct confirmations to all words. Eg. Suppose `x` and `z` are both competing
brands of cars, which semantically support each other. However, both brands are seldom mentioned
together in documents in the reference corpus. But their confirmations to other words like “road”
or “speed” do strongly correlate. This would be reflected by an indirect confirmation measure.
Thus, indirect confirmation measures may capture semantic support that direct measures would miss.
The formula used to compute indirect confirmation measure is
.. math::
\widetilde{m}_{sim(m, \gamma)}(W', W^{*}) = s_{sim}(\vec{v}^{\,}_{m,\gamma}(W'), \vec{v}^{\,}_{m,\gamma}(W^{*}))
where :math:`s_{sim}` can be cosine, dice or jaccard similarity and
.. math::
\vec{v}^{\,}_{m,\gamma}(W') = \Bigg \{{\sum_{w_{i} \in W'}^{ } m(w_{i}, w_{j})^{\gamma}}\Bigg \}_{j = 1,...,|W|}
"""
import itertools
import logging
import numpy as np
import scipy.sparse as sps
from gensim.topic_coherence.direct_confirmation_measure import aggregate_segment_sims, log_ratio_measure
logger = logging.getLogger(__name__)
def word2vec_similarity(segmented_topics, accumulator, with_std=False, with_support=False):
    """For each topic segmentation, compute average cosine similarity using a
    :class:`~gensim.topic_coherence.text_analysis.WordVectorsAccumulator`.

    Parameters
    ----------
    segmented_topics : list of lists of (int, `numpy.ndarray`)
        Output from the :func:`~gensim.topic_coherence.segmentation.s_one_set`.
    accumulator : :class:`~gensim.topic_coherence.text_analysis.WordVectorsAccumulator` or
        :class:`~gensim.topic_coherence.text_analysis.InvertedIndexAccumulator`
        Word occurrence accumulator.
    with_std : bool, optional
        True to also include standard deviation across topic segment sets
        in addition to the mean coherence for each topic.
    with_support : bool, optional
        True to also include support across topic segments. The support is defined as
        the number of pairwise similarity comparisons were used to compute the overall topic coherence.

    Returns
    -------
    list of (float[, float[, int]])
        Cosine word2vec similarities per topic (with std/support if `with_std`, `with_support`).
    """
    topic_coherences = []
    total_oov = 0  # number of *topics* that had out-of-vocabulary terms
    for topic_index, topic_segments in enumerate(segmented_topics):
        segment_sims = []
        num_oov = 0
        for w_prime, w_star in topic_segments:
            # Normalize scalar ids to singleton lists so ids_similarity always gets iterables.
            if not hasattr(w_prime, '__iter__'):
                w_prime = [w_prime]
            if not hasattr(w_star, '__iter__'):
                w_star = [w_star]

            try:
                segment_sims.append(accumulator.ids_similarity(w_prime, w_star))
            except ZeroDivisionError:
                # Raised when a segment has no terms left in the embedding vocabulary.
                num_oov += 1

        if num_oov > 0:
            total_oov += 1
            logger.warning(
                "%d terms for topic %d are not in word2vec model vocabulary",
                num_oov, topic_index)
        topic_coherences.append(aggregate_segment_sims(segment_sims, with_std, with_support))

    if total_oov > 0:
        # Fixed message: `total_oov` counts affected topics, and the original text
        # ("%d terms for are not ...") was both ungrammatical and misattributed.
        logger.warning("%d topics contained terms not in word2vec model vocabulary", total_oov)
    return topic_coherences
def cosine_similarity(segmented_topics, accumulator, topics, measure='nlr',
                      gamma=1, with_std=False, with_support=False):
    """Calculate the indirect cosine measure.

    Parameters
    ----------
    segmented_topics : list of lists of (int, `numpy.ndarray`)
        Output from the segmentation module of the segmented topics.
    accumulator : :class:`~gensim.topic_coherence.text_analysis.InvertedIndexAccumulator`
        Output from the probability_estimation module.
    topics : list of `numpy.ndarray`
        Topics obtained from the trained topic model.
    measure : str, optional
        Direct confirmation measure to be used. Supported values are "nlr" (normalized log ratio).
    gamma : float, optional
        Gamma value for computing :math:`W'` and :math:`W^{*}` vectors.
    with_std : bool
        True to also include standard deviation across topic segment sets in addition to the mean coherence
        for each topic; default is False.
    with_support : bool
        True to also include support across topic segments. The support is defined as the number of pairwise
        similarity comparisons that were used to compute the overall topic coherence.

    Returns
    -------
    list
        List of indirect cosine similarity measures, one per topic.

    Examples
    --------
    >>> from gensim.corpora.dictionary import Dictionary
    >>> from gensim.topic_coherence import indirect_confirmation_measure, text_analysis
    >>> import numpy as np
    >>>
    >>> # create accumulator
    >>> dictionary = Dictionary()
    >>> dictionary.id2token = {1: 'fake', 2: 'tokens'}
    >>> accumulator = text_analysis.InvertedIndexAccumulator({1, 2}, dictionary)
    >>> accumulator._inverted_index = {0: {2, 3, 4}, 1: {3, 5}}
    >>> accumulator._num_docs = 5
    >>>
    >>> # create topics
    >>> topics = [np.array([1, 2])]
    >>>
    >>> # create segmentation
    >>> segmentation = [[(1, np.array([1, 2])), (2, np.array([1, 2]))]]
    >>> obtained = indirect_confirmation_measure.cosine_similarity(segmentation, accumulator, topics, 'nlr', 1)
    >>> print(obtained[0])
    0.623018926945

    """
    # Lazily builds (and caches) one context vector per (segment, topic) pair.
    context_vectors = ContextVectorComputer(measure, topics, accumulator, gamma)
    topic_coherences = []
    for topic_words, topic_segments in zip(topics, segmented_topics):
        topic_words = tuple(topic_words)  # because tuples are hashable
        segment_sims = np.zeros(len(topic_segments))
        for i, (w_prime, w_star) in enumerate(topic_segments):
            # Look up the context vector of each half of the segment relative
            # to the full topic word set, then compare them with cosine.
            w_prime_cv = context_vectors[w_prime, topic_words]
            w_star_cv = context_vectors[w_star, topic_words]
            segment_sims[i] = _cossim(w_prime_cv, w_star_cv)
        topic_coherences.append(aggregate_segment_sims(segment_sims, with_std, with_support))
    return topic_coherences
class ContextVectorComputer(object):
    """Lazily compute context vectors for topic segments.

    Parameters
    ----------
    measure : str
        Confirmation measure; only 'nlr' (normalized log ratio) is supported.
    topics : list of numpy.array
        Topics.
    accumulator : :class:`~gensim.topic_coherence.text_analysis.WordVectorsAccumulator` or
                  :class:`~gensim.topic_coherence.text_analysis.InvertedIndexAccumulator`
        Word occurrence accumulator from probability_estimation.
    gamma : float
        Exponent applied to each pairwise similarity when building vectors.

    Attributes
    ----------
    sim_cache : dict
        Cache of similarities between tokens (pairs of word ids), e.g. (1, 2).
    context_vector_cache : dict
        Mapping from (segment, topic_words) --> context_vector.

    Example
    -------
    >>> from gensim.corpora.dictionary import Dictionary
    >>> from gensim.topic_coherence import indirect_confirmation_measure, text_analysis
    >>> import numpy as np
    >>>
    >>> # create measure, topics
    >>> measure = 'nlr'
    >>> topics = [np.array([1, 2])]
    >>>
    >>> # create accumulator
    >>> dictionary = Dictionary()
    >>> dictionary.id2token = {1: 'fake', 2: 'tokens'}
    >>> accumulator = text_analysis.WordVectorsAccumulator({1, 2}, dictionary)
    >>> _ = accumulator.accumulate([['fake', 'tokens'],['tokens', 'fake']], 5)
    >>> cont_vect_comp = indirect_confirmation_measure.ContextVectorComputer(measure, topics, accumulator, 1)
    >>> cont_vect_comp.mapping
    {1: 0, 2: 1}
    >>> cont_vect_comp.vocab_size
    2

    """

    def __init__(self, measure, topics, accumulator, gamma):
        # Only the NPMI-based similarity is implemented; fail fast otherwise.
        if measure == 'nlr':
            self.similarity = _pair_npmi
        else:
            raise ValueError(
                "The direct confirmation measure you entered is not currently supported.")
        # Map every word id that appears in any topic to a contiguous index,
        # so context vectors can be stored as dense-indexed sparse columns.
        self.mapping = _map_to_contiguous(topics)
        self.vocab_size = len(self.mapping)
        self.accumulator = accumulator
        self.gamma = gamma
        self.sim_cache = {}  # Cache similarities between tokens (pairs of word ids), e.g. (1, 2)
        self.context_vector_cache = {}  # mapping from (segment, topic_words) --> context_vector

    def __getitem__(self, idx):
        # Allows the ``computer[segment, topic_words]`` indexing syntax.
        return self.compute_context_vector(*idx)

    def compute_context_vector(self, segment_word_ids, topic_word_ids):
        """Check if (segment_word_ids, topic_word_ids) context vector has been cached.

        Parameters
        ----------
        segment_word_ids : list
            Ids of words in segment.
        topic_word_ids : list
            Ids of words in topic.

        Returns
        -------
        csr_matrix :class:`~scipy.sparse.csr`
            If the context vector has been cached, return it; otherwise
            compute it, cache it, and return it.

        """
        key = _key_for_segment(segment_word_ids, topic_word_ids)
        context_vector = self.context_vector_cache.get(key, None)
        if context_vector is None:
            context_vector = self._make_seg(segment_word_ids, topic_word_ids)
            self.context_vector_cache[key] = context_vector
        return context_vector

    def _make_seg(self, segment_word_ids, topic_word_ids):
        """Return context vectors for segmentation (internal helper).

        Parameters
        ----------
        segment_word_ids : iterable or int
            Ids of words in segment.
        topic_word_ids : list
            Ids of words in topic.

        Returns
        -------
        csr_matrix :class:`~scipy.sparse.csr`
            Matrix in Compressed Sparse Row format.

        """
        # Built as LIL (efficient incremental writes), converted to CSR at the end.
        context_vector = sps.lil_matrix((self.vocab_size, 1))
        if not hasattr(segment_word_ids, '__iter__'):
            segment_word_ids = (segment_word_ids,)
        for w_j in topic_word_ids:
            idx = (self.mapping[w_j], 0)
            # Pairs are sorted so (a, b) and (b, a) share one cache entry.
            for pair in (tuple(sorted((w_i, w_j))) for w_i in segment_word_ids):
                if pair not in self.sim_cache:
                    self.sim_cache[pair] = self.similarity(pair, self.accumulator)
                context_vector[idx] += self.sim_cache[pair] ** self.gamma
        return context_vector.tocsr()
def _pair_npmi(pair, accumulator):
    """Return the normalized pointwise mutual information (NPMI) of one word pair.

    Parameters
    ----------
    pair : (int, int)
        The pair of words ``(word_id1, word_id2)``.
    accumulator : :class:`~gensim.topic_coherence.text_analysis.InvertedIndexAccumulator`
        Word occurrence accumulator from probability_estimation.

    Returns
    -------
    float
        NPMI between the two words.

    """
    # Wrap the pair as a single-segment, single-topic structure and delegate
    # to log_ratio_measure with normalization enabled.
    single_segment = [[pair]]
    npmi_per_topic = log_ratio_measure(single_segment, accumulator, True)
    return npmi_per_topic[0]
def _cossim(cv1, cv2):
    # Cosine similarity of two sparse column vectors: inner product divided
    # by the product of their Euclidean norms.
    inner_product = cv1.T.dot(cv2)[0, 0]
    norm_product = _magnitude(cv1) * _magnitude(cv2)
    return inner_product / norm_product
def _magnitude(sparse_vec):
return np.sqrt(np.sum(sparse_vec.data ** 2))
def _map_to_contiguous(ids_iterable):
uniq_ids = {}
n = 0
for id_ in itertools.chain.from_iterable(ids_iterable):
if id_ not in uniq_ids:
uniq_ids[id_] = n
n += 1
return uniq_ids
def _key_for_segment(segment, topic_words):
"""A segment may have a single number of an iterable of them."""
segment_key = tuple(segment) if hasattr(segment, '__iter__') else segment
return segment_key, topic_words
| 12,469 | 35.893491 | 119 | py |
poincare_glove | poincare_glove-master/gensim/topic_coherence/segmentation.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module contains functions to perform segmentation on a list of topics."""
import logging
logger = logging.getLogger(__name__)
def s_one_pre(topics):
    r"""Perform "one-preceding" segmentation on a list of topics.

    Notes
    -----
    Segmentation is defined as
    :math:`s_{pre} = {(W', W^{*}) | W' = w_{i}; W^{*} = {w_j}; w_{i}, w_{j} \in W; i > j}`.

    The docstring is a raw string: the original non-raw version made ``\i``
    an invalid escape sequence (a SyntaxWarning on modern Python).

    Parameters
    ----------
    topics : list of np.array
        List of topics obtained from an algorithm such as LDA.

    Returns
    -------
    list of list of (int, int)
        :math:`(W', W^{*})` for all unique topic ids.

    Examples
    --------
    >>> import numpy as np
    >>> from gensim.topic_coherence import segmentation
    >>>
    >>> topics = [np.array([1, 2, 3]), np.array([4, 5, 6])]
    >>> segmentation.s_one_pre(topics)
    [[(2, 1), (3, 1), (3, 2)], [(5, 4), (6, 4), (6, 5)]]

    """
    s_one_pre_res = []
    for top_words in topics:
        s_one_pre_t = []
        # Pair every word (starting from the second) with each earlier word.
        for w_prime_index, w_prime in enumerate(top_words[1:]):
            for w_star in top_words[:w_prime_index + 1]:
                s_one_pre_t.append((w_prime, w_star))
        s_one_pre_res.append(s_one_pre_t)
    return s_one_pre_res
def s_one_one(topics):
    r"""Perform "one-one" segmentation on a list of topics.

    Segmentation is defined as
    :math:`s_{one} = {(W', W^{*}) | W' = {w_i}; W^{*} = {w_j}; w_{i}, w_{j} \in W; i \neq j}`.

    The docstring is a raw string: the original non-raw version made ``\i``
    an invalid escape sequence (a SyntaxWarning on modern Python).

    Parameters
    ----------
    topics : list of `numpy.ndarray`
        List of topics obtained from an algorithm such as LDA.

    Returns
    -------
    list of list of (int, int).
        :math:`(W', W^{*})` for all unique topic ids.

    Examples
    -------
    >>> import numpy as np
    >>> from gensim.topic_coherence import segmentation
    >>>
    >>> topics = [np.array([1, 2, 3]), np.array([4, 5, 6])]
    >>> segmentation.s_one_one(topics)
    [[(1, 2), (1, 3), (2, 1), (2, 3), (3, 1), (3, 2)], [(4, 5), (4, 6), (5, 4), (5, 6), (6, 4), (6, 5)]]

    """
    s_one_one_res = []
    for top_words in topics:
        s_one_one_t = []
        for w_prime_index, w_prime in enumerate(top_words):
            for w_star_index, w_star in enumerate(top_words):
                # Skip the diagonal: a word is never paired with itself.
                # (Direct condition instead of the original continue/else.)
                if w_prime_index != w_star_index:
                    s_one_one_t.append((w_prime, w_star))
        s_one_one_res.append(s_one_one_t)
    return s_one_one_res
def s_one_set(topics):
    r"""Perform s_one_set segmentation on a list of topics.

    Segmentation is defined as
    :math:`s_{set} = {(W', W^{*}) | W' = {w_i}; w_{i} \in W; W^{*} = W}`

    The docstring is a raw string: the original non-raw version made ``\i``
    an invalid escape sequence (a SyntaxWarning on modern Python).

    Parameters
    ----------
    topics : list of `numpy.ndarray`
        List of topics obtained from an algorithm such as LDA.

    Returns
    -------
    list of list of (int, int).
        :math:`(W', W^{*})` for all unique topic ids.

    Examples
    --------
    >>> import numpy as np
    >>> from gensim.topic_coherence import segmentation
    >>>
    >>> topics = [np.array([9, 10, 7])]
    >>> segmentation.s_one_set(topics)
    [[(9, array([ 9, 10, 7])), (10, array([ 9, 10, 7])), (7, array([ 9, 10, 7]))]]

    """
    s_one_set_res = []
    for top_words in topics:
        # Pair every word with the full topic word set.
        s_one_set_res.append([(w_prime, top_words) for w_prime in top_words])
    return s_one_set_res
| 3,532 | 26.601563 | 104 | py |
poincare_glove | poincare_glove-master/gensim/topic_coherence/probability_estimation.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module contains functions to perform segmentation on a list of topics."""
import itertools
import logging
from gensim.topic_coherence.text_analysis import (
CorpusAccumulator, WordOccurrenceAccumulator, ParallelWordOccurrenceAccumulator,
WordVectorsAccumulator)
logger = logging.getLogger(__name__)
def p_boolean_document(corpus, segmented_topics):
    """Estimate word probabilities under the boolean-document assumption.

    The probability of a single word is the number of documents in which the
    word occurs, divided by the total number of documents.

    Parameters
    ----------
    corpus : iterable of list of (int, int)
        The corpus of documents.
    segmented_topics : list of (int, int)
        Each tuple (word_id_set1, word_id_set2) holds either single integers
        or `numpy.ndarray`s of integers.

    Returns
    -------
    :class:`~gensim.topic_coherence.text_analysis.CorpusAccumulator`
        Accumulator that can be queried for token frequencies and
        co-occurrence frequencies over the corpus.

    """
    # Only the ids mentioned by some segment need to be tracked.
    relevant_ids = unique_ids_from_segments(segmented_topics)
    accumulator = CorpusAccumulator(relevant_ids)
    return accumulator.accumulate(corpus)
def p_boolean_sliding_window(texts, segmented_topics, dictionary, window_size, processes=1):
    """Estimate word probabilities with a boolean sliding window.

    The window moves over each document one token at a time; every position
    defines a virtual document, and boolean-document counting is applied to
    those virtual documents.

    Parameters
    ----------
    texts : iterable of iterable of str
        Input text.
    segmented_topics : list of (int, int)
        Each tuple (word_id_set1, word_id_set2) holds either single integers
        or `numpy.ndarray`s of integers.
    dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
        Gensim dictionary mapping of the tokens and ids.
    window_size : int
        Size of the sliding window (110 has been found ideal for large corpora).
    processes : int, optional
        Number of worker processes; values above 1 switch to the parallel
        accumulator.

    Returns
    -------
    :class:`~gensim.topic_coherence.text_analysis.WordOccurrenceAccumulator` or
    :class:`~gensim.topic_coherence.text_analysis.ParallelWordOccurrenceAccumulator`
        Accumulator that can be queried for token frequencies and
        co-occurrence frequencies.

    """
    relevant_ids = unique_ids_from_segments(segmented_topics)
    # Pick the parallel implementation only when extra workers were requested.
    if processes > 1:
        accumulator = ParallelWordOccurrenceAccumulator(processes, relevant_ids, dictionary)
    else:
        accumulator = WordOccurrenceAccumulator(relevant_ids, dictionary)
    logger.info("using %s to estimate probabilities from sliding windows", accumulator)
    return accumulator.accumulate(texts, window_size)
def p_word2vec(texts, segmented_topics, dictionary, window_size=None, processes=1, model=None):
    """Accumulate word vectors for the topic words, training word2vec if needed.

    Parameters
    ----------
    texts : iterable of iterable of str
        Input text.
    segmented_topics : iterable of iterable of str
        Output from the segmentation of topics (or simply the topics).
    dictionary : :class:`~gensim.corpora.dictionary`
        Gensim dictionary mapping of the tokens and ids.
    window_size : int, optional
        Size of the sliding window.
    processes : int, optional
        Number of worker threads for training.
    model : :class:`~gensim.models.word2vec.Word2Vec` or :class:`~gensim.models.keyedvectors.KeyedVectors`, optional
        Pre-trained context vectors; when None, a new Word2Vec model is
        trained on `texts`.

    Returns
    -------
    :class:`~gensim.topic_coherence.text_analysis.WordVectorsAccumulator`
        Text accumulator holding the (trained or supplied) context vectors.

    """
    relevant_ids = unique_ids_from_segments(segmented_topics)
    vector_accumulator = WordVectorsAccumulator(
        relevant_ids, dictionary, model, window=window_size, workers=processes)
    return vector_accumulator.accumulate(texts, window_size)
def unique_ids_from_segments(segmented_topics):
    """Collect the set of all unique word ids appearing in segmented topics.

    Parameters
    ----------
    segmented_topics : list of (int, int)
        Each tuple (word_id_set1, word_id_set2) holds either single integers
        or `numpy.ndarray`s of integers.

    Returns
    -------
    set
        All unique ids across every topic segment.

    Example
    -------
    >>> from gensim.topic_coherence import probability_estimation
    >>>
    >>> segmentation = [[(1, 2)]]
    >>> probability_estimation.unique_ids_from_segments(segmentation)
    set([1, 2])

    """
    collected = set()
    for segment_list in segmented_topics:
        for word_id in itertools.chain.from_iterable(segment_list):
            # A segment side may be a single id or an iterable of ids.
            ids = word_id if hasattr(word_id, '__iter__') else (word_id,)
            collected.update(ids)
    return collected
| 9,041 | 37.476596 | 120 | py |
poincare_glove | poincare_glove-master/gensim/topic_coherence/__init__.py | """
This package contains implementations of the individual components of
the topic coherence pipeline.
"""
| 107 | 20.6 | 68 | py |
poincare_glove | poincare_glove-master/gensim/test/test_sharded_corpus.py | """
Testing the test sharded corpus.
"""
import os
import unittest
import random
import numpy as np
import shutil
from scipy import sparse
from gensim.utils import is_corpus
from gensim.corpora.sharded_corpus import ShardedCorpus
from gensim.utils import mock_data, xrange
#############################################################################
class TestShardedCorpus(unittest.TestCase):
    """Tests for ShardedCorpus: shard creation, persistence, and the four
    serialization/retrieval combinations (dense/sparse in, dense/sparse/gensim out).

    Each test gets a fresh temporary directory with a freshly sharded mock
    corpus (1000-dim, shard size 100), removed again in tearDown.
    """
    # @classmethod
    # def setUpClass(cls):
    #     cls.dim = 1000
    #     cls.data = mock_data(dim=cls.dim)
    #
    #     random_string = ''.join([random.choice('1234567890') for _ in xrange(8)])
    #
    #     cls.tmp_dir = 'test-temp-' + random_string
    #     os.makedirs(cls.tmp_dir)
    #
    #     cls.tmp_fname = os.path.join(cls.tmp_dir,
    #                                  'shcorp.' + random_string + '.tmp')
    # @classmethod
    # def tearDownClass(cls):
    #     shutil.rmtree(cls.tmp_dir)
    def setUp(self):
        # Build a fresh sharded corpus in a uniquely named temp directory so
        # tests cannot interfere with each other's shard files.
        self.dim = 1000
        self.random_string = ''.join([random.choice('1234567890') for _ in xrange(8)])
        self.tmp_dir = 'test-temp-' + self.random_string
        os.makedirs(self.tmp_dir)
        self.tmp_fname = os.path.join(self.tmp_dir,
                                      'shcorp.' + self.random_string + '.tmp')
        self.data = mock_data(dim=1000)
        # 1000 mock documents / shardsize 100 => 10 shards on disk.
        self.corpus = ShardedCorpus(self.tmp_fname, self.data, dim=self.dim,
                                    shardsize=100)
    def tearDown(self):
        # Remove the temp directory (and all shard files) created in setUp.
        shutil.rmtree(self.tmp_dir)
    def test_init(self):
        # Test that the shards were actually created during setUp
        self.assertTrue(os.path.isfile(self.tmp_fname + '.1'))
    def test_load(self):
        # Test that the shards were actually created
        self.assertTrue(os.path.isfile(self.tmp_fname + '.1'))
        self.corpus.save()
        loaded_corpus = ShardedCorpus.load(self.tmp_fname)
        # The reloaded corpus must agree on dimensionality and shard count.
        self.assertEqual(loaded_corpus.dim, self.corpus.dim)
        self.assertEqual(loaded_corpus.n_shards, self.corpus.n_shards)
    def test_getitem(self):
        _ = self.corpus[130] # noqa:F841
        # Does retrieving the item load the correct shard?
        self.assertEqual(self.corpus.current_shard_n, 1)
        # A slice crossing into shard 2 should load that shard and return a
        # correctly shaped batch.
        item = self.corpus[220:227]
        self.assertEqual((7, self.corpus.dim), item.shape)
        self.assertEqual(self.corpus.current_shard_n, 2)
        # Each row of the slice must equal the individually retrieved item.
        for i in xrange(220, 227):
            self.assertTrue(np.array_equal(self.corpus[i], item[i - 220]))
    def test_sparse_serialization(self):
        # Serializing with sparse shards should not raise.
        no_exception = True
        try:
            ShardedCorpus(self.tmp_fname, self.data, shardsize=100, dim=self.dim, sparse_serialization=True)
        except Exception:
            no_exception = False
            raise
        finally:
            self.assertTrue(no_exception)
    def test_getitem_dense2dense(self):
        # Dense serialization, dense retrieval: numpy arrays come back.
        corpus = ShardedCorpus(
            self.tmp_fname, self.data, shardsize=100, dim=self.dim,
            sparse_serialization=False, sparse_retrieval=False
        )
        item = corpus[3]
        self.assertTrue(isinstance(item, np.ndarray))
        self.assertEqual(item.shape, (corpus.dim,))
        dslice = corpus[2:6]
        self.assertTrue(isinstance(dslice, np.ndarray))
        self.assertEqual(dslice.shape, (4, corpus.dim))
        # Fancy (list) indexing must match the equivalent slice.
        ilist = corpus[[2, 3, 4, 5]]
        self.assertTrue(isinstance(ilist, np.ndarray))
        self.assertEqual(ilist.shape, (4, corpus.dim))
        self.assertEqual(ilist.all(), dslice.all())
    def test_getitem_dense2sparse(self):
        # Dense serialization, sparse retrieval: csr matrices come back.
        corpus = ShardedCorpus(
            self.tmp_fname, self.data, shardsize=100, dim=self.dim,
            sparse_serialization=False, sparse_retrieval=True
        )
        item = corpus[3]
        self.assertTrue(isinstance(item, sparse.csr_matrix))
        self.assertEqual(item.shape, (1, corpus.dim))
        dslice = corpus[2:6]
        self.assertTrue(isinstance(dslice, sparse.csr_matrix))
        self.assertEqual(dslice.shape, (4, corpus.dim))
        ilist = corpus[[2, 3, 4, 5]]
        self.assertTrue(isinstance(ilist, sparse.csr_matrix))
        self.assertEqual(ilist.shape, (4, corpus.dim))
        # Zero nonzeros in the elementwise inequality means the matrices match.
        self.assertEqual((ilist != dslice).getnnz(), 0)
    def test_getitem_sparse2sparse(self):
        sp_tmp_fname = self.tmp_fname + '.sparse'
        corpus = ShardedCorpus(
            sp_tmp_fname, self.data, shardsize=100, dim=self.dim,
            sparse_serialization=True, sparse_retrieval=True
        )
        # A dense-serialized twin to cross-check retrieval results against.
        dense_corpus = ShardedCorpus(
            self.tmp_fname, self.data, shardsize=100, dim=self.dim,
            sparse_serialization=False, sparse_retrieval=True
        )
        item = corpus[3]
        self.assertTrue(isinstance(item, sparse.csr_matrix))
        self.assertEqual(item.shape, (1, corpus.dim))
        dslice = corpus[2:6]
        self.assertTrue(isinstance(dslice, sparse.csr_matrix))
        self.assertEqual(dslice.shape, (4, corpus.dim))
        # The slice should contain exactly the nonzeros of the source rows.
        expected_nnz = sum([len(self.data[i]) for i in range(2, 6)])
        self.assertEqual(dslice.getnnz(), expected_nnz)
        ilist = corpus[[2, 3, 4, 5]]
        self.assertTrue(isinstance(ilist, sparse.csr_matrix))
        self.assertEqual(ilist.shape, (4, corpus.dim))
        # Also compare with what the dense dataset is giving us
        d_dslice = dense_corpus[2:6]
        self.assertEqual((d_dslice != dslice).getnnz(), 0)
        self.assertEqual((ilist != dslice).getnnz(), 0)
    def test_getitem_sparse2dense(self):
        sp_tmp_fname = self.tmp_fname + '.sparse'
        corpus = ShardedCorpus(
            sp_tmp_fname, self.data, shardsize=100, dim=self.dim,
            sparse_serialization=True, sparse_retrieval=False
        )
        # A dense-serialized twin to cross-check retrieval results against.
        dense_corpus = ShardedCorpus(
            self.tmp_fname, self.data, shardsize=100, dim=self.dim,
            sparse_serialization=False, sparse_retrieval=False
        )
        item = corpus[3]
        self.assertTrue(isinstance(item, np.ndarray))
        # NOTE(review): sparse-serialized single items come back 2-D (1, dim),
        # unlike the (dim,) shape in the dense2dense case.
        self.assertEqual(item.shape, (1, corpus.dim))
        dslice = corpus[2:6]
        self.assertTrue(isinstance(dslice, np.ndarray))
        self.assertEqual(dslice.shape, (4, corpus.dim))
        ilist = corpus[[2, 3, 4, 5]]
        self.assertTrue(isinstance(ilist, np.ndarray))
        self.assertEqual(ilist.shape, (4, corpus.dim))
        # Also compare with what the dense dataset is giving us
        d_dslice = dense_corpus[2:6]
        self.assertEqual(dslice.all(), d_dslice.all())
        self.assertEqual(ilist.all(), dslice.all())
    def test_getitem_dense2gensim(self):
        # Gensim-style retrieval: documents as lists of (id, value) tuples.
        corpus = ShardedCorpus(
            self.tmp_fname, self.data, shardsize=100, dim=self.dim,
            sparse_serialization=False, gensim=True
        )
        item = corpus[3]
        self.assertTrue(isinstance(item, list))
        self.assertTrue(isinstance(item[0], tuple))
        # Slices come back as generators of gensim documents.
        dslice = corpus[2:6]
        self.assertTrue(next(dslice) == corpus[2])
        dslice = list(dslice)
        self.assertTrue(isinstance(dslice, list))
        self.assertTrue(isinstance(dslice[0], list))
        self.assertTrue(isinstance(dslice[0][0], tuple))
        iscorp, _ = is_corpus(dslice)
        self.assertTrue(iscorp, "Is the object returned by slice notation a gensim corpus?")
        ilist = corpus[[2, 3, 4, 5]]
        self.assertTrue(next(ilist) == corpus[2])
        ilist = list(ilist)
        self.assertTrue(isinstance(ilist, list))
        self.assertTrue(isinstance(ilist[0], list))
        self.assertTrue(isinstance(ilist[0][0], tuple))
        # From generators to lists
        self.assertEqual(len(ilist), len(dslice))
        for i in xrange(len(ilist)):
            self.assertEqual(len(ilist[i]), len(dslice[i]),
                             "Row %d: dims %d/%d" % (i, len(ilist[i]),
                                                     len(dslice[i])))
            for j in xrange(len(ilist[i])):
                self.assertEqual(ilist[i][j], dslice[i][j],
                                 "ilist[%d][%d] = %s ,dslice[%d][%d] = %s" % (
                                     i, j, str(ilist[i][j]), i, j,
                                     str(dslice[i][j])))
        iscorp, _ = is_corpus(ilist)
        self.assertTrue(iscorp, "Is the object returned by list notation a gensim corpus?")
    def test_resize(self):
        # Re-sharding 1000 documents from 100 per shard to 250 per shard.
        dataset = ShardedCorpus(self.tmp_fname, self.data, shardsize=100,
                                dim=self.dim)
        self.assertEqual(10, dataset.n_shards)
        dataset.resize_shards(250)
        self.assertEqual(4, dataset.n_shards)
        # Every new shard must exist on disk.
        for n in xrange(dataset.n_shards):
            fname = dataset._shard_name(n)
            self.assertTrue(os.path.isfile(fname))
    def test_init_with_generator(self):
        # The corpus must accept a generator (single pass) as input data.
        def data_generator():
            yield [(0, 1)]
            yield [(1, 1)]
        gen_tmp_fname = self.tmp_fname + '.generator'
        corpus = ShardedCorpus(gen_tmp_fname, data_generator(), dim=2)
        self.assertEqual(2, len(corpus))
        self.assertEqual(1, corpus[0][0])
if __name__ == '__main__':
    # Build and run the suite explicitly (instead of unittest.main()) so only
    # TestShardedCorpus is executed.
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    tests = loader.loadTestsFromTestCase(TestShardedCorpus)
    suite.addTest(tests)
    runner = unittest.TextTestRunner()
    runner.run(suite)
| 9,286 | 32.286738 | 108 | py |
poincare_glove | poincare_glove-master/gensim/test/test_normmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from gensim.corpora import mmcorpus
from gensim.models import normmodel
from gensim.test.utils import datapath, get_tmpfile
class TestNormModel(unittest.TestCase):
    """Tests for NormModel's l1/l2 normalization over tuple (bag-of-words),
    scipy sparse, and numpy ndarray inputs, plus persistence round-trips.
    """
    def setUp(self):
        # Fixture corpus shipped with the test data.
        self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
        # Choose doc to be normalized. [3] chosen to demonstrate different results for l1 and l2 norm.
        # doc is [(1, 1.0), (5, 2.0), (8, 1.0)]
        self.doc = list(self.corpus)[3]
        self.model_l1 = normmodel.NormModel(self.corpus, norm='l1')
        self.model_l2 = normmodel.NormModel(self.corpus, norm='l2')
    def test_tupleInput_l1(self):
        """Test tuple input for l1 transformation"""
        # l1: weights divided by their sum (1 + 2 + 1 = 4).
        normalized = self.model_l1.normalize(self.doc)
        expected = [(1, 0.25), (5, 0.5), (8, 0.25)]
        self.assertTrue(np.allclose(normalized, expected))
    def test_sparseCSRInput_l1(self):
        """Test sparse csr matrix input for l1 transformation"""
        row = np.array([0, 0, 1, 2, 2, 2])
        col = np.array([0, 2, 2, 0, 1, 2])
        data = np.array([1, 2, 3, 4, 5, 6])
        sparse_matrix = csr_matrix((data, (row, col)), shape=(3, 3))
        normalized = self.model_l1.normalize(sparse_matrix)
        # Check if output is of same type
        self.assertTrue(issparse(normalized))
        # Check if output is correct (entries divided by the matrix total, 21).
        expected = np.array([[0.04761905, 0., 0.0952381],
                             [0., 0., 0.14285714],
                             [0.19047619, 0.23809524, 0.28571429]])
        self.assertTrue(np.allclose(normalized.toarray(), expected))
    def test_numpyndarrayInput_l1(self):
        """Test for np ndarray input for l1 transformation"""
        ndarray_matrix = np.array([
            [1, 0, 2],
            [0, 0, 3],
            [4, 5, 6]
        ])
        normalized = self.model_l1.normalize(ndarray_matrix)
        # Check if output is of same type
        self.assertTrue(isinstance(normalized, np.ndarray))
        # Check if output is correct
        expected = np.array([
            [0.04761905, 0., 0.0952381],
            [0., 0., 0.14285714],
            [0.19047619, 0.23809524, 0.28571429]
        ])
        self.assertTrue(np.allclose(normalized, expected))
        # Test if error is raised on unsupported input type
        self.assertRaises(ValueError, lambda model, doc: model.normalize(doc), self.model_l1, [1, 2, 3])
    def test_tupleInput_l2(self):
        """Test tuple input for l2 transformation"""
        # l2: weights divided by the Euclidean norm sqrt(1 + 4 + 1).
        normalized = self.model_l2.normalize(self.doc)
        expected = [(1, 0.4082482904638631), (5, 0.8164965809277261), (8, 0.4082482904638631)]
        self.assertTrue(np.allclose(normalized, expected))
    def test_sparseCSRInput_l2(self):
        """Test sparse csr matrix input for l2 transformation"""
        row = np.array([0, 0, 1, 2, 2, 2])
        col = np.array([0, 2, 2, 0, 1, 2])
        data = np.array([1, 2, 3, 4, 5, 6])
        sparse_matrix = csr_matrix((data, (row, col)), shape=(3, 3))
        normalized = self.model_l2.normalize(sparse_matrix)
        # Check if output is of same type
        self.assertTrue(issparse(normalized))
        # Check if output is correct
        expected = np.array([
            [0.10482848, 0., 0.20965697],
            [0., 0., 0.31448545],
            [0.41931393, 0.52414242, 0.6289709]
        ])
        self.assertTrue(np.allclose(normalized.toarray(), expected))
    def test_numpyndarrayInput_l2(self):
        """Test for np ndarray input for l2 transformation"""
        ndarray_matrix = np.array([
            [1, 0, 2],
            [0, 0, 3],
            [4, 5, 6]
        ])
        normalized = self.model_l2.normalize(ndarray_matrix)
        # Check if output is of same type
        self.assertTrue(isinstance(normalized, np.ndarray))
        # Check if output is correct
        expected = np.array([
            [0.10482848, 0., 0.20965697],
            [0., 0., 0.31448545],
            [0.41931393, 0.52414242, 0.6289709]
        ])
        self.assertTrue(np.allclose(normalized, expected))
        # Test if error is raised on unsupported input type
        self.assertRaises(ValueError, lambda model, doc: model.normalize(doc), self.model_l2, [1, 2, 3])
    def testInit(self):
        """Test if error messages raised on unsupported norm"""
        self.assertRaises(ValueError, normmodel.NormModel, self.corpus, 'l0')
    def testPersistence(self):
        # Round-trip the model through save/load and check it behaves the same.
        fname = get_tmpfile('gensim_models.tst')
        model = normmodel.NormModel(self.corpus)
        model.save(fname)
        model2 = normmodel.NormModel.load(fname)
        self.assertTrue(model.norms == model2.norms)
        tstvec = []
        # try projecting an empty vector
        self.assertTrue(np.allclose(model.normalize(tstvec), model2.normalize(tstvec)))
    def testPersistenceCompressed(self):
        # Same round-trip, but through a gzip-compressed file (.gz suffix).
        fname = get_tmpfile('gensim_models.tst.gz')
        model = normmodel.NormModel(self.corpus)
        model.save(fname)
        model2 = normmodel.NormModel.load(fname, mmap=None)
        self.assertTrue(model.norms == model2.norms)
        tstvec = []
        # try projecting an empty vector
        self.assertTrue(np.allclose(model.normalize(tstvec), model2.normalize(tstvec)))
if __name__ == '__main__':
    # Verbose logging helps diagnose model construction during test runs.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 5,765 | 35.961538 | 104 | py |
poincare_glove | poincare_glove-master/gensim/test/test_aggregation.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
from gensim.topic_coherence import aggregation
class TestAggregation(unittest.TestCase):
    """Sanity checks for the coherence aggregation stage."""

    def setUp(self):
        # Fixture: per-segment confirmation values to be aggregated.
        self.confirmed_measures = [1.1, 2.2, 3.3, 4.4]

    def testArithmeticMean(self):
        """arithmetic_mean() should return the plain average of the inputs."""
        mean = aggregation.arithmetic_mean(self.confirmed_measures)
        self.assertEqual(mean, 2.75)
if __name__ == '__main__':
    # Quiet logging: only warnings and above during the test run.
    logging.root.setLevel(logging.WARNING)
    unittest.main()
| 774 | 24 | 76 | py |
poincare_glove | poincare_glove-master/gensim/test/test_similarity_metrics.py | #!/usr/bin/env python
# encoding: utf-8
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated test to check similarity functions and isbow function.
"""
import logging
import unittest
from gensim import matutils
from scipy.sparse import csr_matrix, csc_matrix
import numpy as np
import math
from gensim.corpora.mmcorpus import MmCorpus
from gensim.models import ldamodel
from gensim.test.utils import datapath, common_dictionary, common_corpus
class TestIsBow(unittest.TestCase):
    """Tests for matutils.isbow, which detects bag-of-words formatted input."""

    def test_None(self):
        # None is never a bag of words.
        self.assertEqual(False, matutils.isbow(None))

    def test_bow(self):
        """Check accepted and rejected inputs across all supported formats."""
        # Inputs that must be recognised as bag-of-words.
        accepted = [
            [(0, 0.4)],                                   # a single (id, weight) pair
            [(0, 4.), (1, 2.), (2, 5.), (3, 8.)],         # multiple pairs
            [],                                           # empty input counts as BoW
            csr_matrix([[1, 0.4], [0, 0.3], [2, 0.1]]),   # sparse-matrix form
            np.array([[1, 0.4], [0, 0.2], [2, 0.2]]),     # ndarray form
        ]
        for candidate in accepted:
            self.assertEqual(True, matutils.isbow(candidate))
        # Inputs that must be rejected.
        rejected = [
            [[(2, 1), (3, 1), (4, 1), (5, 1), (1, 1), (7, 1)]],  # a corpus, not one document
            [(1, 3, 6)],                                         # triples are not (id, weight) pairs
        ]
        for candidate in rejected:
            self.assertEqual(False, matutils.isbow(candidate))
class TestHellinger(unittest.TestCase):
    """Tests for matutils.hellinger across the supported input formats."""

    def setUp(self):
        # Small corpus and an LDA model used for the distribution-vector test.
        self.corpus = MmCorpus(datapath('testcorpus.mm'))
        self.class_ = ldamodel.LdaModel
        self.model = self.class_(common_corpus, id2word=common_dictionary, num_topics=2, passes=100)

    def test_inputs(self):
        """Empty inputs of any supported type should give a distance of 0.0."""
        # checking empty inputs
        vec_1 = []
        vec_2 = []
        result = matutils.hellinger(vec_1, vec_2)
        expected = 0.0
        self.assertEqual(expected, result)
        # checking np array and list input
        vec_1 = np.array([])
        vec_2 = []
        result = matutils.hellinger(vec_1, vec_2)
        expected = 0.0
        self.assertEqual(expected, result)
        # checking scipy csr matrix and list input
        vec_1 = csr_matrix([])
        vec_2 = []
        result = matutils.hellinger(vec_1, vec_2)
        expected = 0.0
        self.assertEqual(expected, result)

    def test_distributions(self):
        """Check known Hellinger distances and that the metric is symmetric."""
        # checking different length bag of words as inputs
        vec_1 = [(2, 0.1), (3, 0.4), (4, 0.1), (5, 0.1), (1, 0.1), (7, 0.2)]
        vec_2 = [(1, 0.1), (3, 0.8), (4, 0.1)]
        result = matutils.hellinger(vec_1, vec_2)
        expected = 0.484060507634
        self.assertAlmostEqual(expected, result)
        # checking symmetrical bag of words inputs return same distance
        vec_1 = [(2, 0.1), (3, 0.4), (4, 0.1), (5, 0.1), (1, 0.1), (7, 0.2)]
        vec_2 = [(1, 0.1), (3, 0.8), (4, 0.1), (8, 0.1), (10, 0.8), (9, 0.1)]
        result = matutils.hellinger(vec_1, vec_2)
        result_symmetric = matutils.hellinger(vec_2, vec_1)
        expected = 0.856921568786
        self.assertAlmostEqual(expected, result)
        self.assertAlmostEqual(expected, result_symmetric)
        # checking ndarray, csr_matrix as inputs
        vec_1 = np.array([[1, 0.3], [0, 0.4], [2, 0.3]])
        vec_2 = csr_matrix([[1, 0.4], [0, 0.2], [2, 0.2]])
        result = matutils.hellinger(vec_1, vec_2)
        expected = 0.160618030536
        self.assertAlmostEqual(expected, result)
        # checking ndarray, list as inputs
        vec_1 = np.array([0.6, 0.1, 0.1, 0.2])
        vec_2 = [0.2, 0.2, 0.1, 0.5]
        result = matutils.hellinger(vec_1, vec_2)
        expected = 0.309742984153
        self.assertAlmostEqual(expected, result)
        # testing LDA distribution vectors
        np.random.seed(0)  # fixed seed so the trained topic vectors are reproducible
        model = self.class_(self.corpus, id2word=common_dictionary, num_topics=2, passes=100)
        lda_vec1 = model[[(1, 2), (2, 3)]]
        lda_vec2 = model[[(2, 2), (1, 3)]]
        result = matutils.hellinger(lda_vec1, lda_vec2)
        expected = 1.0406845281146034e-06
        self.assertAlmostEqual(expected, result)
class TestKL(unittest.TestCase):
    """Tests for matutils.kullback_leibler across the supported input formats."""

    def setUp(self):
        # Small corpus and an LDA model used for the distribution-vector test.
        self.corpus = MmCorpus(datapath('testcorpus.mm'))
        self.class_ = ldamodel.LdaModel
        self.model = self.class_(common_corpus, id2word=common_dictionary, num_topics=2, passes=100)

    def test_inputs(self):
        """Empty inputs of any supported type should give a divergence of 0.0."""
        # checking empty inputs
        vec_1 = []
        vec_2 = []
        result = matutils.kullback_leibler(vec_1, vec_2)
        expected = 0.0
        self.assertEqual(expected, result)
        # checking np array and list input
        vec_1 = np.array([])
        vec_2 = []
        result = matutils.kullback_leibler(vec_1, vec_2)
        expected = 0.0
        self.assertEqual(expected, result)
        # checking scipy csr matrix and list input
        vec_1 = csr_matrix([])
        vec_2 = []
        result = matutils.kullback_leibler(vec_1, vec_2)
        expected = 0.0
        self.assertEqual(expected, result)

    def test_distributions(self):
        """Check known KL divergences, including the asymmetric/infinite case."""
        # checking bag of words as inputs
        vec_1 = [(2, 0.1), (3, 0.4), (4, 0.1), (5, 0.1), (1, 0.1), (7, 0.2)]
        vec_2 = [(1, 0.1), (3, 0.8), (4, 0.1)]
        result = matutils.kullback_leibler(vec_2, vec_1, 8)
        expected = 0.55451775
        self.assertAlmostEqual(expected, result, places=5)
        # KL is not symmetric; vec1 compared with vec2 will contain log of zeros and return infinity
        vec_1 = [(2, 0.1), (3, 0.4), (4, 0.1), (5, 0.1), (1, 0.1), (7, 0.2)]
        vec_2 = [(1, 0.1), (3, 0.8), (4, 0.1)]
        result = matutils.kullback_leibler(vec_1, vec_2, 8)
        self.assertTrue(math.isinf(result))
        # checking ndarray, csr_matrix as inputs
        vec_1 = np.array([[1, 0.3], [0, 0.4], [2, 0.3]])
        vec_2 = csr_matrix([[1, 0.4], [0, 0.2], [2, 0.2]])
        result = matutils.kullback_leibler(vec_1, vec_2, 3)
        expected = 0.0894502
        self.assertAlmostEqual(expected, result, places=5)
        # checking ndarray, list as inputs
        vec_1 = np.array([0.6, 0.1, 0.1, 0.2])
        vec_2 = [0.2, 0.2, 0.1, 0.5]
        result = matutils.kullback_leibler(vec_1, vec_2)
        expected = 0.40659450877
        self.assertAlmostEqual(expected, result, places=5)
        # testing LDA distribution vectors
        np.random.seed(0)  # fixed seed so the trained topic vectors are reproducible
        model = self.class_(self.corpus, id2word=common_dictionary, num_topics=2, passes=100)
        lda_vec1 = model[[(1, 2), (2, 3)]]
        lda_vec2 = model[[(2, 2), (1, 3)]]
        result = matutils.kullback_leibler(lda_vec1, lda_vec2)
        expected = 4.283407e-12
        self.assertAlmostEqual(expected, result, places=5)
class TestJaccard(unittest.TestCase):
    """Tests for matutils.jaccard across the supported input formats."""

    def test_inputs(self):
        # Two empty vectors have an empty union, so jaccard divides by zero.
        self.assertRaises(ZeroDivisionError, matutils.jaccard, [], [])

    def test_distributions(self):
        """jaccard() should return 1 - |intersection| / |union| for each format."""
        # Bag-of-words inputs.
        bow_a = [(2, 1), (3, 4), (4, 1), (5, 1), (1, 1), (7, 2)]
        bow_b = [(1, 1), (3, 8), (4, 1)]
        self.assertAlmostEqual(1 - 0.3, matutils.jaccard(bow_b, bow_a))
        # ndarray vs. sparse CSR matrix.
        dense = np.array([[1, 3], [0, 4], [2, 3]])
        sparse = csr_matrix([[1, 4], [0, 2], [2, 2]])
        self.assertAlmostEqual(1 - 0.388888888889, matutils.jaccard(dense, sparse))
        # ndarray vs. plain list.
        arr = np.array([6, 1, 2, 3])
        lst = [4, 3, 2, 5]
        self.assertAlmostEqual(1 - 0.333333333333, matutils.jaccard(arr, lst))
class TestSoftCosineSimilarity(unittest.TestCase):
    """Tests for matutils.softcossim (soft cosine similarity)."""

    def test_inputs(self):
        """Empty vectors yield 0.0; unsupported matrix formats raise ValueError."""
        empty_a, empty_b = [], []
        # CSC-format term similarity matrix.
        self.assertEqual(0.0, matutils.softcossim(empty_a, empty_b, csc_matrix((0, 0))))
        # CSR-format term similarity matrix.
        self.assertEqual(0.0, matutils.softcossim(empty_a, empty_b, csr_matrix((0, 0))))
        # Any other matrix type (e.g. np.matrix) is rejected.
        with self.assertRaises(ValueError):
            matutils.softcossim(empty_a, empty_b, np.matrix([]))

    def test_distributions(self):
        """Check the known similarity of two overlapping bag-of-words vectors."""
        doc_a = [(0, 1.0), (2, 1.0)]  # hello world
        doc_b = [(1, 1.0), (2, 1.0)]  # hi world
        # Terms 0 and 1 ("hello"/"hi") are 50% similar; term 2 matches itself.
        term_similarity = csc_matrix([[1, 0.5, 0], [0.5, 1, 0], [0, 0, 1]])
        self.assertAlmostEqual(0.75, matutils.softcossim(doc_a, doc_b, term_similarity))
if __name__ == '__main__':
    # Enable verbose logging, then run every test in this module.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 9,813 | 34.557971 | 100 | py |
poincare_glove | poincare_glove-master/gensim/test/test_poincare.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Jayant Jain <jayantjain1992@gmail.com>
# Copyright (C) 2017 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking the poincare module from the models package.
"""
import logging
import os
import tempfile
import unittest
try:
from mock import Mock
except ImportError:
from unittest.mock import Mock
import numpy as np
try:
import autograd # noqa:F401
autograd_installed = True
except ImportError:
autograd_installed = False
from gensim.models.poincare import PoincareRelations, PoincareModel, PoincareKeyedVectors
from gensim.test.utils import datapath
logger = logging.getLogger(__name__)
def testfile():
    """Return the path of the scratch file that temporary test data is saved to."""
    tmp_dir = tempfile.gettempdir()
    return os.path.join(tmp_dir, 'gensim_word2vec.tst')
class TestPoincareData(unittest.TestCase):
    """Tests for reading relation files via PoincareRelations."""

    def test_encoding_handling(self):
        """Both cp852- and utf8-encoded relation files should decode to the same pairs."""
        expected_first = (u'tímto', u'budeš')
        # A non-UTF8 file needs an explicit encoding argument.
        cp852_pairs = list(PoincareRelations(datapath('poincare_cp852.tsv'), encoding='cp852'))
        self.assertEqual(len(cp852_pairs), 2)
        self.assertEqual(cp852_pairs[0], expected_first)
        # UTF8 is the default encoding.
        utf8_pairs = list(PoincareRelations(datapath('poincare_utf8.tsv')))
        self.assertEqual(len(utf8_pairs), 2)
        self.assertEqual(utf8_pairs[0], expected_first)
class TestPoincareModel(unittest.TestCase):
    """Tests for data loading, training, persistence and sampling of PoincareModel."""

    def setUp(self):
        # Small and larger hypernym relation fixtures used throughout the class.
        self.data = PoincareRelations(datapath('poincare_hypernyms.tsv'))
        self.data_large = PoincareRelations(datapath('poincare_hypernyms_large.tsv'))

    def models_equal(self, model_1, model_2):
        """Assert that two models have identical vocabularies and vectors."""
        self.assertEqual(len(model_1.kv.vocab), len(model_2.kv.vocab))
        self.assertEqual(set(model_1.kv.vocab.keys()), set(model_2.kv.vocab.keys()))
        self.assertTrue(np.allclose(model_1.kv.syn0, model_2.kv.syn0))

    def test_data_counts(self):
        """Tests whether data has been loaded correctly and completely."""
        model = PoincareModel(self.data)
        self.assertEqual(len(model.all_relations), 5)
        self.assertEqual(len(model.node_relations[model.kv.vocab['kangaroo.n.01'].index]), 3)
        self.assertEqual(len(model.kv.vocab), 7)
        # 'mammal.n.01' only appears as a hypernym, so it has no outgoing relations.
        self.assertTrue('mammal.n.01' not in model.node_relations)

    def test_data_counts_with_bytes(self):
        """Tests whether input bytes data is loaded correctly and completely."""
        model = PoincareModel([(b'\x80\x01c', b'\x50\x71a'), (b'node.1', b'node.2')])
        self.assertEqual(len(model.all_relations), 2)
        self.assertEqual(len(model.node_relations[model.kv.vocab[b'\x80\x01c'].index]), 1)
        self.assertEqual(len(model.kv.vocab), 4)
        self.assertTrue(b'\x50\x71a' not in model.node_relations)

    def test_persistence(self):
        """Tests whether the model is saved and loaded correctly."""
        model = PoincareModel(self.data, burn_in=0, negative=3)
        model.train(epochs=1)
        model.save(testfile())
        loaded = PoincareModel.load(testfile())
        self.models_equal(model, loaded)

    def test_persistence_separate_file(self):
        """Tests whether the model is saved and loaded correctly when the arrays are stored separately."""
        model = PoincareModel(self.data, burn_in=0, negative=3)
        model.train(epochs=1)
        # sep_limit=1 forces large arrays into separate files on disk.
        model.save(testfile(), sep_limit=1)
        loaded = PoincareModel.load(testfile())
        self.models_equal(model, loaded)

    def test_invalid_data_raises_error(self):
        """Tests that error is raised on invalid input data."""
        with self.assertRaises(ValueError):
            PoincareModel([("a", "b", "c")])
        with self.assertRaises(ValueError):
            PoincareModel(["a", "b", "c"])
        with self.assertRaises(ValueError):
            PoincareModel("ab")

    def test_vector_shape(self):
        """Tests whether vectors are initialized with the correct size."""
        model = PoincareModel(self.data, size=20)
        self.assertEqual(model.kv.syn0.shape, (7, 20))

    def test_vector_dtype(self):
        """Tests whether vectors have the correct dtype before and after training."""
        model = PoincareModel(self.data_large, dtype=np.float32, burn_in=0, negative=3)
        self.assertEqual(model.kv.syn0.dtype, np.float32)
        model.train(epochs=1)
        self.assertEqual(model.kv.syn0.dtype, np.float32)

    def test_training(self):
        """Tests that vectors are different before and after training."""
        model = PoincareModel(self.data_large, burn_in=0, negative=3)
        old_vectors = np.copy(model.kv.syn0)
        model.train(epochs=2)
        self.assertFalse(np.allclose(old_vectors, model.kv.syn0))

    def test_training_multiple(self):
        """Tests that calling train multiple times results in different vectors."""
        model = PoincareModel(self.data_large, burn_in=0, negative=3)
        model.train(epochs=2)
        old_vectors = np.copy(model.kv.syn0)
        model.train(epochs=1)
        self.assertFalse(np.allclose(old_vectors, model.kv.syn0))
        # Training for zero epochs must leave the vectors untouched.
        old_vectors = np.copy(model.kv.syn0)
        model.train(epochs=0)
        self.assertTrue(np.allclose(old_vectors, model.kv.syn0))

    def test_gradients_check(self):
        """Tests that the model is trained successfully with gradients check enabled."""
        model = PoincareModel(self.data, negative=3)
        try:
            model.train(epochs=1, batch_size=1, check_gradients_every=1)
        except Exception as e:
            self.fail('Exception %s raised unexpectedly while training with gradient checking' % repr(e))

    @unittest.skipIf(not autograd_installed, 'autograd needs to be installed for this test')
    def test_wrong_gradients_raises_assertion(self):
        """Tests that discrepancy in gradients raises an error."""
        # Stub out the analytic gradient with zeros so it disagrees with autograd's.
        model = PoincareModel(self.data, negative=3)
        model._loss_grad = Mock(return_value=np.zeros((2 + model.negative, model.size)))
        with self.assertRaises(AssertionError):
            model.train(epochs=1, batch_size=1, check_gradients_every=1)

    def test_reproducible(self):
        """Tests that vectors are same for two independent models trained with the same seed."""
        model_1 = PoincareModel(self.data_large, seed=1, negative=3, burn_in=1)
        model_1.train(epochs=2)
        model_2 = PoincareModel(self.data_large, seed=1, negative=3, burn_in=1)
        model_2.train(epochs=2)
        self.assertTrue(np.allclose(model_1.kv.syn0, model_2.kv.syn0))

    def test_burn_in(self):
        """Tests that vectors are different after burn-in."""
        model = PoincareModel(self.data, burn_in=1, negative=3)
        original_vectors = np.copy(model.kv.syn0)
        # epochs=0: only the burn-in phase runs, which must still move the vectors.
        model.train(epochs=0)
        self.assertFalse(np.allclose(model.kv.syn0, original_vectors))

    def test_burn_in_only_done_once(self):
        """Tests that burn-in does not happen when train is called a second time."""
        model = PoincareModel(self.data, negative=3, burn_in=1)
        model.train(epochs=0)
        original_vectors = np.copy(model.kv.syn0)
        model.train(epochs=0)
        self.assertTrue(np.allclose(model.kv.syn0, original_vectors))

    def test_negatives(self):
        """Tests that correct number of negatives are sampled."""
        model = PoincareModel(self.data, negative=5)
        self.assertEqual(len(model._get_candidate_negatives()), 5)

    def test_error_if_negative_more_than_population(self):
        """Tests error is raised if number of negatives to sample is more than remaining nodes."""
        model = PoincareModel(self.data, negative=5)
        with self.assertRaises(ValueError):
            model.train(epochs=1)

    def test_no_duplicates_and_positives_in_negative_sample(self):
        """Tests that no duplicates or positively related nodes are present in negative samples."""
        model = PoincareModel(self.data_large, negative=3)
        positive_nodes = model.node_relations[0]  # Positive nodes for node 0
        num_samples = 100  # Repeat experiment multiple times
        for i in range(num_samples):
            negatives = model._sample_negatives(0)
            self.assertFalse(positive_nodes & set(negatives))
            self.assertEqual(len(negatives), len(set(negatives)))

    def test_handle_duplicates(self):
        """Tests that updates for a node appearing multiple times are merged into one update."""
        vector_updates = np.array([[0.5, 0.5], [0.1, 0.2], [0.3, -0.2]])
        node_indices = [0, 1, 0]
        PoincareModel._handle_duplicates(vector_updates, node_indices)
        # Node 0 appears twice: its updates are summed into the last occurrence
        # ([0.5+0.3, 0.5-0.2]) and the earlier occurrence is zeroed out.
        vector_updates_expected = np.array([[0.0, 0.0], [0.1, 0.2], [0.8, 0.3]])
        self.assertTrue((vector_updates == vector_updates_expected).all())

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of the temporary model file.
        try:
            os.unlink(testfile())
        except OSError:
            pass
class TestPoincareKeyedVectors(unittest.TestCase):
    """Tests for query operations on pre-trained PoincareKeyedVectors."""

    def setUp(self):
        # Pre-trained WordNet-noun vectors shipped as a binary test fixture.
        self.vectors = PoincareKeyedVectors.load_word2vec_format(datapath('poincare_vectors.bin'), binary=True)

    def test_most_similar(self):
        """Test most_similar returns expected results."""
        expected = [
            'canine.n.02',
            'hunting_dog.n.01',
            'carnivore.n.01',
            'placental.n.01',
            'mammal.n.01'
        ]
        predicted = [result[0] for result in self.vectors.most_similar('dog.n.01', topn=5)]
        self.assertEqual(expected, predicted)

    def test_most_similar_topn(self):
        """Test most_similar returns correct results when `topn` is specified."""
        self.assertEqual(len(self.vectors.most_similar('dog.n.01', topn=5)), 5)
        self.assertEqual(len(self.vectors.most_similar('dog.n.01', topn=10)), 10)
        # topn=None returns every other word in the vocabulary, ranked.
        predicted = self.vectors.most_similar('dog.n.01', topn=None)
        self.assertEqual(len(predicted), len(self.vectors.vocab) - 1)
        self.assertEqual(predicted[-1][0], 'gallant_fox.n.01')

    def test_most_similar_raises_keyerror(self):
        """Test most_similar raises KeyError when input is out of vocab."""
        with self.assertRaises(KeyError):
            self.vectors.most_similar('not_in_vocab')

    def test_most_similar_restrict_vocab(self):
        """Test most_similar returns handles restrict_vocab correctly."""
        expected = set(self.vectors.index2word[:5])
        predicted = set(result[0] for result in self.vectors.most_similar('dog.n.01', topn=5, restrict_vocab=5))
        self.assertEqual(expected, predicted)

    def test_most_similar_to_given(self):
        """Test most_similar_to_given returns correct results."""
        predicted = self.vectors.most_similar_to_given('dog.n.01', ['carnivore.n.01', 'placental.n.01', 'mammal.n.01'])
        self.assertEqual(predicted, 'carnivore.n.01')

    def test_most_similar_with_vector_input(self):
        """Test most_similar returns expected results with an input vector instead of an input word."""
        expected = [
            'dog.n.01',
            'canine.n.02',
            'hunting_dog.n.01',
            'carnivore.n.01',
            'placental.n.01',
        ]
        input_vector = self.vectors['dog.n.01']
        predicted = [result[0] for result in self.vectors.most_similar([input_vector], topn=5)]
        self.assertEqual(expected, predicted)

    def test_distance(self):
        """Test that distance returns expected values."""
        self.assertTrue(np.allclose(self.vectors.distance('dog.n.01', 'mammal.n.01'), 4.5278745))
        self.assertEqual(self.vectors.distance('dog.n.01', 'dog.n.01'), 0)

    def test_distances(self):
        """Test that distances between one word and multiple other words have expected values."""
        distances = self.vectors.distances('dog.n.01', ['mammal.n.01', 'dog.n.01'])
        self.assertTrue(np.allclose(distances, [4.5278745, 0]))
        # With no word list, distances to the entire vocabulary are returned.
        distances = self.vectors.distances('dog.n.01')
        self.assertEqual(len(distances), len(self.vectors.vocab))
        self.assertTrue(np.allclose(distances[-1], 10.04756))

    def test_distances_with_vector_input(self):
        """Test that distances between input vector and a list of words have expected values."""
        input_vector = self.vectors['dog.n.01']
        distances = self.vectors.distances(input_vector, ['mammal.n.01', 'dog.n.01'])
        self.assertTrue(np.allclose(distances, [4.5278745, 0]))
        distances = self.vectors.distances(input_vector)
        self.assertEqual(len(distances), len(self.vectors.vocab))
        self.assertTrue(np.allclose(distances[-1], 10.04756))

    def test_poincare_distances_batch(self):
        """Test that poincare_distance_batch returns correct distances."""
        vector_1 = self.vectors['dog.n.01']
        vectors_2 = self.vectors[['mammal.n.01', 'dog.n.01']]
        distances = self.vectors.vector_distance_batch(vector_1, vectors_2)
        self.assertTrue(np.allclose(distances, [4.5278745, 0]))

    def test_poincare_distance(self):
        """Test that poincare_distance returns correct distance between two input vectors."""
        vector_1 = self.vectors['dog.n.01']
        vector_2 = self.vectors['mammal.n.01']
        distance = self.vectors.vector_distance(vector_1, vector_2)
        self.assertTrue(np.allclose(distance, 4.5278745))
        distance = self.vectors.vector_distance(vector_1, vector_1)
        self.assertTrue(np.allclose(distance, 0))

    def test_closest_child(self):
        """Test closest_child returns expected value and returns None for lowest node in hierarchy."""
        self.assertEqual(self.vectors.closest_child('dog.n.01'), 'terrier.n.01')
        self.assertEqual(self.vectors.closest_child('harbor_porpoise.n.01'), None)

    def test_closest_parent(self):
        """Test closest_parent returns expected value and returns None for highest node in hierarchy."""
        self.assertEqual(self.vectors.closest_parent('dog.n.01'), 'canine.n.02')
        self.assertEqual(self.vectors.closest_parent('mammal.n.01'), None)

    def test_ancestors(self):
        """Test ancestors returns expected list and returns empty list for highest node in hierarchy."""
        expected = ['canine.n.02', 'carnivore.n.01', 'placental.n.01', 'mammal.n.01']
        self.assertEqual(self.vectors.ancestors('dog.n.01'), expected)
        expected = []
        self.assertEqual(self.vectors.ancestors('mammal.n.01'), expected)

    def test_descendants(self):
        """Test descendants returns expected list and returns empty list for lowest node in hierarchy."""
        expected = [
            'terrier.n.01', 'sporting_dog.n.01', 'spaniel.n.01', 'water_spaniel.n.01', 'irish_water_spaniel.n.01'
        ]
        self.assertEqual(self.vectors.descendants('dog.n.01'), expected)
        self.assertEqual(self.vectors.descendants('dog.n.01', max_depth=3), expected[:3])

    def test_similarity(self):
        """Test similarity returns expected value for two nodes, and for identical nodes."""
        self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))
        self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))

    def test_norm(self):
        """Test norm returns expected value."""
        # BUGFIX: this method was named `norm`, so unittest never discovered or
        # ran it; renamed with the required `test_` prefix.
        self.assertTrue(np.allclose(self.vectors.norm('dog.n.01'), 0.97757602))
        self.assertTrue(np.allclose(self.vectors.norm('mammal.n.01'), 0.03914723))

    def test_difference_in_hierarchy(self):
        """Test difference_in_hierarchy returns expected value for two nodes, and for identical nodes."""
        self.assertTrue(np.allclose(self.vectors.difference_in_hierarchy('dog.n.01', 'dog.n.01'), 0))
        self.assertTrue(np.allclose(self.vectors.difference_in_hierarchy('mammal.n.01', 'dog.n.01'), 0.9384287))
        self.assertTrue(np.allclose(self.vectors.difference_in_hierarchy('dog.n.01', 'mammal.n.01'), -0.9384287))

    def test_words_closer_than(self):
        """Test words_closer_than returns expected value for distinct and identical nodes."""
        self.assertEqual(self.vectors.words_closer_than('dog.n.01', 'dog.n.01'), [])
        expected = set(['canine.n.02', 'hunting_dog.n.01'])
        self.assertEqual(set(self.vectors.words_closer_than('dog.n.01', 'carnivore.n.01')), expected)

    def test_rank(self):
        """Test rank returns expected value for distinct and identical nodes."""
        self.assertEqual(self.vectors.rank('dog.n.01', 'dog.n.01'), 1)
        self.assertEqual(self.vectors.rank('dog.n.01', 'carnivore.n.01'), 3)
if __name__ == '__main__':
    # Enable verbose logging, then run every test in this module.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 16,790 | 44.877049 | 119 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.