repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
poincare_glove | poincare_glove-master/gensim/test/test_ldamodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import numbers
import six
import numpy as np
from numpy.testing import assert_allclose
from gensim.corpora import mmcorpus, Dictionary
from gensim.models import ldamodel, ldamulticore
from gensim import matutils, utils
from gensim.test import basetmtests
from gensim.test.utils import datapath, get_tmpfile, common_texts
dictionary = Dictionary(common_texts)
corpus = [dictionary.doc2bow(text) for text in common_texts]
def testRandomState():
    """Check that `utils.get_random_state` accepts every supported seed form.

    BUGFIX: the original list contained ``np.random.seed(0)``, which returns
    ``None`` — so the list effectively tested ``None`` twice and, worse, seeded
    numpy's *global* RNG as a side effect of building the test cases. Each
    supported input kind (None, a RandomState instance, an int seed) is now
    listed exactly once, with no hidden side effects.
    """
    testcases = [None, np.random.RandomState(0), 0]
    for testcase in testcases:
        assert isinstance(utils.get_random_state(testcase), np.random.RandomState)
class TestLdaModel(unittest.TestCase, basetmtests.TestBaseTopicModel):
    """End-to-end tests for :class:`gensim.models.ldamodel.LdaModel`.

    Uses the module-level `dictionary`/`corpus` fixtures plus the bundled
    ``testcorpus.mm``. Subclassed by ``TestLdaMulticore`` below, which re-runs
    the whole suite against the multicore implementation.
    """

    def setUp(self):
        # Fresh corpus + a pre-trained 2-topic model shared by most tests.
        self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
        self.class_ = ldamodel.LdaModel
        self.model = self.class_(corpus, id2word=dictionary, num_topics=2, passes=100)

    def testTransform(self):
        """Train from scratch and check one transformed document against known topic weights."""
        passed = False
        # sometimes, LDA training gets stuck at a local minimum
        # in that case try re-training the model from scratch, hoping for a
        # better random initialization
        for i in range(25):  # restart at most 25 times
            # create the transformation model
            model = self.class_(id2word=dictionary, num_topics=2, passes=100)
            model.update(self.corpus)

            # transform one document
            doc = list(corpus)[0]
            transformed = model[doc]

            vec = matutils.sparse2full(transformed, 2)  # convert to dense vector, for easier equality tests
            expected = [0.13, 0.87]
            # must contain the same values, up to re-ordering
            passed = np.allclose(sorted(vec), sorted(expected), atol=1e-1)
            if passed:
                break
            logging.warning(
                "LDA failed to converge on attempt %i (got %s, expected %s)", i, sorted(vec), sorted(expected)
            )
        self.assertTrue(passed)

    def testAlphaAuto(self):
        """alpha='auto' must learn a prior different from the fixed symmetric one."""
        model1 = self.class_(corpus, id2word=dictionary, alpha='symmetric', passes=10)
        modelauto = self.class_(corpus, id2word=dictionary, alpha='auto', passes=10)

        # did we learn something?
        self.assertFalse(all(np.equal(model1.alpha, modelauto.alpha)))

    def testAlpha(self):
        """Every accepted `alpha` spelling yields the expected vector; bad shapes/strings raise."""
        kwargs = dict(
            id2word=dictionary,
            num_topics=2,
            alpha=None
        )
        expected_shape = (2,)

        # should not raise anything
        self.class_(**kwargs)

        kwargs['alpha'] = 'symmetric'
        model = self.class_(**kwargs)
        self.assertEqual(model.alpha.shape, expected_shape)
        assert_allclose(model.alpha, np.array([0.5, 0.5]))

        kwargs['alpha'] = 'asymmetric'
        model = self.class_(**kwargs)
        self.assertEqual(model.alpha.shape, expected_shape)
        assert_allclose(model.alpha, [0.630602, 0.369398], rtol=1e-5)

        kwargs['alpha'] = 0.3
        model = self.class_(**kwargs)
        self.assertEqual(model.alpha.shape, expected_shape)
        assert_allclose(model.alpha, np.array([0.3, 0.3]))

        kwargs['alpha'] = 3
        model = self.class_(**kwargs)
        self.assertEqual(model.alpha.shape, expected_shape)
        assert_allclose(model.alpha, np.array([3, 3]))

        kwargs['alpha'] = [0.3, 0.3]
        model = self.class_(**kwargs)
        self.assertEqual(model.alpha.shape, expected_shape)
        assert_allclose(model.alpha, np.array([0.3, 0.3]))

        kwargs['alpha'] = np.array([0.3, 0.3])
        model = self.class_(**kwargs)
        self.assertEqual(model.alpha.shape, expected_shape)
        assert_allclose(model.alpha, np.array([0.3, 0.3]))

        # all should raise an exception for being wrong shape
        kwargs['alpha'] = [0.3, 0.3, 0.3]
        self.assertRaises(AssertionError, self.class_, **kwargs)

        kwargs['alpha'] = [[0.3], [0.3]]
        self.assertRaises(AssertionError, self.class_, **kwargs)

        kwargs['alpha'] = [0.3]
        self.assertRaises(AssertionError, self.class_, **kwargs)

        kwargs['alpha'] = "gensim is cool"
        self.assertRaises(ValueError, self.class_, **kwargs)

    def testEtaAuto(self):
        """eta='auto' must learn a word prior different from the fixed symmetric one."""
        model1 = self.class_(corpus, id2word=dictionary, eta='symmetric', passes=10)
        modelauto = self.class_(corpus, id2word=dictionary, eta='auto', passes=10)

        # did we learn something?
        self.assertFalse(np.allclose(model1.eta, modelauto.eta))

    def testEta(self):
        """Every accepted `eta` spelling yields the expected vector/matrix; bad shapes/strings raise."""
        kwargs = dict(
            id2word=dictionary,
            num_topics=2,
            eta=None
        )
        num_terms = len(dictionary)
        expected_shape = (num_terms,)

        # should not raise anything
        model = self.class_(**kwargs)
        self.assertEqual(model.eta.shape, expected_shape)
        assert_allclose(model.eta, np.array([0.5] * num_terms))

        kwargs['eta'] = 'symmetric'
        model = self.class_(**kwargs)
        self.assertEqual(model.eta.shape, expected_shape)
        assert_allclose(model.eta, np.array([0.5] * num_terms))

        kwargs['eta'] = 0.3
        model = self.class_(**kwargs)
        self.assertEqual(model.eta.shape, expected_shape)
        assert_allclose(model.eta, np.array([0.3] * num_terms))

        kwargs['eta'] = 3
        model = self.class_(**kwargs)
        self.assertEqual(model.eta.shape, expected_shape)
        assert_allclose(model.eta, np.array([3] * num_terms))

        kwargs['eta'] = [0.3] * num_terms
        model = self.class_(**kwargs)
        self.assertEqual(model.eta.shape, expected_shape)
        assert_allclose(model.eta, np.array([0.3] * num_terms))

        kwargs['eta'] = np.array([0.3] * num_terms)
        model = self.class_(**kwargs)
        self.assertEqual(model.eta.shape, expected_shape)
        assert_allclose(model.eta, np.array([0.3] * num_terms))

        # should be ok with num_topics x num_terms
        testeta = np.array([[0.5] * len(dictionary)] * 2)
        kwargs['eta'] = testeta
        self.class_(**kwargs)

        # all should raise an exception for being wrong shape
        kwargs['eta'] = testeta.reshape(tuple(reversed(testeta.shape)))
        self.assertRaises(AssertionError, self.class_, **kwargs)

        kwargs['eta'] = [0.3]
        self.assertRaises(AssertionError, self.class_, **kwargs)

        kwargs['eta'] = [0.3] * (num_terms + 1)
        self.assertRaises(AssertionError, self.class_, **kwargs)

        kwargs['eta'] = "gensim is cool"
        self.assertRaises(ValueError, self.class_, **kwargs)

        # note: unlike alpha, 'asymmetric' is not a valid eta value
        kwargs['eta'] = "asymmetric"
        self.assertRaises(ValueError, self.class_, **kwargs)

    def testTopTopics(self):
        """top_topics returns (topic, coherence_score) pairs with the expected element types."""
        top_topics = self.model.top_topics(self.corpus)

        for topic, score in top_topics:
            self.assertTrue(isinstance(topic, list))
            self.assertTrue(isinstance(score, float))

            for v, k in topic:
                self.assertTrue(isinstance(k, six.string_types))
                self.assertTrue(np.issubdtype(v, float))

    def testGetTopicTerms(self):
        """get_topic_terms returns (term_id, probability) pairs."""
        topic_terms = self.model.get_topic_terms(1)

        for k, v in topic_terms:
            self.assertTrue(isinstance(k, numbers.Integral))
            self.assertTrue(np.issubdtype(v, float))

    def testGetDocumentTopics(self):
        """Shape/type checks for get_document_topics, with and without per-word topics and filtering."""
        # NOTE(review): np.random.seed(0) returns None, so random_state is
        # effectively None and the *global* numpy RNG gets seeded as a side
        # effect — probably intended random_state=0; confirm before changing.
        model = self.class_(
            self.corpus, id2word=dictionary, num_topics=2, passes=100, random_state=np.random.seed(0)
        )

        doc_topics = model.get_document_topics(self.corpus)

        for topic in doc_topics:
            self.assertTrue(isinstance(topic, list))
            for k, v in topic:
                self.assertTrue(isinstance(k, int))
                self.assertTrue(np.issubdtype(v, float))

        # Test case to use the get_document_topic function for the corpus
        all_topics = model.get_document_topics(self.corpus, per_word_topics=True)

        self.assertEqual(model.state.numdocs, len(corpus))

        for topic in all_topics:
            self.assertTrue(isinstance(topic, tuple))
            for k, v in topic[0]:  # list of doc_topics
                self.assertTrue(isinstance(k, int))
                self.assertTrue(np.issubdtype(v, float))

            for w, topic_list in topic[1]:  # list of word_topics
                self.assertTrue(isinstance(w, int))
                self.assertTrue(isinstance(topic_list, list))

            for w, phi_values in topic[2]:  # list of word_phis
                self.assertTrue(isinstance(w, int))
                self.assertTrue(isinstance(phi_values, list))

        # Test case to check the filtering effect of minimum_probability and minimum_phi_value
        doc_topic_count_na = 0
        word_phi_count_na = 0

        all_topics = model.get_document_topics(
            self.corpus, minimum_probability=0.8, minimum_phi_value=1.0, per_word_topics=True
        )

        self.assertEqual(model.state.numdocs, len(corpus))

        # NOTE(review): source indentation was lost; the count below is placed
        # at per-document level. With minimum_probability=0.8 at most one topic
        # survives per document, so per-document and per-entry nesting give the
        # same count — confirm against upstream if this ever matters.
        for topic in all_topics:
            self.assertTrue(isinstance(topic, tuple))
            for k, v in topic[0]:  # list of doc_topics
                self.assertTrue(isinstance(k, int))
                self.assertTrue(np.issubdtype(v, float))
            if len(topic[0]) != 0:
                doc_topic_count_na += 1

            for w, topic_list in topic[1]:  # list of word_topics
                self.assertTrue(isinstance(w, int))
                self.assertTrue(isinstance(topic_list, list))

            for w, phi_values in topic[2]:  # list of word_phis
                self.assertTrue(isinstance(w, int))
                self.assertTrue(isinstance(phi_values, list))
                if len(phi_values) != 0:
                    word_phi_count_na += 1

        # the filters must have removed at least something
        self.assertTrue(model.state.numdocs > doc_topic_count_na)
        self.assertTrue(sum([len(i) for i in corpus]) > word_phi_count_na)

        doc_topics, word_topics, word_phis = model.get_document_topics(self.corpus[1], per_word_topics=True)

        for k, v in doc_topics:
            self.assertTrue(isinstance(k, int))
            self.assertTrue(np.issubdtype(v, float))

        for w, topic_list in word_topics:
            self.assertTrue(isinstance(w, int))
            self.assertTrue(isinstance(topic_list, list))

        for w, phi_values in word_phis:
            self.assertTrue(isinstance(w, int))
            self.assertTrue(isinstance(phi_values, list))

        # word_topics looks like this: ({word_id => [topic_id_most_probable, topic_id_second_most_probable, ...]).
        # we check one case in word_topics, i.e of the first word in the doc, and it's likely topics.
        # FIXME: Fails on osx and win
        # expected_word = 0
        # self.assertEqual(word_topics[0][0], expected_word)
        # self.assertTrue(0 in word_topics[0][1])

    def testTermTopics(self):
        """get_term_topics accepts both a word id and the word string itself."""
        # NOTE(review): same np.random.seed(0)-returns-None quirk as in
        # testGetDocumentTopics above — confirm before changing.
        model = self.class_(
            self.corpus, id2word=dictionary, num_topics=2, passes=100, random_state=np.random.seed(0)
        )

        # check with word_type
        result = model.get_term_topics(2)
        for topic_no, probability in result:
            self.assertTrue(isinstance(topic_no, int))
            self.assertTrue(np.issubdtype(probability, float))

        # checks if topic '1' is in the result list
        # FIXME: Fails on osx and win
        # self.assertTrue(1 in result[0])

        # if user has entered word instead, check with word
        result = model.get_term_topics(str(model.id2word[2]))
        for topic_no, probability in result:
            self.assertTrue(isinstance(topic_no, int))
            self.assertTrue(np.issubdtype(probability, float))

        # checks if topic '1' is in the result list
        # FIXME: Fails on osx and win
        # self.assertTrue(1 in result[0])

    def testPasses(self):
        """The learning-rate rho must follow the same schedule regardless of `passes`."""
        # long message includes the original error message with a custom one
        self.longMessage = True
        # construct what we expect when passes aren't involved
        test_rhots = list()
        model = self.class_(id2word=dictionary, chunksize=1, num_topics=2)

        def final_rhot(model):
            # rho = (offset + num_updates/chunksize) ** -decay
            return pow(model.offset + (1 * model.num_updates) / model.chunksize, -model.decay)

        # generate 5 updates to test rhot on
        for x in range(5):
            model.update(self.corpus)
            test_rhots.append(final_rhot(model))

        for passes in [1, 5, 10, 50, 100]:
            model = self.class_(id2word=dictionary, chunksize=1, num_topics=2, passes=passes)
            self.assertEqual(final_rhot(model), 1.0)
            # make sure the rhot matches the test after each update
            for test_rhot in test_rhots:
                model.update(self.corpus)

                msg = ", ".join(str(x) for x in [passes, model.num_updates, model.state.numdocs])
                self.assertAlmostEqual(final_rhot(model), test_rhot, msg=msg)

            self.assertEqual(model.state.numdocs, len(corpus) * len(test_rhots))
            self.assertEqual(model.num_updates, len(corpus) * len(test_rhots))

    # def testTopicSeeding(self):
    #     for topic in range(2):
    #         passed = False
    #         for i in range(5):  # restart at most this many times, to mitigate LDA randomness
    #             # try seeding it both ways round, check you get the same
    #             # topics out but with which way round they are depending
    #             # on the way round they're seeded
    #             eta = np.ones((2, len(dictionary))) * 0.5
    #             system = dictionary.token2id[u'system']
    #             trees = dictionary.token2id[u'trees']
    #
    #             # aggressively seed the word 'system', in one of the
    #             # two topics, 10 times higher than the other words
    #             eta[topic, system] *= 10.0
    #
    #             model = self.class_(id2word=dictionary, num_topics=2, passes=200, eta=eta)
    #             model.update(self.corpus)
    #
    #             topics = [{word: p for p, word in model.show_topic(j, topn=None)} for j in range(2)]
    #
    #             # check that the word 'system' in the topic we seeded got a high weight,
    #             # and the word 'trees' (the main word in the other topic) a low weight --
    #             # and vice versa for the other topic (which we didn't seed with 'system')
    #             passed = (
    #                 (topics[topic][u'system'] > topics[topic][u'trees'])
    #                 and
    #                 (topics[1 - topic][u'system'] < topics[1 - topic][u'trees'])
    #             )
    #             if passed:
    #                 break
    #             logging.warning("LDA failed to converge on attempt %i (got %s)", i, topics)
    #         self.assertTrue(passed)

    def testPersistence(self):
        """Save/load round trip preserves topics and the expElogbeta matrix."""
        fname = get_tmpfile('gensim_models_lda.tst')
        model = self.model
        model.save(fname)
        model2 = self.class_.load(fname)
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(np.allclose(model.expElogbeta, model2.expElogbeta))
        tstvec = []
        self.assertTrue(np.allclose(model[tstvec], model2[tstvec]))  # try projecting an empty vector

    def testModelCompatibilityWithPythonVersions(self):
        """Models pickled under Python 2.7 and 3.5 must load and agree with each other."""
        fname_model_2_7 = datapath('ldamodel_python_2_7')
        model_2_7 = self.class_.load(fname_model_2_7)
        fname_model_3_5 = datapath('ldamodel_python_3_5')
        model_3_5 = self.class_.load(fname_model_3_5)
        self.assertEqual(model_2_7.num_topics, model_3_5.num_topics)
        self.assertTrue(np.allclose(model_2_7.expElogbeta, model_3_5.expElogbeta))
        tstvec = []
        self.assertTrue(np.allclose(model_2_7[tstvec], model_3_5[tstvec]))  # try projecting an empty vector
        # NOTE(review): relies on id2word exposing a py2-style iteritems();
        # verify this still exists on the id2word type under Python 3.
        id2word_2_7 = dict(model_2_7.id2word.iteritems())
        id2word_3_5 = dict(model_3_5.id2word.iteritems())
        self.assertEqual(set(id2word_2_7.keys()), set(id2word_3_5.keys()))

    def testPersistenceIgnore(self):
        """save(..., ignore=...) must drop id2word whether given as str or list."""
        fname = get_tmpfile('gensim_models_lda_testPersistenceIgnore.tst')
        model = ldamodel.LdaModel(self.corpus, num_topics=2)
        model.save(fname, ignore='id2word')
        model2 = ldamodel.LdaModel.load(fname)
        self.assertTrue(model2.id2word is None)

        model.save(fname, ignore=['id2word'])
        model2 = ldamodel.LdaModel.load(fname)
        self.assertTrue(model2.id2word is None)

    def testPersistenceCompressed(self):
        """Save/load round trip through a gzip-compressed file."""
        fname = get_tmpfile('gensim_models_lda.tst.gz')
        model = self.model
        model.save(fname)
        model2 = self.class_.load(fname, mmap=None)
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(np.allclose(model.expElogbeta, model2.expElogbeta))
        tstvec = []
        self.assertTrue(np.allclose(model[tstvec], model2[tstvec]))  # try projecting an empty vector

    def testLargeMmap(self):
        """Arrays stored separately (sep_limit=0) must load back as numpy memmaps."""
        fname = get_tmpfile('gensim_models_lda.tst')
        model = self.model

        # simulate storing large arrays separately
        model.save(fname, sep_limit=0)

        # test loading the large model arrays with mmap
        model2 = self.class_.load(fname, mmap='r')
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(isinstance(model2.expElogbeta, np.memmap))
        self.assertTrue(np.allclose(model.expElogbeta, model2.expElogbeta))
        tstvec = []
        self.assertTrue(np.allclose(model[tstvec], model2[tstvec]))  # try projecting an empty vector

    def testLargeMmapCompressed(self):
        """mmap cannot be combined with compressed storage — must raise IOError."""
        fname = get_tmpfile('gensim_models_lda.tst.gz')
        model = self.model

        # simulate storing large arrays separately
        model.save(fname, sep_limit=0)

        # test loading the large model arrays with mmap
        self.assertRaises(IOError, self.class_.load, fname, mmap='r')

    def testRandomStateBackwardCompatibility(self):
        """A pre-0.13.2 model (no random_state attribute) must still load, run, and re-save."""
        # load a model saved using a pre-0.13.2 version of Gensim
        pre_0_13_2_fname = datapath('pre_0_13_2_model')
        model_pre_0_13_2 = self.class_.load(pre_0_13_2_fname)

        # set `num_topics` less than `model_pre_0_13_2.num_topics` so that `model_pre_0_13_2.random_state` is used
        model_topics = model_pre_0_13_2.print_topics(num_topics=2, num_words=3)

        for i in model_topics:
            self.assertTrue(isinstance(i[0], int))
            self.assertTrue(isinstance(i[1], six.string_types))

        # save back the loaded model using a post-0.13.2 version of Gensim
        post_0_13_2_fname = get_tmpfile('gensim_models_lda_post_0_13_2_model.tst')
        model_pre_0_13_2.save(post_0_13_2_fname)

        # load a model saved using a post-0.13.2 version of Gensim
        model_post_0_13_2 = self.class_.load(post_0_13_2_fname)
        model_topics_new = model_post_0_13_2.print_topics(num_topics=2, num_words=3)

        for i in model_topics_new:
            self.assertTrue(isinstance(i[0], int))
            self.assertTrue(isinstance(i[1], six.string_types))

    def testDtypeBackwardCompatibility(self):
        """A model saved by gensim 3.0.1 must load and reproduce known inference output."""
        lda_3_0_1_fname = datapath('lda_3_0_1_model')
        test_doc = [(0, 1), (1, 1), (2, 1)]
        expected_topics = [(0, 0.87005886977475178), (1, 0.12994113022524822)]

        # save model to use in test
        # self.model.save(lda_3_0_1_fname)

        # load a model saved using a 3.0.1 version of Gensim
        model = self.class_.load(lda_3_0_1_fname)

        # and test it on a predefined document
        topics = model[test_doc]
        self.assertTrue(np.allclose(expected_topics, topics))

# endclass TestLdaModel
class TestLdaMulticore(TestLdaModel):
    """Re-runs the entire TestLdaModel suite against LdaMulticore."""

    def setUp(self):
        self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
        self.class_ = ldamulticore.LdaMulticore
        self.model = self.class_(corpus, id2word=dictionary, num_topics=2, passes=100)

    # override LdaModel because multicore does not allow alpha=auto
    def testAlphaAuto(self):
        self.assertRaises(RuntimeError, self.class_, alpha='auto')

# endclass TestLdaMulticore
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| 20,474 | 38.83463 | 114 | py |
poincare_glove | poincare_glove-master/gensim/test/test_api.py | import logging
import unittest
import os
import gensim.downloader as api
from gensim.downloader import base_dir
import shutil
import numpy as np
@unittest.skipIf(
    os.environ.get("SKIP_NETWORK_TESTS", False) == "1",
    "Skip network-related tests (probably SSL problems on this CI/OS)"
)
class TestApi(unittest.TestCase):
    """Integration tests for gensim.downloader.

    These download tiny fixture datasets/models over the network and share the
    on-disk `base_dir`, which each test wipes before/after use — so statement
    order within each test matters.
    """

    def test_base_dir_creation(self):
        """_create_base_dir must create the download directory when absent."""
        if os.path.isdir(base_dir):
            shutil.rmtree(base_dir)
        api._create_base_dir()
        self.assertTrue(os.path.isdir(base_dir))
        os.rmdir(base_dir)

    def test_load_dataset(self):
        """load(return_path=True) yields the expected path; load() yields an iterable dataset."""
        dataset_path = os.path.join(base_dir, "__testing_matrix-synopsis", "__testing_matrix-synopsis.gz")

        # Not exists, pre-downloading
        if os.path.isdir(base_dir):
            shutil.rmtree(base_dir)
        self.assertEqual(api.load("__testing_matrix-synopsis", return_path=True), dataset_path)
        shutil.rmtree(base_dir)
        self.assertEqual(len(list(api.load("__testing_matrix-synopsis"))), 1)
        shutil.rmtree(base_dir)

    def test_load_model(self):
        """A downloaded word2vec model returns the known reference vector for 'dead'."""
        if os.path.isdir(base_dir):
            shutil.rmtree(base_dir)
        # reference embedding for the word 'dead' in the fixture model
        vector_dead = np.array([
            0.17403787, -0.10167074, -0.00950371, -0.10367849, -0.14034484,
            -0.08751217, 0.10030612, 0.07677923, -0.32563496, 0.01929072,
            0.20521086, -0.1617067, 0.00475458, 0.21956187, -0.08783089,
            -0.05937332, 0.26528183, -0.06771874, -0.12369668, 0.12020949,
            0.28731, 0.36735833, 0.28051138, -0.10407482, 0.2496888,
            -0.19372769, -0.28719661, 0.11989869, -0.00393865, -0.2431484,
            0.02725661, -0.20421691, 0.0328669, -0.26947051, -0.08068217,
            -0.10245913, 0.1170633, 0.16583319, 0.1183883, -0.11217165,
            0.1261425, -0.0319365, -0.15787181, 0.03753783, 0.14748634,
            0.00414471, -0.02296237, 0.18336892, -0.23840059, 0.17924534
        ])
        dataset_path = os.path.join(
            base_dir, "__testing_word2vec-matrix-synopsis", "__testing_word2vec-matrix-synopsis.gz"
        )
        model = api.load("__testing_word2vec-matrix-synopsis")
        vector_dead_calc = model["dead"]
        self.assertTrue(np.allclose(vector_dead, vector_dead_calc))
        shutil.rmtree(base_dir)
        self.assertEqual(api.load("__testing_word2vec-matrix-synopsis", return_path=True), dataset_path)
        shutil.rmtree(base_dir)

    def test_multipart_load(self):
        """Datasets split into multiple parts download and load as one dataset."""
        dataset_path = os.path.join(
            base_dir, '__testing_multipart-matrix-synopsis', '__testing_multipart-matrix-synopsis.gz'
        )
        if os.path.isdir(base_dir):
            shutil.rmtree(base_dir)
        self.assertEqual(dataset_path, api.load("__testing_multipart-matrix-synopsis", return_path=True))
        shutil.rmtree(base_dir)
        dataset = api.load("__testing_multipart-matrix-synopsis")
        self.assertEqual(len(list(dataset)), 1)

    def test_info(self):
        """info('name') returns that dataset's metadata; info() lists models and corpora."""
        data = api.info("text8")
        self.assertEqual(data["parts"], 1)
        self.assertEqual(data["file_name"], 'text8.gz')
        data = api.info()
        self.assertEqual(sorted(data.keys()), sorted(['models', 'corpora']))
        self.assertTrue(len(data['models']))
        self.assertTrue(len(data['corpora']))
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
unittest.main()
| 3,371 | 41.15 | 106 | py |
poincare_glove | poincare_glove-master/gensim/test/test_hdpmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
from gensim.corpora import mmcorpus, Dictionary
from gensim.models import hdpmodel
from gensim.test import basetmtests
from gensim.test.utils import datapath, common_texts
import numpy as np
dictionary = Dictionary(common_texts)
corpus = [dictionary.doc2bow(text) for text in common_texts]
class TestHdpModel(unittest.TestCase, basetmtests.TestBaseTopicModel):
    """Tests for the hierarchical Dirichlet process model."""

    def setUp(self):
        self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
        self.class_ = hdpmodel.HdpModel
        # NOTE(review): np.random.seed(0) returns None, so random_state is
        # effectively None while the *global* numpy RNG is seeded as a side
        # effect. The expected values in testTopicValues are calibrated to this
        # quirk — confirm before "fixing" it to random_state=0.
        self.model = self.class_(corpus, id2word=dictionary, random_state=np.random.seed(0))

    def testTopicValues(self):
        """
        Check show topics method
        """
        results = self.model.show_topics()[0]
        # splitting "0.264*trees + ..." on '*' leaves a trailing space on the word
        expected_prob, expected_word = '0.264', 'trees '
        prob, word = results[1].split('+')[0].split('*')
        self.assertEqual(results[0], 0)
        self.assertEqual(prob, expected_prob)
        self.assertEqual(word, expected_word)

        return

    def testLDAmodel(self):
        """
        Create ldamodel object, and check if the corresponding alphas are equal.
        """
        ldam = self.model.suggested_lda_model()
        self.assertEqual(ldam.alpha[0], self.model.lda_alpha[0])
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| 1,679 | 29 | 96 | py |
poincare_glove | poincare_glove-master/gensim/test/test_summarization.py | #!/usr/bin/env python
# encoding: utf-8
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated test to reproduce the results of Mihalcea and Tarau (2004).
Mihalcea and Tarau (2004) introduces the TextRank summarization algorithm.
As a validation of the gensim implementation we reproduced its results
in this test.
"""
import os.path
import logging
import unittest
from gensim import utils
from gensim.corpora import Dictionary
from gensim.summarization import summarize, summarize_corpus, keywords, mz_keywords
class TestSummarizationTest(unittest.TestCase):
    """TextRank summarization tests reproducing Mihalcea & Tarau (2004) results."""

    def _get_text_from_test_data(self, file):
        """Read and return the contents of *file* from the test_data directory."""
        pre_path = os.path.join(os.path.dirname(__file__), 'test_data')
        with utils.smart_open(os.path.join(pre_path, file), mode="r") as f:
            return f.read()

    def test_text_summarization(self):
        """summarize() must reproduce the reference summary for the Mihalcea-Tarau text."""
        text = self._get_text_from_test_data("mihalcea_tarau.txt")

        # Makes a summary of the text.
        generated_summary = summarize(text)

        # To be compared to the method reference.
        summary = self._get_text_from_test_data("mihalcea_tarau.summ.txt")

        self.assertEqual(generated_summary, summary)
def test_corpus_summarization(self):
text = self._get_text_from_test_data("mihalcea_tarau.txt")
# Generate the corpus.
sentences = text.split("\n")
tokens = [sentence.split() for sentence in sentences]
dictionary = Dictionary(tokens)
corpus = [dictionary.doc2bow(sentence_tokens) for sentence_tokens in tokens]
# Extract the most important documents.
selected_documents = summarize_corpus(corpus)
# They are compared to the method reference.
summary = self._get_text_from_test_data("mihalcea_tarau.summ.txt")
summary = summary.split('\n')
# Each sentence in the document selection has to be in the model summary.
for doc_number, document in enumerate(selected_documents):
# Retrieves all words from the document.
words = [dictionary[token_id] for (token_id, count) in document]
# Asserts that all of them are in a sentence from the model reference.
self.assertTrue(any(all(word in sentence for word in words)) for sentence in summary)
    def test_summary_from_unrelated_sentences(self):
        # Tests that the summarization of a text with unrelated sentences is not empty string.
        text = self._get_text_from_test_data("testsummarization_unrelated.txt")
        generated_summary = summarize(text)
        self.assertNotEqual(generated_summary, u"")

    def test_text_summarization_on_short_input_text_is_empty_string(self):
        # NOTE(review): despite the method name, this asserts the summary is
        # NOT the empty string — the name and the assertion disagree; confirm
        # which is intended before renaming.
        text = self._get_text_from_test_data("testsummarization_unrelated.txt")

        # Keeps the first 8 sentences to make the text shorter.
        text = "\n".join(text.split('\n')[:8])

        self.assertNotEqual(summarize(text), u"")

    def test_text_summarization_raises_exception_on_single_input_sentence(self):
        """A single-sentence input cannot be summarized and must raise ValueError."""
        text = self._get_text_from_test_data("testsummarization_unrelated.txt")

        # Keeps the first sentence only.
        text = text.split('\n')[0]

        self.assertRaises(ValueError, summarize, text)

    def test_corpus_summarization_is_not_empty_list_on_short_input_text(self):
        """Corpus summarization of a short (8-sentence) text still yields documents."""
        text = self._get_text_from_test_data("testsummarization_unrelated.txt")

        # Keeps the first 8 sentences to make the text shorter.
        sentences = text.split('\n')[:8]

        # Generate the corpus.
        tokens = [sentence.split() for sentence in sentences]
        dictionary = Dictionary(tokens)
        corpus = [dictionary.doc2bow(sentence_tokens) for sentence_tokens in tokens]

        self.assertNotEqual(summarize_corpus(corpus), [])

    def test_empty_text_summarization_is_empty_string(self):
        self.assertEqual(summarize(""), u"")

    def test_empty_text_summarization_with_split_is_empty_list(self):
        self.assertEqual(summarize("", split=True), [])

    def test_empty_corpus_summarization_is_empty_list(self):
        self.assertEqual(summarize_corpus([]), [])

    def test_corpus_summarization_ratio(self):
        """The `ratio` parameter controls how many documents are selected."""
        text = self._get_text_from_test_data("mihalcea_tarau.txt")

        # Generate the corpus.
        sentences = text.split('\n')
        tokens = [sentence.split() for sentence in sentences]
        dictionary = Dictionary(tokens)
        corpus = [dictionary.doc2bow(sentence_tokens) for sentence_tokens in tokens]

        # Makes summaries of the text using different ratio parameters.
        for x in range(1, 10):
            ratio = x / float(10)
            selected_docs = summarize_corpus(corpus, ratio=ratio)
            expected_summary_length = int(len(corpus) * ratio)

            self.assertEqual(len(selected_docs), expected_summary_length)

    def test_repeated_keywords(self):
        """keywords() must cope with text containing heavily repeated words."""
        text = self._get_text_from_test_data("testrepeatedkeywords.txt")

        kwds = keywords(text)
        self.assertTrue(len(kwds.splitlines()))

        kwds_u = keywords(utils.to_unicode(text))
        self.assertTrue(len(kwds_u.splitlines()))

        kwds_lst = keywords(text, split=True)
        self.assertTrue(len(kwds_lst))

    def test_keywords_runs(self):
        """keywords() runs on str, unicode, and with split=True."""
        text = self._get_text_from_test_data("mihalcea_tarau.txt")

        kwds = keywords(text)
        self.assertTrue(len(kwds.splitlines()))

        kwds_u = keywords(utils.to_unicode(text))
        self.assertTrue(len(kwds_u.splitlines()))

        kwds_lst = keywords(text, split=True)
        self.assertTrue(len(kwds_lst))

    def test_mz_keywords(self):
        """Montemurro-Zanette keyword extraction: fixed output ends plus auto thresholding."""
        pre_path = os.path.join(os.path.dirname(__file__), 'test_data')

        with utils.smart_open(os.path.join(pre_path, "head500.noblanks.cor")) as f:
            text = utils.to_unicode(f.read())
        text = u' '.join(text.split()[:10240])
        kwds = mz_keywords(text)
        self.assertTrue(kwds.startswith('autism'))
        self.assertTrue(kwds.endswith('uk'))
        self.assertTrue(len(kwds.splitlines()))

        kwds_lst = mz_keywords(text, split=True)
        self.assertTrue(len(kwds_lst))
        # Automatic thresholding selects words with n_blocks / n_blocks+1
        # bits of entropy. For this text, n_blocks=10
        n_blocks = 10.
        kwds_auto = mz_keywords(text, scores=True, weighted=False, threshold='auto')
        self.assertTrue(kwds_auto[-1][1] > (n_blocks / (n_blocks + 1.)))

    def test_low_distinct_words_corpus_summarization_is_empty_list(self):
        """Too few distinct words => no summary documents can be selected."""
        text = self._get_text_from_test_data("testlowdistinctwords.txt")

        # Generate the corpus.
        sentences = text.split("\n")
        tokens = [sentence.split() for sentence in sentences]
        dictionary = Dictionary(tokens)
        corpus = [dictionary.doc2bow(sentence_tokens) for sentence_tokens in tokens]

        self.assertEqual(summarize_corpus(corpus), [])

    def test_low_distinct_words_summarization_is_empty_string(self):
        text = self._get_text_from_test_data("testlowdistinctwords.txt")
        self.assertEqual(summarize(text), u"")

    def test_low_distinct_words_summarization_with_split_is_empty_list(self):
        text = self._get_text_from_test_data("testlowdistinctwords.txt")
        self.assertEqual(summarize(text, split=True), [])
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| 7,447 | 37.590674 | 97 | py |
poincare_glove | poincare_glove-master/gensim/test/test_datatype.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking various matutils functions.
"""
import logging
import unittest
import numpy as np
from gensim.test.utils import datapath
from gensim.models.keyedvectors import KeyedVectors
class TestDataType(unittest.TestCase):
    """Verify that KeyedVectors stores vectors in the datatype requested at load time."""

    def load_model(self, datatype):
        """Load the high-precision word2vec text fixture using *datatype* for storage."""
        source = datapath('high_precision.kv.txt')
        return KeyedVectors.load_word2vec_format(source, binary=False,
                                                 datatype=datatype)

    def test_high_precision(self):
        """float64 retains all digits present in the text file."""
        vectors = self.load_model(np.float64)
        component = vectors['horse.n.01'][0]
        self.assertAlmostEqual(component, -0.0008546282343595379)
        self.assertEqual(component.dtype, np.float64)

    def test_medium_precision(self):
        """float32 rounds the stored value to single precision."""
        vectors = self.load_model(np.float32)
        component = vectors['horse.n.01'][0]
        self.assertAlmostEqual(component, -0.00085462822)
        self.assertEqual(component.dtype, np.float32)

    def test_low_precision(self):
        """float16 rounds the stored value to half precision."""
        vectors = self.load_model(np.float16)
        component = vectors['horse.n.01'][0]
        self.assertAlmostEqual(component, -0.00085449)
        self.assertEqual(component.dtype, np.float16)

    def test_type_conversion(self):
        """Loading as float16, saving binary, then re-loading as float64 keeps the values."""
        text_path = datapath('high_precision.kv.txt')
        binary_path = datapath('high_precision.kv.bin')
        half_model = KeyedVectors.load_word2vec_format(text_path, datatype=np.float16)
        half_model.save_word2vec_format(binary_path, binary=True)
        double_model = KeyedVectors.load_word2vec_format(binary_path, datatype=np.float64, binary=True)
        self.assertAlmostEqual(half_model["horse.n.01"][0], np.float16(double_model["horse.n.01"][0]))
        self.assertEqual(half_model["horse.n.01"][0].dtype, np.float16)
        self.assertEqual(double_model["horse.n.01"][0].dtype, np.float64)
if __name__ == '__main__':
logging.root.setLevel(logging.WARNING)
unittest.main()
| 1,944 | 34.363636 | 97 | py |
poincare_glove | poincare_glove-master/gensim/test/test_rpmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import numpy as np
from gensim.corpora.mmcorpus import MmCorpus
from gensim.models import rpmodel
from gensim import matutils
from gensim.test.utils import datapath, get_tmpfile
class TestRpModel(unittest.TestCase):
    """Tests for the random-projection model: transform plus persistence."""

    def setUp(self):
        self.corpus = MmCorpus(datapath('testcorpus.mm'))

    def testTransform(self):
        """Projecting the first corpus document gives the known 2-d vector (up to sign)."""
        # HACK; set fixed seed so that we always get the same random matrix (and can compare against expected results)
        np.random.seed(13)
        model = rpmodel.RpModel(self.corpus, num_topics=2)

        # transform one document
        first_doc = list(self.corpus)[0]
        projected = model[first_doc]
        dense = matutils.sparse2full(projected, 2)  # convert to dense vector, for easier equality tests

        reference = np.array([-0.70710677, 0.70710677])
        self.assertTrue(np.allclose(dense, reference))  # transformed entries must be equal up to sign

    def testPersistence(self):
        """A save/load round trip preserves num_topics and the projection matrix."""
        tmp_path = get_tmpfile('gensim_models.tst')
        original = rpmodel.RpModel(self.corpus, num_topics=2)
        original.save(tmp_path)
        restored = rpmodel.RpModel.load(tmp_path)
        self.assertEqual(original.num_topics, restored.num_topics)
        self.assertTrue(np.allclose(original.projection, restored.projection))
        empty_doc = []
        # try projecting an empty vector
        self.assertTrue(np.allclose(original[empty_doc], restored[empty_doc]))

    def testPersistenceCompressed(self):
        """Same round trip through a gzip-compressed file, with mmap disabled."""
        tmp_path = get_tmpfile('gensim_models.tst.gz')
        original = rpmodel.RpModel(self.corpus, num_topics=2)
        original.save(tmp_path)
        restored = rpmodel.RpModel.load(tmp_path, mmap=None)
        self.assertEqual(original.num_topics, restored.num_topics)
        self.assertTrue(np.allclose(original.projection, restored.projection))
        empty_doc = []
        # try projecting an empty vector
        self.assertTrue(np.allclose(original[empty_doc], restored[empty_doc]))
# Allow running this test module directly as a script with debug logging.
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 2,349 | 35.153846 | 118 | py |
poincare_glove | poincare_glove-master/gensim/test/test_keywords.py | #!/usr/bin/env python
# encoding: utf-8
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated test to reproduce the results of Mihalcea and Tarau (2004).
Mihalcea and Tarau (2004) introduces the TextRank summarization algorithm.
As a validation of the gensim implementation we reproduced its results
in this test.
"""
import os.path
import logging
import unittest
from gensim import utils
from gensim.summarization import keywords
class TestKeywordsTest(unittest.TestCase):
    """Validate gensim's TextRank keyword extraction against the reference
    results of Mihalcea and Tarau (2004)."""

    def _read_fixture(self, name):
        # Return the contents of a fixture file from the test_data directory.
        path = os.path.join(os.path.dirname(__file__), 'test_data', name)
        with utils.smart_open(path, mode="r") as f:
            return f.read()

    def test_text_keywords(self):
        """Extracted keywords must match the reference keyword list."""
        text = self._read_fixture("mihalcea_tarau.txt")
        extracted = keywords(text, split=True)
        reference = self._read_fixture("mihalcea_tarau.kw.txt").strip().split("\n")
        self.assertEqual({str(x) for x in extracted}, {str(x) for x in reference})

    def test_text_keywords_words(self):
        """Requesting a bounded number of keywords yields the expected count."""
        text = self._read_fixture("mihalcea_tarau.txt")
        # For this fixture, asking for 15 words is expected to return 16 entries.
        extracted = keywords(text, words=15, split=True)
        self.assertEqual(len(extracted), 16)

    def test_text_keywords_pos(self):
        """Extraction restricted to NN/VB/JJ POS tags matches the reference."""
        text = self._read_fixture("mihalcea_tarau.txt")
        extracted = keywords(text, pos_filter=['NN', 'VB', 'JJ'], ratio=0.3, split=True)
        reference = self._read_fixture("mihalcea_tarau.kwpos.txt").strip().split("\n")
        self.assertEqual({str(x) for x in extracted}, {str(x) for x in reference})

    def test_text_summarization_raises_exception_on_short_input_text(self):
        """Keyword extraction on a very short input still returns a result."""
        text = self._read_fixture("testsummarization_unrelated.txt")
        # Keep only the first 8 sentences to make the text shorter.
        text = "\n".join(text.split('\n')[:8])
        self.assertTrue(keywords(text) is not None)

    def test_keywords_ratio(self):
        """The ratio parameter scales the number of keywords roughly linearly."""
        text = self._read_fixture("mihalcea_tarau.txt")
        # ratio=0.2 should yield about twice as many keywords as ratio=0.1.
        # These particular values were chosen to avoid numerical instabilities
        # when several keywords have almost the same score.
        at_ten_percent = keywords(text, ratio=0.1, split=True)
        at_twenty_percent = keywords(text, ratio=0.2, split=True)
        self.assertAlmostEqual(
            float(len(at_twenty_percent)) / len(at_ten_percent), float(21) / 12, places=1
        )

    def test_text_keywords_with_small_graph(self):
        """Regression test: a text yielding a tiny (2x2) graph must not crash."""
        text = 'IT: Utilities A look at five utilities to make your PCs more, efficient, effective, and efficacious'
        extracted = keywords(text, words=1, split=True)
        self.assertTrue(len(extracted))
# Allow running this test module directly as a script with debug logging.
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 3,889 | 37.137255 | 116 | py |
poincare_glove | poincare_glove-master/gensim/test/test_tfidfmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import numpy as np
from gensim.corpora.mmcorpus import MmCorpus
from gensim.models import tfidfmodel
from gensim.test.utils import datapath, get_tmpfile, common_dictionary, common_corpus
from gensim.corpora import Dictionary
texts = [
['complier', 'system', 'computer'],
['eulerian', 'node', 'cycle', 'graph', 'tree', 'path'],
['graph', 'flow', 'network', 'graph'],
['loading', 'computer', 'system'],
['user', 'server', 'system'],
['tree', 'hamiltonian'],
['graph', 'trees'],
['computer', 'kernel', 'malfunction', 'computer'],
['server', 'system', 'computer'],
]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
class TestTfidfModel(unittest.TestCase):
    """Tests for gensim's TF-IDF model: transformation, construction,
    persistence, and consistency of the SMART-IRS weighting variants."""

    def setUp(self):
        # Corpus the models under test are trained on.
        self.corpus = MmCorpus(datapath('testcorpus.mm'))

    def testTransform(self):
        """A normalized model maps the first document to the expected weights."""
        # create the transformation model
        model = tfidfmodel.TfidfModel(self.corpus, normalize=True)
        # transform one document
        doc = list(self.corpus)[0]
        transformed = model[doc]
        expected = [(0, 0.57735026918962573), (1, 0.57735026918962573), (2, 0.57735026918962573)]
        self.assertTrue(np.allclose(transformed, expected))

    def testInit(self):
        """Constructing from a corpus and from a dictionary yields identical idfs."""
        # create the transformation model by analyzing a corpus
        # uses the global `corpus`!
        model1 = tfidfmodel.TfidfModel(common_corpus)
        dfs = common_dictionary.dfs
        # make sure the dfs<->idfs transformation works
        self.assertEqual(model1.dfs, dfs)
        self.assertEqual(model1.idfs, tfidfmodel.precompute_idfs(model1.wglobal, dfs, len(common_corpus)))
        # create the transformation model by directly supplying a term->docfreq
        # mapping from the global var `dictionary`.
        model2 = tfidfmodel.TfidfModel(dictionary=common_dictionary)
        self.assertEqual(model1.idfs, model2.idfs)

    def testPersistence(self):
        """Saving and loading preserves idfs and document transformations."""
        # Test persistence without using `smartirs`
        fname = get_tmpfile('gensim_models.tst')
        model = tfidfmodel.TfidfModel(self.corpus, normalize=True)
        model.save(fname)
        model2 = tfidfmodel.TfidfModel.load(fname)
        self.assertTrue(model.idfs == model2.idfs)
        tstvec = [corpus[1], corpus[2]]
        self.assertTrue(np.allclose(model[tstvec[0]], model2[tstvec[0]]))
        self.assertTrue(np.allclose(model[tstvec[1]], model2[tstvec[1]]))
        self.assertTrue(np.allclose(model[[]], model2[[]]))  # try projecting an empty vector
        # Test persistence with using `smartirs`
        fname = get_tmpfile('gensim_models_smartirs.tst')
        model = tfidfmodel.TfidfModel(self.corpus, smartirs="ntc")
        model.save(fname)
        model2 = tfidfmodel.TfidfModel.load(fname)
        self.assertTrue(model.idfs == model2.idfs)
        tstvec = [corpus[1], corpus[2]]
        self.assertTrue(np.allclose(model[tstvec[0]], model2[tstvec[0]]))
        self.assertTrue(np.allclose(model[tstvec[1]], model2[tstvec[1]]))
        self.assertTrue(np.allclose(model[[]], model2[[]]))  # try projecting an empty vector
        # Test persistence between Gensim v3.2.0 and current model.
        model3 = tfidfmodel.TfidfModel(self.corpus, smartirs="ntc")
        model4 = tfidfmodel.TfidfModel.load(datapath('tfidf_model.tst'))
        self.assertTrue(model3.idfs == model4.idfs)
        tstvec = [corpus[1], corpus[2]]
        self.assertTrue(np.allclose(model3[tstvec[0]], model4[tstvec[0]]))
        self.assertTrue(np.allclose(model3[tstvec[1]], model4[tstvec[1]]))
        self.assertTrue(np.allclose(model3[[]], model4[[]]))  # try projecting an empty vector

    def testPersistenceCompressed(self):
        """Same checks as testPersistence, but through compressed (.gz/.bz2) files."""
        # Test persistence without using `smartirs`
        fname = get_tmpfile('gensim_models.tst.gz')
        model = tfidfmodel.TfidfModel(self.corpus, normalize=True)
        model.save(fname)
        model2 = tfidfmodel.TfidfModel.load(fname, mmap=None)
        self.assertTrue(model.idfs == model2.idfs)
        tstvec = [corpus[1], corpus[2]]
        self.assertTrue(np.allclose(model[tstvec[0]], model2[tstvec[0]]))
        self.assertTrue(np.allclose(model[tstvec[1]], model2[tstvec[1]]))
        self.assertTrue(np.allclose(model[[]], model2[[]]))  # try projecting an empty vector
        # Test persistence with using `smartirs`
        fname = get_tmpfile('gensim_models_smartirs.tst.gz')
        model = tfidfmodel.TfidfModel(self.corpus, smartirs="ntc")
        model.save(fname)
        model2 = tfidfmodel.TfidfModel.load(fname, mmap=None)
        self.assertTrue(model.idfs == model2.idfs)
        tstvec = [corpus[1], corpus[2]]
        self.assertTrue(np.allclose(model[tstvec[0]], model2[tstvec[0]]))
        self.assertTrue(np.allclose(model[tstvec[1]], model2[tstvec[1]]))
        self.assertTrue(np.allclose(model[[]], model2[[]]))  # try projecting an empty vector
        # Test persistence between Gensim v3.2.0 and current compressed model.
        model3 = tfidfmodel.TfidfModel(self.corpus, smartirs="ntc")
        model4 = tfidfmodel.TfidfModel.load(datapath('tfidf_model.tst.bz2'))
        self.assertTrue(model3.idfs == model4.idfs)
        tstvec = [corpus[1], corpus[2]]
        self.assertTrue(np.allclose(model3[tstvec[0]], model4[tstvec[0]]))
        self.assertTrue(np.allclose(model3[tstvec[1]], model4[tstvec[1]]))
        self.assertTrue(np.allclose(model3[[]], model4[[]]))  # try projecting an empty vector

    def testConsistency(self):
        """Each SMART-IRS scheme produces the expected document weights.

        BUG FIX: this method was previously named ``TestConsistency``.
        unittest only discovers methods whose names start with ``test``
        (lowercase), so the method silently never ran; it has been renamed
        so that it is picked up by test discovery.
        """
        # NOTE(review): `docs` comes from the module-level `corpus` fixture,
        # while the models below are trained on `self.corpus` (the MM file).
        # This mix is preserved from the original code — confirm it is intended.
        docs = [corpus[1], corpus[2]]
        # Test if `ntc` yields the default docs.
        model = tfidfmodel.TfidfModel(self.corpus, smartirs='ntc')
        transformed_docs = [model[docs[0]], model[docs[1]]]
        model = tfidfmodel.TfidfModel(self.corpus)
        expected_docs = [model[docs[0]], model[docs[1]]]
        self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
        self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))

        # Testing all the variations of `wlocal`
        # nnn: raw term frequencies, no idf, no normalization
        model = tfidfmodel.TfidfModel(self.corpus, smartirs='nnn')
        transformed_docs = [model[docs[0]], model[docs[1]]]
        expected_docs = [
            [(3, 2), (4, 2), (5, 3), (6, 2), (7, 3), (8, 2)],
            [(5, 6), (9, 3), (10, 3)],
        ]
        self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
        self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
        # lnn: logarithmic term frequency
        model = tfidfmodel.TfidfModel(self.corpus, smartirs='lnn')
        transformed_docs = [model[docs[0]], model[docs[1]]]
        expected_docs = [
            [(3, 2.0), (4, 2.0), (5, 3.0), (6, 2.0), (7, 3.0), (8, 2.0)],
            [(5, 6.0), (9, 3.0), (10, 3.0)],
        ]
        self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
        self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
        # ann: augmented term frequency
        model = tfidfmodel.TfidfModel(self.corpus, smartirs='ann')
        transformed_docs = [model[docs[0]], model[docs[1]]]
        expected_docs = [
            [(3, 2.0), (4, 2.0), (5, 3.0), (6, 2.0), (7, 3.0), (8, 2.0)],
            [(5, 3.0), (9, 2.25), (10, 2.25)],
        ]
        self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
        self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
        # bnn: boolean term frequency
        model = tfidfmodel.TfidfModel(self.corpus, smartirs='bnn')
        transformed_docs = [model[docs[0]], model[docs[1]]]
        expected_docs = [
            [(3, 2), (4, 2), (5, 3), (6, 2), (7, 3), (8, 2)],
            [(5, 3), (9, 3), (10, 3)],
        ]
        self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
        self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
        # Lnn: log-average term frequency
        model = tfidfmodel.TfidfModel(self.corpus, smartirs='Lnn')
        transformed_docs = [model[docs[0]], model[docs[1]]]
        expected_docs = [
            [(3, 1.4635792826230198), (4, 1.4635792826230198), (5, 2.19536892393453),
             (6, 1.4635792826230198), (7, 2.19536892393453), (8, 1.4635792826230198)],
            [(5, 3.627141918134611), (9, 1.8135709590673055), (10, 1.8135709590673055)],
        ]
        self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
        self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))

        # Testing all the variations of `glocal`
        # ntn: plain idf
        model = tfidfmodel.TfidfModel(self.corpus, smartirs='ntn')
        transformed_docs = [model[docs[0]], model[docs[1]]]
        expected_docs = [
            [(3, 2.1699250014423126), (4, 2.1699250014423126), (5, 1.5849625007211563),
             (6, 2.1699250014423126), (7, 1.5849625007211563), (8, 2.1699250014423126)],
            [(5, 3.1699250014423126), (9, 1.5849625007211563), (10, 1.5849625007211563)],
        ]
        self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
        self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
        # npn: probabilistic idf
        model = tfidfmodel.TfidfModel(self.corpus, smartirs='npn')
        transformed_docs = [model[docs[0]], model[docs[1]]]
        expected_docs = [
            [(3, 1.8073549220576042), (4, 1.8073549220576042), (5, 1.0),
             (6, 1.8073549220576042), (7, 1.0), (8, 1.8073549220576042)],
            [(5, 2.0), (9, 1.0), (10, 1.0)],
        ]
        self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
        self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))

        # Testing all the variations of `normalize`
        # nnc: cosine normalization
        model = tfidfmodel.TfidfModel(self.corpus, smartirs='nnc')
        transformed_docs = [model[docs[0]], model[docs[1]]]
        expected_docs = [
            [(3, 0.34299717028501764), (4, 0.34299717028501764), (5, 0.51449575542752646),
             (6, 0.34299717028501764), (7, 0.51449575542752646), (8, 0.34299717028501764)],
            [(5, 0.81649658092772603), (9, 0.40824829046386302), (10, 0.40824829046386302)],
        ]
        self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
        self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
        # Swapping wlocal and wglobal roles must produce identical output,
        # since both end up applying x -> x and x -> x*x to the same values.
        model = tfidfmodel.TfidfModel(self.corpus, wlocal=lambda x: x, wglobal=lambda x, y: x * x, smartirs='nnc')
        transformed_docs = [model[docs[0]], model[docs[1]]]
        model = tfidfmodel.TfidfModel(self.corpus, wlocal=lambda x: x * x, wglobal=lambda x, y: x, smartirs='nnc')
        expected_docs = [model[docs[0]], model[docs[1]]]
        self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
        self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# Allow running this test module directly as a script with debug logging.
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 12,442 | 41.906897 | 114 | py |
poincare_glove | poincare_glove-master/gensim/test/test_sklearn_api.py | import unittest
import numpy
import codecs
import pickle
from scipy import sparse
try:
from sklearn.pipeline import Pipeline
from sklearn import linear_model, cluster
from sklearn.exceptions import NotFittedError
except ImportError:
raise unittest.SkipTest("Test requires scikit-learn to be installed, which is not available")
from gensim.sklearn_api.rpmodel import RpTransformer
from gensim.sklearn_api.ldamodel import LdaTransformer
from gensim.sklearn_api.lsimodel import LsiTransformer
from gensim.sklearn_api.ldaseqmodel import LdaSeqTransformer
from gensim.sklearn_api.w2vmodel import W2VTransformer
from gensim.sklearn_api.atmodel import AuthorTopicTransformer
from gensim.sklearn_api.d2vmodel import D2VTransformer
from gensim.sklearn_api.text2bow import Text2BowTransformer
from gensim.sklearn_api.tfidf import TfIdfTransformer
from gensim.sklearn_api.hdp import HdpTransformer
from gensim.sklearn_api.phrases import PhrasesTransformer
from gensim.corpora import mmcorpus, Dictionary
from gensim import matutils, models
from gensim.test.utils import datapath, common_texts
texts = [
['complier', 'system', 'computer'],
['eulerian', 'node', 'cycle', 'graph', 'tree', 'path'],
['graph', 'flow', 'network', 'graph'],
['loading', 'computer', 'system'],
['user', 'server', 'system'],
['tree', 'hamiltonian'],
['graph', 'trees'],
['computer', 'kernel', 'malfunction', 'computer'],
['server', 'system', 'computer'],
]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
author2doc = {
'john': [0, 1, 2, 3, 4, 5, 6],
'jane': [2, 3, 4, 5, 6, 7, 8],
'jack': [0, 2, 4, 6, 8],
'jill': [1, 3, 5, 7]
}
texts_new = texts[0:3]
author2doc_new = {
'jill': [0],
'bob': [0, 1],
'sally': [1, 2]
}
dictionary_new = Dictionary(texts_new)
corpus_new = [dictionary_new.doc2bow(text) for text in texts_new]
texts_ldaseq = [
[
u'senior', u'studios', u'studios', u'studios', u'creators', u'award', u'mobile', u'currently',
u'challenges', u'senior', u'summary', u'senior', u'motivated', u'creative', u'senior'
],
[
u'performs', u'engineering', u'tasks', u'infrastructure', u'focusing', u'primarily', u'programming',
u'interaction', u'designers', u'engineers', u'leadership', u'teams', u'teams', u'crews', u'responsibilities',
u'engineering', u'quality', u'functional', u'functional', u'teams', u'organizing', u'prioritizing',
u'technical', u'decisions', u'engineering', u'participates', u'participates', u'reviews', u'participates',
u'hiring', u'conducting', u'interviews'
],
[
u'feedback', u'departments', u'define', u'focusing', u'engineering', u'teams', u'crews', u'facilitate',
u'engineering', u'departments', u'deadlines', u'milestones', u'typically', u'spends', u'designing',
u'developing', u'updating', u'bugs', u'mentoring', u'engineers', u'define', u'schedules', u'milestones',
u'participating'
],
[
u'reviews', u'interviews', u'sized', u'teams', u'interacts', u'disciplines', u'knowledge', u'skills',
u'knowledge', u'knowledge', u'xcode', u'scripting', u'debugging', u'skills', u'skills', u'knowledge',
u'disciplines', u'animation', u'networking', u'expertise', u'competencies', u'oral', u'skills',
u'management', u'skills', u'proven', u'effectively', u'teams', u'deadline', u'environment', u'bachelor',
u'minimum', u'shipped', u'leadership', u'teams', u'location', u'resumes', u'jobs', u'candidates',
u'openings', u'jobs'
],
[
u'maryland', u'client', u'producers', u'electricity', u'operates', u'storage', u'utility', u'retail',
u'customers', u'engineering', u'consultant', u'maryland', u'summary', u'technical', u'technology',
u'departments', u'expertise', u'maximizing', u'output', u'reduces', u'operating', u'participates',
u'areas', u'engineering', u'conducts', u'testing', u'solve', u'supports', u'environmental', u'understands',
u'objectives', u'operates', u'responsibilities', u'handles', u'complex', u'engineering', u'aspects',
u'monitors', u'quality', u'proficiency', u'optimization', u'recommendations', u'supports', u'personnel',
u'troubleshooting', u'commissioning', u'startup', u'shutdown', u'supports', u'procedure', u'operating',
u'units', u'develops', u'simulations', u'troubleshooting', u'tests', u'enhancing', u'solving', u'develops',
u'estimates', u'schedules', u'scopes', u'understands', u'technical', u'management', u'utilize', u'routine',
u'conducts', u'hazards', u'utilizing', u'hazard', u'operability', u'methodologies', u'participates',
u'startup', u'reviews', u'pssr', u'participate', u'teams', u'participate', u'regulatory', u'audits',
u'define', u'scopes', u'budgets', u'schedules', u'technical', u'management', u'environmental', u'awareness',
u'interfacing', u'personnel', u'interacts', u'regulatory', u'departments', u'input', u'objectives',
u'identifying', u'introducing', u'concepts', u'solutions', u'peers', u'customers', u'coworkers', u'knowledge',
u'skills', u'engineering', u'quality', u'engineering'
],
[
u'commissioning', u'startup', u'knowledge', u'simulators', u'technologies', u'knowledge', u'engineering',
u'techniques', u'disciplines', u'leadership', u'skills', u'proven', u'engineers', u'oral', u'skills',
u'technical', u'skills', u'analytically', u'solve', u'complex', u'interpret', u'proficiency', u'simulation',
u'knowledge', u'applications', u'manipulate', u'applications', u'engineering'
],
[
u'calculations', u'programs', u'matlab', u'excel', u'independently', u'environment', u'proven', u'skills',
u'effectively', u'multiple', u'tasks', u'planning', u'organizational', u'management', u'skills', u'rigzone',
u'jobs', u'developer', u'exceptional', u'strategies', u'junction', u'exceptional', u'strategies', u'solutions',
u'solutions', u'biggest', u'insurers', u'operates', u'investment'
],
[
u'vegas', u'tasks', u'electrical', u'contracting', u'expertise', u'virtually', u'electrical', u'developments',
u'institutional', u'utilities', u'technical', u'experts', u'relationships', u'credibility', u'contractors',
u'utility', u'customers', u'customer', u'relationships', u'consistently', u'innovations', u'profile',
u'construct', u'envision', u'dynamic', u'complex', u'electrical', u'management', u'grad', u'internship',
u'electrical', u'engineering', u'infrastructures', u'engineers', u'documented', u'management', u'engineering',
u'quality', u'engineering', u'electrical', u'engineers', u'complex', u'distribution', u'grounding',
u'estimation', u'testing', u'procedures', u'voltage', u'engineering'
],
[
u'troubleshooting', u'installation', u'documentation', u'bsee', u'certification', u'electrical', u'voltage',
u'cabling', u'electrical', u'engineering', u'candidates', u'electrical', u'internships', u'oral', u'skills',
u'organizational', u'prioritization', u'skills', u'skills', u'excel', u'cadd', u'calculation', u'autocad',
u'mathcad', u'skills', u'skills', u'customer', u'relationships', u'solving', u'ethic', u'motivation', u'tasks',
u'budget', u'affirmative', u'diversity', u'workforce', u'gender', u'orientation', u'disability', u'disabled',
u'veteran', u'vietnam', u'veteran', u'qualifying', u'veteran', u'diverse', u'candidates', u'respond',
u'developing', u'workplace', u'reflects', u'diversity', u'communities', u'reviews', u'electrical',
u'contracting', u'southwest', u'electrical', u'contractors'
],
[
u'intern', u'electrical', u'engineering', u'idexx', u'laboratories', u'validating', u'idexx', u'integrated',
u'hardware', u'entails', u'planning', u'debug', u'validation', u'engineers', u'validation', u'methodologies',
u'healthcare', u'platforms', u'brightest', u'solve', u'challenges', u'innovation', u'technology', u'idexx',
u'intern', u'idexx', u'interns', u'supplement', u'interns', u'teams', u'roles', u'competitive', u'interns',
u'idexx', u'interns', u'participate', u'internships', u'mentors', u'seminars', u'topics', u'leadership',
u'workshops', u'relevant', u'planning', u'topics', u'intern', u'presentations', u'mixers', u'applicants',
u'ineligible', u'laboratory', u'compliant', u'idexx', u'laboratories', u'healthcare', u'innovation',
u'practicing', u'veterinarians', u'diagnostic', u'technology', u'idexx', u'enhance', u'veterinarians',
u'efficiency', u'economically', u'idexx', u'worldwide', u'diagnostic', u'tests', u'tests', u'quality',
u'headquartered', u'idexx', u'laboratories', u'employs', u'customers', u'qualifications', u'applicants',
u'idexx', u'interns', u'potential', u'demonstrated', u'portfolio', u'recommendation', u'resumes', u'marketing',
u'location', u'americas', u'verification', u'validation', u'schedule', u'overtime', u'idexx', u'laboratories',
u'reviews', u'idexx', u'laboratories', u'nasdaq', u'healthcare', u'innovation', u'practicing', u'veterinarians'
],
[
u'location', u'duration', u'temp', u'verification', u'validation', u'tester', u'verification', u'validation',
u'middleware', u'specifically', u'testing', u'applications', u'clinical', u'laboratory', u'regulated',
u'environment', u'responsibilities', u'complex', u'hardware', u'testing', u'clinical', u'analyzers',
u'laboratory', u'graphical', u'interfaces', u'complex', u'sample', u'sequencing', u'protocols', u'developers',
u'correction', u'tracking', u'tool', u'timely', u'troubleshoot', u'testing', u'functional', u'manual',
u'automated', u'participate', u'ongoing'
],
[
u'testing', u'coverage', u'planning', u'documentation', u'testing', u'validation', u'corrections', u'monitor',
u'implementation', u'recurrence', u'operating', u'statistical', u'quality', u'testing', u'global', u'multi',
u'teams', u'travel', u'skills', u'concepts', u'waterfall', u'agile', u'methodologies', u'debugging', u'skills',
u'complex', u'automated', u'instrumentation', u'environment', u'hardware', u'mechanical', u'components',
u'tracking', u'lifecycle', u'management', u'quality', u'organize', u'define', u'priorities', u'organize',
u'supervision', u'aggressive', u'deadlines', u'ambiguity', u'analyze', u'complex', u'situations', u'concepts',
u'technologies', u'verbal', u'skills', u'effectively', u'technical', u'clinical', u'diverse', u'strategy',
u'clinical', u'chemistry', u'analyzer', u'laboratory', u'middleware', u'basic', u'automated', u'testing',
u'biomedical', u'engineering', u'technologists', u'laboratory', u'technology', u'availability', u'click',
u'attach'
],
[
u'scientist', u'linux', u'asrc', u'scientist', u'linux', u'asrc', u'technology', u'solutions', u'subsidiary',
u'asrc', u'engineering', u'technology', u'contracts'
],
[
u'multiple', u'agencies', u'scientists', u'engineers', u'management', u'personnel', u'allows', u'solutions',
u'complex', u'aeronautics', u'aviation', u'management', u'aviation', u'engineering', u'hughes', u'technical',
u'technical', u'aviation', u'evaluation', u'engineering', u'management', u'technical', u'terminal',
u'surveillance', u'programs', u'currently', u'scientist', u'travel', u'responsibilities', u'develops',
u'technology', u'modifies', u'technical', u'complex', u'reviews', u'draft', u'conformity', u'completeness',
u'testing', u'interface', u'hardware', u'regression', u'impact', u'reliability', u'maintainability',
u'factors', u'standardization', u'skills', u'travel', u'programming', u'linux', u'environment', u'cisco',
u'knowledge', u'terminal', u'environment', u'clearance', u'clearance', u'input', u'output', u'digital',
u'automatic', u'terminal', u'management', u'controller', u'termination', u'testing', u'evaluating', u'policies',
u'procedure', u'interface', u'installation', u'verification', u'certification', u'core', u'avionic',
u'programs', u'knowledge', u'procedural', u'testing', u'interfacing', u'hardware', u'regression', u'impact',
u'reliability', u'maintainability', u'factors', u'standardization', u'missions', u'asrc', u'subsidiaries',
u'affirmative', u'employers', u'applicants', u'disability', u'veteran', u'technology', u'location', u'airport',
u'bachelor', u'schedule', u'travel', u'contributor', u'management', u'asrc', u'reviews'
],
[
u'technical', u'solarcity', u'niche', u'vegas', u'overview', u'resolving', u'customer', u'clients',
u'expanding', u'engineers', u'developers', u'responsibilities', u'knowledge', u'planning', u'adapt',
u'dynamic', u'environment', u'inventive', u'creative', u'solarcity', u'lifecycle', u'responsibilities',
u'technical', u'analyzing', u'diagnosing', u'troubleshooting', u'customers', u'ticketing', u'console',
u'escalate', u'knowledge', u'engineering', u'timely', u'basic', u'phone', u'functionality', u'customer',
u'tracking', u'knowledgebase', u'rotation', u'configure', u'deployment', u'sccm', u'technical', u'deployment',
u'deploy', u'hardware', u'solarcity', u'bachelor', u'knowledge', u'dell', u'laptops', u'analytical',
u'troubleshooting', u'solving', u'skills', u'knowledge', u'databases', u'preferably', u'server', u'preferably',
u'monitoring', u'suites', u'documentation', u'procedures', u'knowledge', u'entries', u'verbal', u'skills',
u'customer', u'skills', u'competitive', u'solar', u'package', u'insurance', u'vacation', u'savings',
u'referral', u'eligibility', u'equity', u'performers', u'solarcity', u'affirmative', u'diversity', u'workplace',
u'applicants', u'orientation', u'disability', u'veteran', u'careerrookie'
],
[
u'embedded', u'exelis', u'junction', u'exelis', u'embedded', u'acquisition', u'networking', u'capabilities',
u'classified', u'customer', u'motivated', u'develops', u'tests', u'innovative', u'solutions', u'minimal',
u'supervision', u'paced', u'environment', u'enjoys', u'assignments', u'interact', u'multi', u'disciplined',
u'challenging', u'focused', u'embedded', u'developments', u'spanning', u'engineering', u'lifecycle',
u'specification', u'enhancement', u'applications', u'embedded', u'freescale', u'applications', u'android',
u'platforms', u'interface', u'customers', u'developers', u'refine', u'specifications', u'architectures'
],
[
u'java', u'programming', u'scripts', u'python', u'debug', u'debugging', u'emulators', u'regression',
u'revisions', u'specialized', u'setups', u'capabilities', u'subversion', u'technical', u'documentation',
u'multiple', u'engineering', u'techexpousa', u'reviews'
],
[
u'modeler', u'semantic', u'modeling', u'models', u'skills', u'ontology', u'resource', u'framework', u'schema',
u'technologies', u'hadoop', u'warehouse', u'oracle', u'relational', u'artifacts', u'models', u'dictionaries',
u'models', u'interface', u'specifications', u'documentation', u'harmonization', u'mappings', u'aligned',
u'coordinate', u'technical', u'peer', u'reviews', u'stakeholder', u'communities', u'impact', u'domains',
u'relationships', u'interdependencies', u'models', u'define', u'analyze', u'legacy', u'models', u'corporate',
u'databases', u'architectural', u'alignment', u'customer', u'expertise', u'harmonization', u'modeling',
u'modeling', u'consulting', u'stakeholders', u'quality', u'models', u'storage', u'agile', u'specifically',
u'focus', u'modeling', u'qualifications', u'bachelors', u'accredited', u'modeler', u'encompass', u'evaluation',
u'skills', u'knowledge', u'modeling', u'techniques', u'resource', u'framework', u'schema', u'technologies',
u'unified', u'modeling', u'technologies', u'schemas', u'ontologies', u'sybase', u'knowledge', u'skills',
u'interpersonal', u'skills', u'customers', u'clearance', u'applicants', u'eligibility', u'classified',
u'clearance', u'polygraph', u'techexpousa', u'solutions', u'partnership', u'solutions', u'integration'
],
[
u'technologies', u'junction', u'develops', u'maintains', u'enhances', u'complex', u'diverse', u'intensive',
u'analytics', u'algorithm', u'manipulation', u'management', u'documented', u'individually', u'reviews',
u'tests', u'components', u'adherence', u'resolves', u'utilizes', u'methodologies', u'environment', u'input',
u'components', u'hardware', u'offs', u'reuse', u'cots', u'gots', u'synthesis', u'components', u'tasks',
u'individually', u'analyzes', u'modifies', u'debugs', u'corrects', u'integrates', u'operating',
u'environments', u'develops', u'queries', u'databases', u'repositories', u'recommendations', u'improving',
u'documentation', u'develops', u'implements', u'algorithms', u'functional', u'assists', u'developing',
u'executing', u'procedures', u'components', u'reviews', u'documentation', u'solutions', u'analyzing',
u'conferring', u'users', u'engineers', u'analyzing', u'investigating', u'areas', u'adapt', u'hardware',
u'mathematical', u'models', u'predict', u'outcome', u'implement', u'complex', u'database', u'repository',
u'interfaces', u'queries', u'bachelors', u'accredited', u'substituted', u'bachelors', u'firewalls',
u'ipsec', u'vpns', u'technology', u'administering', u'servers', u'apache', u'jboss', u'tomcat',
u'developing', u'interfaces', u'firefox', u'internet', u'explorer', u'operating', u'mainframe',
u'linux', u'solaris', u'virtual', u'scripting', u'programming', u'oriented', u'programming', u'ajax',
u'script', u'procedures', u'cobol', u'cognos', u'fusion', u'focus', u'html', u'java', u'java', u'script',
u'jquery', u'perl', u'visual', u'basic', u'powershell', u'cots', u'cots', u'oracle', u'apex', u'integration',
u'competitive', u'package', u'bonus', u'corporate', u'equity', u'tuition', u'reimbursement', u'referral',
u'bonus', u'holidays', u'insurance', u'flexible', u'disability', u'insurance'
],
[u'technologies', u'disability', u'accommodation', u'recruiter', u'techexpousa'],
['bank', 'river', 'shore', 'water'],
['river', 'water', 'flow', 'fast', 'tree'],
['bank', 'water', 'fall', 'flow'],
['bank', 'bank', 'water', 'rain', 'river'],
['river', 'water', 'mud', 'tree'],
['money', 'transaction', 'bank', 'finance'],
['bank', 'borrow', 'money'],
['bank', 'finance'],
['finance', 'money', 'sell', 'bank'],
['borrow', 'sell'],
['bank', 'loan', 'sell']
]
dictionary_ldaseq = Dictionary(texts_ldaseq)
corpus_ldaseq = [dictionary_ldaseq.doc2bow(text) for text in texts_ldaseq]
w2v_texts = [
['calculus', 'is', 'the', 'mathematical', 'study', 'of', 'continuous', 'change'],
['geometry', 'is', 'the', 'study', 'of', 'shape'],
['algebra', 'is', 'the', 'study', 'of', 'generalizations', 'of', 'arithmetic', 'operations'],
['differential', 'calculus', 'is', 'related', 'to', 'rates', 'of', 'change', 'and', 'slopes', 'of', 'curves'],
['integral', 'calculus', 'is', 'realted', 'to', 'accumulation', 'of', 'quantities', 'and',
'the', 'areas', 'under', 'and', 'between', 'curves'],
['physics', 'is', 'the', 'natural', 'science', 'that', 'involves', 'the', 'study', 'of', 'matter',
'and', 'its', 'motion', 'and', 'behavior', 'through', 'space', 'and', 'time'],
['the', 'main', 'goal', 'of', 'physics', 'is', 'to', 'understand', 'how', 'the', 'universe', 'behaves'],
['physics', 'also', 'makes', 'significant', 'contributions', 'through', 'advances', 'in', 'new',
'technologies', 'that', 'arise', 'from', 'theoretical', 'breakthroughs'],
['advances', 'in', 'the', 'understanding', 'of', 'electromagnetism', 'or', 'nuclear', 'physics',
'led', 'directly', 'to', 'the', 'development', 'of', 'new', 'products', 'that', 'have', 'dramatically',
'transformed', 'modern', 'day', 'society']
]
d2v_sentences = [models.doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(w2v_texts)]
dict_texts = [' '.join(text) for text in common_texts]
phrases_sentences = common_texts + [
['graph', 'minors', 'survey', 'human', 'interface']
]
class TestLdaWrapper(unittest.TestCase):
    """Tests for the scikit-learn style `LdaTransformer` wrapper around gensim's LdaModel."""

    def setUp(self):
        numpy.random.seed(0)  # set fixed seed to get similar values everytime
        # NOTE: `numpy.random.seed(0)` returns None, so the transformer actually receives
        # `random_state=None`; reproducibility comes from the global seed call itself.
        self.model = LdaTransformer(
            id2word=dictionary, num_topics=2, passes=100, minimum_probability=0, random_state=numpy.random.seed(0)
        )
        self.model.fit(corpus)

    def testTransform(self):
        """Transformed output has shape (num_docs, num_topics) for single and multiple docs."""
        # transform one document
        texts_new = ['graph', 'eulerian']
        bow = self.model.id2word.doc2bow(texts_new)
        matrix = self.model.transform(bow)
        self.assertEqual(matrix.shape[0], 1)
        self.assertEqual(matrix.shape[1], self.model.num_topics)
        # transform several documents at once
        texts_new = [['graph', 'eulerian'], ['server', 'flow'], ['path', 'system']]
        bow = [self.model.id2word.doc2bow(text) for text in texts_new]
        matrix = self.model.transform(bow)
        self.assertEqual(matrix.shape[0], 3)
        self.assertEqual(matrix.shape[1], self.model.num_topics)

    def testPartialFit(self):
        """Repeated `partial_fit` calls drive the first doc towards the expected topic mix."""
        for i in range(10):
            self.model.partial_fit(X=corpus)  # fit against the model again
        doc = list(corpus)[0]  # transform only the first document
        transformed = self.model.transform(doc)
        expected = numpy.array([0.13, 0.87])
        passed = numpy.allclose(sorted(transformed[0]), sorted(expected), atol=1e-1)
        self.assertTrue(passed)

    def testConsistencyWithGensimModel(self):
        """Wrapper output matches a plain gensim LdaModel trained with identical params."""
        # training an LdaTransformer with `num_topics`=10
        self.model = LdaTransformer(
            id2word=dictionary, num_topics=10, passes=100, minimum_probability=0, random_state=numpy.random.seed(0)
        )
        self.model.fit(corpus)
        # training a Gensim LdaModel with the same params
        gensim_ldamodel = models.LdaModel(
            corpus=corpus, id2word=dictionary, num_topics=10, passes=100,
            minimum_probability=0, random_state=numpy.random.seed(0)
        )
        texts_new = ['graph', 'eulerian']
        bow = self.model.id2word.doc2bow(texts_new)
        matrix_transformer_api = self.model.transform(bow)
        matrix_gensim_model = gensim_ldamodel[bow]
        # convert into dense representation to be able to compare with transformer output
        matrix_gensim_model_dense = matutils.sparse2full(matrix_gensim_model, 10)
        passed = numpy.allclose(matrix_transformer_api, matrix_gensim_model_dense, atol=1e-1)
        self.assertTrue(passed)

    def testCSRMatrixConversion(self):
        """The wrapper accepts a scipy CSR sparse matrix as training input."""
        numpy.random.seed(0)  # set fixed seed to get similar values everytime
        arr = numpy.array([[1, 2, 0], [0, 0, 3], [1, 0, 0]])
        sarr = sparse.csr_matrix(arr)
        newmodel = LdaTransformer(num_topics=2, passes=100)
        newmodel.fit(sarr)
        bow = [(0, 1), (1, 2), (2, 0)]
        transformed_vec = newmodel.transform(bow)
        expected_vec = numpy.array([0.12843782, 0.87156218])
        passed = numpy.allclose(transformed_vec, expected_vec, atol=1e-1)
        self.assertTrue(passed)

    def testPipeline(self):
        """LdaTransformer works as a feature-extraction step inside an sklearn Pipeline."""
        model = LdaTransformer(num_topics=2, passes=10, minimum_probability=0, random_state=numpy.random.seed(0))
        with open(datapath('mini_newsgroup'), 'rb') as f:
            compressed_content = f.read()
        uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
        cache = pickle.loads(uncompressed_content)
        data = cache
        id2word = Dictionary([x.split() for x in data.data])
        corpus = [id2word.doc2bow(i.split()) for i in data.data]
        # Fix: the original called `numpy.random.mtrand.RandomState(1)`, which only
        # constructs an unused object and does NOT reseed the global RNG.
        numpy.random.seed(1)  # set seed for getting same result
        clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
        text_lda = Pipeline([('features', model,), ('classifier', clf)])
        text_lda.fit(corpus, data.target)
        score = text_lda.score(corpus, data.target)
        self.assertGreaterEqual(score, 0.40)

    def testSetGetParams(self):
        """set_params/get_params round-trip, and refitting propagates to `gensim_model`."""
        # updating only one param
        self.model.set_params(num_topics=3)
        model_params = self.model.get_params()
        self.assertEqual(model_params["num_topics"], 3)
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(corpus)
        self.assertEqual(getattr(self.model.gensim_model, 'num_topics'), 3)
        # updating multiple params
        param_dict = {"eval_every": 20, "decay": 0.7}
        self.model.set_params(**param_dict)
        model_params = self.model.get_params()
        for key in param_dict.keys():
            self.assertEqual(model_params[key], param_dict[key])
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(corpus)
        self.assertEqual(getattr(self.model.gensim_model, 'eval_every'), 20)
        self.assertEqual(getattr(self.model.gensim_model, 'decay'), 0.7)

    def testPersistence(self):
        """A pickled-and-restored wrapper transforms (approximately) like the original."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        texts_new = ['graph', 'eulerian']
        loaded_bow = model_load.id2word.doc2bow(texts_new)
        loaded_matrix = model_load.transform(loaded_bow)
        # sanity check for transformation operation
        self.assertEqual(loaded_matrix.shape[0], 1)
        self.assertEqual(loaded_matrix.shape[1], model_load.num_topics)
        # comparing the original and loaded models
        original_bow = self.model.id2word.doc2bow(texts_new)
        original_matrix = self.model.transform(original_bow)
        passed = numpy.allclose(loaded_matrix, original_matrix, atol=1e-1)
        self.assertTrue(passed)

    def testModelNotFitted(self):
        """Calling transform() before fit() raises NotFittedError."""
        lda_wrapper = LdaTransformer(
            id2word=dictionary, num_topics=2, passes=100,
            minimum_probability=0, random_state=numpy.random.seed(0)
        )
        texts_new = ['graph', 'eulerian']
        bow = lda_wrapper.id2word.doc2bow(texts_new)
        self.assertRaises(NotFittedError, lda_wrapper.transform, bow)
class TestLsiWrapper(unittest.TestCase):
    """Tests for the scikit-learn style `LsiTransformer` (LSI/LSA) wrapper."""

    def setUp(self):
        numpy.random.seed(0)  # set fixed seed to get similar values everytime
        self.model = LsiTransformer(id2word=dictionary, num_topics=2)
        self.model.fit(corpus)

    def testTransform(self):
        """Transformed output has shape (num_docs, num_topics) for single and multiple docs."""
        # transform one document
        texts_new = ['graph', 'eulerian']
        bow = self.model.id2word.doc2bow(texts_new)
        matrix = self.model.transform(bow)
        self.assertEqual(matrix.shape[0], 1)
        self.assertEqual(matrix.shape[1], self.model.num_topics)
        # transform several documents at once
        texts_new = [['graph', 'eulerian'], ['server', 'flow'], ['path', 'system']]
        bow = []
        for i in texts_new:
            bow.append(self.model.id2word.doc2bow(i))
        matrix = self.model.transform(bow)
        self.assertEqual(matrix.shape[0], 3)
        self.assertEqual(matrix.shape[1], self.model.num_topics)

    def testPartialFit(self):
        """Incremental updates via partial_fit keep the projection usable."""
        for i in range(10):
            self.model.partial_fit(X=corpus)  # fit against the model again
        doc = list(corpus)[0]  # transform only the first document
        transformed = self.model.transform(doc)
        expected = numpy.array([1.39, 0.0])
        # LSI component signs/magnitudes vary slightly between runs; atol=1 is deliberately loose
        passed = numpy.allclose(transformed[0], expected, atol=1)
        self.assertTrue(passed)

    def testPipeline(self):
        """LsiTransformer works as a feature-extraction step inside an sklearn Pipeline."""
        model = LsiTransformer(num_topics=2)
        with open(datapath('mini_newsgroup'), 'rb') as f:
            compressed_content = f.read()
        uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
        cache = pickle.loads(uncompressed_content)
        data = cache
        id2word = Dictionary([x.split() for x in data.data])
        corpus = [id2word.doc2bow(i.split()) for i in data.data]
        # NOTE(review): this constructs an unused RandomState object; it does NOT
        # reseed the global RNG, despite the original intent.
        numpy.random.mtrand.RandomState(1)
        clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
        text_lsi = Pipeline([('features', model,), ('classifier', clf)])
        text_lsi.fit(corpus, data.target)
        score = text_lsi.score(corpus, data.target)
        self.assertGreater(score, 0.50)

    def testSetGetParams(self):
        """set_params/get_params round-trip, and refitting propagates to `gensim_model`."""
        # updating only one param
        self.model.set_params(num_topics=3)
        model_params = self.model.get_params()
        self.assertEqual(model_params["num_topics"], 3)
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(corpus)
        self.assertEqual(getattr(self.model.gensim_model, 'num_topics'), 3)
        # updating multiple params
        param_dict = {"chunksize": 10000, "decay": 0.9}
        self.model.set_params(**param_dict)
        model_params = self.model.get_params()
        for key in param_dict.keys():
            self.assertEqual(model_params[key], param_dict[key])
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(corpus)
        self.assertEqual(getattr(self.model.gensim_model, 'chunksize'), 10000)
        self.assertEqual(getattr(self.model.gensim_model, 'decay'), 0.9)

    def testPersistence(self):
        """A pickled-and-restored wrapper transforms (approximately) like the original."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        texts_new = ['graph', 'eulerian']
        loaded_bow = model_load.id2word.doc2bow(texts_new)
        loaded_matrix = model_load.transform(loaded_bow)
        # sanity check for transformation operation
        self.assertEqual(loaded_matrix.shape[0], 1)
        self.assertEqual(loaded_matrix.shape[1], model_load.num_topics)
        # comparing the original and loaded models
        original_bow = self.model.id2word.doc2bow(texts_new)
        original_matrix = self.model.transform(original_bow)
        passed = numpy.allclose(loaded_matrix, original_matrix, atol=1e-1)
        self.assertTrue(passed)

    def testModelNotFitted(self):
        """Calling transform() before fit() raises NotFittedError."""
        lsi_wrapper = LsiTransformer(id2word=dictionary, num_topics=2)
        texts_new = ['graph', 'eulerian']
        bow = lsi_wrapper.id2word.doc2bow(texts_new)
        self.assertRaises(NotFittedError, lsi_wrapper.transform, bow)
class TestLdaSeqWrapper(unittest.TestCase):
    """Tests for the scikit-learn style `LdaSeqTransformer` (dynamic topic model) wrapper."""

    def setUp(self):
        # time_slice splits corpus_ldaseq (31 docs) into three time periods
        self.model = LdaSeqTransformer(
            id2word=dictionary_ldaseq, num_topics=2, time_slice=[10, 10, 11], initialize='gensim'
        )
        self.model.fit(corpus_ldaseq)

    def testTransform(self):
        """Output shape is (num_docs, num_topics) for single and multiple documents."""
        # transforming two documents
        docs = [list(corpus_ldaseq)[0], list(corpus_ldaseq)[1]]
        transformed_vecs = self.model.transform(docs)
        self.assertEqual(transformed_vecs.shape[0], 2)
        self.assertEqual(transformed_vecs.shape[1], self.model.num_topics)
        # transforming one document
        doc = list(corpus_ldaseq)[0]
        transformed_vecs = self.model.transform(doc)
        self.assertEqual(transformed_vecs.shape[0], 1)
        self.assertEqual(transformed_vecs.shape[1], self.model.num_topics)

    def testPipeline(self):
        """LdaSeqTransformer works as a feature step inside an sklearn Pipeline."""
        numpy.random.seed(0)  # set fixed seed to get similar values everytime
        with open(datapath('mini_newsgroup'), 'rb') as f:
            compressed_content = f.read()
        uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
        cache = pickle.loads(uncompressed_content)
        data = cache
        # only two documents to keep this (slow) model's training time down
        test_data = data.data[0:2]
        test_target = data.target[0:2]
        id2word = Dictionary([x.split() for x in test_data])
        corpus = [id2word.doc2bow(i.split()) for i in test_data]
        model = LdaSeqTransformer(id2word=id2word, num_topics=2, time_slice=[1, 1, 1], initialize='gensim')
        clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
        text_ldaseq = Pipeline([('features', model,), ('classifier', clf)])
        text_ldaseq.fit(corpus, test_target)
        score = text_ldaseq.score(corpus, test_target)
        self.assertGreater(score, 0.50)

    def testSetGetParams(self):
        """set_params/get_params round-trip, and refitting propagates to `gensim_model`."""
        # updating only one param
        self.model.set_params(num_topics=3)
        model_params = self.model.get_params()
        self.assertEqual(model_params["num_topics"], 3)
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(corpus_ldaseq)
        self.assertEqual(getattr(self.model.gensim_model, 'num_topics'), 3)

    def testPersistence(self):
        """A pickled-and-restored wrapper transforms (approximately) like the original."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        doc = list(corpus_ldaseq)[0]
        loaded_transformed_vecs = model_load.transform(doc)
        # sanity check for transformation operation
        self.assertEqual(loaded_transformed_vecs.shape[0], 1)
        self.assertEqual(loaded_transformed_vecs.shape[1], model_load.num_topics)
        # comparing the original and loaded models
        original_transformed_vecs = self.model.transform(doc)
        passed = numpy.allclose(loaded_transformed_vecs, original_transformed_vecs, atol=1e-1)
        self.assertTrue(passed)

    def testModelNotFitted(self):
        """Calling transform() before fit() raises NotFittedError."""
        ldaseq_wrapper = LdaSeqTransformer(num_topics=2)
        doc = list(corpus_ldaseq)[0]
        self.assertRaises(NotFittedError, ldaseq_wrapper.transform, doc)
class TestRpWrapper(unittest.TestCase):
    """Tests for the scikit-learn style `RpTransformer` (random projections) wrapper."""

    def setUp(self):
        # RP is randomized; fix the global seed so the projection is reproducible
        numpy.random.seed(13)
        self.model = RpTransformer(num_topics=2)
        self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
        self.model.fit(self.corpus)

    def testTransform(self):
        """Output shape is (num_docs, num_topics) for single and multiple documents."""
        # tranform two documents
        docs = [list(self.corpus)[0], list(self.corpus)[1]]
        matrix = self.model.transform(docs)
        self.assertEqual(matrix.shape[0], 2)
        self.assertEqual(matrix.shape[1], self.model.num_topics)
        # tranform one document
        doc = list(self.corpus)[0]
        matrix = self.model.transform(doc)
        self.assertEqual(matrix.shape[0], 1)
        self.assertEqual(matrix.shape[1], self.model.num_topics)

    def testPipeline(self):
        """RpTransformer works as a feature-extraction step inside an sklearn Pipeline."""
        numpy.random.seed(0)  # set fixed seed to get similar values everytime
        model = RpTransformer(num_topics=2)
        with open(datapath('mini_newsgroup'), 'rb') as f:
            compressed_content = f.read()
        uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
        cache = pickle.loads(uncompressed_content)
        data = cache
        id2word = Dictionary([x.split() for x in data.data])
        corpus = [id2word.doc2bow(i.split()) for i in data.data]
        # NOTE(review): this constructs an unused RandomState object; it does NOT
        # reseed the global RNG, despite the original intent.
        numpy.random.mtrand.RandomState(1)
        clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
        text_rp = Pipeline([('features', model,), ('classifier', clf)])
        text_rp.fit(corpus, data.target)
        score = text_rp.score(corpus, data.target)
        self.assertGreater(score, 0.40)

    def testSetGetParams(self):
        """set_params/get_params round-trip, and refitting propagates to `gensim_model`."""
        # updating only one param
        self.model.set_params(num_topics=3)
        model_params = self.model.get_params()
        self.assertEqual(model_params["num_topics"], 3)
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(self.corpus)
        self.assertEqual(getattr(self.model.gensim_model, 'num_topics'), 3)

    def testPersistence(self):
        """A pickled-and-restored wrapper transforms (approximately) like the original."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        doc = list(self.corpus)[0]
        loaded_transformed_vecs = model_load.transform(doc)
        # sanity check for transformation operation
        self.assertEqual(loaded_transformed_vecs.shape[0], 1)
        self.assertEqual(loaded_transformed_vecs.shape[1], model_load.num_topics)
        # comparing the original and loaded models
        original_transformed_vecs = self.model.transform(doc)
        passed = numpy.allclose(loaded_transformed_vecs, original_transformed_vecs, atol=1e-1)
        self.assertTrue(passed)

    def testModelNotFitted(self):
        """Calling transform() before fit() raises NotFittedError."""
        rpmodel_wrapper = RpTransformer(num_topics=2)
        doc = list(self.corpus)[0]
        self.assertRaises(NotFittedError, rpmodel_wrapper.transform, doc)
class TestWord2VecWrapper(unittest.TestCase):
    """Tests for the scikit-learn style `W2VTransformer` (Word2Vec) wrapper."""

    def setUp(self):
        numpy.random.seed(0)
        # seed=42 fixes Word2Vec's own hashing-based initialization
        self.model = W2VTransformer(size=10, min_count=0, seed=42)
        self.model.fit(texts)

    def testTransform(self):
        """Output shape is (num_words, vector_size) for single and multiple words."""
        # tranform multiple words
        words = []
        words = words + texts[0]
        matrix = self.model.transform(words)
        self.assertEqual(matrix.shape[0], 3)
        self.assertEqual(matrix.shape[1], self.model.size)
        # tranform one word
        word = texts[0][0]
        matrix = self.model.transform(word)
        self.assertEqual(matrix.shape[0], 1)
        self.assertEqual(matrix.shape[1], self.model.size)

    def testConsistencyWithGensimModel(self):
        """Wrapper vectors match a plain gensim Word2Vec trained with identical params."""
        # training a W2VTransformer
        self.model = W2VTransformer(size=10, min_count=0, seed=42)
        self.model.fit(texts)
        # training a Gensim Word2Vec model with the same params
        gensim_w2vmodel = models.Word2Vec(texts, size=10, min_count=0, seed=42)
        word = texts[0][0]
        vec_transformer_api = self.model.transform(word)  # vector returned by W2VTransformer
        vec_gensim_model = gensim_w2vmodel[word]  # vector returned by Word2Vec
        passed = numpy.allclose(vec_transformer_api, vec_gensim_model, atol=1e-1)
        self.assertTrue(passed)

    def testPipeline(self):
        """W2V vectors feed a classifier; the classifier is fit outside the Pipeline
        because W2VTransformer consumes sentences while the classifier consumes vectors."""
        numpy.random.seed(0)  # set fixed seed to get similar values everytime
        model = W2VTransformer(size=10, min_count=1)
        model.fit(w2v_texts)
        class_dict = {'mathematics': 1, 'physics': 0}
        train_data = [
            ('calculus', 'mathematics'), ('mathematical', 'mathematics'),
            ('geometry', 'mathematics'), ('operations', 'mathematics'),
            ('curves', 'mathematics'), ('natural', 'physics'), ('nuclear', 'physics'),
            ('science', 'physics'), ('electromagnetism', 'physics'), ('natural', 'physics')
        ]
        train_input = [x[0] for x in train_data]
        train_target = [class_dict[x[1]] for x in train_data]
        clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
        clf.fit(model.transform(train_input), train_target)
        text_w2v = Pipeline([('features', model,), ('classifier', clf)])
        score = text_w2v.score(train_input, train_target)
        self.assertGreater(score, 0.40)

    def testSetGetParams(self):
        """set_params/get_params round-trip, and refitting propagates to `gensim_model`."""
        # updating only one param
        self.model.set_params(negative=20)
        model_params = self.model.get_params()
        self.assertEqual(model_params["negative"], 20)
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(texts)
        self.assertEqual(getattr(self.model.gensim_model, 'negative'), 20)

    def testPersistence(self):
        """A pickled-and-restored wrapper transforms (approximately) like the original."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        word = texts[0][0]
        loaded_transformed_vecs = model_load.transform(word)
        # sanity check for transformation operation
        self.assertEqual(loaded_transformed_vecs.shape[0], 1)
        self.assertEqual(loaded_transformed_vecs.shape[1], model_load.size)
        # comparing the original and loaded models
        original_transformed_vecs = self.model.transform(word)
        passed = numpy.allclose(loaded_transformed_vecs, original_transformed_vecs, atol=1e-1)
        self.assertTrue(passed)

    def testModelNotFitted(self):
        """Calling transform() before fit() raises NotFittedError."""
        w2vmodel_wrapper = W2VTransformer(size=10, min_count=0, seed=42)
        word = texts[0][0]
        self.assertRaises(NotFittedError, w2vmodel_wrapper.transform, word)
class TestAuthorTopicWrapper(unittest.TestCase):
    """Tests for the scikit-learn style `AuthorTopicTransformer` wrapper."""

    def setUp(self):
        self.model = AuthorTopicTransformer(id2word=dictionary, author2doc=author2doc, num_topics=2, passes=100)
        self.model.fit(corpus)

    def testTransform(self):
        """Output shape is (num_authors, num_topics) for single and multiple authors."""
        # transforming multiple authors
        author_list = ['jill', 'jack']
        author_topics = self.model.transform(author_list)
        self.assertEqual(author_topics.shape[0], 2)
        self.assertEqual(author_topics.shape[1], self.model.num_topics)
        # transforming one author
        jill_topics = self.model.transform('jill')
        self.assertEqual(jill_topics.shape[0], 1)
        self.assertEqual(jill_topics.shape[1], self.model.num_topics)

    def testPartialFit(self):
        """partial_fit with new docs/authors learns topics for the new author."""
        self.model.partial_fit(corpus_new, author2doc=author2doc_new)
        # Did we learn something about Sally?
        output_topics = self.model.transform('sally')
        sally_topics = output_topics[0]  # getting the topics corresponding to 'sally' (from the list of lists)
        self.assertTrue(all(sally_topics > 0))

    def testPipeline(self):
        """Author-topic vectors feed a clustering model; the two are then pipelined."""
        # train the AuthorTopic model first
        model = AuthorTopicTransformer(id2word=dictionary, author2doc=author2doc, num_topics=10, passes=100)
        model.fit(corpus)
        # create and train clustering model
        clstr = cluster.MiniBatchKMeans(n_clusters=2)
        authors_full = ['john', 'jane', 'jack', 'jill']
        clstr.fit(model.transform(authors_full))
        # stack together the two models in a pipeline
        text_atm = Pipeline([('features', model,), ('cluster', clstr)])
        author_list = ['jane', 'jack', 'jill']
        ret_val = text_atm.predict(author_list)
        self.assertEqual(len(ret_val), len(author_list))

    def testSetGetParams(self):
        """set_params/get_params round-trip, and refitting propagates to `gensim_model`."""
        # updating only one param
        self.model.set_params(num_topics=3)
        model_params = self.model.get_params()
        self.assertEqual(model_params["num_topics"], 3)
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(corpus)
        self.assertEqual(getattr(self.model.gensim_model, 'num_topics'), 3)
        # updating multiple params
        param_dict = {"passes": 5, "iterations": 10}
        self.model.set_params(**param_dict)
        model_params = self.model.get_params()
        for key in param_dict.keys():
            self.assertEqual(model_params[key], param_dict[key])
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(corpus)
        self.assertEqual(getattr(self.model.gensim_model, 'passes'), 5)
        self.assertEqual(getattr(self.model.gensim_model, 'iterations'), 10)

    def testPersistence(self):
        """A pickled-and-restored wrapper transforms (approximately) like the original."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        author_list = ['jill']
        loaded_author_topics = model_load.transform(author_list)
        # sanity check for transformation operation
        self.assertEqual(loaded_author_topics.shape[0], 1)
        self.assertEqual(loaded_author_topics.shape[1], self.model.num_topics)
        # comparing the original and loaded models
        original_author_topics = self.model.transform(author_list)
        passed = numpy.allclose(loaded_author_topics, original_author_topics, atol=1e-1)
        self.assertTrue(passed)

    def testModelNotFitted(self):
        """Calling transform() before fit() raises NotFittedError."""
        atmodel_wrapper = AuthorTopicTransformer(id2word=dictionary, author2doc=author2doc, num_topics=10, passes=100)
        author_list = ['jill', 'jack']
        self.assertRaises(NotFittedError, atmodel_wrapper.transform, author_list)
class TestD2VTransformer(unittest.TestCase):
    """Tests for the scikit-learn style `D2VTransformer` (Doc2Vec) wrapper."""

    def setUp(self):
        numpy.random.seed(0)
        self.model = D2VTransformer(min_count=1)
        self.model.fit(d2v_sentences)

    def testTransform(self):
        """Output shape is (num_docs, vector_size) for single and multiple documents."""
        # tranform multiple documents
        docs = [w2v_texts[0], w2v_texts[1], w2v_texts[2]]
        matrix = self.model.transform(docs)
        self.assertEqual(matrix.shape[0], 3)
        self.assertEqual(matrix.shape[1], self.model.size)
        # tranform one document
        doc = w2v_texts[0]
        matrix = self.model.transform(doc)
        self.assertEqual(matrix.shape[0], 1)
        self.assertEqual(matrix.shape[1], self.model.size)

    def testFitTransform(self):
        """fit_transform trains and transforms in one call with the same output shapes."""
        model = D2VTransformer(min_count=1)
        # fit and transform multiple documents
        docs = [w2v_texts[0], w2v_texts[1], w2v_texts[2]]
        matrix = model.fit_transform(docs)
        self.assertEqual(matrix.shape[0], 3)
        self.assertEqual(matrix.shape[1], model.size)
        # fit and transform one document
        doc = w2v_texts[0]
        matrix = model.fit_transform(doc)
        self.assertEqual(matrix.shape[0], 1)
        self.assertEqual(matrix.shape[1], model.size)

    def testSetGetParams(self):
        """set_params/get_params round-trip, and refitting propagates to `gensim_model`."""
        # updating only one param
        self.model.set_params(negative=20)
        model_params = self.model.get_params()
        self.assertEqual(model_params["negative"], 20)
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(d2v_sentences)
        self.assertEqual(getattr(self.model.gensim_model, 'negative'), 20)

    def testPipeline(self):
        """Doc2Vec vectors feed a classifier; the classifier is fit outside the Pipeline
        because D2VTransformer consumes token lists while the classifier consumes vectors."""
        numpy.random.seed(0)  # set fixed seed to get similar values everytime
        model = D2VTransformer(min_count=1)
        model.fit(d2v_sentences)
        class_dict = {'mathematics': 1, 'physics': 0}
        train_data = [
            (['calculus', 'mathematical'], 'mathematics'), (['geometry', 'operations', 'curves'], 'mathematics'),
            (['natural', 'nuclear'], 'physics'), (['science', 'electromagnetism', 'natural'], 'physics')
        ]
        train_input = [x[0] for x in train_data]
        train_target = [class_dict[x[1]] for x in train_data]
        clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
        clf.fit(model.transform(train_input), train_target)
        text_w2v = Pipeline([('features', model,), ('classifier', clf)])
        score = text_w2v.score(train_input, train_target)
        self.assertGreater(score, 0.40)

    def testPersistence(self):
        """A pickled-and-restored wrapper transforms (approximately) like the original."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        doc = w2v_texts[0]
        loaded_transformed_vecs = model_load.transform(doc)
        # sanity check for transformation operation
        self.assertEqual(loaded_transformed_vecs.shape[0], 1)
        self.assertEqual(loaded_transformed_vecs.shape[1], model_load.size)
        # comparing the original and loaded models
        original_transformed_vecs = self.model.transform(doc)
        passed = numpy.allclose(sorted(loaded_transformed_vecs), sorted(original_transformed_vecs), atol=1e-1)
        self.assertTrue(passed)

    def testConsistencyWithGensimModel(self):
        """Wrapper vectors roughly match a separately trained gensim Doc2Vec.
        NOTE(review): two independently trained Doc2Vec models are not guaranteed to be
        close; the loose atol=1e-1 tolerance is what keeps this from being flaky."""
        # training a D2VTransformer
        self.model = D2VTransformer(min_count=1)
        self.model.fit(d2v_sentences)
        # training a Gensim Doc2Vec model with the same params
        gensim_d2vmodel = models.Doc2Vec(d2v_sentences, min_count=1)
        doc = w2v_texts[0]
        vec_transformer_api = self.model.transform(doc)  # vector returned by D2VTransformer
        vec_gensim_model = gensim_d2vmodel[doc]  # vector returned by Doc2Vec
        passed = numpy.allclose(vec_transformer_api, vec_gensim_model, atol=1e-1)
        self.assertTrue(passed)

    def testModelNotFitted(self):
        """Calling transform() before fit() raises NotFittedError."""
        d2vmodel_wrapper = D2VTransformer(min_count=1)
        self.assertRaises(NotFittedError, d2vmodel_wrapper.transform, 1)
class TestText2BowTransformer(unittest.TestCase):
    """Tests for the scikit-learn style `Text2BowTransformer` wrapper."""

    def setUp(self):
        numpy.random.seed(0)
        self.model = Text2BowTransformer()
        self.model.fit(dict_texts)

    def testTransform(self):
        """A single raw string is tokenized and converted to a bag-of-words."""
        doc = ['computer system interface time computer system']
        bag = self.model.transform(doc)[0]
        # two tokens appear twice and two appear once; ids are irrelevant here
        observed_counts = [token_count for _, token_count in bag]
        self.assertEqual(sorted([1, 1, 2, 2]), sorted(observed_counts))

    def testSetGetParams(self):
        """A parameter set via set_params must be visible via get_params."""
        self.model.set_params(prune_at=1000000)
        retrieved = self.model.get_params()
        self.assertEqual(retrieved["prune_at"], 1000000)

    def testPipeline(self):
        """Text2Bow feeds an LDA step and a classifier inside an sklearn Pipeline."""
        with open(datapath('mini_newsgroup'), 'rb') as f:
            compressed_content = f.read()
        uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
        data = pickle.loads(uncompressed_content)
        text2bow_model = Text2BowTransformer()
        lda_model = LdaTransformer(num_topics=2, passes=10, minimum_probability=0, random_state=numpy.random.seed(0))
        # NOTE(review): constructs an unused RandomState; does not reseed the global RNG
        numpy.random.mtrand.RandomState(1)
        clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
        steps = [('bow_model', text2bow_model), ('ldamodel', lda_model), ('classifier', clf)]
        text_lda = Pipeline(steps)
        text_lda.fit(data.data, data.target)
        self.assertGreater(text_lda.score(data.data, data.target), 0.40)

    def testPersistence(self):
        """A pickled-and-restored wrapper transforms exactly like the original."""
        restored = pickle.loads(pickle.dumps(self.model))
        doc = dict_texts[0]
        self.assertEqual(self.model.transform(doc), restored.transform(doc))

    def testModelNotFitted(self):
        """Calling transform() before fit() raises NotFittedError."""
        unfitted = Text2BowTransformer()
        self.assertRaises(NotFittedError, unfitted.transform, dict_texts[0])
class TestTfIdfTransformer(unittest.TestCase):
    """Tests for the scikit-learn style `TfIdfTransformer` wrapper."""

    def setUp(self):
        numpy.random.seed(0)
        self.model = TfIdfTransformer(normalize=True)
        self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
        self.model.fit(self.corpus)

    def testTransform(self):
        """Exact (normalized) tf-idf weights for known documents of the fixture corpus."""
        # tranform one document
        doc = corpus[0]
        transformed_doc = self.model.transform(doc)
        expected_doc = [[(0, 0.5773502691896257), (1, 0.5773502691896257), (2, 0.5773502691896257)]]
        self.assertTrue(numpy.allclose(transformed_doc, expected_doc))
        # tranform multiple documents
        docs = [corpus[0], corpus[1]]
        transformed_docs = self.model.transform(docs)
        expected_docs = [
            [(0, 0.5773502691896257), (1, 0.5773502691896257), (2, 0.5773502691896257)],
            [(3, 0.44424552527467476), (4, 0.44424552527467476), (5, 0.3244870206138555),
             (6, 0.44424552527467476), (7, 0.3244870206138555), (8, 0.44424552527467476)]
        ]
        self.assertTrue(numpy.allclose(transformed_docs[0], expected_docs[0]))
        self.assertTrue(numpy.allclose(transformed_docs[1], expected_docs[1]))

    def testSetGetParams(self):
        """set_params/get_params round-trip, and refitting propagates to `gensim_model`."""
        # updating only one param
        self.model.set_params(smartirs='nnn')
        model_params = self.model.get_params()
        self.assertEqual(model_params["smartirs"], 'nnn')
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(self.corpus)
        self.assertEqual(getattr(self.model.gensim_model, 'smartirs'), 'nnn')

    def testPipeline(self):
        """Tf-idf feeds an LDA step and a classifier inside an sklearn Pipeline."""
        with open(datapath('mini_newsgroup'), 'rb') as f:
            compressed_content = f.read()
        uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
        cache = pickle.loads(uncompressed_content)
        data = cache
        id2word = Dictionary([x.split() for x in data.data])
        corpus = [id2word.doc2bow(i.split()) for i in data.data]
        tfidf_model = TfIdfTransformer()
        tfidf_model.fit(corpus)
        lda_model = LdaTransformer(num_topics=2, passes=10, minimum_probability=0, random_state=numpy.random.seed(0))
        # NOTE(review): this constructs an unused RandomState object; it does NOT
        # reseed the global RNG, despite the original intent.
        numpy.random.mtrand.RandomState(1)
        clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
        text_tfidf = Pipeline([('tfidf_model', tfidf_model), ('ldamodel', lda_model), ('classifier', clf)])
        text_tfidf.fit(corpus, data.target)
        score = text_tfidf.score(corpus, data.target)
        self.assertGreater(score, 0.40)

    def testPersistence(self):
        """A pickled-and-restored wrapper transforms exactly like the original."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        doc = corpus[0]
        loaded_transformed_doc = model_load.transform(doc)
        # comparing the original and loaded models
        original_transformed_doc = self.model.transform(doc)
        self.assertEqual(original_transformed_doc, loaded_transformed_doc)

    def testModelNotFitted(self):
        """Calling transform() before fit() raises NotFittedError."""
        tfidf_wrapper = TfIdfTransformer()
        self.assertRaises(NotFittedError, tfidf_wrapper.transform, corpus[0])
class TestHdpTransformer(unittest.TestCase):
    """Tests for the scikit-learn style `HdpTransformer` (hierarchical Dirichlet process) wrapper."""

    def setUp(self):
        numpy.random.seed(0)
        self.model = HdpTransformer(id2word=dictionary, random_state=42)
        self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
        self.model.fit(self.corpus)

    def testTransform(self):
        """Topic distributions match reference values obtained with random_state=42."""
        # tranform one document
        doc = self.corpus[0]
        transformed_doc = self.model.transform(doc)
        expected_doc = [
            [0.81043386270128193, 0.049357139518070477, 0.035840906753517532,
             0.026542006926698079, 0.019925705902962578, 0.014776690981729117, 0.011068909979528148]
        ]
        self.assertTrue(numpy.allclose(transformed_doc, expected_doc, atol=1e-2))
        # tranform multiple documents
        docs = [self.corpus[0], self.corpus[1]]
        transformed_docs = self.model.transform(docs)
        expected_docs = [
            [0.81043386270128193, 0.049357139518070477, 0.035840906753517532,
             0.026542006926698079, 0.019925705902962578, 0.014776690981729117, 0.011068909979528148],
            [0.03795908, 0.39542609, 0.50650585, 0.0151082, 0.01132749, 0., 0.]
        ]
        self.assertTrue(numpy.allclose(transformed_docs[0], expected_docs[0], atol=1e-2))
        self.assertTrue(numpy.allclose(transformed_docs[1], expected_docs[1], atol=1e-2))

    def testSetGetParams(self):
        """set_params/get_params round-trip; HDP stores the value as `m_var_converge`."""
        # updating only one param
        self.model.set_params(var_converge=0.05)
        model_params = self.model.get_params()
        self.assertEqual(model_params["var_converge"], 0.05)
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(self.corpus)
        self.assertEqual(getattr(self.model.gensim_model, 'm_var_converge'), 0.05)

    def testPipeline(self):
        """HdpTransformer works as a feature-extraction step inside an sklearn Pipeline."""
        with open(datapath('mini_newsgroup'), 'rb') as f:
            compressed_content = f.read()
        uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
        cache = pickle.loads(uncompressed_content)
        data = cache
        id2word = Dictionary([x.split() for x in data.data])
        corpus = [id2word.doc2bow(i.split()) for i in data.data]
        model = HdpTransformer(id2word=id2word)
        clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
        text_lda = Pipeline([('features', model,), ('classifier', clf)])
        text_lda.fit(corpus, data.target)
        score = text_lda.score(corpus, data.target)
        self.assertGreater(score, 0.40)

    def testPersistence(self):
        """A pickled-and-restored wrapper transforms exactly like the original."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        doc = corpus[0]
        loaded_transformed_doc = model_load.transform(doc)
        # comparing the original and loaded models
        original_transformed_doc = self.model.transform(doc)
        self.assertTrue(numpy.allclose(original_transformed_doc, loaded_transformed_doc))

    def testModelNotFitted(self):
        """Calling transform() before fit() raises NotFittedError."""
        hdp_wrapper = HdpTransformer(id2word=dictionary)
        self.assertRaises(NotFittedError, hdp_wrapper.transform, corpus[0])
class TestPhrasesTransformer(unittest.TestCase):
    """Tests for the scikit-learn style `PhrasesTransformer` (collocation detection) wrapper."""

    def setUp(self):
        numpy.random.seed(0)
        # low min_count/threshold so the tiny fixture corpus yields bigrams
        self.model = PhrasesTransformer(min_count=1, threshold=1)
        self.model.fit(phrases_sentences)

    def testTransform(self):
        """Known co-occurring token pairs are merged into single phrase tokens."""
        # tranform one document
        doc = phrases_sentences[-1]
        phrase_tokens = self.model.transform(doc)[0]
        expected_phrase_tokens = [u'graph_minors', u'survey', u'human_interface']
        self.assertEqual(phrase_tokens, expected_phrase_tokens)

    def testPartialFit(self):
        """partial_fit on new sentences teaches the model additional phrases."""
        new_sentences = [
            ['world', 'peace', 'humans', 'world', 'peace', 'world', 'peace', 'people'],
            ['world', 'peace', 'people'],
            ['world', 'peace', 'humans']
        ]
        self.model.partial_fit(X=new_sentences)  # train model with new sentences
        doc = ['graph', 'minors', 'survey', 'human', 'interface', 'world', 'peace']
        phrase_tokens = self.model.transform(doc)[0]
        expected_phrase_tokens = [u'graph_minors', u'survey', u'human_interface', u'world_peace']
        self.assertEqual(phrase_tokens, expected_phrase_tokens)

    def testSetGetParams(self):
        """set_params/get_params round-trip, and refitting propagates to `gensim_model`."""
        # updating only one param
        self.model.set_params(progress_per=5000)
        model_params = self.model.get_params()
        self.assertEqual(model_params["progress_per"], 5000)
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(phrases_sentences)
        self.assertEqual(getattr(self.model.gensim_model, 'progress_per'), 5000)

    def testPersistence(self):
        """A pickled-and-restored wrapper transforms exactly like the original."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        doc = phrases_sentences[-1]
        loaded_phrase_tokens = model_load.transform(doc)
        # comparing the original and loaded models
        original_phrase_tokens = self.model.transform(doc)
        self.assertEqual(original_phrase_tokens, loaded_phrase_tokens)

    def testModelNotFitted(self):
        """Calling transform() before fit() raises NotFittedError."""
        phrases_transformer = PhrasesTransformer()
        self.assertRaises(NotFittedError, phrases_transformer.transform, phrases_sentences[0])
# specifically test pluggable scoring in Phrases, because possible pickling issues with function parameter
# this is intentionally in main rather than a class method to support pickling
# all scores will be 1
def dumb_scorer(worda_count, wordb_count, bigram_count, len_vocab, min_count, corpus_word_count):
    """Pluggable Phrases scoring function that gives every candidate bigram a constant score of 1.

    Kept at module level (rather than inside a test class) so that models
    configured with it remain picklable.
    """
    return 1
class TestPhrasesTransformerCustomScorer(unittest.TestCase):
    """Same tests as TestPhrasesTransformer, but with the pluggable `dumb_scorer` scoring function.

    With every bigram scoring 1 against threshold .9, phrase detection is purely
    positional, hence the different expected token groupings below.
    """
    def setUp(self):
        # Fixed seed keeps phrase detection deterministic across runs.
        numpy.random.seed(0)
        self.model = PhrasesTransformer(min_count=1, threshold=.9, scoring=dumb_scorer)
        self.model.fit(phrases_sentences)
    def testTransform(self):
        # transform one document
        doc = phrases_sentences[-1]
        phrase_tokens = self.model.transform(doc)[0]
        expected_phrase_tokens = [u'graph_minors', u'survey_human', u'interface']
        self.assertEqual(phrase_tokens, expected_phrase_tokens)
    def testPartialFit(self):
        # Sentences unseen during fit(); 'world peace' should join as a phrase after the update.
        new_sentences = [
            ['world', 'peace', 'humans', 'world', 'peace', 'world', 'peace', 'people'],
            ['world', 'peace', 'people'],
            ['world', 'peace', 'humans']
        ]
        self.model.partial_fit(X=new_sentences)  # train model with new sentences
        doc = ['graph', 'minors', 'survey', 'human', 'interface', 'world', 'peace']
        phrase_tokens = self.model.transform(doc)[0]
        expected_phrase_tokens = [u'graph_minors', u'survey_human', u'interface', u'world_peace']
        self.assertEqual(phrase_tokens, expected_phrase_tokens)
    def testSetGetParams(self):
        # updating only one param
        self.model.set_params(progress_per=5000)
        model_params = self.model.get_params()
        self.assertEqual(model_params["progress_per"], 5000)
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(phrases_sentences)
        self.assertEqual(getattr(self.model.gensim_model, 'progress_per'), 5000)
    def testPersistence(self):
        # Must round-trip through pickle; this exercises pickling of the custom scorer too.
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        doc = phrases_sentences[-1]
        loaded_phrase_tokens = model_load.transform(doc)
        # comparing the original and loaded models
        original_phrase_tokens = self.model.transform(doc)
        self.assertEqual(original_phrase_tokens, loaded_phrase_tokens)
    def testModelNotFitted(self):
        # transform() before fit() must raise NotFittedError rather than fail obscurely.
        phrases_transformer = PhrasesTransformer()
        self.assertRaises(NotFittedError, phrases_transformer.transform, phrases_sentences[0])
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 60,933 | 49.027915 | 120 | py |
poincare_glove | poincare_glove-master/gensim/test/test_parsing.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Automated tests for the parsing module.
"""
import logging
import unittest
import numpy as np
from gensim.parsing.preprocessing import \
remove_stopwords, strip_punctuation2, strip_tags, strip_short, strip_numeric, strip_non_alphanum, \
strip_multiple_whitespaces, split_alphanum, stem_text
# several documents
# doc1-doc4: French-language verse used as a small test corpus for the
# preprocessing filters (they are lower-cased and punctuation-stripped below).
doc1 = """C'est un trou de verdure où chante une rivière,
Accrochant follement aux herbes des haillons
D'argent ; où le soleil, de la montagne fière,
Luit : c'est un petit val qui mousse de rayons."""
doc2 = """Un soldat jeune, bouche ouverte, tête nue,
Et la nuque baignant dans le frais cresson bleu,
Dort ; il est étendu dans l'herbe, sous la nue,
Pâle dans son lit vert où la lumière pleut."""
doc3 = """Les pieds dans les glaïeuls, il dort. Souriant comme
Sourirait un enfant malade, il fait un somme :
Nature, berce-le chaudement : il a froid."""
doc4 = """Les parfums ne font pas frissonner sa narine ;
Il dort dans le soleil, la main sur sa poitrine,
Tranquille. Il a deux trous rouges au côté droit."""
# doc5: an English paragraph, used by the stemming test.
doc5 = """While it is quite useful to be able to search a
large collection of documents almost instantly for a joint
occurrence of a collection of exact words,
for many searching purposes, a little fuzziness would help. """
dataset = [strip_punctuation2(x.lower()) for x in [doc1, doc2, doc3, doc4]]
# doc1 and doc2 have class 0, doc3 and doc4 have class 1
classes = np.array([[1, 0], [1, 0], [0, 1], [0, 1]])
class TestPreprocessing(unittest.TestCase):
    """Unit tests for the individual string-preprocessing filters."""
    def testStripNumeric(self):
        self.assertEqual(strip_numeric("salut les amis du 59"), "salut les amis du ")
    def testStripShort(self):
        # minsize=3 drops every token shorter than 3 characters
        self.assertEqual(strip_short("salut les amis du 59", 3), "salut les amis")
    def testStripTags(self):
        self.assertEqual(strip_tags("<i>Hello</i> <b>World</b>!"), "Hello World!")
    def testStripMultipleWhitespaces(self):
        # runs of whitespace (incl. CR/LF) collapse to a single space
        self.assertEqual(strip_multiple_whitespaces("salut les\r\nloulous!"), "salut les loulous!")
    def testStripNonAlphanum(self):
        self.assertEqual(strip_non_alphanum("toto nf-kappa titi"), "toto nf kappa titi")
    def testSplitAlphanum(self):
        # a space is inserted at every letter<->digit boundary, in either order
        self.assertEqual(split_alphanum("toto diet1 titi"), "toto diet 1 titi")
        self.assertEqual(split_alphanum("toto 1diet titi"), "toto 1 diet titi")
    def testStripStopwords(self):
        self.assertEqual(remove_stopwords("the world is square"), "world square")
    def testStemText(self):
        # expected stemmed form of doc5
        target = \
            "while it is quit us to be abl to search a larg " + \
            "collect of document almost instantli for a joint occurr " + \
            "of a collect of exact words, for mani search purposes, " + \
            "a littl fuzzi would help."
        self.assertEqual(stem_text(doc5), target)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    unittest.main()
| 2,925 | 34.682927 | 103 | py |
poincare_glove | poincare_glove-master/gensim/test/test_probability_estimation.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for probability estimation algorithms in the probability_estimation module.
"""
import logging
import unittest
from gensim.corpora.dictionary import Dictionary
from gensim.corpora.hashdictionary import HashDictionary
from gensim.topic_coherence import probability_estimation
class BaseTestCases(object):
    """Namespace wrapper: nesting the abstract base class inside a plain object
    keeps unittest discovery from collecting and running it on its own; only the
    concrete subclasses defined below are executed."""
    class ProbabilityEstimationBase(unittest.TestCase):
        """Shared fixtures and tests for probability estimation.

        Subclasses must implement :meth:`setup_dictionary` to assign
        ``self.dictionary`` before the shared tests run.
        """
        texts = [
            ['human', 'interface', 'computer'],
            ['eps', 'user', 'interface', 'system'],
            ['system', 'human', 'system', 'eps'],
            ['user', 'response', 'time'],
            ['trees'],
            ['graph', 'trees']
        ]
        # populated by subclasses in setup_dictionary()
        dictionary = None
        def build_segmented_topics(self):
            # Suppose the segmented topics from s_one_pre are:
            token2id = self.dictionary.token2id
            computer_id = token2id['computer']
            system_id = token2id['system']
            user_id = token2id['user']
            graph_id = token2id['graph']
            self.segmented_topics = [
                [
                    (system_id, graph_id),
                    (computer_id, graph_id),
                    (computer_id, system_id)
                ], [
                    (computer_id, graph_id),
                    (user_id, graph_id),
                    (user_id, computer_id)
                ]
            ]
            # keep the ids around so the tests below can index the accumulators
            self.computer_id = computer_id
            self.system_id = system_id
            self.user_id = user_id
            self.graph_id = graph_id
        def setup_dictionary(self):
            # Concrete subclasses must assign self.dictionary here.
            raise NotImplementedError
        def setUp(self):
            self.setup_dictionary()
            self.corpus = [self.dictionary.doc2bow(text) for text in self.texts]
            self.build_segmented_topics()
        def testPBooleanDocument(self):
            """Test p_boolean_document()"""
            accumulator = probability_estimation.p_boolean_document(
                self.corpus, self.segmented_topics)
            obtained = accumulator.index_to_dict()
            # expected mapping: token id -> set of indices of documents containing the token
            expected = {
                self.graph_id: {5},
                self.user_id: {1, 3},
                self.system_id: {1, 2},
                self.computer_id: {0}
            }
            self.assertEqual(expected, obtained)
        def testPBooleanSlidingWindow(self):
            """Test p_boolean_sliding_window()"""
            # Test with window size as 2. window_id is zero indexed.
            accumulator = probability_estimation.p_boolean_sliding_window(
                self.texts, self.segmented_topics, self.dictionary, 2)
            # counts are the number of sliding windows each token appears in
            self.assertEqual(1, accumulator[self.computer_id])
            self.assertEqual(3, accumulator[self.user_id])
            self.assertEqual(1, accumulator[self.graph_id])
            self.assertEqual(4, accumulator[self.system_id])
class TestProbabilityEstimation(BaseTestCases.ProbabilityEstimationBase):
    """Run the shared probability-estimation tests with a HashDictionary."""
    def setup_dictionary(self):
        self.dictionary = HashDictionary(self.texts)
class TestProbabilityEstimationWithNormalDictionary(BaseTestCases.ProbabilityEstimationBase):
    """Run the shared probability-estimation tests with a plain Dictionary."""
    def setup_dictionary(self):
        self.dictionary = Dictionary(self.texts)
        # build the id -> token reverse map by hand from token2id
        self.dictionary.id2token = {v: k for k, v in self.dictionary.token2id.items()}
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    logging.root.setLevel(logging.WARNING)
    unittest.main()
| 3,547 | 33.784314 | 93 | py |
poincare_glove | poincare_glove-master/gensim/test/test_fasttext.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import unittest
import os
import struct
import numpy as np
from gensim import utils
from gensim.models.word2vec import LineSentence
from gensim.models.fasttext import FastText as FT_gensim
from gensim.models.wrappers.fasttext import FastTextKeyedVectors
from gensim.models.wrappers.fasttext import FastText as FT_wrapper
from gensim.models.keyedvectors import Word2VecKeyedVectors
from gensim.test.utils import datapath, get_tmpfile, common_texts as sentences
logger = logging.getLogger(__name__)
IS_WIN32 = (os.name == "nt") and (struct.calcsize('P') * 8 == 32)
class LeeCorpus(object):
    """Iterable over the Lee background corpus: yields one preprocessed token list per line."""

    def __iter__(self):
        with open(datapath('lee_background.cor')) as corpus_file:
            for document in corpus_file:
                yield utils.simple_preprocess(document)
# Materialize the Lee corpus once so individual tests can reuse it cheaply.
list_corpus = list(LeeCorpus())
# Extra sentences (overlapping the base vocabulary) for the online-learning tests.
new_sentences = [
    ['computer', 'artificial', 'intelligence'],
    ['artificial', 'trees'],
    ['human', 'intelligence'],
    ['artificial', 'graph'],
    ['intelligence'],
    ['artificial', 'intelligence', 'system']
]
class TestFastTextModel(unittest.TestCase):
    """End-to-end tests for gensim's native FastText implementation (training,
    persistence, loading Facebook-format binaries, similarity queries, and
    comparisons against the external fastText wrapper)."""
    def setUp(self):
        # FT_HOME optionally points at a compiled fastText binary, used only by
        # the *_against_wrapper tests; those skip themselves when it is unset.
        ft_home = os.environ.get('FT_HOME', None)
        self.ft_path = os.path.join(ft_home, 'fasttext') if ft_home else None
        self.test_model_file = datapath('lee_fasttext')
        self.test_model = FT_gensim.load_fasttext_format(self.test_model_file)
        self.test_new_model_file = datapath('lee_fasttext_new')
    def test_training(self):
        """Training populates all weight matrices, and one-step construction matches build+train."""
        model = FT_gensim(size=10, min_count=1, hs=1, negative=0, seed=42, workers=1)
        model.build_vocab(sentences)
        self.model_sanity(model)
        model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
        sims = model.most_similar('graph', topn=10)
        self.assertEqual(model.wv.syn0.shape, (12, 10))
        self.assertEqual(len(model.wv.vocab), 12)
        self.assertEqual(model.wv.syn0_vocab.shape[1], 10)
        self.assertEqual(model.wv.syn0_ngrams.shape[1], 10)
        self.model_sanity(model)
        # test querying for "most similar" by vector
        graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
        sims2 = model.most_similar(positive=[graph_vector], topn=11)
        sims2 = [(w, sim) for w, sim in sims2 if w != 'graph']  # ignore 'graph' itself
        self.assertEqual(sims, sims2)
        # build vocab and train in one step; must be the same as above
        model2 = FT_gensim(sentences, size=10, min_count=1, hs=1, negative=0, seed=42, workers=1)
        self.models_equal(model, model2)
        # verify oov-word vector retrieval
        invocab_vec = model['minors']  # invocab word
        self.assertEqual(len(invocab_vec), 10)
        oov_vec = model['minor']  # oov word
        self.assertEqual(len(oov_vec), 10)
    def models_equal(self, model, model2):
        # Helper: assert two models have identical vocabularies and weight matrices.
        self.assertEqual(len(model.wv.vocab), len(model2.wv.vocab))
        self.assertEqual(model.num_ngram_vectors, model2.num_ngram_vectors)
        self.assertTrue(np.allclose(model.wv.syn0_vocab, model2.wv.syn0_vocab))
        self.assertTrue(np.allclose(model.wv.syn0_ngrams, model2.wv.syn0_ngrams))
        self.assertTrue(np.allclose(model.wv.syn0, model2.wv.syn0))
        if model.hs:
            self.assertTrue(np.allclose(model.syn1, model2.syn1))
        if model.negative:
            self.assertTrue(np.allclose(model.syn1neg, model2.syn1neg))
        most_common_word = max(model.wv.vocab.items(), key=lambda item: item[1].count)[0]
        self.assertTrue(np.allclose(model[most_common_word], model2[most_common_word]))
    @unittest.skipIf(IS_WIN32, "avoid memory error with Appveyor x32")
    def test_persistence(self):
        """Model and its KeyedVectors survive a save/load round trip."""
        tmpf = get_tmpfile('gensim_fasttext.tst')
        model = FT_gensim(sentences, min_count=1)
        model.save(tmpf)
        self.models_equal(model, FT_gensim.load(tmpf))
        # test persistence of the KeyedVectors of a model
        wv = model.wv
        wv.save(tmpf)
        loaded_wv = FastTextKeyedVectors.load(tmpf)
        self.assertTrue(np.allclose(wv.syn0_ngrams, loaded_wv.syn0_ngrams))
        self.assertEqual(len(wv.vocab), len(loaded_wv.vocab))
    @unittest.skipIf(IS_WIN32, "avoid memory error with Appveyor x32")
    def test_norm_vectors_not_saved(self):
        """L2-normalized matrices are derived data and must not be persisted."""
        tmpf = get_tmpfile('gensim_fasttext.tst')
        model = FT_gensim(sentences, min_count=1)
        model.init_sims()
        model.save(tmpf)
        loaded_model = FT_gensim.load(tmpf)
        self.assertTrue(loaded_model.wv.syn0norm is None)
        self.assertTrue(loaded_model.wv.syn0_ngrams_norm is None)
        wv = model.wv
        wv.save(tmpf)
        loaded_kv = FastTextKeyedVectors.load(tmpf)
        self.assertTrue(loaded_kv.syn0norm is None)
        self.assertTrue(loaded_kv.syn0_ngrams_norm is None)
    def model_sanity(self, model):
        # Helper: weight-matrix shapes must be consistent with vocab size and vector size.
        self.assertEqual(model.wv.syn0.shape, (len(model.wv.vocab), model.vector_size))
        self.assertEqual(model.wv.syn0_vocab.shape, (len(model.wv.vocab), model.vector_size))
        self.assertEqual(model.wv.syn0_ngrams.shape, (model.num_ngram_vectors, model.vector_size))
    def test_load_fasttext_format(self):
        """Loading an (old-format) Facebook fastText binary reproduces its vectors and hyperparameters."""
        try:
            model = FT_gensim.load_fasttext_format(self.test_model_file)
        except Exception as exc:
            self.fail('Unable to load FastText model from file %s: %s' % (self.test_model_file, exc))
        vocab_size, model_size = 1762, 10
        self.assertEqual(model.wv.syn0.shape, (vocab_size, model_size))
        # NOTE(review): the third argument here is assertEqual's `msg`, not a second check.
        self.assertEqual(len(model.wv.vocab), vocab_size, model_size)
        self.assertEqual(model.wv.syn0_ngrams.shape, (model.num_ngram_vectors, model_size))
        expected_vec = [
            -0.57144,
            -0.0085561,
            0.15748,
            -0.67855,
            -0.25459,
            -0.58077,
            -0.09913,
            1.1447,
            0.23418,
            0.060007
        ]  # obtained using ./fasttext print-word-vectors lee_fasttext_new.bin
        self.assertTrue(np.allclose(model["hundred"], expected_vec, atol=1e-4))
        # vector for oov words are slightly different from original FastText due to discarding unused ngrams
        # obtained using a modified version of ./fasttext print-word-vectors lee_fasttext_new.bin
        expected_vec_oov = [
            -0.21929,
            -0.53778,
            -0.22463,
            -0.41735,
            0.71737,
            -1.59758,
            -0.24833,
            0.62028,
            0.53203,
            0.77568
        ]
        self.assertTrue(np.allclose(model["rejection"], expected_vec_oov, atol=1e-4))
        # hyperparameters must round-trip from the binary header
        self.assertEqual(model.min_count, 5)
        self.assertEqual(model.window, 5)
        self.assertEqual(model.iter, 5)
        self.assertEqual(model.negative, 5)
        self.assertEqual(model.sample, 0.0001)
        self.assertEqual(model.bucket, 1000)
        self.assertEqual(model.wv.max_n, 6)
        self.assertEqual(model.wv.min_n, 3)
        self.assertEqual(model.wv.syn0.shape, (len(model.wv.vocab), model.vector_size))
        self.assertEqual(model.wv.syn0_ngrams.shape, (model.num_ngram_vectors, model.vector_size))
    def test_load_fasttext_new_format(self):
        """Same as test_load_fasttext_format, but for the newer Facebook binary layout."""
        try:
            new_model = FT_gensim.load_fasttext_format(self.test_new_model_file)
        except Exception as exc:
            self.fail('Unable to load FastText model from file %s: %s' % (self.test_new_model_file, exc))
        vocab_size, model_size = 1763, 10
        self.assertEqual(new_model.wv.syn0.shape, (vocab_size, model_size))
        # NOTE(review): the third argument here is assertEqual's `msg`, not a second check.
        self.assertEqual(len(new_model.wv.vocab), vocab_size, model_size)
        self.assertEqual(new_model.wv.syn0_ngrams.shape, (new_model.num_ngram_vectors, model_size))
        expected_vec = [
            -0.025627,
            -0.11448,
            0.18116,
            -0.96779,
            0.2532,
            -0.93224,
            0.3929,
            0.12679,
            -0.19685,
            -0.13179
        ]  # obtained using ./fasttext print-word-vectors lee_fasttext_new.bin
        self.assertTrue(np.allclose(new_model["hundred"], expected_vec, atol=1e-4))
        # vector for oov words are slightly different from original FastText due to discarding unused ngrams
        # obtained using a modified version of ./fasttext print-word-vectors lee_fasttext_new.bin
        expected_vec_oov = [
            -0.49111,
            -0.13122,
            -0.02109,
            -0.88769,
            -0.20105,
            -0.91732,
            0.47243,
            0.19708,
            -0.17856,
            0.19815
        ]
        self.assertTrue(np.allclose(new_model["rejection"], expected_vec_oov, atol=1e-4))
        # hyperparameters must round-trip from the binary header
        self.assertEqual(new_model.min_count, 5)
        self.assertEqual(new_model.window, 5)
        self.assertEqual(new_model.iter, 5)
        self.assertEqual(new_model.negative, 5)
        self.assertEqual(new_model.sample, 0.0001)
        self.assertEqual(new_model.bucket, 1000)
        self.assertEqual(new_model.wv.max_n, 6)
        self.assertEqual(new_model.wv.min_n, 3)
        self.assertEqual(new_model.wv.syn0.shape, (len(new_model.wv.vocab), new_model.vector_size))
        self.assertEqual(new_model.wv.syn0_ngrams.shape, (new_model.num_ngram_vectors, new_model.vector_size))
    def test_load_model_supervised(self):
        # Supervised (classification) fastText models are explicitly unsupported.
        with self.assertRaises(NotImplementedError):
            FT_gensim.load_fasttext_format(datapath('pang_lee_polarity_fasttext'))
    def test_load_model_with_non_ascii_vocab(self):
        # UTF-8-encoded non-ASCII vocabulary entries must be readable.
        model = FT_gensim.load_fasttext_format(datapath('non_ascii_fasttext'))
        self.assertTrue(u'který' in model)
        try:
            model[u'který']
        except UnicodeDecodeError:
            self.fail('Unable to access vector for utf8 encoded non-ascii word')
    def test_load_model_non_utf8_encoding(self):
        # Binaries in legacy encodings load when the encoding is given explicitly.
        model = FT_gensim.load_fasttext_format(datapath('cp852_fasttext'), encoding='cp852')
        self.assertTrue(u'který' in model)
        try:
            model[u'který']
        except KeyError:
            self.fail('Unable to access vector for cp-852 word')
    def test_n_similarity(self):
        """n_similarity is symmetric and equals 1.0 for identical word sets, incl. OOV words."""
        # In vocab, sanity check
        self.assertTrue(np.allclose(self.test_model.n_similarity(['the', 'and'], ['and', 'the']), 1.0))
        self.assertEqual(self.test_model.n_similarity(['the'], ['and']), self.test_model.n_similarity(['and'], ['the']))
        # Out of vocab check
        self.assertTrue(np.allclose(self.test_model.n_similarity(['night', 'nights'], ['nights', 'night']), 1.0))
        self.assertEqual(
            self.test_model.n_similarity(['night'], ['nights']), self.test_model.n_similarity(['nights'], ['night'])
        )
    def test_similarity(self):
        """similarity is symmetric and equals 1.0 for a word with itself, incl. OOV words."""
        # In vocab, sanity check
        self.assertTrue(np.allclose(self.test_model.similarity('the', 'the'), 1.0))
        self.assertEqual(self.test_model.similarity('the', 'and'), self.test_model.similarity('and', 'the'))
        # Out of vocab check
        self.assertTrue(np.allclose(self.test_model.similarity('nights', 'nights'), 1.0))
        self.assertEqual(self.test_model.similarity('night', 'nights'), self.test_model.similarity('nights', 'night'))
    def test_most_similar(self):
        """most_similar honors topn and accepts both a bare word and a positive list, incl. OOV."""
        # In vocab, sanity check
        self.assertEqual(len(self.test_model.most_similar(positive=['the', 'and'], topn=5)), 5)
        self.assertEqual(self.test_model.most_similar('the'), self.test_model.most_similar(positive=['the']))
        # Out of vocab check
        self.assertEqual(len(self.test_model.most_similar(['night', 'nights'], topn=5)), 5)
        self.assertEqual(self.test_model.most_similar('nights'), self.test_model.most_similar(positive=['nights']))
    def test_most_similar_cosmul(self):
        """Same contract as test_most_similar, for the multiplicative-combination variant."""
        # In vocab, sanity check
        self.assertEqual(len(self.test_model.most_similar_cosmul(positive=['the', 'and'], topn=5)), 5)
        self.assertEqual(
            self.test_model.most_similar_cosmul('the'),
            self.test_model.most_similar_cosmul(positive=['the']))
        # Out of vocab check
        self.assertEqual(len(self.test_model.most_similar_cosmul(['night', 'nights'], topn=5)), 5)
        self.assertEqual(
            self.test_model.most_similar_cosmul('nights'),
            self.test_model.most_similar_cosmul(positive=['nights']))
    def test_lookup(self):
        """Indexing with a word and a one-element list must give the same vector, incl. OOV."""
        # In vocab, sanity check
        self.assertTrue('night' in self.test_model.wv.vocab)
        self.assertTrue(np.allclose(self.test_model['night'], self.test_model[['night']]))
        # Out of vocab check
        self.assertFalse('nights' in self.test_model.wv.vocab)
        self.assertTrue(np.allclose(self.test_model['nights'], self.test_model[['nights']]))
    def test_contains(self):
        """`in model` is True even for OOV words (coverable via ngrams), unlike `in model.wv.vocab`."""
        # In vocab, sanity check
        self.assertTrue('night' in self.test_model.wv.vocab)
        self.assertTrue('night' in self.test_model)
        # Out of vocab check
        self.assertFalse('nights' in self.test_model.wv.vocab)
        self.assertTrue('nights' in self.test_model)
    def test_wm_distance(self):
        # Word Mover's Distance must stay finite even when one document is all-OOV.
        doc = ['night', 'payment']
        oov_doc = ['nights', 'forests', 'payments']
        dist = self.test_model.wmdistance(doc, oov_doc)
        self.assertNotEqual(float('inf'), dist)
    def test_cbow_hs_training(self):
        """CBOW + hierarchical softmax training produces plausible neighbors for 'night'."""
        model_gensim = FT_gensim(
            size=50, sg=0, cbow_mean=1, alpha=0.05, window=5, hs=1, negative=0,
            min_count=5, iter=5, batch_words=1000, word_ngrams=1, sample=1e-3, min_n=3, max_n=6,
            sorted_vocab=1, workers=1, min_alpha=0.0)
        lee_data = LineSentence(datapath('lee_background.cor'))
        model_gensim.build_vocab(lee_data)
        orig0 = np.copy(model_gensim.wv.vectors[0])
        model_gensim.train(lee_data, total_examples=model_gensim.corpus_count, epochs=model_gensim.epochs)
        self.assertFalse((orig0 == model_gensim.wv.vectors[0]).all())  # vector should vary after training
        sims_gensim = model_gensim.wv.most_similar('night', topn=10)
        sims_gensim_words = [word for (word, distance) in sims_gensim]  # get similar words
        expected_sims_words = [
            u'night,',
            u'night.',
            u'rights',
            u'kilometres',
            u'in',
            u'eight',
            u'according',
            u'flights',
            u'during',
            u'comes']
        # loose overlap threshold: training is stochastic, demand only 2/10 common neighbors
        overlap_count = len(set(sims_gensim_words).intersection(expected_sims_words))
        self.assertGreaterEqual(overlap_count, 2)
    def test_sg_hs_training(self):
        """Skip-gram + hierarchical softmax training produces plausible neighbors for 'night'."""
        model_gensim = FT_gensim(
            size=50, sg=1, cbow_mean=1, alpha=0.025, window=5, hs=1, negative=0,
            min_count=5, iter=5, batch_words=1000, word_ngrams=1, sample=1e-3, min_n=3, max_n=6,
            sorted_vocab=1, workers=1, min_alpha=0.0)
        lee_data = LineSentence(datapath('lee_background.cor'))
        model_gensim.build_vocab(lee_data)
        orig0 = np.copy(model_gensim.wv.vectors[0])
        model_gensim.train(lee_data, total_examples=model_gensim.corpus_count, epochs=model_gensim.epochs)
        self.assertFalse((orig0 == model_gensim.wv.vectors[0]).all())  # vector should vary after training
        sims_gensim = model_gensim.wv.most_similar('night', topn=10)
        sims_gensim_words = [word for (word, distance) in sims_gensim]  # get similar words
        expected_sims_words = [
            u'night,',
            u'night.',
            u'eight',
            u'nine',
            u'overnight',
            u'crew',
            u'overnight.',
            u'manslaughter',
            u'north',
            u'flight']
        # loose overlap threshold: training is stochastic, demand only 2/10 common neighbors
        overlap_count = len(set(sims_gensim_words).intersection(expected_sims_words))
        self.assertGreaterEqual(overlap_count, 2)
    def test_cbow_neg_training(self):
        """CBOW + negative sampling training produces plausible neighbors for 'night'."""
        model_gensim = FT_gensim(
            size=50, sg=0, cbow_mean=1, alpha=0.05, window=5, hs=0, negative=5,
            min_count=5, iter=5, batch_words=1000, word_ngrams=1, sample=1e-3, min_n=3, max_n=6,
            sorted_vocab=1, workers=1, min_alpha=0.0)
        lee_data = LineSentence(datapath('lee_background.cor'))
        model_gensim.build_vocab(lee_data)
        orig0 = np.copy(model_gensim.wv.vectors[0])
        model_gensim.train(lee_data, total_examples=model_gensim.corpus_count, epochs=model_gensim.epochs)
        self.assertFalse((orig0 == model_gensim.wv.vectors[0]).all())  # vector should vary after training
        sims_gensim = model_gensim.wv.most_similar('night', topn=10)
        sims_gensim_words = [word for (word, distance) in sims_gensim]  # get similar words
        expected_sims_words = [
            u'night.',
            u'night,',
            u'eight',
            u'fight',
            u'month',
            u'hearings',
            u'Washington',
            u'remains',
            u'overnight',
            u'running']
        # loose overlap threshold: training is stochastic, demand only 2/10 common neighbors
        overlap_count = len(set(sims_gensim_words).intersection(expected_sims_words))
        self.assertGreaterEqual(overlap_count, 2)
    def test_sg_neg_training(self):
        """Skip-gram + negative sampling training produces plausible neighbors for 'night'."""
        model_gensim = FT_gensim(
            size=50, sg=1, cbow_mean=1, alpha=0.025, window=5, hs=0, negative=5,
            min_count=5, iter=5, batch_words=1000, word_ngrams=1, sample=1e-3, min_n=3, max_n=6,
            sorted_vocab=1, workers=1, min_alpha=0.0)
        lee_data = LineSentence(datapath('lee_background.cor'))
        model_gensim.build_vocab(lee_data)
        orig0 = np.copy(model_gensim.wv.vectors[0])
        model_gensim.train(lee_data, total_examples=model_gensim.corpus_count, epochs=model_gensim.epochs)
        self.assertFalse((orig0 == model_gensim.wv.vectors[0]).all())  # vector should vary after training
        sims_gensim = model_gensim.wv.most_similar('night', topn=10)
        sims_gensim_words = [word for (word, distance) in sims_gensim]  # get similar words
        expected_sims_words = [
            u'night.',
            u'night,',
            u'eight',
            u'overnight',
            u'overnight.',
            u'month',
            u'land',
            u'firm',
            u'singles',
            u'death']
        # loose overlap threshold: training is stochastic, demand only 2/10 common neighbors
        overlap_count = len(set(sims_gensim_words).intersection(expected_sims_words))
        self.assertGreaterEqual(overlap_count, 2)
    def test_online_learning(self):
        """build_vocab(update=True) grows the vocabulary and updates word counts."""
        model_hs = FT_gensim(sentences, size=10, min_count=1, seed=42, hs=1, negative=0)
        self.assertTrue(len(model_hs.wv.vocab), 12)
        self.assertTrue(model_hs.wv.vocab['graph'].count, 3)
        model_hs.build_vocab(new_sentences, update=True)  # update vocab
        self.assertEqual(len(model_hs.wv.vocab), 14)
        self.assertTrue(model_hs.wv.vocab['graph'].count, 4)
        self.assertTrue(model_hs.wv.vocab['artificial'].count, 4)
    def test_online_learning_after_save(self):
        """A saved/reloaded model can still be updated with new sentences and retrained."""
        tmpf = get_tmpfile('gensim_fasttext.tst')
        model_neg = FT_gensim(sentences, size=10, min_count=0, seed=42, hs=0, negative=5)
        model_neg.save(tmpf)
        model_neg = FT_gensim.load(tmpf)
        self.assertTrue(len(model_neg.wv.vocab), 12)
        model_neg.build_vocab(new_sentences, update=True)  # update vocab
        model_neg.train(new_sentences, total_examples=model_neg.corpus_count, epochs=model_neg.iter)
        self.assertEqual(len(model_neg.wv.vocab), 14)
    def online_sanity(self, model):
        # Helper: train on documents without 'terrorism', then add only the
        # 'terrorism' documents incrementally and check the new word is usable.
        terro, others = [], []
        for l in list_corpus:
            if 'terrorism' in l:
                terro.append(l)
            else:
                others.append(l)
        self.assertTrue(all(['terrorism' not in l for l in others]))
        model.build_vocab(others)
        model.train(others, total_examples=model.corpus_count, epochs=model.iter)
        # checks that `syn0` is different from `syn0_vocab`
        self.assertFalse(np.all(np.equal(model.wv.syn0, model.wv.syn0_vocab)))
        self.assertFalse('terrorism' in model.wv.vocab)
        model.build_vocab(terro, update=True)  # update vocab
        self.assertTrue(model.wv.syn0_ngrams.dtype == 'float32')
        self.assertTrue('terrorism' in model.wv.vocab)
        orig0_all = np.copy(model.wv.syn0_ngrams)
        model.train(terro, total_examples=len(terro), epochs=model.iter)
        # incremental training must actually move the ngram weights
        self.assertFalse(np.allclose(model.wv.syn0_ngrams, orig0_all))
        sim = model.n_similarity(['war'], ['terrorism'])
        self.assertLess(0., sim)
    @unittest.skipIf(IS_WIN32, "avoid memory error with Appveyor x32")
    def test_sg_hs_online(self):
        model = FT_gensim(sg=1, window=2, hs=1, negative=0, min_count=3, iter=1, seed=42, workers=1)
        self.online_sanity(model)
    @unittest.skipIf(IS_WIN32, "avoid memory error with Appveyor x32")
    def test_sg_neg_online(self):
        model = FT_gensim(sg=1, window=2, hs=0, negative=5, min_count=3, iter=1, seed=42, workers=1)
        self.online_sanity(model)
    @unittest.skipIf(IS_WIN32, "avoid memory error with Appveyor x32")
    def test_cbow_hs_online(self):
        model = FT_gensim(
            sg=0, cbow_mean=1, alpha=0.05, window=2, hs=1, negative=0, min_count=3, iter=1, seed=42, workers=1
        )
        self.online_sanity(model)
    @unittest.skipIf(IS_WIN32, "avoid memory error with Appveyor x32")
    def test_cbow_neg_online(self):
        model = FT_gensim(
            sg=0, cbow_mean=1, alpha=0.05, window=2, hs=0, negative=5,
            min_count=5, iter=1, seed=42, workers=1, sample=0
        )
        self.online_sanity(model)
    def test_get_vocab_word_vecs(self):
        # Recomputing vocab-word vectors on a freshly initialized model must be a no-op.
        model = FT_gensim(size=10, min_count=1, seed=42)
        model.build_vocab(sentences)
        original_syn0_vocab = np.copy(model.wv.syn0_vocab)
        model.trainables.get_vocab_word_vecs(model.wv)
        self.assertTrue(np.all(np.equal(model.wv.syn0_vocab, original_syn0_vocab)))
    def test_persistence_word2vec_format(self):
        """Test storing/loading the model in word2vec format."""
        tmpf = get_tmpfile('gensim_fasttext_w2v_format.tst')
        model = FT_gensim(sentences, min_count=1, size=10)
        model.wv.save_word2vec_format(tmpf, binary=True)
        loaded_model_kv = Word2VecKeyedVectors.load_word2vec_format(tmpf, binary=True)
        self.assertEqual(len(model.wv.vocab), len(loaded_model_kv.vocab))
        self.assertTrue(np.allclose(model['human'], loaded_model_kv['human']))
    def test_bucket_ngrams(self):
        # The ngram matrix is capped at `bucket` rows and stays capped after vocab updates.
        model = FT_gensim(size=10, min_count=1, bucket=20)
        model.build_vocab(sentences)
        self.assertEqual(model.wv.syn0_ngrams.shape, (20, 10))
        model.build_vocab(new_sentences, update=True)
        self.assertEqual(model.wv.syn0_ngrams.shape, (20, 10))
    def test_estimate_memory(self):
        # Memory estimates for a fixed corpus/hyperparameters are deterministic.
        model = FT_gensim(sg=1, hs=1, size=10, negative=5, min_count=3)
        model.build_vocab(sentences)
        report = model.estimate_memory()
        self.assertEqual(report['vocab'], 2800)
        self.assertEqual(report['syn0_vocab'], 160)
        self.assertEqual(report['syn1'], 160)
        self.assertEqual(report['syn1neg'], 160)
        self.assertEqual(report['syn0_ngrams'], 2240)
        self.assertEqual(report['buckets_word'], 640)
        self.assertEqual(report['total'], 6160)
    def testLoadOldModel(self):
        """Test loading fasttext models from previous version"""
        model_file = 'fasttext_old'
        model = FT_gensim.load(datapath(model_file))
        self.assertTrue(model.wv.vectors.shape == (12, 100))
        self.assertTrue(len(model.wv.vocab) == 12)
        self.assertTrue(len(model.wv.index2word) == 12)
        self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), model.vector_size))
        self.assertTrue(model.trainables.vectors_lockf.shape == (12, ))
        self.assertTrue(model.vocabulary.cum_table.shape == (12, ))
        self.assertEqual(len(model.wv.hash2index), 202)
        self.assertTrue(model.wv.vectors_vocab.shape == (12, 100))
        self.assertTrue(model.wv.vectors_ngrams.shape == (202, 100))
        # Model stored in multiple files
        model_file = 'fasttext_old_sep'
        model = FT_gensim.load(datapath(model_file))
        self.assertTrue(model.wv.vectors.shape == (12, 100))
        self.assertTrue(len(model.wv.vocab) == 12)
        self.assertTrue(len(model.wv.index2word) == 12)
        self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), model.vector_size))
        self.assertTrue(model.trainables.vectors_lockf.shape == (12, ))
        self.assertTrue(model.vocabulary.cum_table.shape == (12, ))
        self.assertEqual(len(model.wv.hash2index), 202)
        self.assertTrue(model.wv.vectors_vocab.shape == (12, 100))
        self.assertTrue(model.wv.vectors_ngrams.shape == (202, 100))
    def compare_with_wrapper(self, model_gensim, model_wrapper):
        # make sure we get >=2 overlapping words for top-10 similar words suggested for `night`
        sims_gensim = model_gensim.most_similar('night', topn=10)
        sims_gensim_words = (list(map(lambda x: x[0], sims_gensim)))  # get similar words
        sims_wrapper = model_wrapper.most_similar('night', topn=10)
        sims_wrapper_words = (list(map(lambda x: x[0], sims_wrapper)))  # get similar words
        overlap_count = len(set(sims_gensim_words).intersection(sims_wrapper_words))
        # overlap increases as we increase `iter` value, min overlap set to 2 to avoid unit-tests taking too long
        # this limit can be increased when using Cython code
        self.assertGreaterEqual(overlap_count, 2)
    def test_cbow_hs_against_wrapper(self):
        """Gensim CBOW+HS training roughly agrees with the external fastText binary (needs FT_HOME)."""
        if self.ft_path is None:
            logger.info("FT_HOME env variable not set, skipping test")
            return
        tmpf = get_tmpfile('gensim_fasttext.tst')
        model_wrapper = FT_wrapper.train(ft_path=self.ft_path, corpus_file=datapath('lee_background.cor'),
                                         output_file=tmpf, model='cbow', size=50, alpha=0.05, window=5, min_count=5,
                                         word_ngrams=1,
                                         loss='hs', sample=1e-3, negative=0, iter=5, min_n=3, max_n=6, sorted_vocab=1,
                                         threads=12)
        model_gensim = FT_gensim(size=50, sg=0, cbow_mean=1, alpha=0.05, window=5, hs=1, negative=0,
                                 min_count=5, iter=5, batch_words=1000, word_ngrams=1, sample=1e-3, min_n=3, max_n=6,
                                 sorted_vocab=1, workers=1, min_alpha=0.0)
        lee_data = LineSentence(datapath('lee_background.cor'))
        model_gensim.build_vocab(lee_data)
        orig0 = np.copy(model_gensim.wv.syn0[0])
        model_gensim.train(lee_data, total_examples=model_gensim.corpus_count, epochs=model_gensim.iter)
        self.assertFalse((orig0 == model_gensim.wv.syn0[0]).all())  # vector should vary after training
        self.compare_with_wrapper(model_gensim, model_wrapper)
    def test_sg_hs_against_wrapper(self):
        """Gensim skip-gram+HS training roughly agrees with the external fastText binary (needs FT_HOME)."""
        if self.ft_path is None:
            logger.info("FT_HOME env variable not set, skipping test")
            return
        tmpf = get_tmpfile('gensim_fasttext.tst')
        model_wrapper = FT_wrapper.train(ft_path=self.ft_path, corpus_file=datapath('lee_background.cor'),
                                         output_file=tmpf, model='skipgram', size=50, alpha=0.025, window=5,
                                         min_count=5, word_ngrams=1,
                                         loss='hs', sample=1e-3, negative=0, iter=5, min_n=3, max_n=6, sorted_vocab=1,
                                         threads=12)
        model_gensim = FT_gensim(size=50, sg=1, cbow_mean=1, alpha=0.025, window=5, hs=1, negative=0,
                                 min_count=5, iter=5, batch_words=1000, word_ngrams=1, sample=1e-3, min_n=3, max_n=6,
                                 sorted_vocab=1, workers=1, min_alpha=0.0)
        lee_data = LineSentence(datapath('lee_background.cor'))
        model_gensim.build_vocab(lee_data)
        orig0 = np.copy(model_gensim.wv.syn0[0])
        model_gensim.train(lee_data, total_examples=model_gensim.corpus_count, epochs=model_gensim.iter)
        self.assertFalse((orig0 == model_gensim.wv.syn0[0]).all())  # vector should vary after training
        self.compare_with_wrapper(model_gensim, model_wrapper)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 28,353 | 44.439103 | 120 | py |
poincare_glove | poincare_glove-master/gensim/test/test_BM25.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
from gensim.summarization.bm25 import get_bm25_weights
from gensim.test.utils import common_texts
class TestBM25(unittest.TestCase):
    """Sanity checks on the pairwise BM25 weight matrix returned by get_bm25_weights()."""
    def test_max_match_with_itself(self):
        """ Document should show maximum matching with itself """
        weights = get_bm25_weights(common_texts)
        for index, doc_weights in enumerate(weights):
            # diagonal entry (self-match) must be the row maximum
            expected = max(doc_weights)
            predicted = doc_weights[index]
            self.assertAlmostEqual(expected, predicted)
    def test_nonnegative_weights(self):
        """ All the weights for a particular document should be non negative """
        weights = get_bm25_weights(common_texts)
        for doc_weights in weights:
            for weight in doc_weights:
                self.assertTrue(weight >= 0.)
    def test_same_match_with_same_document(self):
        """ A document should always get the same weight when matched with a particular document """
        # documents 1 and 2 are identical, so row 0 must score them equally
        corpus = [['cat', 'dog', 'mouse'], ['cat', 'lion'], ['cat', 'lion']]
        weights = get_bm25_weights(corpus)
        self.assertAlmostEqual(weights[0][1], weights[0][2])
    def test_disjoint_docs_if_weight_zero(self):
        """ Two disjoint documents should have zero matching"""
        corpus = [['cat', 'dog', 'lion'], ['goat', 'fish', 'tiger']]
        weights = get_bm25_weights(corpus)
        self.assertAlmostEqual(weights[0][1], 0)
        self.assertAlmostEqual(weights[1][0], 0)
if __name__ == '__main__':
    # Allow running this test module directly.
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
| 1,829 | 34.882353 | 100 | py |
poincare_glove | poincare_glove-master/gensim/test/test_word2vec.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import os
import bz2
import sys
import numpy as np
from gensim import utils
from gensim.models import word2vec, keyedvectors
from gensim.test.utils import datapath, get_tmpfile, common_texts as sentences
from testfixtures import log_capture
# Optional dependency: tests needing Earth Mover's Distance require pyemd.
# PYEMD_EXT records whether it is importable so those tests can be skipped.
try:
    from pyemd import emd  # noqa:F401
    PYEMD_EXT = True
except ImportError:
    PYEMD_EXT = False
class LeeCorpus(object):
    """Iterable over the Lee background corpus, one preprocessed sentence
    (list of unicode tokens) per line of the source file."""

    def __iter__(self):
        with open(datapath('lee_background.cor')) as corpus_file:
            for raw_line in corpus_file:
                yield utils.simple_preprocess(raw_line)
# Materialized copy of the Lee corpus so tests can iterate it multiple times.
list_corpus = list(LeeCorpus())
# Extra sentences used by the online-learning (vocabulary update) tests.
new_sentences = [
    ['computer', 'artificial', 'intelligence'],
    ['artificial', 'trees'],
    ['human', 'intelligence'],
    ['artificial', 'graph'],
    ['intelligence'],
    ['artificial', 'intelligence', 'system']
]
def _rule(word, count, min_count):
    """Vocabulary trim rule: always discard 'human', defer to the default
    min_count rule for every other word."""
    return utils.RULE_DISCARD if word == "human" else utils.RULE_DEFAULT
def load_on_instance():
    """Save a trained model, then call load() on a fresh *instance*.

    Word2Vec.load is expected to be called on the class; testLoadOnClassError
    asserts that this helper raises AttributeError.
    """
    # Save and load a Word2Vec Model on instance for test
    tmpf = get_tmpfile('gensim_word2vec.tst')
    model = word2vec.Word2Vec(sentences, min_count=1)
    model.save(tmpf)
    model = word2vec.Word2Vec()  # should fail at this point
    return model.load(tmpf)
class TestWord2VecModel(unittest.TestCase):
def testBuildVocabFromFreq(self):
    """Test that the algorithm is able to build vocabulary from given
    frequency table, for both hierarchical-softmax and negative-sampling
    models, and to extend it with an update."""
    freq_dict = {
        'minors': 2, 'graph': 3, 'system': 4,
        'trees': 3, 'eps': 2, 'computer': 2,
        'survey': 2, 'user': 3, 'human': 2,
        'time': 2, 'interface': 2, 'response': 2
    }
    model_hs = word2vec.Word2Vec(size=10, min_count=0, seed=42, hs=1, negative=0)
    model_neg = word2vec.Word2Vec(size=10, min_count=0, seed=42, hs=0, negative=5)
    model_hs.build_vocab_from_freq(freq_dict)
    model_neg.build_vocab_from_freq(freq_dict)
    self.assertEqual(len(model_hs.wv.vocab), 12)
    self.assertEqual(len(model_neg.wv.vocab), 12)
    # Every word must carry exactly the count supplied in the frequency table
    # (replaces 24 copy-pasted per-word assertions with a single loop).
    for word, expected_count in freq_dict.items():
        self.assertEqual(model_hs.wv.vocab[word].count, expected_count)
        self.assertEqual(model_neg.wv.vocab[word].count, expected_count)
    new_freq_dict = {
        'computer': 1, 'artificial': 4, 'human': 1, 'graph': 1, 'intelligence': 4, 'system': 1, 'trees': 1
    }
    # Online update: counts of existing words accumulate, new words are added.
    model_hs.build_vocab_from_freq(new_freq_dict, update=True)
    model_neg.build_vocab_from_freq(new_freq_dict, update=True)
    self.assertEqual(model_hs.wv.vocab['graph'].count, 4)
    self.assertEqual(model_hs.wv.vocab['artificial'].count, 4)
    self.assertEqual(len(model_hs.wv.vocab), 14)
    self.assertEqual(len(model_neg.wv.vocab), 14)
def testPruneVocab(self):
"""Test Prune vocab while scanning sentences"""
sentences = [
["graph", "system"],
["graph", "system"],
["system", "eps"],
["graph", "system"]
]
model = word2vec.Word2Vec(sentences, size=10, min_count=0, max_vocab_size=2, seed=42, hs=1, negative=0)
self.assertEqual(len(model.wv.vocab), 2)
self.assertEqual(model.wv.vocab['graph'].count, 3)
self.assertEqual(model.wv.vocab['system'].count, 4)
sentences = [
["graph", "system"],
["graph", "system"],
["system", "eps"],
["graph", "system"],
["minors", "survey", "minors", "survey", "minors"]
]
model = word2vec.Word2Vec(sentences, size=10, min_count=0, max_vocab_size=2, seed=42, hs=1, negative=0)
self.assertEqual(len(model.wv.vocab), 3)
self.assertEqual(model.wv.vocab['graph'].count, 3)
self.assertEqual(model.wv.vocab['minors'].count, 3)
self.assertEqual(model.wv.vocab['system'].count, 4)
def testTotalWordCount(self):
    """scan_vocab must report the total token count of the training corpus."""
    model = word2vec.Word2Vec(size=10, min_count=0, seed=42)
    word_total = model.vocabulary.scan_vocab(sentences)[0]
    self.assertEqual(word_total, 29)
def testOnlineLearning(self):
    """Test that the algorithm is able to add new words to the
    vocabulary and to a trained model when using a sorted vocabulary"""
    model_hs = word2vec.Word2Vec(sentences, size=10, min_count=0, seed=42, hs=1, negative=0)
    model_neg = word2vec.Word2Vec(sentences, size=10, min_count=0, seed=42, hs=0, negative=5)
    # assertEqual, not assertTrue: the original assertTrue(x, 12) treated the
    # expected value as unittest's failure *message*, so it could never fail.
    self.assertEqual(len(model_hs.wv.vocab), 12)
    self.assertEqual(model_hs.wv.vocab['graph'].count, 3)
    model_hs.build_vocab(new_sentences, update=True)
    model_neg.build_vocab(new_sentences, update=True)
    # After the online update, counts accumulate and new words are added.
    self.assertEqual(model_hs.wv.vocab['graph'].count, 4)
    self.assertEqual(model_hs.wv.vocab['artificial'].count, 4)
    self.assertEqual(len(model_hs.wv.vocab), 14)
    self.assertEqual(len(model_neg.wv.vocab), 14)
def testOnlineLearningAfterSave(self):
    """Test that the algorithm is able to add new words to the
    vocabulary and to a trained model (after a save/load round-trip)
    when using a sorted vocabulary"""
    tmpf = get_tmpfile('gensim_word2vec.tst')
    model_neg = word2vec.Word2Vec(sentences, size=10, min_count=0, seed=42, hs=0, negative=5)
    model_neg.save(tmpf)
    model_neg = word2vec.Word2Vec.load(tmpf)
    # assertEqual, not assertTrue(x, 12): the latter used 12 as the failure
    # message and never actually compared anything.
    self.assertEqual(len(model_neg.wv.vocab), 12)
    model_neg.build_vocab(new_sentences, update=True)
    model_neg.train(new_sentences, total_examples=model_neg.corpus_count, epochs=model_neg.iter)
    self.assertEqual(len(model_neg.wv.vocab), 14)
def onlineSanity(self, model):
    """Train on sentences without 'terrorism', then add the held-out
    sentences online and verify the model picks up the new word."""
    terro, others = [], []
    for sentence in list_corpus:
        if 'terrorism' in sentence:
            terro.append(sentence)
        else:
            others.append(sentence)
    # Generator expression: no need to materialize an intermediate list.
    self.assertTrue(all('terrorism' not in sentence for sentence in others))
    model.build_vocab(others)
    model.train(others, total_examples=model.corpus_count, epochs=model.iter)
    self.assertFalse('terrorism' in model.wv.vocab)
    # Online vocab update with the held-out sentences.
    model.build_vocab(terro, update=True)
    self.assertTrue('terrorism' in model.wv.vocab)
    orig0 = np.copy(model.wv.syn0)
    model.train(terro, total_examples=len(terro), epochs=model.iter)
    # Vectors must change after the additional training pass.
    self.assertFalse(np.allclose(model.wv.syn0, orig0))
    sim = model.n_similarity(['war'], ['terrorism'])
    self.assertLess(0., sim)
def test_sg_hs_online(self):
"""Test skipgram w/ hierarchical softmax"""
model = word2vec.Word2Vec(sg=1, window=5, hs=1, negative=0, min_count=3, iter=10, seed=42, workers=2)
self.onlineSanity(model)
def test_sg_neg_online(self):
"""Test skipgram w/ negative sampling"""
model = word2vec.Word2Vec(sg=1, window=4, hs=0, negative=15, min_count=3, iter=10, seed=42, workers=2)
self.onlineSanity(model)
def test_cbow_hs_online(self):
"""Test CBOW w/ hierarchical softmax"""
model = word2vec.Word2Vec(
sg=0, cbow_mean=1, alpha=0.05, window=5, hs=1, negative=0,
min_count=3, iter=10, seed=42, workers=2
)
self.onlineSanity(model)
def test_cbow_neg_online(self):
"""Test CBOW w/ negative sampling"""
model = word2vec.Word2Vec(
sg=0, cbow_mean=1, alpha=0.05, window=5, hs=0, negative=15,
min_count=5, iter=10, seed=42, workers=2, sample=0
)
self.onlineSanity(model)
def testPersistence(self):
"""Test storing/loading the entire model."""
tmpf = get_tmpfile('gensim_word2vec.tst')
model = word2vec.Word2Vec(sentences, min_count=1)
model.save(tmpf)
self.models_equal(model, word2vec.Word2Vec.load(tmpf))
# test persistence of the KeyedVectors of a model
wv = model.wv
wv.save(tmpf)
loaded_wv = keyedvectors.KeyedVectors.load(tmpf)
self.assertTrue(np.allclose(wv.syn0, loaded_wv.syn0))
self.assertEqual(len(wv.vocab), len(loaded_wv.vocab))
def testPersistenceWithConstructorRule(self):
"""Test storing/loading the entire model with a vocab trimming rule passed in the constructor."""
tmpf = get_tmpfile('gensim_word2vec.tst')
model = word2vec.Word2Vec(sentences, min_count=1, trim_rule=_rule)
model.save(tmpf)
self.models_equal(model, word2vec.Word2Vec.load(tmpf))
def testRuleWithMinCount(self):
"""Test that returning RULE_DEFAULT from trim_rule triggers min_count."""
model = word2vec.Word2Vec(sentences + [["occurs_only_once"]], min_count=2, trim_rule=_rule)
self.assertTrue("human" not in model.wv.vocab)
self.assertTrue("occurs_only_once" not in model.wv.vocab)
self.assertTrue("interface" in model.wv.vocab)
def testRule(self):
"""Test applying vocab trim_rule to build_vocab instead of constructor."""
model = word2vec.Word2Vec(min_count=1)
model.build_vocab(sentences, trim_rule=_rule)
self.assertTrue("human" not in model.wv.vocab)
def testLambdaRule(self):
"""Test that lambda trim_rule works."""
def rule(word, count, min_count):
return utils.RULE_DISCARD if word == "human" else utils.RULE_DEFAULT
model = word2vec.Word2Vec(sentences, min_count=1, trim_rule=rule)
self.assertTrue("human" not in model.wv.vocab)
def testSyn0NormNotSaved(self):
"""Test syn0norm isn't saved in model file"""
tmpf = get_tmpfile('gensim_word2vec.tst')
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
model.save(tmpf)
loaded_model = word2vec.Word2Vec.load(tmpf)
self.assertTrue(loaded_model.wv.syn0norm is None)
wv = model.wv
wv.save(tmpf)
loaded_kv = keyedvectors.KeyedVectors.load(tmpf)
self.assertTrue(loaded_kv.syn0norm is None)
def testLoadPreKeyedVectorModel(self):
"""Test loading pre-KeyedVectors word2vec model"""
if sys.version_info[:2] == (3, 4):
model_file_suffix = '_py3_4'
elif sys.version_info < (3,):
model_file_suffix = '_py2'
else:
model_file_suffix = '_py3'
# Model stored in one file
model_file = 'word2vec_pre_kv%s' % model_file_suffix
model = word2vec.Word2Vec.load(datapath(model_file))
self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), model.vector_size))
self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), model.vector_size))
# Model stored in multiple files
model_file = 'word2vec_pre_kv_sep%s' % model_file_suffix
model = word2vec.Word2Vec.load(datapath(model_file))
self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), model.vector_size))
self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), model.vector_size))
def testLoadPreKeyedVectorModelCFormat(self):
"""Test loading pre-KeyedVectors word2vec model saved in word2vec format"""
model = keyedvectors.KeyedVectors.load_word2vec_format(datapath('word2vec_pre_kv_c'))
self.assertTrue(model.syn0.shape[0] == len(model.vocab))
def testPersistenceWord2VecFormat(self):
"""Test storing/loading the entire model in word2vec format."""
tmpf = get_tmpfile('gensim_word2vec.tst')
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
model.wv.save_word2vec_format(tmpf, binary=True)
binary_model_kv = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, binary=True)
binary_model_kv.init_sims(replace=False)
self.assertTrue(np.allclose(model['human'], binary_model_kv['human']))
norm_only_model = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, binary=True)
norm_only_model.init_sims(replace=True)
self.assertFalse(np.allclose(model['human'], norm_only_model['human']))
self.assertTrue(np.allclose(model.wv.syn0norm[model.wv.vocab['human'].index], norm_only_model['human']))
limited_model_kv = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, binary=True, limit=3)
self.assertEqual(len(limited_model_kv.syn0), 3)
half_precision_model_kv = keyedvectors.KeyedVectors.load_word2vec_format(
tmpf, binary=True, datatype=np.float16
)
self.assertEqual(binary_model_kv.syn0.nbytes, half_precision_model_kv.syn0.nbytes * 2)
def testNoTrainingCFormat(self):
tmpf = get_tmpfile('gensim_word2vec.tst')
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
model.wv.save_word2vec_format(tmpf, binary=True)
kv = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, binary=True)
binary_model = word2vec.Word2Vec()
binary_model.wv = kv
self.assertRaises(ValueError, binary_model.train, sentences)
def testTooShortBinaryWord2VecFormat(self):
    """Corrupting the binary header with a too-large vector count must make
    loading raise EOFError."""
    tfile = get_tmpfile('gensim_word2vec.tst')
    model = word2vec.Word2Vec(sentences, min_count=1)
    model.init_sims()
    model.wv.save_word2vec_format(tfile, binary=True)
    # Context manager guarantees the handle is closed even if the write fails.
    with open(tfile, 'r+b') as f:
        f.write(b'13')  # write wrong (too-long) vector count
    self.assertRaises(EOFError, keyedvectors.KeyedVectors.load_word2vec_format, tfile, binary=True)
def testTooShortTextWord2VecFormat(self):
    """Corrupting the text-format header with a too-large vector count must
    make loading raise EOFError."""
    tfile = get_tmpfile('gensim_word2vec.tst')
    model = word2vec.Word2Vec(sentences, min_count=1)
    model.init_sims()
    model.wv.save_word2vec_format(tfile, binary=False)
    # Context manager guarantees the handle is closed even if the write fails.
    with open(tfile, 'r+b') as f:
        f.write(b'13')  # write wrong (too-long) vector count
    self.assertRaises(EOFError, keyedvectors.KeyedVectors.load_word2vec_format, tfile, binary=False)
def testPersistenceWord2VecFormatNonBinary(self):
"""Test storing/loading the entire model in word2vec non-binary format."""
tmpf = get_tmpfile('gensim_word2vec.tst')
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
model.wv.save_word2vec_format(tmpf, binary=False)
text_model = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, binary=False)
text_model.init_sims(False)
self.assertTrue(np.allclose(model['human'], text_model['human'], atol=1e-6))
norm_only_model = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, binary=False)
norm_only_model.init_sims(True)
self.assertFalse(np.allclose(model['human'], norm_only_model['human'], atol=1e-6))
self.assertTrue(np.allclose(
model.wv.syn0norm[model.wv.vocab['human'].index], norm_only_model['human'], atol=1e-4
))
def testPersistenceWord2VecFormatWithVocab(self):
"""Test storing/loading the entire model and vocabulary in word2vec format."""
tmpf = get_tmpfile('gensim_word2vec.tst')
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
testvocab = get_tmpfile('gensim_word2vec.vocab')
model.wv.save_word2vec_format(tmpf, testvocab, binary=True)
binary_model_with_vocab_kv = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, testvocab, binary=True)
self.assertEqual(model.wv.vocab['human'].count, binary_model_with_vocab_kv.vocab['human'].count)
def testPersistenceKeyedVectorsFormatWithVocab(self):
"""Test storing/loading the entire model and vocabulary in word2vec format."""
tmpf = get_tmpfile('gensim_word2vec.tst')
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
testvocab = get_tmpfile('gensim_word2vec.vocab')
model.wv.save_word2vec_format(tmpf, testvocab, binary=True)
kv_binary_model_with_vocab = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, testvocab, binary=True)
self.assertEqual(model.wv.vocab['human'].count, kv_binary_model_with_vocab.vocab['human'].count)
def testPersistenceWord2VecFormatCombinationWithStandardPersistence(self):
"""Test storing/loading the entire model and vocabulary in word2vec format chained with
saving and loading via `save` and `load` methods`.
It was possible prior to 1.0.0 release, now raises Exception"""
tmpf = get_tmpfile('gensim_word2vec.tst')
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
testvocab = get_tmpfile('gensim_word2vec.vocab')
model.wv.save_word2vec_format(tmpf, testvocab, binary=True)
binary_model_with_vocab_kv = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, testvocab, binary=True)
binary_model_with_vocab_kv.save(tmpf)
self.assertRaises(AttributeError, word2vec.Word2Vec.load, tmpf)
def testLargeMmap(self):
"""Test storing/loading the entire model."""
tmpf = get_tmpfile('gensim_word2vec.tst')
model = word2vec.Word2Vec(sentences, min_count=1)
# test storing the internal arrays into separate files
model.save(tmpf, sep_limit=0)
self.models_equal(model, word2vec.Word2Vec.load(tmpf))
# make sure mmaping the arrays back works, too
self.models_equal(model, word2vec.Word2Vec.load(tmpf, mmap='r'))
def testVocab(self):
    """Test word2vec vocabulary building."""
    corpus = LeeCorpus()
    total_words = sum(len(sentence) for sentence in corpus)
    # try vocab building explicitly, using all words
    model = word2vec.Word2Vec(min_count=1, hs=1, negative=0)
    model.build_vocab(corpus)
    self.assertTrue(len(model.wv.vocab) == 6981)
    # with min_count=1, we're not throwing away anything,
    # so make sure the word counts add up to be the entire corpus
    self.assertEqual(sum(v.count for v in model.wv.vocab.values()), total_words)
    # make sure the binary codes are correct; the original discarded the
    # np.allclose() result, so this check never actually ran
    self.assertTrue(np.allclose(model.wv.vocab['the'].code, [1, 1, 0, 0]))
    # test building vocab with default params
    model = word2vec.Word2Vec(hs=1, negative=0)
    model.build_vocab(corpus)
    self.assertTrue(len(model.wv.vocab) == 1750)
    self.assertTrue(np.allclose(model.wv.vocab['the'].code, [1, 1, 1, 0]))
    # no input => "RuntimeError: you must first build vocabulary before training the model"
    self.assertRaises(RuntimeError, word2vec.Word2Vec, [])
    # input not empty, but rather completely filtered out
    self.assertRaises(RuntimeError, word2vec.Word2Vec, corpus, min_count=total_words + 1)
def testTraining(self):
"""Test word2vec training."""
# build vocabulary, don't train yet
model = word2vec.Word2Vec(size=2, min_count=1, hs=1, negative=0)
model.build_vocab(sentences)
self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 2))
self.assertTrue(model.syn1.shape == (len(model.wv.vocab), 2))
model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
sims = model.most_similar('graph', topn=10)
# self.assertTrue(sims[0][0] == 'trees', sims) # most similar
# test querying for "most similar" by vector
graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
sims2 = model.most_similar(positive=[graph_vector], topn=11)
sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself
self.assertEqual(sims, sims2)
# build vocab and train in one step; must be the same as above
model2 = word2vec.Word2Vec(sentences, size=2, min_count=1, hs=1, negative=0)
self.models_equal(model, model2)
def testScoring(self):
"""Test word2vec scoring."""
model = word2vec.Word2Vec(sentences, size=2, min_count=1, hs=1, negative=0)
# just score and make sure they exist
scores = model.score(sentences, len(sentences))
self.assertEqual(len(scores), len(sentences))
def testLocking(self):
"""Test word2vec training doesn't change locked vectors."""
corpus = LeeCorpus()
# build vocabulary, don't train yet
for sg in range(2): # test both cbow and sg
model = word2vec.Word2Vec(size=4, hs=1, negative=5, min_count=1, sg=sg, window=5)
model.build_vocab(corpus)
# remember two vectors
locked0 = np.copy(model.wv.syn0[0])
unlocked1 = np.copy(model.wv.syn0[1])
# lock the vector in slot 0 against change
model.syn0_lockf[0] = 0.0
model.train(corpus, total_examples=model.corpus_count, epochs=model.iter)
self.assertFalse((unlocked1 == model.wv.syn0[1]).all()) # unlocked vector should vary
self.assertTrue((locked0 == model.wv.syn0[0]).all()) # locked vector should not vary
def testAccuracy(self):
"""Test Word2Vec accuracy and KeyedVectors accuracy give the same result"""
model = word2vec.Word2Vec(LeeCorpus())
w2v_accuracy = model.accuracy(datapath('questions-words.txt'))
kv_accuracy = model.wv.accuracy(datapath('questions-words.txt'))
self.assertEqual(w2v_accuracy, kv_accuracy)
def testEvaluateWordPairs(self):
"""Test Spearman and Pearson correlation coefficients give sane results on similarity datasets"""
corpus = word2vec.LineSentence(datapath('head500.noblanks.cor.bz2'))
model = word2vec.Word2Vec(corpus, min_count=3, iter=10)
correlation = model.evaluate_word_pairs(datapath('wordsim353.tsv'))
pearson = correlation[0][0]
spearman = correlation[1][0]
oov = correlation[2]
self.assertTrue(0.1 < pearson < 1.0)
self.assertTrue(0.1 < spearman < 1.0)
self.assertTrue(0.0 <= oov < 90.0)
def model_sanity(self, model, train=True):
    """Even tiny models trained on LeeCorpus should pass these sanity checks"""
    # run extra before/after training tests if train=True
    if train:
        model.build_vocab(list_corpus)
        orig0 = np.copy(model.wv.syn0[0])
        model.train(list_corpus, total_examples=model.corpus_count, epochs=model.iter)
        # Compare the SAME slot (index 0) before and after training. The
        # original compared orig0 against syn0[1] — two different vectors
        # almost always differ, so the check passed vacuously. The sibling
        # fasttext test uses syn0[0] on both sides.
        self.assertFalse((orig0 == model.wv.syn0[0]).all())  # vector should vary after training
    sims = model.most_similar('war', topn=len(model.wv.index2word))
    t_rank = [word for word, score in sims].index('terrorism')
    # in >200 calibration runs w/ calling parameters, 'terrorism' in 50-most_sim for 'war'
    self.assertLess(t_rank, 50)
    war_vec = model['war']
    sims2 = model.most_similar([war_vec], topn=51)
    self.assertTrue('war' in [word for word, score in sims2])
    self.assertTrue('terrorism' in [word for word, score in sims2])
def test_sg_hs(self):
"""Test skipgram w/ hierarchical softmax"""
model = word2vec.Word2Vec(sg=1, window=4, hs=1, negative=0, min_count=5, iter=10, workers=2)
self.model_sanity(model)
def test_sg_neg(self):
"""Test skipgram w/ negative sampling"""
model = word2vec.Word2Vec(sg=1, window=4, hs=0, negative=15, min_count=5, iter=10, workers=2)
self.model_sanity(model)
def test_cbow_hs(self):
"""Test CBOW w/ hierarchical softmax"""
model = word2vec.Word2Vec(
sg=0, cbow_mean=1, alpha=0.05, window=8, hs=1, negative=0,
min_count=5, iter=10, workers=2, batch_words=1000
)
self.model_sanity(model)
def test_cbow_neg(self):
"""Test CBOW w/ negative sampling"""
model = word2vec.Word2Vec(
sg=0, cbow_mean=1, alpha=0.05, window=5, hs=0, negative=15,
min_count=5, iter=10, workers=2, sample=0
)
self.model_sanity(model)
def test_cosmul(self):
model = word2vec.Word2Vec(sentences, size=2, min_count=1, hs=1, negative=0)
sims = model.most_similar_cosmul('graph', topn=10)
# self.assertTrue(sims[0][0] == 'trees', sims) # most similar
# test querying for "most similar" by vector
graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
sims2 = model.most_similar_cosmul(positive=[graph_vector], topn=11)
sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself
self.assertEqual(sims, sims2)
def testTrainingCbow(self):
"""Test CBOW word2vec training."""
# to test training, make the corpus larger by repeating its sentences over and over
# build vocabulary, don't train yet
model = word2vec.Word2Vec(size=2, min_count=1, sg=0, hs=1, negative=0)
model.build_vocab(sentences)
self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 2))
self.assertTrue(model.syn1.shape == (len(model.wv.vocab), 2))
model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
sims = model.most_similar('graph', topn=10)
# self.assertTrue(sims[0][0] == 'trees', sims) # most similar
# test querying for "most similar" by vector
graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
sims2 = model.most_similar(positive=[graph_vector], topn=11)
sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself
self.assertEqual(sims, sims2)
# build vocab and train in one step; must be the same as above
model2 = word2vec.Word2Vec(sentences, size=2, min_count=1, sg=0, hs=1, negative=0)
self.models_equal(model, model2)
def testTrainingSgNegative(self):
"""Test skip-gram (negative sampling) word2vec training."""
# to test training, make the corpus larger by repeating its sentences over and over
# build vocabulary, don't train yet
model = word2vec.Word2Vec(size=2, min_count=1, sg=1, hs=0, negative=2)
model.build_vocab(sentences)
self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 2))
self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), 2))
model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
sims = model.most_similar('graph', topn=10)
# self.assertTrue(sims[0][0] == 'trees', sims) # most similar
# test querying for "most similar" by vector
graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
sims2 = model.most_similar(positive=[graph_vector], topn=11)
sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself
self.assertEqual(sims, sims2)
# build vocab and train in one step; must be the same as above
model2 = word2vec.Word2Vec(sentences, size=2, min_count=1, sg=1, hs=0, negative=2)
self.models_equal(model, model2)
def testTrainingCbowNegative(self):
"""Test CBOW (negative sampling) word2vec training."""
# to test training, make the corpus larger by repeating its sentences over and over
# build vocabulary, don't train yet
model = word2vec.Word2Vec(size=2, min_count=1, sg=0, hs=0, negative=2)
model.build_vocab(sentences)
self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 2))
self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), 2))
model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
sims = model.most_similar('graph', topn=10)
# self.assertTrue(sims[0][0] == 'trees', sims) # most similar
# test querying for "most similar" by vector
graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
sims2 = model.most_similar(positive=[graph_vector], topn=11)
sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself
self.assertEqual(sims, sims2)
# build vocab and train in one step; must be the same as above
model2 = word2vec.Word2Vec(sentences, size=2, min_count=1, sg=0, hs=0, negative=2)
self.models_equal(model, model2)
def testSimilarities(self):
"""Test similarity and n_similarity methods."""
# The model is trained using CBOW
model = word2vec.Word2Vec(size=2, min_count=1, sg=0, hs=0, negative=2)
model.build_vocab(sentences)
model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
self.assertTrue(model.n_similarity(['graph', 'trees'], ['trees', 'graph']))
self.assertTrue(model.n_similarity(['graph'], ['trees']) == model.similarity('graph', 'trees'))
self.assertRaises(ZeroDivisionError, model.n_similarity, ['graph', 'trees'], [])
self.assertRaises(ZeroDivisionError, model.n_similarity, [], ['graph', 'trees'])
self.assertRaises(ZeroDivisionError, model.n_similarity, [], [])
def testSimilarBy(self):
"""Test word2vec similar_by_word and similar_by_vector."""
model = word2vec.Word2Vec(sentences, size=2, min_count=1, hs=1, negative=0)
wordsims = model.similar_by_word('graph', topn=10)
wordsims2 = model.most_similar(positive='graph', topn=10)
vectorsims = model.similar_by_vector(model['graph'], topn=10)
vectorsims2 = model.most_similar([model['graph']], topn=10)
self.assertEqual(wordsims, wordsims2)
self.assertEqual(vectorsims, vectorsims2)
def testParallel(self):
"""Test word2vec parallel training."""
if word2vec.FAST_VERSION < 0: # don't test the plain np version for parallelism (too slow)
return
corpus = utils.RepeatCorpus(LeeCorpus(), 10000)
for workers in [2, 4]:
model = word2vec.Word2Vec(corpus, workers=workers)
sims = model.most_similar('israeli') # noqa:F841
# the exact vectors and therefore similarities may differ, due to different thread collisions/randomization
# so let's test only for top3
# TODO: commented out for now; find a more robust way to compare against "gold standard"
# self.assertTrue('palestinian' in [sims[i][0] for i in range(3)])
def testRNG(self):
"""Test word2vec results identical with identical RNG seed."""
model = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1)
model2 = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1)
self.models_equal(model, model2)
def models_equal(self, model, model2):
    """Assert two Word2Vec models are equivalent: same vocabulary size, same
    input vectors, same hidden-layer weights (hs and/or negative branches),
    and an identical vector for the most frequent word."""
    self.assertEqual(len(model.wv.vocab), len(model2.wv.vocab))
    self.assertTrue(np.allclose(model.wv.syn0, model2.wv.syn0))
    if model.hs:
        # hierarchical-softmax output weights
        self.assertTrue(np.allclose(model.syn1, model2.syn1))
    if model.negative:
        # negative-sampling output weights
        self.assertTrue(np.allclose(model.syn1neg, model2.syn1neg))
    most_common_word = max(model.wv.vocab.items(), key=lambda item: item[1].count)[0]
    self.assertTrue(np.allclose(model[most_common_word], model2[most_common_word]))
def testDeleteTemporaryTrainingData(self):
    """Test word2vec model after delete_temporary_training_data, across all
    four hs/negative configurations."""
    for i in [0, 1]:
        for j in [0, 1]:
            model = word2vec.Word2Vec(sentences, size=10, min_count=0, seed=42, hs=i, negative=j)
            if i:
                self.assertTrue(hasattr(model, 'syn1'))
            if j:
                self.assertTrue(hasattr(model, 'syn1neg'))
            self.assertTrue(hasattr(model, 'syn0_lockf'))
            model.delete_temporary_training_data(replace_word_vectors_with_normalized=True)
            # assertEqual, not assertTrue(x, n): the second argument of
            # assertTrue is a failure message, so those asserts never failed.
            self.assertEqual(len(model['human']), 10)
            self.assertEqual(len(model.wv.vocab), 12)
            self.assertEqual(model.wv.vocab['graph'].count, 3)
            # temporary training structures must be gone
            self.assertFalse(hasattr(model, 'syn1'))
            self.assertFalse(hasattr(model, 'syn1neg'))
            self.assertFalse(hasattr(model, 'syn0_lockf'))
def testNormalizeAfterTrainingData(self):
tmpf = get_tmpfile('gensim_word2vec.tst')
model = word2vec.Word2Vec(sentences, min_count=1)
model.save(tmpf)
norm_only_model = word2vec.Word2Vec.load(tmpf)
norm_only_model.delete_temporary_training_data(replace_word_vectors_with_normalized=True)
self.assertFalse(np.allclose(model['human'], norm_only_model['human']))
def testPredictOutputWord(self):
'''Test word2vec predict_output_word method handling for negative sampling scheme'''
# under normal circumstances
model_with_neg = word2vec.Word2Vec(sentences, min_count=1)
predictions_with_neg = model_with_neg.predict_output_word(['system', 'human'], topn=5)
self.assertTrue(len(predictions_with_neg) == 5)
# out-of-vobaculary scenario
predictions_out_of_vocab = model_with_neg.predict_output_word(['some', 'random', 'words'], topn=5)
self.assertEqual(predictions_out_of_vocab, None)
# when required model parameters have been deleted
tmpf = get_tmpfile('gensim_word2vec.tst')
model_with_neg.init_sims()
model_with_neg.wv.save_word2vec_format(tmpf, binary=True)
kv_model_with_neg = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, binary=True)
binary_model_with_neg = word2vec.Word2Vec()
binary_model_with_neg.wv = kv_model_with_neg
self.assertRaises(RuntimeError, binary_model_with_neg.predict_output_word, ['system', 'human'])
# negative sampling scheme not used
model_without_neg = word2vec.Word2Vec(sentences, min_count=1, negative=0)
self.assertRaises(RuntimeError, model_without_neg.predict_output_word, ['system', 'human'])
def testLoadOldModel(self):
"""Test loading word2vec models from previous version"""
model_file = 'word2vec_old'
model = word2vec.Word2Vec.load(datapath(model_file))
self.assertTrue(model.wv.vectors.shape == (12, 100))
self.assertTrue(len(model.wv.vocab) == 12)
self.assertTrue(len(model.wv.index2word) == 12)
self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), model.wv.vector_size))
self.assertTrue(model.trainables.vectors_lockf.shape == (12,))
self.assertTrue(model.vocabulary.cum_table.shape == (12,))
# Model stored in multiple files
model_file = 'word2vec_old_sep'
model = word2vec.Word2Vec.load(datapath(model_file))
self.assertTrue(model.wv.vectors.shape == (12, 100))
self.assertTrue(len(model.wv.vocab) == 12)
self.assertTrue(len(model.wv.index2word) == 12)
self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), model.wv.vector_size))
self.assertTrue(model.trainables.vectors_lockf.shape == (12,))
self.assertTrue(model.vocabulary.cum_table.shape == (12,))
    @log_capture()
    def testBuildVocabWarning(self, l):
        """Test if warning is raised on non-ideal input to a word2vec model"""
        # strings instead of token lists: each string gets iterated
        # character-by-character, which is almost never what the user wants
        sentences = ['human', 'machine']
        model = word2vec.Word2Vec()
        model.build_vocab(sentences)
        # `l` is the captured log output provided by the @log_capture decorator
        warning = "Each 'sentences' item should be a list of words (usually unicode strings)."
        self.assertTrue(warning in str(l))
    @log_capture()
    def testTrainWarning(self, l):
        """Test if warning is raised if alpha rises during subsequent calls to train()"""
        sentences = [
            ['human'],
            ['graph', 'trees']
        ]
        model = word2vec.Word2Vec(min_count=1)
        model.build_vocab(sentences)
        # simulate a manual learning-rate schedule: decay alpha each epoch...
        for epoch in range(10):
            model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
            model.alpha -= 0.002
            model.min_alpha = model.alpha
            if epoch == 5:
                # ...but bump it back up once, which must trigger the warning
                model.alpha += 0.05
        # `l` is the captured log output provided by the @log_capture decorator
        warning = "Effective 'alpha' higher than previous training cycles"
        self.assertTrue(warning in str(l))
def test_train_with_explicit_param(self):
model = word2vec.Word2Vec(size=2, min_count=1, hs=1, negative=0)
model.build_vocab(sentences)
with self.assertRaises(ValueError):
model.train(sentences, total_examples=model.corpus_count)
with self.assertRaises(ValueError):
model.train(sentences, epochs=model.iter)
with self.assertRaises(ValueError):
model.train(sentences)
def test_sentences_should_not_be_a_generator(self):
"""
Is sentences a generator object?
"""
gen = (s for s in sentences)
self.assertRaises(TypeError, word2vec.Word2Vec, (gen,))
def testLoadOnClassError(self):
"""Test if exception is raised when loading word2vec model on instance"""
self.assertRaises(AttributeError, load_on_instance)
def test_reset_from(self):
"""Test if reset_from() uses pre-built structures from other model"""
model = word2vec.Word2Vec(sentences, min_count=1)
other_model = word2vec.Word2Vec(new_sentences, min_count=1)
other_vocab = other_model.wv.vocab
model.reset_from(other_model)
self.assertEqual(model.wv.vocab, other_vocab)
def test_compute_training_loss(self):
model = word2vec.Word2Vec(min_count=1, sg=1, negative=5, hs=1)
model.build_vocab(sentences)
model.train(sentences, compute_loss=True, total_examples=model.corpus_count, epochs=model.iter)
training_loss_val = model.get_latest_training_loss()
self.assertTrue(training_loss_val > 0.0)
# endclass TestWord2VecModel
class TestWMD(unittest.TestCase):
    """Tests for Word Mover's Distance (requires the optional `pyemd` package)."""

    def setUp(self):
        # Skip (rather than silently pass, as the old early-`return` did) when
        # pyemd is not installed, so test reports show these did not run.
        if not PYEMD_EXT:
            self.skipTest("pyemd not installed")

    def testNonzero(self):
        '''Test basic functionality with a test sentence.'''
        model = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1)
        sentence1 = ['human', 'interface', 'computer']
        sentence2 = ['survey', 'user', 'computer', 'system', 'response', 'time']
        distance = model.wmdistance(sentence1, sentence2)
        # Check that distance is non-zero.
        self.assertFalse(distance == 0.0)

    def testSymmetry(self):
        '''Check that distance is symmetric.'''
        model = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1)
        sentence1 = ['human', 'interface', 'computer']
        sentence2 = ['survey', 'user', 'computer', 'system', 'response', 'time']
        distance1 = model.wmdistance(sentence1, sentence2)
        distance2 = model.wmdistance(sentence2, sentence1)
        self.assertTrue(np.allclose(distance1, distance2))

    def testIdenticalSentences(self):
        '''Check that the distance from a sentence to itself is zero.'''
        model = word2vec.Word2Vec(sentences, min_count=1)
        sentence = ['survey', 'user', 'computer', 'system', 'response', 'time']
        distance = model.wmdistance(sentence, sentence)
        self.assertEqual(0.0, distance)
class TestWord2VecSentenceIterators(unittest.TestCase):
    """Tests for the LineSentence / PathLineSentences corpus iterators."""

    def testLineSentenceWorksWithFilename(self):
        """LineSentence accepts a plain filename argument."""
        corpus_path = datapath('lee_background.cor')
        with utils.smart_open(corpus_path) as reference:
            for tokens in word2vec.LineSentence(corpus_path):
                self.assertEqual(tokens, utils.to_unicode(reference.readline()).split())

    def testLineSentenceWorksWithCompressedFile(self):
        """LineSentence accepts an already-open compressed file object."""
        with utils.smart_open(datapath('head500.noblanks.cor')) as reference:
            compressed = bz2.BZ2File(datapath('head500.noblanks.cor.bz2'))
            for tokens in word2vec.LineSentence(compressed):
                self.assertEqual(tokens, utils.to_unicode(reference.readline()).split())

    def testLineSentenceWorksWithNormalFile(self):
        """LineSentence accepts an open (uncompressed) file object, rather than a filename."""
        with utils.smart_open(datapath('head500.noblanks.cor')) as reference:
            with utils.smart_open(datapath('head500.noblanks.cor')) as handle:
                for tokens in word2vec.LineSentence(handle):
                    self.assertEqual(tokens, utils.to_unicode(reference.readline()).split())

    def testPathLineSentences(self):
        """PathLineSentences iterates every file under a directory, in order."""
        directory = datapath('PathLineSentences')
        with utils.smart_open(os.path.join(directory, '1.txt')) as first,\
                utils.smart_open(os.path.join(directory, '2.txt.bz2')) as second:
            expected_lines = first.readlines() + second.readlines()
            # walk both sources in lockstep, matching line for line
            for position, tokens in enumerate(word2vec.PathLineSentences(directory)):
                self.assertEqual(tokens, utils.to_unicode(expected_lines[position]).split())

    def testPathLineSentencesOneFile(self):
        """PathLineSentences also works when pointed at a single file."""
        test_file = os.path.join(datapath('PathLineSentences'), '1.txt')
        with utils.smart_open(test_file) as reference:
            for tokens in word2vec.PathLineSentences(test_file):
                self.assertEqual(tokens, utils.to_unicode(reference.readline()).split())
# endclass TestWord2VecSentenceIterators
# TODO: get correct path to Python binary
# class TestWord2VecScripts(unittest.TestCase):
# def testWord2VecStandAloneScript(self):
# """Does Word2Vec script launch standalone?"""
# cmd = 'python -m gensim.scripts.word2vec_standalone -train ' + datapath('testcorpus.txt') + \
# ' -output vec.txt -size 200 -sample 1e-4 -binary 0 -iter 3 -min_count 1'
# output = check_output(cmd, stderr=PIPE)
# self.assertEqual(output, '0')
# #endclass TestWord2VecScripts
# unittest in python 2.6 lacks assertLess; monkey-patch a minimal shim onto
# the test class so test bodies can call it unconditionally.
if not hasattr(TestWord2VecModel, 'assertLess'):
    # workaround for python 2.6
    def assertLess(self, a, b, msg=None):
        # mirrors the 2.7+ built-in: fail unless a < b
        self.assertTrue(a < b, msg="%s is not less than %s" % (a, b))
    setattr(TestWord2VecModel, 'assertLess', assertLess)
if __name__ == '__main__':
    # verbose, thread-aware logging: word2vec trains on worker threads
    logging.basicConfig(
        format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s',
        level=logging.DEBUG
    )
    # reports which compiled (cython) codepath is in use, if any
    logging.info("using optimization %s", word2vec.FAST_VERSION)
    unittest.main()
| 43,941 | 45.996791 | 119 | py |
poincare_glove | poincare_glove-master/gensim/test/test_matutils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
import logging
import unittest
import numpy as np
from scipy.special import psi # gamma function utils
import gensim.matutils as matutils
# we'll define known, good (slow) version of functions here
# and compare results from these functions vs. cython ones
def logsumexp(x):
    """Compute log(sum(exp(x))) in a numerically stable way.

    Parameters
    ----------
    x : numpy.ndarray
        Input 2d matrix.

    Returns
    -------
    float
        Log of the sum of exponentials of the elements in `x`.

    Warnings
    --------
    For performance reasons this reference implementation does not support
    NaNs or 1d, 3d, etc. arrays, unlike :func:`scipy.special.logsumexp`.

    """
    # shift by the maximum element so exp() cannot overflow
    offset = np.max(x)
    return offset + np.log(np.exp(x - offset).sum())
def mean_absolute_difference(a, b):
    """Mean of the elementwise absolute differences between two arrays.

    Parameters
    ----------
    a : numpy.ndarray
        Input 1d array.
    b : numpy.ndarray
        Input 1d array.

    Returns
    -------
    float
        mean(abs(a - b)).

    """
    return np.abs(a - b).mean()
def dirichlet_expectation(alpha):
    # raw docstring: the original non-raw string contained `\sim`, an invalid
    # escape sequence (DeprecationWarning in py3.6+, error in later versions)
    r"""For a vector :math:`\theta \sim Dir(\alpha)`, compute :math:`E[log \theta]`.

    Parameters
    ----------
    alpha : numpy.ndarray
        Dirichlet parameter 2d matrix or 1d vector, if 2d - each row is treated as a separate parameter vector.

    Returns
    -------
    numpy.ndarray:
        :math:`E[log \theta]`

    """
    if len(alpha.shape) == 1:
        result = psi(alpha) - psi(np.sum(alpha))
    else:
        # subtract the digamma of each row's sum, broadcast across columns
        result = psi(alpha) - psi(np.sum(alpha, 1))[:, np.newaxis]
    return result.astype(alpha.dtype, copy=False)  # keep the same precision as input
# the reference implementation above handles both 1d and 2d inputs, so reuse
# it under names that mirror gensim's separate cython variants under test
dirichlet_expectation_1d = dirichlet_expectation
dirichlet_expectation_2d = dirichlet_expectation
class TestLdaModelInner(unittest.TestCase):
    """Compare the optimized gensim.matutils routines against the slow,
    known-good reference implementations defined above, over many random inputs."""
    def setUp(self):
        self.random_state = np.random.RandomState()
        self.num_runs = 100  # test functions with *num_runs* random inputs
        self.num_topics = 100
    def testLogSumExp(self):
        # test logsumexp
        rs = self.random_state
        # NOTE(review): `dtype` only appears in the failure message; the input
        # is never cast to it, so every pass exercises float64 data — confirm
        # whether casting the input was intended.
        for dtype in [np.float16, np.float32, np.float64]:
            for i in range(self.num_runs):
                input = rs.uniform(-1000, 1000, size=(self.num_topics, 1))
                known_good = logsumexp(input)
                test_values = matutils.logsumexp(input)
                msg = "logsumexp failed for dtype={}".format(dtype)
                self.assertTrue(np.allclose(known_good, test_values), msg)
    def testMeanAbsoluteDifference(self):
        # test mean_absolute_difference
        rs = self.random_state
        # NOTE(review): as above, `dtype` is not applied to the inputs.
        for dtype in [np.float16, np.float32, np.float64]:
            for i in range(self.num_runs):
                input1 = rs.uniform(-10000, 10000, size=(self.num_topics,))
                input2 = rs.uniform(-10000, 10000, size=(self.num_topics,))
                known_good = mean_absolute_difference(input1, input2)
                test_values = matutils.mean_absolute_difference(input1, input2)
                msg = "mean_absolute_difference failed for dtype={}".format(dtype)
                self.assertTrue(np.allclose(known_good, test_values), msg)
    def testDirichletExpectation(self):
        # test dirichlet_expectation
        rs = self.random_state
        # NOTE(review): as above, `dtype` is not applied to the inputs.
        for dtype in [np.float16, np.float32, np.float64]:
            for i in range(self.num_runs):
                # 1 dimensional case
                input_1d = rs.uniform(.01, 10000, size=(self.num_topics,))
                known_good = dirichlet_expectation(input_1d)
                test_values = matutils.dirichlet_expectation(input_1d)
                msg = "dirichlet_expectation_1d failed for dtype={}".format(dtype)
                self.assertTrue(np.allclose(known_good, test_values), msg)
                # 2 dimensional case
                input_2d = rs.uniform(.01, 10000, size=(1, self.num_topics,))
                known_good = dirichlet_expectation(input_2d)
                test_values = matutils.dirichlet_expectation(input_2d)
                msg = "dirichlet_expectation_2d failed for dtype={}".format(dtype)
                self.assertTrue(np.allclose(known_good, test_values), msg)
if __name__ == '__main__':
    # run the comparison tests above with verbose logging
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 4,523 | 29.77551 | 111 | py |
poincare_glove | poincare_glove-master/gensim/test/test_lee.py | #!/usr/bin/env python
# encoding: utf-8
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated test to reproduce the results of Lee et al. (2005)
Lee et al. (2005) compares different models for semantic
similarity and verifies the results with similarity judgements from humans.
As a validation of the gensim implementation we reproduced the results
of Lee et al. (2005) in this test.
Many thanks to Michael D. Lee (michael.lee@adelaide.edu.au) who provided us
with his corpus and similarity data.
If you need to reference this dataset, please cite:
Lee, M., Pincombe, B., & Welsh, M. (2005).
An empirical evaluation of models of text document similarity.
Proceedings of the 27th Annual Conference of the Cognitive Science Society
"""
from __future__ import with_statement
import logging
import os.path
import unittest
from functools import partial
import numpy as np
from gensim import corpora, models, utils, matutils
from gensim.parsing.preprocessing import preprocess_documents, preprocess_string, DEFAULT_FILTERS
bg_corpus = None
corpus = None
human_sim_vector = None
class TestLeeTest(unittest.TestCase):
    """Reproduce the LSI vs. human-similarity experiment of Lee et al. (2005).

    setUp() populates module-level globals (bg_corpus, corpus, ...) which the
    tests read — and which test_lee() rebinds in place; this is safe only
    because setUp() re-reads them from disk before every test.
    """
    def setUp(self):
        """setup lee test corpora"""
        global bg_corpus, corpus, human_sim_vector, bg_corpus2, corpus2
        pre_path = os.path.join(os.path.dirname(__file__), 'test_data')
        bg_corpus_file = 'lee_background.cor'
        corpus_file = 'lee.cor'
        sim_file = 'similarities0-1.txt'
        # read in the corpora
        # the corpus files are latin1-encoded; decode before preprocessing
        latin1 = partial(utils.to_unicode, encoding='latin1')
        with utils.smart_open(os.path.join(pre_path, bg_corpus_file)) as f:
            bg_corpus = preprocess_documents(latin1(line) for line in f)
        with utils.smart_open(os.path.join(pre_path, corpus_file)) as f:
            corpus = preprocess_documents(latin1(line) for line in f)
        # the *2 variants apply all DEFAULT_FILTERS except the last one
        with utils.smart_open(os.path.join(pre_path, bg_corpus_file)) as f:
            bg_corpus2 = [preprocess_string(latin1(s), filters=DEFAULT_FILTERS[:-1]) for s in f]
        with utils.smart_open(os.path.join(pre_path, corpus_file)) as f:
            corpus2 = [preprocess_string(latin1(s), filters=DEFAULT_FILTERS[:-1]) for s in f]
        # read the human similarity data
        sim_matrix = np.loadtxt(os.path.join(pre_path, sim_file))
        sim_m_size = np.shape(sim_matrix)[0]
        # upper triangle (excluding the diagonal) = all distinct document pairs
        human_sim_vector = sim_matrix[np.triu_indices(sim_m_size, 1)]
    def test_corpus(self):
        """availability and integrity of corpus"""
        documents_in_bg_corpus = 300
        documents_in_corpus = 50
        len_sim_vector = 1225  # 50 choose 2 document pairs
        self.assertEqual(len(bg_corpus), documents_in_bg_corpus)
        self.assertEqual(len(corpus), documents_in_corpus)
        self.assertEqual(len(human_sim_vector), len_sim_vector)
    def test_lee(self):
        """correlation with human data > 0.6
        (this is the value which was achieved in the original paper)
        """
        global bg_corpus, corpus
        # create a dictionary and corpus (bag of words)
        dictionary = corpora.Dictionary(bg_corpus)
        bg_corpus = [dictionary.doc2bow(text) for text in bg_corpus]
        corpus = [dictionary.doc2bow(text) for text in corpus]
        # transform the bag of words with log_entropy normalization
        log_ent = models.LogEntropyModel(bg_corpus)
        bg_corpus_ent = log_ent[bg_corpus]
        # initialize an LSI transformation from background corpus
        lsi = models.LsiModel(bg_corpus_ent, id2word=dictionary, num_topics=200)
        # transform small corpus to lsi bow->log_ent->fold-in-lsi
        corpus_lsi = lsi[log_ent[corpus]]
        # compute pairwise similarity matrix and extract upper triangular
        res = np.zeros((len(corpus), len(corpus)))
        for i, par1 in enumerate(corpus_lsi):
            for j, par2 in enumerate(corpus_lsi):
                res[i, j] = matutils.cossim(par1, par2)
        flat = res[np.triu_indices(len(corpus), 1)]
        # Pearson correlation against the human similarity judgements
        cor = np.corrcoef(flat, human_sim_vector)[0, 1]
        logging.info("LSI correlation coefficient is %s", cor)
        self.assertTrue(cor > 0.6)
if __name__ == '__main__':
    # run the Lee replication tests with verbose logging
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 4,277 | 36.526316 | 97 | py |
poincare_glove | poincare_glove-master/gensim/test/svd_error.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
"""USAGE: %(program)s MATRIX.mm [CLIP_DOCS] [CLIP_TERMS]
Check truncated SVD error for the algo in gensim, using a given corpus. This script
runs the decomposition with several internal parameters (number of requested factors,
iterative chunk size) and reports error for each parameter combination.
The number of input documents is clipped to the first CLIP_DOCS. Similarly,
only the first CLIP_TERMS are considered (features with id >= CLIP_TERMS are
ignored, effectively restricting the vocabulary size). If you don't specify them,
the entire matrix will be used.
Example: ./svd_error.py ~/gensim/results/wiki_en_v10k.mm.bz2 100000 10000
"""
from __future__ import print_function, with_statement
import logging
import os
import sys
import time
import bz2
import itertools
import numpy as np
import scipy.linalg
import gensim
try:
from sparsesvd import sparsesvd
except ImportError:
# no SVDLIBC: install with `easy_install sparsesvd` if you want SVDLIBC results as well
sparsesvd = None
sparsesvd = None # don't use SVDLIBC
FACTORS = [300] # which num_topics to try
CHUNKSIZE = [10000, 1000] # which chunksize to try
POWER_ITERS = [0, 1, 2, 4, 6] # extra power iterations for the randomized algo
# when reporting reconstruction error, also report spectral norm error? (very slow)
COMPUTE_NORM2 = False
def norm2(a):
    """Spectral norm ("norm 2") of a symmetric matrix `a`.

    Returns NaN when the (very slow) computation is disabled via the
    module-level COMPUTE_NORM2 flag.
    """
    if not COMPUTE_NORM2:
        return np.nan
    logging.info("computing spectral norm of a %s matrix", str(a.shape))
    # for a symmetric matrix, eigvalsh is much faster than np.linalg.norm(2)
    return scipy.linalg.eigvalsh(a).max()
def rmse(diff):
    """Root-mean-square of the entries of the array `diff`."""
    return np.sqrt(np.multiply(diff, diff).sum() / float(diff.size))
def print_error(name, aat, u, s, ideal_nf, ideal_n2):
    """Print reconstruction error of the truncated decomposition (u, s) against `aat`."""
    # residual of the reconstruction: aat - u * diag(s) * u^T
    residual = aat - np.dot(u, np.dot(np.diag(s), u.T))
    nf, n2 = np.linalg.norm(residual), norm2(residual)
    print(
        '%s error: norm_frobenius=%f (/ideal=%g), norm2=%f (/ideal=%g), RMSE=%g' %
        (name, nf, nf / ideal_nf, n2, n2 / ideal_n2, rmse(residual))
    )
    sys.stdout.flush()
class ClippedCorpus(object):
    """Wrap a corpus so that iteration yields only the first `max_docs`
    documents, each stripped of features with id >= `max_terms`."""

    def __init__(self, corpus, max_docs, max_terms):
        self.corpus = corpus
        self.max_docs = max_docs
        self.max_terms = max_terms

    def __iter__(self):
        for document in itertools.islice(self.corpus, self.max_docs):
            yield [(feature_id, weight) for feature_id, weight in document if feature_id < self.max_terms]
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    logging.info("running %s", " ".join(sys.argv))
    program = os.path.basename(sys.argv[0])
    # do we have enough cmd line arguments?
    if len(sys.argv) < 2:
        print(globals()["__doc__"] % locals())
        sys.exit(1)
    fname = sys.argv[1]
    # corpus may be given as a plain or bz2-compressed Matrix Market file
    if fname.endswith('bz2'):
        mm = gensim.corpora.MmCorpus(bz2.BZ2File(fname))
    else:
        mm = gensim.corpora.MmCorpus(fname)
    # extra cmd parameters = use a subcorpus (fewer docs, smaller vocab)
    if len(sys.argv) > 2:
        n = int(sys.argv[2])
    else:
        n = mm.num_docs
    if len(sys.argv) > 3:
        m = int(sys.argv[3])
    else:
        m = mm.num_terms
    logging.info("using %i documents and %i features", n, m)
    corpus = ClippedCorpus(mm, n, m)
    id2word = gensim.utils.FakeDict(m)
    logging.info("computing corpus * corpus^T")  # eigenvalues of this matrix are singular values of `corpus`, squared
    # accumulate A * A^T chunk by chunk to bound peak memory usage
    aat = np.zeros((m, m), dtype=np.float64)
    for chunk in gensim.utils.grouper(corpus, chunksize=5000):
        num_nnz = sum(len(doc) for doc in chunk)
        chunk = gensim.matutils.corpus2csc(chunk, num_nnz=num_nnz, num_terms=m, num_docs=len(chunk), dtype=np.float32)
        chunk = chunk * chunk.T
        chunk = chunk.toarray()
        aat += chunk
        del chunk
    logging.info("computing full decomposition of corpus * corpus^t")
    aat = aat.astype(np.float32)
    spectrum_s, spectrum_u = scipy.linalg.eigh(aat)
    spectrum_s = spectrum_s[::-1]  # re-order to descending eigenvalue order
    spectrum_u = spectrum_u.T[::-1].T
    np.save(fname + '.spectrum.npy', spectrum_s)
    for factors in FACTORS:
        # error of the *exact* rank-`factors` truncation = best achievable
        err = -np.dot(spectrum_u[:, :factors], np.dot(np.diag(spectrum_s[:factors]), spectrum_u[:, :factors].T))
        err += aat
        ideal_fro = np.linalg.norm(err)
        del err
        ideal_n2 = spectrum_s[factors + 1]
        print('*' * 40, "%i factors, ideal error norm_frobenius=%f, norm_2=%f" % (factors, ideal_fro, ideal_n2))
        print("*" * 30, end="")
        # trivial all-zeros "decomposition", as a sanity baseline
        print_error("baseline", aat,
            np.zeros((m, factors)), np.zeros((factors)), ideal_fro, ideal_n2)
        if sparsesvd:
            logging.info("computing SVDLIBC SVD for %i factors", factors)
            taken = time.time()
            corpus_ram = gensim.matutils.corpus2csc(corpus, num_terms=m)
            ut, s, vt = sparsesvd(corpus_ram, factors)
            taken = time.time() - taken
            del corpus_ram
            del vt
            u, s = ut.T.astype(np.float32), s.astype(np.float32)**2  # convert singular values to eigenvalues
            del ut
            print("SVDLIBC SVD for %i factors took %s s (spectrum %f .. %f)"
                % (factors, taken, s[0], s[-1]))
            print_error("SVDLIBC", aat, u, s, ideal_fro, ideal_n2)
            del u
        for power_iters in POWER_ITERS:
            for chunksize in CHUNKSIZE:
                logging.info(
                    "computing incremental SVD for %i factors, %i power iterations, chunksize %i",
                    factors, power_iters, chunksize
                )
                taken = time.time()
                gensim.models.lsimodel.P2_EXTRA_ITERS = power_iters
                model = gensim.models.LsiModel(
                    corpus, id2word=id2word, num_topics=factors,
                    chunksize=chunksize, power_iters=power_iters
                )
                taken = time.time() - taken
                u, s = model.projection.u.astype(np.float32), model.projection.s.astype(np.float32)**2
                del model
                print(
                    "incremental SVD for %i factors, %i power iterations, "
                    "chunksize %i took %s s (spectrum %f .. %f)" %
                    (factors, power_iters, chunksize, taken, s[0], s[-1])
                )
                print_error('incremental SVD', aat, u, s, ideal_fro, ideal_n2)
                del u
            logging.info("computing multipass SVD for %i factors, %i power iterations", factors, power_iters)
            taken = time.time()
            model = gensim.models.LsiModel(
                corpus, id2word=id2word, num_topics=factors, chunksize=2000,
                onepass=False, power_iters=power_iters
            )
            taken = time.time() - taken
            u, s = model.projection.u.astype(np.float32), model.projection.s.astype(np.float32)**2
            del model
            print(
                "multipass SVD for %i factors, "
                "%i power iterations took %s s (spectrum %f .. %f)" %
                (factors, power_iters, taken, s[0], s[-1])
            )
            print_error('multipass SVD', aat, u, s, ideal_fro, ideal_n2)
            del u
    logging.info("finished running %s", program)
| 7,398 | 36.94359 | 118 | py |
poincare_glove | poincare_glove-master/gensim/test/test_dtm.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Automated tests for DTM/DIM model
"""
import logging
from subprocess import CalledProcessError
import gensim
import os
import unittest
from gensim import corpora
from gensim.test.utils import datapath
class TestDtmModel(unittest.TestCase):
    """Tests for the DTM/DIM wrapper; requires the external `dtm` binary,
    located via the $DTM_PATH environment variable."""

    def setUp(self):
        self.time_slices = [3, 7]
        self.corpus = corpora.mmcorpus.MmCorpus(datapath('dtm_test.mm'))
        self.id2word = corpora.Dictionary.load(datapath('dtm_test.dict'))
        # the environment variable $DTM_PATH must point at the dtm executable file
        self.dtm_path = os.environ.get('DTM_PATH', None)
        if not self.dtm_path:
            self.skipTest("$DTM_PATH is not properly set up.")

    def _train(self, mode, initialize_lda):
        """Build a wrapper model over the fixed test corpus with a fixed RNG seed."""
        return gensim.models.wrappers.DtmModel(
            self.dtm_path, self.corpus, self.time_slices, num_topics=2,
            id2word=self.id2word, model=mode, initialize_lda=initialize_lda,
            rng_seed=1
        )

    def testDtm(self):
        if self.dtm_path is not None:
            model = self._train('dtm', True)
            topics = model.show_topics(num_topics=2, times=2, num_words=10)
            self.assertEqual(len(topics), 4)
            one_topic = model.show_topic(topicid=1, time=1, num_words=10)
            self.assertEqual(len(one_topic), 10)
            self.assertEqual(one_topic[0][1], u'idexx')

    def testDim(self):
        if self.dtm_path is not None:
            model = self._train('fixed', True)
            topics = model.show_topics(num_topics=2, times=2, num_words=10)
            self.assertEqual(len(topics), 4)
            one_topic = model.show_topic(topicid=1, time=1, num_words=10)
            self.assertEqual(len(one_topic), 10)
            self.assertEqual(one_topic[0][1], u'skills')

    def testCalledProcessError(self):
        # In stderr expect "Error opening file /tmp/a65419_train_out/initial-lda-ss.dat. Failing."
        if self.dtm_path is not None:
            with self.assertRaises(CalledProcessError):
                self._train('dtm', False)
if __name__ == '__main__':
    # run the DTM wrapper tests (skipped unless $DTM_PATH is set)
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
| 2,535 | 34.71831 | 96 | py |
poincare_glove | poincare_glove-master/gensim/test/utils.py | #!/usr/bin/env python
# encoding: utf-8
"""Module contains common utilities used in automated code tests for Gensim modules.
Attributes:
-----------
module_path : str
Full path to this module directory.
common_texts : list of list of str
Toy dataset.
common_dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
Dictionary of toy dataset.
common_corpus : list of list of (int, int)
Corpus of toy dataset.
Examples:
---------
It's easy to keep objects in a temporary folder and reuse them if needed:
>>> from gensim.models import word2vec
>>> from gensim.test.utils import get_tmpfile, common_texts
>>>
>>> model = word2vec.Word2Vec(common_texts, min_count=1)
>>> temp_path = get_tmpfile('toy_w2v')
>>> model.save(temp_path)
>>>
>>> new_model = word2vec.Word2Vec.load(temp_path)
>>> result = new_model.wv.most_similar("human", topn=1)
Let's print first document in toy dataset and then recreate it using its corpus and dictionary.
>>> from gensim.test.utils import common_texts, common_dictionary, common_corpus
>>> print(common_texts[0])
['human', 'interface', 'computer']
>>> assert common_dictionary.doc2bow(common_texts[0]) == common_corpus[0]
We can find our toy set in test data directory.
>>> from gensim.test.utils import datapath
>>>
>>> with open(datapath("testcorpus.txt")) as f:
... texts = [line.strip().split() for line in f]
>>> print(texts[0])
['computer', 'human', 'interface']
If you don't need to keep temporary objects on disk use :func:`~gensim.test.utils.temporary_file`:
>>> from gensim.test.utils import temporary_file, common_corpus, common_dictionary
>>> from gensim.models import LdaModel
>>>
>>> with temporary_file("temp.txt") as tf:
... lda = LdaModel(common_corpus, id2word=common_dictionary, num_topics=3)
... lda.save(tf)
"""
import contextlib
import tempfile
import os
import shutil
from gensim.corpora import Dictionary
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
def datapath(fname):
    """Get the full path of file `fname` inside the test_data directory that
    lives next to this module. Usually used to locate test corpora.

    Parameters
    ----------
    fname : str
        Name of file.

    Returns
    -------
    str
        Full path to `fname` in the test_data folder.

    Example
    -------
    Let's get the path of a test corpus file and iterate over it.

    >>> from gensim.corpora import MmCorpus
    >>> from gensim.test.utils import datapath
    >>>
    >>> corpus = MmCorpus(datapath("testcorpus.mm"))
    >>> for document in corpus:
    ...     pass

    """
    test_data_dir = os.path.join(module_path, 'test_data')
    return os.path.join(test_data_dir, fname)
def get_tmpfile(suffix):
    """Construct a full path pointing at file `suffix` inside the system's
    temporary folder.

    The file itself is NOT created; only a name is generated, and consecutive
    calls may return different paths (if the temporary directory changes).

    Parameters
    ----------
    suffix : str
        Suffix of file.

    Returns
    -------
    str
        Path to `suffix` file in temporary folder.

    Examples
    --------
    Using this function we may get path to temporary file and use it, for example, to store temporary model.

    >>> from gensim.models import LsiModel
    >>> from gensim.test.utils import get_tmpfile, common_dictionary, common_corpus
    >>>
    >>> tmp_f = get_tmpfile("toy_lsi_model")
    >>>
    >>> model = LsiModel(common_corpus, id2word=common_dictionary)
    >>> model.save(tmp_f)
    >>>
    >>> loaded_model = LsiModel.load(tmp_f)

    """
    temp_dir = tempfile.gettempdir()
    return os.path.join(temp_dir, suffix)
@contextlib.contextmanager
def temporary_file(name=""):
    """Context manager that yields the path of file `name` inside a freshly
    created temporary directory.

    The directory — together with any files created inside it — is removed
    when the context exits. Note: the file itself is not created for you.

    Parameters
    ----------
    name : str
        Filename.

    Yields
    ------
    str
        Path to file `name` in temporary directory.

    Examples
    --------
    This example demonstrates that the created temporary directory (and
    included files) will be deleted at the end of context.

    >>> import os
    >>> from gensim.test.utils import temporary_file
    >>> with temporary_file("temp.txt") as tf, open(tf, 'w') as outfile:
    ...     outfile.write("my extremely useful information")
    ...     print("Is this file exists? {}".format(os.path.exists(tf)))
    ...     print("Is this folder exists? {}".format(os.path.exists(os.path.dirname(tf))))
    Is this file exists? True
    Is this folder exists? True
    >>>
    >>> print("Is this file exists? {}".format(os.path.exists(tf)))
    Is this file exists? False
    >>> print("Is this folder exists? {}".format(os.path.exists(os.path.dirname(tf))))
    Is this folder exists? False

    """
    # note : when dropping python2.7 support, tempfile.TemporaryDirectory can be used instead
    tmp_dir = tempfile.mkdtemp()
    try:
        yield os.path.join(tmp_dir, name)
    finally:
        shutil.rmtree(tmp_dir, ignore_errors=True)
# set up vars used in testing ("Deerwester" from the web tutorial)
# common_texts: tiny tokenized toy dataset shared by many gensim tests
common_texts = [
    ['human', 'interface', 'computer'],
    ['survey', 'user', 'computer', 'system', 'response', 'time'],
    ['eps', 'user', 'interface', 'system'],
    ['system', 'human', 'system', 'eps'],
    ['user', 'response', 'time'],
    ['trees'],
    ['graph', 'trees'],
    ['graph', 'minors', 'trees'],
    ['graph', 'minors', 'survey']
]
# dictionary and bag-of-words corpus built from the toy dataset above
common_dictionary = Dictionary(common_texts)
common_corpus = [common_dictionary.doc2bow(text) for text in common_texts]
| 5,596 | 27.850515 | 108 | py |
poincare_glove | poincare_glove-master/gensim/test/test_miislita.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
This module replicates the miislita vector spaces from
"A Linear Algebra Approach to the Vector Space Model -- A Fast Track Tutorial"
by Dr. E. Garcia, admin@miislita.com
See http://www.miislita.com for further details.
"""
from __future__ import division # always use floats
from __future__ import with_statement
import logging
import os
import unittest
from gensim import utils, corpora, models, similarities
from gensim.test.utils import datapath, get_tmpfile
logger = logging.getLogger('test_miislita')
class CorpusMiislita(corpora.TextCorpus):
    # words ignored during tokenization
    stoplist = set('for a of the and to in on'.split())

    def get_texts(self):
        """
        Parse documents from the .cor file provided in the constructor:
        one document per line, words separated by whitespace. Each document
        is lowercased and stripped of the stopwords above.
        """
        for line in self.getstream():
            tokens = utils.to_unicode(line).lower().split()
            yield [token for token in tokens if token not in CorpusMiislita.stoplist]

    def __len__(self):
        """Number of documents; computed on first use and cached on the instance."""
        if 'length' not in self.__dict__:
            logger.info("caching corpus size (calculating number of documents)")
            self.length = sum(1 for _ in self.get_texts())
        return self.length
class TestMiislita(unittest.TestCase):
    def test_textcorpus(self):
        """A TextCorpus can be serialized to disk and read back unchanged."""
        miislita = CorpusMiislita(datapath('head500.noblanks.cor.bz2'))
        # serialize to Matrix Market format
        ftmp = get_tmpfile('test_textcorpus.mm')
        corpora.MmCorpus.save_corpus(ftmp, miislita)
        self.assertTrue(os.path.exists(ftmp))
        # deserializing must yield the same documents
        roundtripped = corpora.MmCorpus(ftmp)
        self.assertEqual(list(miislita), list(roundtripped))

    def test_save_load_ability(self):
        """
        TextCorpus objects survive save()/load() (as long as the underlying
        input is a path, not a file-like object — those cannot be pickled).
        """
        corpusname = datapath('miIslita.cor')
        miislita = CorpusMiislita(corpusname)
        # pickle to disk and restore
        tmpf = get_tmpfile('tc_test.cpickle')
        miislita.save(tmpf)
        restored = CorpusMiislita.load(tmpf)
        self.assertEqual(len(miislita), len(restored))
        self.assertEqual(miislita.dictionary.token2id, restored.dictionary.token2id)

    def test_miislita_high_level(self):
        """Reproduce the tf-idf similarity scores from Garcia's tutorial."""
        miislita = CorpusMiislita(datapath('miIslita.cor'))
        # initialize tfidf transformation and similarity index
        tfidf = models.TfidfModel(miislita, miislita.dictionary, normalize=False)
        index = similarities.SparseMatrixSimilarity(tfidf[miislita], num_features=len(miislita.dictionary))
        # run a similarity query against the corpus
        query = 'latent semantic indexing'
        vec_tfidf = tfidf[miislita.dictionary.doc2bow(query.lower().split())]
        sims_tfidf = index[vec_tfidf]
        # for the expected results see the article
        expected = [0.0, 0.2560, 0.7022, 0.1524, 0.3334]
        for position, value in enumerate(expected):
            self.assertAlmostEqual(sims_tfidf[position], value, 2)
if __name__ == '__main__':
    # run the miislita replication tests with verbose logging
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
| 3,661 | 32.59633 | 107 | py |
poincare_glove | poincare_glove-master/gensim/test/test_ldamallet_wrapper.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import os
import os.path
import numpy as np
from gensim.corpora import mmcorpus, Dictionary
from gensim.models.wrappers import ldamallet
from gensim import matutils
from gensim.models import ldamodel
from gensim.test import basetmtests
from gensim.test.utils import datapath, get_tmpfile, common_texts
dictionary = Dictionary(common_texts)
corpus = [dictionary.doc2bow(text) for text in common_texts]
class TestLdaMallet(unittest.TestCase, basetmtests.TestBaseTopicModel):
    """Tests for the Mallet LDA wrapper; skipped unless MALLET_HOME points at a Mallet install."""

    def setUp(self):
        """Locate the `mallet` binary and train the tiny model used by the shared base tests."""
        mallet_home = os.environ.get('MALLET_HOME', None)
        self.mallet_path = os.path.join(mallet_home, 'bin', 'mallet') if mallet_home else None
        if not self.mallet_path:
            raise unittest.SkipTest("MALLET_HOME not specified. Skipping Mallet tests.")
        self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
        # self.model is used in TestBaseTopicModel
        self.model = ldamallet.LdaMallet(self.mallet_path, corpus, id2word=dictionary, num_topics=2, iterations=1)

    def testTransform(self):
        """Inferred topic mixture for the first document should be roughly uniform."""
        if not self.mallet_path:
            return
        passed = False
        for i in range(5):  # restart at most 5 times; LDA training can get stuck in a bad optimum
            # create the transformation model
            model = ldamallet.LdaMallet(self.mallet_path, corpus, id2word=dictionary, num_topics=2, iterations=200)
            # transform one document
            doc = list(corpus)[0]
            transformed = model[doc]
            vec = matutils.sparse2full(transformed, 2)  # convert to dense vector, for easier equality tests
            expected = [0.49, 0.51]
            # must contain the same values, up to re-ordering
            passed = np.allclose(sorted(vec), sorted(expected), atol=1e-1)
            if passed:
                break
            logging.warning(
                "LDA failed to converge on attempt %i (got %s, expected %s)",
                i, sorted(vec), sorted(expected)
            )
        self.assertTrue(passed)

    def testSparseTransform(self):
        """With topic_threshold=0.5 the returned mixture should collapse to a single topic."""
        if not self.mallet_path:
            return
        passed = False
        for i in range(5):  # restart at most 5 times
            # create the sparse transformation model with the appropriate topic_threshold
            model = ldamallet.LdaMallet(
                self.mallet_path, corpus, id2word=dictionary, num_topics=2, iterations=200, topic_threshold=0.5
            )
            # transform one document
            doc = list(corpus)[0]
            transformed = model[doc]
            vec = matutils.sparse2full(transformed, 2)  # convert to dense vector, for easier equality tests
            expected = [1.0, 0.0]
            # must contain the same values, up to re-ordering
            passed = np.allclose(sorted(vec), sorted(expected), atol=1e-2)
            if passed:
                break
            logging.warning(
                "LDA failed to converge on attempt %i (got %s, expected %s)",
                i, sorted(vec), sorted(expected)
            )
        self.assertTrue(passed)

    def testMallet2Model(self):
        """Converting a Mallet model to a native LdaModel must keep per-document topics close."""
        if not self.mallet_path:
            return
        tm1 = ldamallet.LdaMallet(self.mallet_path, corpus=corpus, num_topics=2, id2word=dictionary)
        tm2 = ldamallet.malletmodel2ldamodel(tm1)
        for document in corpus:
            element1_1, element1_2 = tm1[document][0]
            element2_1, element2_2 = tm2[document][0]
            self.assertAlmostEqual(element1_1, element2_1)
            self.assertAlmostEqual(element1_2, element2_2, 1)
            element1_1, element1_2 = tm1[document][1]
            element2_1, element2_2 = tm2[document][1]
            self.assertAlmostEqual(element1_1, element2_1)
            self.assertAlmostEqual(element1_2, element2_2, 1)
            # BUGFIX: these debug lines used '%d', which truncates the float topic weights
            # and raises a (logging-internal) TypeError for the tuple arguments on the last
            # line; '%s' logs all of them faithfully.
            logging.debug('%s %s', element1_1, element2_1)
            logging.debug('%s %s', element1_2, element2_2)
            logging.debug('%s %s', tm1[document][1], tm2[document][1])

    def testPersistence(self):
        """Save/load must round-trip topic count, word-topic matrix, and inference."""
        if not self.mallet_path:
            return
        fname = get_tmpfile('gensim_models_lda_mallet.tst')
        model = ldamallet.LdaMallet(self.mallet_path, self.corpus, num_topics=2, iterations=100)
        model.save(fname)
        model2 = ldamallet.LdaMallet.load(fname)
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(np.allclose(model.word_topics, model2.word_topics))
        tstvec = []
        self.assertTrue(np.allclose(model[tstvec], model2[tstvec]))  # try projecting an empty vector

    def testPersistenceCompressed(self):
        """Same round-trip as testPersistence, through a gzip-compressed file."""
        if not self.mallet_path:
            return
        fname = get_tmpfile('gensim_models_lda_mallet.tst.gz')
        model = ldamallet.LdaMallet(self.mallet_path, self.corpus, num_topics=2, iterations=100)
        model.save(fname)
        model2 = ldamallet.LdaMallet.load(fname, mmap=None)
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(np.allclose(model.word_topics, model2.word_topics))
        tstvec = []
        self.assertTrue(np.allclose(model[tstvec], model2[tstvec]))  # try projecting an empty vector

    def testLargeMmap(self):
        """Large arrays stored separately (sep_limit=0) must load back as numpy memmaps."""
        if not self.mallet_path:
            return
        fname = get_tmpfile('gensim_models_lda_mallet.tst')
        model = ldamallet.LdaMallet(self.mallet_path, self.corpus, num_topics=2, iterations=100)
        # simulate storing large arrays separately
        model.save(fname, sep_limit=0)
        # test loading the large model arrays with mmap
        model2 = ldamodel.LdaModel.load(fname, mmap='r')
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(isinstance(model2.word_topics, np.memmap))
        self.assertTrue(np.allclose(model.word_topics, model2.word_topics))
        tstvec = []
        self.assertTrue(np.allclose(model[tstvec], model2[tstvec]))  # try projecting an empty vector

    def testLargeMmapCompressed(self):
        """mmap'ing separately-stored arrays from a compressed file is impossible: expect IOError."""
        if not self.mallet_path:
            return
        fname = get_tmpfile('gensim_models_lda_mallet.tst.gz')
        model = ldamallet.LdaMallet(self.mallet_path, self.corpus, num_topics=2, iterations=100)
        # simulate storing large arrays separately
        model.save(fname, sep_limit=0)
        # test loading the large model arrays with mmap
        self.assertRaises(IOError, ldamodel.LdaModel.load, fname, mmap='r')
# Allow running this test module directly from the command line with verbose logging.
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 6,810 | 40.785276 | 115 | py |
poincare_glove | poincare_glove-master/gensim/test/test_text_analysis.py | import logging
import unittest
from gensim.corpora.dictionary import Dictionary
from gensim.topic_coherence.text_analysis import (
InvertedIndexAccumulator, WordOccurrenceAccumulator, ParallelWordOccurrenceAccumulator,
CorpusAccumulator)
from gensim.test.utils import common_texts
class BaseTestCases(object):
    # Wrapper class so unittest's collector does not run the abstract base below on its
    # own; concrete subclasses set `accumulator_cls` and inherit the shared tests.
    class TextAnalyzerTestBase(unittest.TestCase):
        # Tiny hand-made corpus with known occurrence / co-occurrence counts.
        texts = [
            ['this', 'is', 'a'],
            ['test', 'document'],
            ['this', 'test', 'document'],
            ['test', 'test', 'this']
        ]
        # Hand-picked token ids; the Dictionary's own mapping is overridden below so the
        # expected inverted indexes in subclasses can refer to these fixed ids.
        token2id = {
            'this': 10,
            'is': 15,
            'a': 20,
            'test': 21,
            'document': 17
        }
        dictionary = Dictionary(texts)
        dictionary.token2id = token2id
        dictionary.id2token = {v: k for k, v in token2id.items()}
        top_ids = set(token2id.values())

        # Second fixture: gensim's demo texts plus one extra doc to vary the counts.
        texts2 = common_texts + [['user', 'user']]
        dictionary2 = Dictionary(texts2)
        dictionary2.id2token = {v: k for k, v in dictionary2.token2id.items()}
        top_ids2 = set(dictionary2.token2id.values())

        # Concrete subclasses must set this to the accumulator class under test.
        accumulator_cls = None

        def init_accumulator(self):
            # Build an accumulator over the first (hand-made) fixture.
            return self.accumulator_cls(self.top_ids, self.dictionary)

        def init_accumulator2(self):
            # Build an accumulator over the second (demo-texts) fixture.
            return self.accumulator_cls(self.top_ids2, self.dictionary2)

        def test_occurrence_counting(self):
            # Window size 3 covers each toy text entirely.
            accumulator = self.init_accumulator().accumulate(self.texts, 3)
            self.assertEqual(3, accumulator.get_occurrences("this"))
            self.assertEqual(1, accumulator.get_occurrences("is"))
            self.assertEqual(1, accumulator.get_occurrences("a"))
            self.assertEqual(2, accumulator.get_co_occurrences("test", "document"))
            self.assertEqual(2, accumulator.get_co_occurrences("test", "this"))
            self.assertEqual(1, accumulator.get_co_occurrences("is", "a"))

        def test_occurrence_counting2(self):
            # Window size 110 exceeds every document length, so each doc is one window.
            accumulator = self.init_accumulator2().accumulate(self.texts2, 110)
            self.assertEqual(2, accumulator.get_occurrences("human"))
            self.assertEqual(4, accumulator.get_occurrences("user"))
            self.assertEqual(3, accumulator.get_occurrences("graph"))
            self.assertEqual(3, accumulator.get_occurrences("trees"))

            cases = [
                (1, ("human", "interface")),
                (2, ("system", "user")),
                (2, ("graph", "minors")),
                (2, ("graph", "trees")),
                (4, ("user", "user")),
                (3, ("graph", "graph")),
                (0, ("time", "eps"))
            ]
            for expected_count, (word1, word2) in cases:
                # Verify co-occurrence counts are correct, regardless of word order.
                self.assertEqual(expected_count, accumulator.get_co_occurrences(word1, word2))
                self.assertEqual(expected_count, accumulator.get_co_occurrences(word2, word1))
                # Also verify that using token ids instead of tokens works the same.
                word_id1 = self.dictionary2.token2id[word1]
                word_id2 = self.dictionary2.token2id[word2]
                self.assertEqual(expected_count, accumulator.get_co_occurrences(word_id1, word_id2))
                self.assertEqual(expected_count, accumulator.get_co_occurrences(word_id2, word_id1))

        def test_occurences_for_irrelevant_words(self):
            # Tokens outside the tracked id set should raise KeyError rather than return 0.
            accumulator = self.init_accumulator().accumulate(self.texts, 2)
            with self.assertRaises(KeyError):
                accumulator.get_occurrences("irrelevant")
            with self.assertRaises(KeyError):
                accumulator.get_co_occurrences("test", "irrelevant")
class TestInvertedIndexAccumulator(BaseTestCases.TextAnalyzerTestBase):
    """Inverted-index accumulation over the toy corpus, at two window sizes."""

    accumulator_cls = InvertedIndexAccumulator

    def test_accumulate1(self):
        # Window size 2 slides over the texts, producing:
        # [['this', 'is'], ['is', 'a'], ['test', 'document'], ['this', 'test'],
        #  ['test', 'document'], ['test', 'test'], ['test', 'this']]
        acc = InvertedIndexAccumulator(self.top_ids, self.dictionary).accumulate(self.texts, 2)
        observed = acc.index_to_dict()
        self.assertDictEqual(
            {
                10: {0, 3, 6},
                15: {0, 1},
                20: {1},
                21: {2, 3, 4, 5, 6},
                17: {2, 4},
            },
            observed,
        )

    def test_accumulate2(self):
        # Window size 3 covers each text whole:
        # [['this', 'is', 'a'], ['test', 'document'], ['this', 'test', 'document'],
        #  ['test', 'test', 'this']]
        acc = InvertedIndexAccumulator(self.top_ids, self.dictionary).accumulate(self.texts, 3)
        observed = acc.index_to_dict()
        self.assertDictEqual(
            {
                10: {0, 2, 3},
                15: {0},
                20: {0},
                21: {1, 2, 3},
                17: {1, 2},
            },
            observed,
        )
class TestWordOccurrenceAccumulator(BaseTestCases.TextAnalyzerTestBase):
    # Run the shared occurrence-counting tests against the single-process accumulator.
    accumulator_cls = WordOccurrenceAccumulator
class TestParallelWordOccurrenceAccumulator(BaseTestCases.TextAnalyzerTestBase):
    # Run the shared occurrence-counting tests against the multiprocessing accumulator.
    accumulator_cls = ParallelWordOccurrenceAccumulator

    def init_accumulator(self):
        # The parallel accumulator takes the number of worker processes as first argument.
        return self.accumulator_cls(2, self.top_ids, self.dictionary)

    def init_accumulator2(self):
        # Same, for the second fixture.
        return self.accumulator_cls(2, self.top_ids2, self.dictionary2)
class TestCorpusAnalyzer(unittest.TestCase):
    """CorpusAccumulator works on bag-of-words input instead of raw token lists."""

    def setUp(self):
        # Reuse the hand-made fixture from the shared base and convert it to BoW.
        base = BaseTestCases.TextAnalyzerTestBase
        self.dictionary = base.dictionary
        self.top_ids = base.top_ids
        self.corpus = [self.dictionary.doc2bow(doc) for doc in base.texts]

    def test_index_accumulation(self):
        acc = CorpusAccumulator(self.top_ids).accumulate(self.corpus)
        observed = acc.index_to_dict()
        # Each whole document acts as one window, so the index matches window size 3.
        self.assertDictEqual(
            {
                10: {0, 2, 3},
                15: {0},
                20: {0},
                21: {1, 2, 3},
                17: {1, 2},
            },
            observed,
        )
        # Spot-check derived occurrence and co-occurrence counts by token id.
        self.assertEqual(3, acc.get_occurrences(10))
        self.assertEqual(2, acc.get_occurrences(17))
        self.assertEqual(2, acc.get_co_occurrences(10, 21))
        self.assertEqual(1, acc.get_co_occurrences(10, 17))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    logging.root.setLevel(logging.WARNING)
    unittest.main()
| 6,482 | 37.820359 | 100 | py |
poincare_glove | poincare_glove-master/gensim/test/simspeed2.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s CORPUS_DENSE.mm CORPUS_SPARSE.mm [NUMDOCS]
Run speed test of similarity queries. Only use the first NUMDOCS documents of \
each corpus for testing (or use all if no NUMDOCS is given).
The two sample corpora can be downloaded from http://nlp.fi.muni.cz/projekty/gensim/wikismall.tgz
Example: ./simspeed2.py wikismall.dense.mm wikismall.sparse.mm
"""
import logging
import sys
import itertools
import os
import math
from time import time
import gensim
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    logging.info("running %s", " ".join(sys.argv))

    # check and process cmdline input
    program = os.path.basename(sys.argv[0])
    if len(sys.argv) < 3:
        print(globals()['__doc__'] % locals())
        sys.exit(1)

    corpus_dense = gensim.corpora.MmCorpus(sys.argv[1])
    corpus_sparse = gensim.corpora.MmCorpus(sys.argv[2])
    dense_features, sparse_features = corpus_dense.num_terms, corpus_sparse.num_terms
    if len(sys.argv) > 3:
        # only benchmark over the first NUMDOCS documents of each corpus
        NUMDOCS = int(sys.argv[3])
        corpus_dense = list(itertools.islice(corpus_dense, NUMDOCS))
        corpus_sparse = list(itertools.islice(corpus_sparse, NUMDOCS))

    # create the query index to be tested (one for dense input, one for sparse)
    index_dense = gensim.similarities.Similarity('/tmp/tstdense', corpus_dense, dense_features)
    index_sparse = gensim.similarities.Similarity('/tmp/tstsparse', corpus_sparse, sparse_features)
    density = 100.0 * sum(shard.num_nnz for shard in index_sparse.shards) / (len(index_sparse) * sparse_features)

    logging.info(
        "test 1 (dense): similarity of all vs. all (%i documents, %i dense features)",
        len(corpus_dense), index_dense.num_features
    )
    for chunksize in [1, 8, 32, 64, 128, 256, 512, 1024, index_dense.shardsize]:
        index_dense.chunksize = chunksize
        start = time()
        for sim in index_dense:
            pass
        taken = time() - start
        queries = math.ceil(1.0 * len(corpus_dense) / chunksize)
        logging.info(
            "chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s)",
            chunksize, taken, len(corpus_dense) / taken, queries / taken
        )

    index_dense.num_best = 10
    logging.info("test 2 (dense): as above, but only ask for the top-10 most similar for each document")
    for chunksize in [1, 8, 32, 64, 128, 256, 512, 1024, index_dense.shardsize]:
        index_dense.chunksize = chunksize
        start = time()
        # Iterate without materializing results, same as the other all-vs-all tests,
        # so only the query itself is timed (the old list-building skewed this test).
        for sim in index_dense:
            pass
        taken = time() - start
        queries = math.ceil(1.0 * len(corpus_dense) / chunksize)
        logging.info(
            "chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s)",
            chunksize, taken, len(corpus_dense) / taken, queries / taken
        )
    index_dense.num_best = None

    logging.info(
        "test 3 (sparse): similarity of all vs. all (%i documents, %i features, %.2f%% density)",
        len(corpus_sparse), index_sparse.num_features, density
    )
    for chunksize in [1, 5, 10, 100, 256, 500, 1000, index_sparse.shardsize]:
        index_sparse.chunksize = chunksize
        start = time()
        for sim in index_sparse:
            pass
        taken = time() - start
        queries = math.ceil(1.0 * len(corpus_sparse) / chunksize)
        logging.info(
            "chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s)",
            chunksize, taken, len(corpus_sparse) / taken, queries / taken
        )

    index_sparse.num_best = 10
    logging.info("test 4 (sparse): as above, but only ask for the top-10 most similar for each document")
    for chunksize in [1, 5, 10, 100, 256, 500, 1000, index_sparse.shardsize]:
        index_sparse.chunksize = chunksize
        start = time()
        for sim in index_sparse:
            pass
        taken = time() - start
        queries = math.ceil(1.0 * len(corpus_sparse) / chunksize)
        logging.info(
            "chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s)",
            chunksize, taken, len(corpus_sparse) / taken, queries / taken
        )
    index_sparse.num_best = None

    # Difference between test #5 and test #1 is that the query in #5 is a gensim iterable
    # corpus, while in #1, the index is used directly (numpy arrays). So #5 is slower,
    # because it needs to convert sparse vecs to numpy arrays and normalize them to
    # unit length=extra work, which #1 avoids.
    query = list(itertools.islice(corpus_dense, 1000))
    logging.info(
        "test 5 (dense): dense corpus of %i docs vs. index (%i documents, %i dense features)",
        len(query), len(index_dense), index_dense.num_features
    )
    for chunksize in [1, 8, 32, 64, 128, 256, 512, 1024]:
        start = time()
        if chunksize > 1:
            for chunk in gensim.utils.chunkize_serial(query, chunksize):
                _ = index_dense[chunk]
        else:
            for vec in query:
                _ = index_dense[vec]
        taken = time() - start
        queries = math.ceil(1.0 * len(query) / chunksize)
        logging.info(
            "chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s)",
            chunksize, taken, len(query) / taken, queries / taken
        )

    # Same comment as for test #5.
    # BUGFIX: this query was mistakenly built from corpus_dense (copy-paste from test #5),
    # while the log message and the index below both refer to the sparse corpus.
    query = list(itertools.islice(corpus_sparse, 1000))
    logging.info(
        "test 6 (sparse): sparse corpus of %i docs vs. sparse index (%i documents, %i features, %.2f%% density)",
        len(query), len(corpus_sparse), index_sparse.num_features, density
    )
    for chunksize in [1, 5, 10, 100, 500, 1000]:
        start = time()
        if chunksize > 1:
            for chunk in gensim.utils.chunkize_serial(query, chunksize):
                _ = index_sparse[chunk]
        else:
            for vec in query:
                _ = index_sparse[vec]
        taken = time() - start
        queries = math.ceil(1.0 * len(query) / chunksize)
        logging.info(
            "chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s)",
            chunksize, taken, len(query) / taken, queries / taken
        )

    logging.info("finished running %s", program)
| 6,415 | 39.1 | 113 | py |
poincare_glove | poincare_glove-master/gensim/test/test_indirect_confirmation.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for indirect confirmation measures in the indirect_confirmation_measure module.
"""
import logging
import unittest
import numpy as np
from gensim.corpora.dictionary import Dictionary
from gensim.topic_coherence import indirect_confirmation_measure
from gensim.topic_coherence import text_analysis
class TestIndirectConfirmation(unittest.TestCase):
    """Indirect confirmation measures checked against a tiny hand-computed example."""

    def setUp(self):
        # Set up toy example for better understanding and testing
        # of this module. See the modules for the mathematical formulas
        self.topics = [np.array([1, 2])]
        # Result from s_one_set segmentation:
        self.segmentation = [[(1, np.array([1, 2])), (2, np.array([1, 2]))]]
        self.gamma = 1
        self.measure = 'nlr'

        self.dictionary = Dictionary()
        self.dictionary.id2token = {1: 'fake', 2: 'tokens'}

    def testCosineSimilarity(self):
        """Test cosine_similarity()"""
        accumulator = text_analysis.InvertedIndexAccumulator({1, 2}, self.dictionary)
        # Inject known private state (inverted index + doc count) directly so the
        # expected similarity below can be computed by hand.
        accumulator._inverted_index = {0: {2, 3, 4}, 1: {3, 5}}
        accumulator._num_docs = 5
        obtained = indirect_confirmation_measure.cosine_similarity(
            self.segmentation, accumulator, self.topics, self.measure, self.gamma)
        # The steps involved in this calculation are as follows:
        # 1. Take (1, array([1, 2]). Take w' which is 1.
        # 2. Calculate nlr(1, 1), nlr(1, 2). This is our first vector.
        # 3. Take w* which is array([1, 2]).
        # 4. Calculate nlr(1, 1) + nlr(2, 1). Calculate nlr(1, 2), nlr(2, 2). This is our second vector.
        # 5. Find out cosine similarity between these two vectors.
        # 6. Similarly for the second segmentation.
        expected = (0.6230 + 0.6230) / 2.  # To account for EPSILON approximation
        self.assertAlmostEqual(expected, obtained[0], 4)

        # Same computation, but also requesting the standard deviation per topic.
        mean, std = indirect_confirmation_measure.cosine_similarity(
            self.segmentation, accumulator, self.topics, self.measure, self.gamma,
            with_std=True)[0]
        self.assertAlmostEqual(expected, mean, 4)
        self.assertAlmostEqual(0.0, std, 1)

    def testWord2VecSimilarity(self):
        """Sanity check word2vec_similarity."""
        accumulator = text_analysis.WordVectorsAccumulator({1, 2}, self.dictionary)
        accumulator.accumulate([
            ['fake', 'tokens'],
            ['tokens', 'fake']
        ], 5)
        # Only sanity-check that a non-degenerate mean/std comes back; exact values
        # depend on the (randomly initialized) word vectors.
        mean, std = indirect_confirmation_measure.word2vec_similarity(
            self.segmentation, accumulator, with_std=True)[0]
        self.assertNotEqual(0.0, mean)
        self.assertNotEqual(0.0, std)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    logging.root.setLevel(logging.WARNING)
    unittest.main()
| 2,906 | 37.76 | 104 | py |
poincare_glove | poincare_glove-master/gensim/test/test_coherencemodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import os
import unittest
from unittest import SkipTest
import multiprocessing as mp
from functools import partial
import numpy as np
from gensim.matutils import argsort
from gensim.models.coherencemodel import CoherenceModel, BOOLEAN_DOCUMENT_BASED
from gensim.models.ldamodel import LdaModel
from gensim.models.wrappers import LdaMallet
from gensim.models.wrappers import LdaVowpalWabbit
from gensim.test.utils import get_tmpfile, common_texts, common_dictionary, common_corpus
class TestCoherenceModel(unittest.TestCase):
    """Tests for CoherenceModel: all coherence measures, wrapper-model sanity checks,
    persistence, and the internal accumulator-caching behaviour."""

    # set up vars used in testing ("Deerwester" from the web tutorial)
    texts = common_texts
    dictionary = common_dictionary
    corpus = common_corpus

    def setUp(self):
        # Suppose given below are the topics which two different LdaModels come up with.
        # `topics1` is clearly better as it has a clear distinction between system-human
        # interaction and graphs. Hence both the coherence measures for `topics1` should be
        # greater.
        self.topics1 = [
            ['human', 'computer', 'system', 'interface'],
            ['graph', 'minors', 'trees', 'eps']
        ]
        self.topics2 = [
            ['user', 'graph', 'minors', 'system'],
            ['time', 'graph', 'survey', 'minors']
        ]
        # Untrained model (passes=0, iterations=0): only used for sanity checks.
        self.ldamodel = LdaModel(
            corpus=self.corpus, id2word=self.dictionary, num_topics=2,
            passes=0, iterations=0
        )

        # Optional external models; only built when the respective tool is installed.
        mallet_home = os.environ.get('MALLET_HOME', None)
        self.mallet_path = os.path.join(mallet_home, 'bin', 'mallet') if mallet_home else None
        if self.mallet_path:
            self.malletmodel = LdaMallet(
                mallet_path=self.mallet_path, corpus=self.corpus,
                id2word=self.dictionary, num_topics=2, iterations=0
            )

        vw_path = os.environ.get('VOWPAL_WABBIT_PATH', None)
        if not vw_path:
            logging.info(
                "Environment variable 'VOWPAL_WABBIT_PATH' not specified, skipping sanity checks for LDA Model"
            )
            self.vw_path = None
        else:
            self.vw_path = vw_path
            self.vwmodel = LdaVowpalWabbit(
                self.vw_path, corpus=self.corpus, id2word=self.dictionary,
                num_topics=2, passes=0
            )

    def check_coherence_measure(self, coherence):
        """Check provided topic coherence algorithm on given topics"""
        # Document-based measures take a BoW corpus; the others take tokenized texts.
        if coherence in BOOLEAN_DOCUMENT_BASED:
            kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence=coherence)
        else:
            kwargs = dict(texts=self.texts, dictionary=self.dictionary, coherence=coherence)

        cm1 = CoherenceModel(topics=self.topics1, **kwargs)
        cm2 = CoherenceModel(topics=self.topics2, **kwargs)
        # topics1 is deliberately the "better" topic set (see setUp).
        self.assertGreater(cm1.get_coherence(), cm2.get_coherence())

    def testUMass(self):
        """Test U_Mass topic coherence algorithm on given topics"""
        self.check_coherence_measure('u_mass')

    def testCv(self):
        """Test C_v topic coherence algorithm on given topics"""
        self.check_coherence_measure('c_v')

    def testCuci(self):
        """Test C_uci topic coherence algorithm on given topics"""
        self.check_coherence_measure('c_uci')

    def testCnpmi(self):
        """Test C_npmi topic coherence algorithm on given topics"""
        self.check_coherence_measure('c_npmi')

    def testUMassLdaModel(self):
        """Perform sanity check to see if u_mass coherence works with LDA Model"""
        # Note that this is just a sanity check because LDA does not guarantee a better coherence
        # value on the topics if iterations are increased. This can be seen here:
        # https://gist.github.com/dsquareindia/60fd9ab65b673711c3fa00509287ddde
        CoherenceModel(model=self.ldamodel, corpus=self.corpus, coherence='u_mass')

    def testCvLdaModel(self):
        """Perform sanity check to see if c_v coherence works with LDA Model"""
        CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_v')

    def testCw2vLdaModel(self):
        """Perform sanity check to see if c_w2v coherence works with LDAModel."""
        CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_w2v')

    def testCuciLdaModel(self):
        """Perform sanity check to see if c_uci coherence works with LDA Model"""
        CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_uci')

    def testCnpmiLdaModel(self):
        """Perform sanity check to see if c_npmi coherence works with LDA Model"""
        CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_npmi')

    def testUMassMalletModel(self):
        """Perform sanity check to see if u_mass coherence works with LDA Mallet gensim wrapper"""
        self._check_for_mallet()
        CoherenceModel(model=self.malletmodel, corpus=self.corpus, coherence='u_mass')

    def _check_for_mallet(self):
        # Skip the calling test when Mallet is not available on this machine.
        if not self.mallet_path:
            raise SkipTest("Mallet not installed")

    def testCvMalletModel(self):
        """Perform sanity check to see if c_v coherence works with LDA Mallet gensim wrapper"""
        self._check_for_mallet()
        CoherenceModel(model=self.malletmodel, texts=self.texts, coherence='c_v')

    def testCw2vMalletModel(self):
        """Perform sanity check to see if c_w2v coherence works with LDA Mallet gensim wrapper"""
        self._check_for_mallet()
        CoherenceModel(model=self.malletmodel, texts=self.texts, coherence='c_w2v')

    def testCuciMalletModel(self):
        """Perform sanity check to see if c_uci coherence works with LDA Mallet gensim wrapper"""
        self._check_for_mallet()
        CoherenceModel(model=self.malletmodel, texts=self.texts, coherence='c_uci')

    def testCnpmiMalletModel(self):
        """Perform sanity check to see if c_npmi coherence works with LDA Mallet gensim wrapper"""
        self._check_for_mallet()
        CoherenceModel(model=self.malletmodel, texts=self.texts, coherence='c_npmi')

    def testUMassVWModel(self):
        """Perform sanity check to see if u_mass coherence works with LDA VW gensim wrapper"""
        self._check_for_vw()
        CoherenceModel(model=self.vwmodel, corpus=self.corpus, coherence='u_mass')

    def _check_for_vw(self):
        # Skip the calling test when Vowpal Wabbit is not available on this machine.
        if not self.vw_path:
            raise SkipTest("Vowpal Wabbit not installed")

    def testCvVWModel(self):
        """Perform sanity check to see if c_v coherence works with LDA VW gensim wrapper"""
        self._check_for_vw()
        CoherenceModel(model=self.vwmodel, texts=self.texts, coherence='c_v')

    def testCw2vVWModel(self):
        """Perform sanity check to see if c_w2v coherence works with LDA VW gensim wrapper"""
        self._check_for_vw()
        CoherenceModel(model=self.vwmodel, texts=self.texts, coherence='c_w2v')

    def testCuciVWModel(self):
        """Perform sanity check to see if c_uci coherence works with LDA VW gensim wrapper"""
        self._check_for_vw()
        CoherenceModel(model=self.vwmodel, texts=self.texts, coherence='c_uci')

    def testCnpmiVWModel(self):
        """Perform sanity check to see if c_npmi coherence works with LDA VW gensim wrapper"""
        self._check_for_vw()
        CoherenceModel(model=self.vwmodel, texts=self.texts, coherence='c_npmi')

    def testErrors(self):
        """Test if errors are raised on bad input"""
        # not providing dictionary
        self.assertRaises(
            ValueError, CoherenceModel, topics=self.topics1, corpus=self.corpus,
            coherence='u_mass'
        )
        # not providing texts for c_v and instead providing corpus
        self.assertRaises(
            ValueError, CoherenceModel, topics=self.topics1, corpus=self.corpus,
            dictionary=self.dictionary, coherence='c_v'
        )
        # not providing corpus or texts for u_mass
        self.assertRaises(
            ValueError, CoherenceModel, topics=self.topics1, dictionary=self.dictionary,
            coherence='u_mass'
        )

    def testProcesses(self):
        # Any non-positive `processes` value falls back to cpu_count() - 1.
        get_model = partial(
            CoherenceModel,
            topics=self.topics1, corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass'
        )
        model, used_cpus = get_model(), mp.cpu_count() - 1
        self.assertEqual(model.processes, used_cpus)
        for p in range(-2, 1):
            self.assertEqual(get_model(processes=p).processes, used_cpus)
        for p in range(1, 4):
            self.assertEqual(get_model(processes=p).processes, p)

    def testPersistence(self):
        # Save/load must preserve the computed coherence value.
        fname = get_tmpfile('gensim_models_coherence.tst')
        model = CoherenceModel(
            topics=self.topics1, corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass'
        )
        model.save(fname)
        model2 = CoherenceModel.load(fname)
        self.assertTrue(model.get_coherence() == model2.get_coherence())

    def testPersistenceCompressed(self):
        # Same round-trip, through a gzip-compressed file.
        fname = get_tmpfile('gensim_models_coherence.tst.gz')
        model = CoherenceModel(
            topics=self.topics1, corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass'
        )
        model.save(fname)
        model2 = CoherenceModel.load(fname)
        self.assertTrue(model.get_coherence() == model2.get_coherence())

    def testPersistenceAfterProbabilityEstimationUsingCorpus(self):
        # The estimated probability accumulator must survive save/load (corpus-based).
        fname = get_tmpfile('gensim_similarities.tst.pkl')
        model = CoherenceModel(
            topics=self.topics1, corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass'
        )
        model.estimate_probabilities()
        model.save(fname)
        model2 = CoherenceModel.load(fname)
        self.assertIsNotNone(model2._accumulator)
        self.assertTrue(model.get_coherence() == model2.get_coherence())

    def testPersistenceAfterProbabilityEstimationUsingTexts(self):
        # The estimated probability accumulator must survive save/load (texts-based).
        fname = get_tmpfile('gensim_similarities.tst.pkl')
        model = CoherenceModel(
            topics=self.topics1, texts=self.texts, dictionary=self.dictionary, coherence='c_v'
        )
        model.estimate_probabilities()
        model.save(fname)
        model2 = CoherenceModel.load(fname)
        self.assertIsNotNone(model2._accumulator)
        self.assertTrue(model.get_coherence() == model2.get_coherence())

    def testAccumulatorCachingSameSizeTopics(self):
        # Re-assigning the same topics keeps the cached accumulator; different topics drop it.
        kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass')
        cm1 = CoherenceModel(topics=self.topics1, **kwargs)
        cm1.estimate_probabilities()
        accumulator = cm1._accumulator
        self.assertIsNotNone(accumulator)
        cm1.topics = self.topics1
        self.assertEqual(accumulator, cm1._accumulator)
        cm1.topics = self.topics2
        self.assertEqual(None, cm1._accumulator)

    def testAccumulatorCachingTopicSubsets(self):
        # Topic subsets are covered by the existing accumulator, so it must be kept.
        kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass')
        cm1 = CoherenceModel(topics=self.topics1, **kwargs)
        cm1.estimate_probabilities()
        accumulator = cm1._accumulator
        self.assertIsNotNone(accumulator)
        cm1.topics = [t[:2] for t in self.topics1]
        self.assertEqual(accumulator, cm1._accumulator)
        cm1.topics = self.topics1
        self.assertEqual(accumulator, cm1._accumulator)

    def testAccumulatorCachingWithModelSetting(self):
        # Switching to a model with different topics invalidates the accumulator.
        kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass')
        cm1 = CoherenceModel(topics=self.topics1, **kwargs)
        cm1.estimate_probabilities()
        self.assertIsNotNone(cm1._accumulator)
        cm1.model = self.ldamodel
        topics = []
        for topic in self.ldamodel.state.get_lambda():
            bestn = argsort(topic, topn=cm1.topn, reverse=True)
            topics.append(bestn)
        self.assertTrue(np.array_equal(topics, cm1.topics))
        self.assertIsNone(cm1._accumulator)

    def testAccumulatorCachingWithTopnSettingGivenTopics(self):
        # Shrinking topn keeps both the accumulator and the stored full-size topics.
        kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, topn=5, coherence='u_mass')
        cm1 = CoherenceModel(topics=self.topics1, **kwargs)
        cm1.estimate_probabilities()
        self.assertIsNotNone(cm1._accumulator)

        accumulator = cm1._accumulator
        topics_before = cm1._topics
        cm1.topn = 3
        self.assertEqual(accumulator, cm1._accumulator)
        self.assertEqual(3, len(cm1.topics[0]))
        self.assertEqual(topics_before, cm1._topics)

        # Topics should not have been truncated, so topn settings below 5 should work
        cm1.topn = 4
        self.assertEqual(accumulator, cm1._accumulator)
        self.assertEqual(4, len(cm1.topics[0]))
        self.assertEqual(topics_before, cm1._topics)

        with self.assertRaises(ValueError):
            cm1.topn = 6  # can't expand topics any further without model

    def testAccumulatorCachingWithTopnSettingGivenModel(self):
        # With a model attached, topn can also grow beyond the original setting.
        kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, topn=5, coherence='u_mass')
        cm1 = CoherenceModel(model=self.ldamodel, **kwargs)
        cm1.estimate_probabilities()
        self.assertIsNotNone(cm1._accumulator)

        accumulator = cm1._accumulator
        topics_before = cm1._topics
        cm1.topn = 3
        self.assertEqual(accumulator, cm1._accumulator)
        self.assertEqual(3, len(cm1.topics[0]))
        self.assertEqual(topics_before, cm1._topics)

        cm1.topn = 6  # should be able to expand given the model
        self.assertEqual(6, len(cm1.topics[0]))

    def testCompareCoherenceForTopics(self):
        topics = [self.topics1, self.topics2]
        cm = CoherenceModel.for_topics(
            topics, dictionary=self.dictionary, texts=self.texts, coherence='c_v')
        self.assertIsNotNone(cm._accumulator)

        # Accumulator should have all relevant IDs.
        for topic_list in topics:
            cm.topics = topic_list
            self.assertIsNotNone(cm._accumulator)

        (coherence_topics1, coherence1), (coherence_topics2, coherence2) = \
            cm.compare_model_topics(topics)

        self.assertAlmostEqual(np.mean(coherence_topics1), coherence1, 4)
        self.assertAlmostEqual(np.mean(coherence_topics2), coherence2, 4)
        self.assertGreater(coherence1, coherence2)

    def testCompareCoherenceForModels(self):
        # Comparing a model against itself must yield (near-)identical coherence.
        models = [self.ldamodel, self.ldamodel]
        cm = CoherenceModel.for_models(
            models, dictionary=self.dictionary, texts=self.texts, coherence='c_v')
        self.assertIsNotNone(cm._accumulator)

        # Accumulator should have all relevant IDs.
        for model in models:
            cm.model = model
            self.assertIsNotNone(cm._accumulator)

        (coherence_topics1, coherence1), (coherence_topics2, coherence2) = \
            cm.compare_models(models)

        self.assertAlmostEqual(np.mean(coherence_topics1), coherence1, 4)
        self.assertAlmostEqual(np.mean(coherence_topics2), coherence2, 4)
        self.assertAlmostEqual(coherence1, coherence2, places=4)
# Allow running this test module directly from the command line with verbose logging.
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 15,457 | 41.350685 | 111 | py |
poincare_glove | poincare_glove-master/gensim/test/test_ldavowpalwabbit_wrapper.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Tests for Vowpal Wabbit LDA wrapper.
Will not be run unless the environment variable 'VOWPAL_WABBIT_PATH' is set
and points to the `vw` executable.
"""
import logging
import unittest
import os
import os.path
import tempfile
from collections import defaultdict
import six
from gensim.corpora import Dictionary
import gensim.models.wrappers.ldavowpalwabbit as ldavowpalwabbit
from gensim.models.wrappers.ldavowpalwabbit import LdaVowpalWabbit
from gensim.test.utils import datapath
# Five hand-crafted, clearly separated topics (animals, cars, names, languages,
# food); test_topic_coherence checks that trained LDA topics map back onto
# these groupings.
TOPIC_WORDS = [
    'cat lion leopard mouse jaguar lynx cheetah tiger kitten puppy'.split(),
    'engine car wheel brakes tyre motor suspension cylinder exhaust clutch'.split(),
    'alice bob robert tim sue rachel dave harry alex jim'.split(),
    'c cplusplus go python haskell scala java ruby csharp erlang'.split(),
    'eggs ham mushrooms cereal coffee beans tea juice sausages bacon'.split()
]
def get_corpus():
    """Load the bundled VW test corpus and dictionary.

    Returns a ``(corpus, dictionary)`` pair, where ``corpus`` is a list of
    bag-of-words documents built from 'ldavowpalwabbit.txt'.
    """
    dictionary = Dictionary.load_from_text(datapath('ldavowpalwabbit.dict.txt'))
    with open(datapath('ldavowpalwabbit.txt')) as lines:
        corpus = [dictionary.doc2bow(line.strip().split()) for line in lines]
    return corpus, dictionary
class TestLdaVowpalWabbit(unittest.TestCase):
    """Integration tests for the Vowpal Wabbit LDA wrapper.

    Tests run only when the VOWPAL_WABBIT_PATH environment variable points at
    the `vw` executable; otherwise setUp raises SkipTest. On Python 2.6 (where
    unittest.SkipTest does not exist) each test silently returns instead.
    """
    def setUp(self):
        vw_path = os.environ.get('VOWPAL_WABBIT_PATH', None)
        if not vw_path:
            msg = "Environment variable 'VOWPAL_WABBIT_PATH' not specified, skipping tests"
            try:
                raise unittest.SkipTest(msg)
            except AttributeError:
                # couldn't find a way of skipping tests in python 2.6;
                # flag it on self so each test can bail out early instead
                self.vw_path = None
        corpus, dictionary = get_corpus()
        self.vw_path = vw_path
        self.corpus = corpus
        self.dictionary = dictionary
    def test_save_load(self):
        """Test loading/saving LdaVowpalWabbit model."""
        if not self.vw_path:  # for python 2.6
            return
        lda = LdaVowpalWabbit(
            self.vw_path, corpus=self.corpus, passes=10, chunksize=256,
            id2word=self.dictionary, cleanup_files=True, alpha=0.1,
            eta=0.1, num_topics=len(TOPIC_WORDS), random_seed=1
        )
        with tempfile.NamedTemporaryFile() as fhandle:
            lda.save(fhandle.name)
            lda2 = LdaVowpalWabbit.load(fhandle.name)
            # ensure public fields are saved/loaded correctly
            saved_fields = [
                lda.alpha, lda.chunksize, lda.cleanup_files,
                lda.decay, lda.eta, lda.gamma_threshold,
                lda.id2word, lda.num_terms, lda.num_topics,
                lda.passes, lda.random_seed, lda.vw_path
            ]
            loaded_fields = [
                lda2.alpha, lda2.chunksize, lda2.cleanup_files,
                lda2.decay, lda2.eta, lda2.gamma_threshold,
                lda2.id2word, lda2.num_terms, lda2.num_topics,
                lda2.passes, lda2.random_seed, lda2.vw_path
            ]
            self.assertEqual(saved_fields, loaded_fields)
            # ensure topic matrices are saved/loaded correctly
            saved_topics = lda.show_topics(num_topics=5, num_words=10)
            loaded_topics = lda2.show_topics(num_topics=5, num_words=10)
            self.assertEqual(loaded_topics, saved_topics)
    def test_model_update(self):
        """Test updating existing LdaVowpalWabbit model."""
        if not self.vw_path:  # for python 2.6
            return
        # train on the first document only, then feed the rest via update()
        lda = LdaVowpalWabbit(
            self.vw_path, corpus=[self.corpus[0]], passes=10, chunksize=256,
            id2word=self.dictionary, cleanup_files=True, alpha=0.1,
            eta=0.1, num_topics=len(TOPIC_WORDS), random_seed=1
        )
        lda.update(self.corpus[1:])
        result = lda.log_perplexity(self.corpus)
        # log perplexity varies between runs but should land in (-5, -1)
        self.assertTrue(result < -1)
        self.assertTrue(result > -5)
    def test_perplexity(self):
        """Test LdaVowpalWabbit perplexity is within expected range."""
        if not self.vw_path:  # for python 2.6
            return
        lda = LdaVowpalWabbit(
            self.vw_path, corpus=self.corpus, passes=10, chunksize=256,
            id2word=self.dictionary, cleanup_files=True, alpha=0.1,
            eta=0.1, num_topics=len(TOPIC_WORDS), random_seed=1)
        # varies, but should be between -1 and -5
        result = lda.log_perplexity(self.corpus)
        self.assertTrue(result < -1)
        self.assertTrue(result > -5)
    def test_topic_coherence(self):
        """Test LdaVowpalWabbit topic coherence."""
        if not self.vw_path:  # for python 2.6
            return
        corpus, dictionary = get_corpus()
        lda = LdaVowpalWabbit(
            self.vw_path, corpus=corpus, passes=10, chunksize=256,
            id2word=dictionary, cleanup_files=True, alpha=0.1,
            eta=0.1, num_topics=len(TOPIC_WORDS), random_seed=1
        )
        lda.print_topics(5, 10)
        # map words in known topic to an ID
        topic_map = {}
        for i, words in enumerate(TOPIC_WORDS):
            topic_map[frozenset(words)] = i
        n_coherent = 0
        for topic_id in range(lda.num_topics):
            topic = lda.show_topic(topic_id, topn=20)
            # get all words from LDA topic (each entry appears to be a
            # (weight, word) pair -- the word is at index 1)
            topic_words = [w[1] for w in topic]
            # get list of original topics that each word actually belongs to
            ids = []
            for word in topic_words:
                for src_topic_words, src_topic_id in six.iteritems(topic_map):
                    if word in src_topic_words:
                        ids.append(src_topic_id)
            # count the number of times each original topic appears
            counts = defaultdict(int)
            for found_topic_id in ids:
                counts[found_topic_id] += 1
            # if at least 6/10 words assigned to same topic, consider it coherent
            max_count = 0
            for count in six.itervalues(counts):
                max_count = max(max_count, count)
            if max_count >= 6:
                n_coherent += 1
        # not 100% deterministic, but should always get 3+ coherent topics
        self.assertTrue(n_coherent >= 3)
    def test_corpus_to_vw(self):
        """Test corpus to Vowpal Wabbit format conversion."""
        if not self.vw_path:  # for python 2.6
            return
        corpus = [
            [(0, 5), (7, 1), (5, 3), (0, 2)],
            [(7, 2), (2, 1), (3, 11)],
            [(1, 1)],
            [],
            [(5, 2), (0, 1)]
        ]
        # one "| id:count ..." line per document; empty documents become a bare "|"
        expected = """
| 0:5 7:1 5:3 0:2
| 7:2 2:1 3:11
| 1:1
|
| 5:2 0:1
""".strip()
        result = '\n'.join(ldavowpalwabbit.corpus_to_vw(corpus))
        self.assertEqual(result, expected)
    def testvwmodel2ldamodel(self):
        """Test copying of VWModel to LdaModel"""
        if not self.vw_path:
            return
        tm1 = LdaVowpalWabbit(vw_path=self.vw_path, corpus=self.corpus, num_topics=2, id2word=self.dictionary)
        tm2 = ldavowpalwabbit.vwmodel2ldamodel(tm1)
        # the converted model should give (nearly) identical document topics
        for document in self.corpus:
            element1_1, element1_2 = tm1[document][0]
            element2_1, element2_2 = tm2[document][0]
            self.assertAlmostEqual(element1_1, element2_1)
            self.assertAlmostEqual(element1_2, element2_2, 5)
            logging.debug('%d %d', element1_1, element2_1)
            logging.debug('%d %d', element1_2, element2_2)
if __name__ == '__main__':
    # Allow running this test module directly, with verbose DEBUG logging.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 7,796 | 35.097222 | 110 | py |
poincare_glove | poincare_glove-master/gensim/test/test_corpora_hashdictionary.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Unit tests for the `corpora.HashDictionary` class.
"""
import logging
import unittest
import os
import zlib
from gensim.corpora.hashdictionary import HashDictionary
from gensim.test.utils import get_tmpfile, common_texts
class TestHashDictionary(unittest.TestCase):
    """Unit tests for `corpora.HashDictionary`.

    Most tests pin `myhash=zlib.adler32` so that token IDs are deterministic;
    the literal integer IDs below (e.g. 31002 for 'human') are the resulting
    hashed IDs -- presumably adler32 reduced into the default id_range
    (TODO confirm against HashDictionary's implementation).
    """
    def setUp(self):
        self.texts = common_texts
    def testDocFreqOneDoc(self):
        # document frequency of each hashed token in a single document
        texts = [['human', 'interface', 'computer']]
        d = HashDictionary(texts, myhash=zlib.adler32)
        expected = {10608: 1, 12466: 1, 31002: 1}
        self.assertEqual(d.dfs, expected)
    def testDocFreqAndToken2IdForSeveralDocsWithOneWord(self):
        # two docs
        texts = [['human'], ['human']]
        d = HashDictionary(texts, myhash=zlib.adler32)
        expected = {31002: 2}
        self.assertEqual(d.dfs, expected)
        # only one token (human) should exist
        expected = {'human': 31002}
        self.assertEqual(d.token2id['human'], expected['human'])
        self.assertEqual(d.token2id.keys(), expected.keys())
        # three docs
        texts = [['human'], ['human'], ['human']]
        d = HashDictionary(texts, myhash=zlib.adler32)
        expected = {31002: 3}
        self.assertEqual(d.dfs, expected)
        # only one token (human) should exist
        expected = {'human': 31002}
        self.assertEqual(d.token2id['human'], expected['human'])
        self.assertEqual(d.token2id.keys(), expected.keys())
        # four docs
        texts = [['human'], ['human'], ['human'], ['human']]
        d = HashDictionary(texts, myhash=zlib.adler32)
        expected = {31002: 4}
        self.assertEqual(d.dfs, expected)
        # only one token (human) should exist
        expected = {'human': 31002}
        self.assertEqual(d.token2id['human'], expected['human'])
        self.assertEqual(d.token2id.keys(), expected.keys())
    def testDocFreqForOneDocWithSeveralWord(self):
        # two words
        texts = [['human', 'cat']]
        d = HashDictionary(texts, myhash=zlib.adler32)
        expected = {9273: 1, 31002: 1}
        self.assertEqual(d.dfs, expected)
        # three words
        texts = [['human', 'cat', 'minors']]
        d = HashDictionary(texts, myhash=zlib.adler32)
        expected = {9273: 1, 15001: 1, 31002: 1}
        self.assertEqual(d.dfs, expected)
    def testDebugMode(self):
        # debug=True keeps a reverse id->token mapping for inspection
        texts = [['human', 'cat']]
        d = HashDictionary(texts, debug=True, myhash=zlib.adler32)
        expected = {9273: {'cat'}, 31002: {'human'}}
        self.assertEqual(d.id2token, expected)
        # now the same thing, with debug off: no reverse mapping is kept
        texts = [['human', 'cat']]
        d = HashDictionary(texts, debug=False, myhash=zlib.adler32)
        expected = {}
        self.assertEqual(d.id2token, expected)
    def testRange(self):
        # all words map to the same id when id_range=1
        d = HashDictionary(self.texts, id_range=1, debug=True)
        dfs = {0: 9}
        id2token = {
            0: {
                'minors', 'graph', 'system', 'trees', 'eps', 'computer',
                'survey', 'user', 'human', 'time', 'interface', 'response'
            }
        }
        token2id = {
            'minors': 0, 'graph': 0, 'system': 0, 'trees': 0,
            'eps': 0, 'computer': 0, 'survey': 0, 'user': 0,
            'human': 0, 'time': 0, 'interface': 0, 'response': 0
        }
        self.assertEqual(d.dfs, dfs)
        self.assertEqual(d.id2token, id2token)
        self.assertEqual(d.token2id, token2id)
        # 2 ids: 0/1 for even/odd number of bytes in the word
        # (custom hash = word length, reduced modulo id_range=2)
        d = HashDictionary(self.texts, id_range=2, myhash=lambda key: len(key))
        dfs = {0: 7, 1: 7}
        id2token = {
            0: {'minors', 'system', 'computer', 'survey', 'user', 'time', 'response'},
            1: {'interface', 'graph', 'trees', 'eps', 'human'}
        }
        token2id = {
            'minors': 0, 'graph': 1, 'system': 0, 'trees': 1, 'eps': 1, 'computer': 0,
            'survey': 0, 'user': 0, 'human': 1, 'time': 0, 'interface': 1, 'response': 0
        }
        self.assertEqual(d.dfs, dfs)
        self.assertEqual(d.id2token, id2token)
        self.assertEqual(d.token2id, token2id)
    def testBuild(self):
        # building from the full common_texts corpus yields these hashed
        # ids and document frequencies
        d = HashDictionary(self.texts, myhash=zlib.adler32)
        expected = {
            5232: 2, 5798: 3, 10608: 2, 12466: 2, 12736: 3, 15001: 2,
            18451: 3, 23844: 3, 28591: 2, 29104: 2, 31002: 2, 31049: 2
        }
        self.assertEqual(d.dfs, expected)
        expected = {
            'minors': 15001, 'graph': 18451, 'system': 5798, 'trees': 23844,
            'eps': 31049, 'computer': 10608, 'survey': 28591, 'user': 12736,
            'human': 31002, 'time': 29104, 'interface': 12466, 'response': 5232
        }
        for ex in expected:
            self.assertEqual(d.token2id[ex], expected[ex])
    def testFilter(self):
        # default filter_extremes removes every token from this tiny corpus
        d = HashDictionary(self.texts, myhash=zlib.adler32)
        d.filter_extremes()
        expected = {}
        self.assertEqual(d.dfs, expected)
        # keep only tokens appearing in <= 30% of documents
        d = HashDictionary(self.texts, myhash=zlib.adler32)
        d.filter_extremes(no_below=0, no_above=0.3)
        expected = {
            29104: 2, 31049: 2, 28591: 2, 5232: 2,
            10608: 2, 12466: 2, 15001: 2, 31002: 2
        }
        self.assertEqual(d.dfs, expected)
        # keep at most 4 tokens appearing in >= 3 documents
        d = HashDictionary(self.texts, myhash=zlib.adler32)
        d.filter_extremes(no_below=3, no_above=1.0, keep_n=4)
        expected = {5798: 3, 12736: 3, 18451: 3, 23844: 3}
        self.assertEqual(d.dfs, expected)
    def test_saveAsText(self):
        """ `HashDictionary` can be saved as textfile. """
        tmpf = get_tmpfile('dict_test.txt')
        # use some utf8 strings, to test encoding serialization
        d = HashDictionary(['žloťoučký koníček'.split(), 'Малйж обльйквюэ ат эжт'.split()])
        d.save_as_text(tmpf)
        self.assertTrue(os.path.exists(tmpf))
    def test_saveAsTextBz2(self):
        """ `HashDictionary` can be saved & loaded as compressed pickle. """
        tmpf = get_tmpfile('dict_test.txt.bz2')
        # use some utf8 strings, to test encoding serialization
        d = HashDictionary(['žloťoučký koníček'.split(), 'Малйж обльйквюэ ат эжт'.split()])
        d.save(tmpf)
        self.assertTrue(os.path.exists(tmpf))
        d2 = d.load(tmpf)
        self.assertEqual(len(d), len(d2))
if __name__ == '__main__':
    # Allow running this test module directly; log warnings and above only.
    logging.basicConfig(level=logging.WARNING)
    unittest.main()
| 6,552 | 35.608939 | 91 | py |
poincare_glove | poincare_glove-master/gensim/test/test_wordrank_wrapper.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import os
import numpy
from gensim.models.wrappers import wordrank
from gensim.test.utils import datapath, get_tmpfile
class TestWordrank(unittest.TestCase):
    """Tests for the Wordrank wrapper.

    Tests that require training run only when the WR_HOME environment
    variable points at a Wordrank installation; the load/format tests use
    the bundled 'test_glove.txt' file regardless.
    """
    def setUp(self):
        wr_home = os.environ.get('WR_HOME', None)
        self.wr_path = wr_home if wr_home else None
        self.corpus_file = datapath('lee.cor')
        self.out_name = 'testmodel'
        self.wr_file = datapath('test_glove.txt')
        if not self.wr_path:
            return
        self.test_model = wordrank.Wordrank.train(
            self.wr_path, self.corpus_file, self.out_name, iter=6,
            dump_period=5, period=5, np=4, cleanup_files=True
        )
    def testLoadWordrankFormat(self):
        """Test model successfully loaded from Wordrank format file"""
        model = wordrank.Wordrank.load_wordrank_model(self.wr_file)
        vocab_size, dim = 76, 50
        self.assertEqual(model.syn0.shape, (vocab_size, dim))
        self.assertEqual(len(model.vocab), vocab_size)
        # loading leaves behind a converted word2vec-format file; clean it up
        os.remove(self.wr_file + '.w2vformat')
    def testEnsemble(self):
        """Test ensemble of two embeddings"""
        if not self.wr_path:
            return
        new_emb = self.test_model.ensemble_embedding(self.wr_file, self.wr_file)
        self.assertEqual(new_emb.shape, (76, 50))
        os.remove(self.wr_file + '.w2vformat')
    def testPersistence(self):
        """Test storing/loading the entire model"""
        if not self.wr_path:
            return
        tmpf = get_tmpfile('gensim_wordrank.test')
        self.test_model.save(tmpf)
        loaded = wordrank.Wordrank.load(tmpf)
        self.models_equal(self.test_model, loaded)
    def testSimilarity(self):
        """Test n_similarity for vocab words"""
        if not self.wr_path:
            return
        self.assertTrue(numpy.allclose(self.test_model.n_similarity(['the', 'and'], ['and', 'the']), 1.0))
        self.assertEqual(self.test_model.similarity('the', 'and'), self.test_model.similarity('the', 'and'))
    def testLookup(self):
        # indexing with a word and with a one-element list should agree
        if not self.wr_path:
            return
        self.assertTrue(numpy.allclose(self.test_model['night'], self.test_model[['night']]))
    def models_equal(self, model, model2):
        # helper: assert two models have identical vocabularies and vectors
        self.assertEqual(len(model.vocab), len(model2.vocab))
        self.assertEqual(set(model.vocab.keys()), set(model2.vocab.keys()))
        self.assertTrue(numpy.allclose(model.syn0, model2.syn0))
if __name__ == '__main__':
    # Allow running this test module directly, with verbose DEBUG logging.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 2,854 | 33.817073 | 108 | py |
poincare_glove | poincare_glove-master/gensim/test/basetmtests.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import numpy as np
import six
class TestBaseTopicModel(object):
    """Mixin of assertions shared by topic-model test cases.

    Concrete test classes must provide `self.model` (and unittest's assert
    methods, via multiple inheritance from unittest.TestCase).
    """
    def test_print_topic(self):
        topics = self.model.show_topics(formatted=True)
        for topic_no, topic in topics:
            self.assertTrue(isinstance(topic_no, int))
            # NOTE: `unicode` only exists on Python 2; on Python 3 this relies
            # on the first isinstance() being True so the `or` short-circuits.
            # Do not rewrite as isinstance(topic, (str, unicode)).
            self.assertTrue(isinstance(topic, str) or isinstance(topic, unicode))  # noqa:F821
    def test_print_topics(self):
        topics = self.model.print_topics()
        for topic_no, topic in topics:
            self.assertTrue(isinstance(topic_no, int))
            # see note in test_print_topic about the str/unicode short-circuit
            self.assertTrue(isinstance(topic, str) or isinstance(topic, unicode))  # noqa:F821
    def test_show_topic(self):
        topic = self.model.show_topic(1)
        # each entry is a (word, probability) pair
        for k, v in topic:
            self.assertTrue(isinstance(k, six.string_types))
            self.assertTrue(isinstance(v, (np.floating, float)))
    def test_show_topics(self):
        topics = self.model.show_topics(formatted=False)
        for topic_no, topic in topics:
            self.assertTrue(isinstance(topic_no, int))
            self.assertTrue(isinstance(topic, list))
            for k, v in topic:
                self.assertTrue(isinstance(k, six.string_types))
                self.assertTrue(isinstance(v, (np.floating, float)))
    def test_get_topics(self):
        topics = self.model.get_topics()
        vocab_size = len(self.model.id2word)
        for topic in topics:
            self.assertTrue(isinstance(topic, np.ndarray))
            # Note: started moving to np.float32 as default
            # self.assertEqual(topic.dtype, np.float64)
            self.assertEqual(vocab_size, topic.shape[0])
            # each topic is a probability distribution over the vocabulary
            self.assertAlmostEqual(np.sum(topic), 1.0, 5)
| 1,947 | 34.418182 | 94 | py |
poincare_glove | poincare_glove-master/gensim/test/test_big.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking processing/storing large inputs.
"""
import logging
import unittest
import os
import numpy as np
import gensim
from gensim.test.utils import get_tmpfile
class BigCorpus(object):
    """Synthetic corpus streaming many random documents over a large vocabulary."""

    def __init__(self, words_only=False, num_terms=200000, num_docs=1000000, doc_len=100):
        # FakeDict stands in for a real id->word mapping over the huge vocab.
        self.dictionary = gensim.utils.FakeDict(num_terms)
        self.words_only = words_only
        self.num_docs = num_docs
        self.doc_len = doc_len

    def __iter__(self):
        vocab_size = len(self.dictionary)
        for _ in range(self.num_docs):
            # document length fluctuates around the configured mean
            n_tokens = np.random.poisson(self.doc_len)
            token_ids = np.random.randint(0, vocab_size, n_tokens)
            if self.words_only:
                yield [str(token_id) for token_id in token_ids]
            else:
                counts = np.random.poisson(3, n_tokens)
                yield sorted(zip(token_ids, counts))
# The heavyweight tests below are only defined when GENSIM_BIG is set in the
# environment, so normal test runs never pay their cost.
if os.environ.get('GENSIM_BIG', False):
    class TestLargeData(unittest.TestCase):
        """Try common operations, using large models. You'll need ~8GB RAM to run these tests"""
        def testWord2Vec(self):
            corpus = BigCorpus(words_only=True, num_docs=100000, num_terms=3000000, doc_len=200)
            tmpf = get_tmpfile('gensim_big.tst')
            model = gensim.models.Word2Vec(corpus, size=300, workers=4)
            # drop syn1 when saving to keep the file (and reload) smaller
            model.save(tmpf, ignore=['syn1'])
            del model
            gensim.models.Word2Vec.load(tmpf)
        def testLsiModel(self):
            corpus = BigCorpus(num_docs=50000)
            tmpf = get_tmpfile('gensim_big.tst')
            model = gensim.models.LsiModel(corpus, num_topics=500, id2word=corpus.dictionary)
            model.save(tmpf)
            del model
            gensim.models.LsiModel.load(tmpf)
        def testLdaModel(self):
            corpus = BigCorpus(num_docs=5000)
            tmpf = get_tmpfile('gensim_big.tst')
            model = gensim.models.LdaModel(corpus, num_topics=500, id2word=corpus.dictionary)
            model.save(tmpf)
            del model
            gensim.models.LdaModel.load(tmpf)
if __name__ == '__main__':
    # Allow running this test module directly, with verbose DEBUG logging.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 2,435 | 31.918919 | 96 | py |
poincare_glove | poincare_glove-master/gensim/test/test_atmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <radimrehurek@seznam.cz>
# Copyright (C) 2016 Olavur Mortensen <olavurmortensen@gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for the author-topic model (AuthorTopicModel class). These tests
are based on the unit tests of LDA; the classes are quite similar, and the tests
needed are thus quite similar.
"""
import logging
import unittest
import numbers
from os import remove
import six
import numpy as np
from gensim.corpora import mmcorpus, Dictionary
from gensim.models import atmodel
from gensim import matutils
from gensim.test import basetmtests
from gensim.test.utils import (datapath,
get_tmpfile, common_texts, common_dictionary as dictionary, common_corpus as corpus)
# TODO:
# Test that computing the bound on new unseen documents works as expected (this is somewhat different
# in the author-topic model than in LDA).
# Perhaps test that the bound increases, in general (i.e. in several of the tests below where it makes
# sense. This is not tested in LDA either. Tests can also be made to check that automatic prior learning
# increases the bound.
# Test that models are compatiple across versions, as done in LdaModel.
# Assign some authors randomly to the documents above.
# author2doc maps author name -> list of document indices.
author2doc = {
    'john': [0, 1, 2, 3, 4, 5, 6],
    'jane': [2, 3, 4, 5, 6, 7, 8],
    'jack': [0, 2, 4, 6, 8],
    'jill': [1, 3, 5, 7]
}
# doc2author is the exact inverse mapping: document index -> list of authors.
doc2author = {
    0: ['john', 'jack'],
    1: ['john', 'jill'],
    2: ['john', 'jane', 'jack'],
    3: ['john', 'jane', 'jill'],
    4: ['john', 'jane', 'jack'],
    5: ['john', 'jane', 'jill'],
    6: ['john', 'jane', 'jack'],
    7: ['jane', 'jill'],
    8: ['jane', 'jack']
}
# More data with new and old authors (to test update method).
# Although the text is just a subset of the previous, the model
# just sees it as completely new data.
texts_new = common_texts[0:3]
# 'jill' already exists in author2doc above; 'bob' and 'sally' are new authors.
author2doc_new = {'jill': [0], 'bob': [0, 1], 'sally': [1, 2]}
dictionary_new = Dictionary(texts_new)
corpus_new = [dictionary_new.doc2bow(text) for text in texts_new]
class TestAuthorTopicModel(unittest.TestCase, basetmtests.TestBaseTopicModel):
    def setUp(self):
        # Shared fixtures: a corpus read from disk plus a model trained on the
        # module-level `corpus` with the fixed author2doc mapping.
        self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
        self.class_ = atmodel.AuthorTopicModel
        self.model = self.class_(corpus, id2word=dictionary, author2doc=author2doc, num_topics=2, passes=100)
    def testTransform(self):
        """Training should recover a known topic distribution for author 'jill'."""
        passed = False
        # sometimes, training gets stuck at a local minimum
        # in that case try re-training the model from scratch, hoping for a
        # better random initialization
        for i in range(25):  # restart at most 25 times
            # create the transformation model
            model = self.class_(id2word=dictionary, num_topics=2, passes=100, random_state=0)
            model.update(corpus, author2doc)
            jill_topics = model.get_author_topics('jill')
            # NOTE: this test may easily fail if the author-topic model is altered in any way. The model's
            # output is sensitive to a lot of things, like the scheduling of the updates, or like the
            # author2id (because the random initialization changes when author2id changes). If it does
            # fail, simply be aware of whether we broke something, or if it just naturally changed the
            # output of the model slightly.
            vec = matutils.sparse2full(jill_topics, 2)  # convert to dense vector, for easier equality tests
            expected = [0.91, 0.08]
            # must contain the same values, up to re-ordering
            passed = np.allclose(sorted(vec), sorted(expected), atol=1e-1)
            if passed:
                break
            logging.warning(
                "Author-topic model failed to converge on attempt %i (got %s, expected %s)",
                i, sorted(vec), sorted(expected)
            )
        self.assertTrue(passed)
def testBasic(self):
# Check that training the model produces a positive topic vector for some author
# Otherwise, many of the other tests are invalid.
model = self.class_(corpus, author2doc=author2doc, id2word=dictionary, num_topics=2)
jill_topics = model.get_author_topics('jill')
jill_topics = matutils.sparse2full(jill_topics, model.num_topics)
self.assertTrue(all(jill_topics > 0))
def testAuthor2docMissing(self):
# Check that the results are the same if author2doc is constructed automatically from doc2author.
model = self.class_(
corpus, author2doc=author2doc, doc2author=doc2author,
id2word=dictionary, num_topics=2, random_state=0
)
model2 = self.class_(
corpus, doc2author=doc2author, id2word=dictionary,
num_topics=2, random_state=0
)
# Compare Jill's topics before in both models.
jill_topics = model.get_author_topics('jill')
jill_topics2 = model2.get_author_topics('jill')
jill_topics = matutils.sparse2full(jill_topics, model.num_topics)
jill_topics2 = matutils.sparse2full(jill_topics2, model.num_topics)
self.assertTrue(np.allclose(jill_topics, jill_topics2))
def testDoc2authorMissing(self):
# Check that the results are the same if doc2author is constructed automatically from author2doc.
model = self.class_(
corpus, author2doc=author2doc, doc2author=doc2author,
id2word=dictionary, num_topics=2, random_state=0
)
model2 = self.class_(
corpus, author2doc=author2doc, id2word=dictionary,
num_topics=2, random_state=0
)
# Compare Jill's topics before in both models.
jill_topics = model.get_author_topics('jill')
jill_topics2 = model2.get_author_topics('jill')
jill_topics = matutils.sparse2full(jill_topics, model.num_topics)
jill_topics2 = matutils.sparse2full(jill_topics2, model.num_topics)
self.assertTrue(np.allclose(jill_topics, jill_topics2))
def testUpdate(self):
# Check that calling update after the model already has been trained works.
model = self.class_(corpus, author2doc=author2doc, id2word=dictionary, num_topics=2)
jill_topics = model.get_author_topics('jill')
jill_topics = matutils.sparse2full(jill_topics, model.num_topics)
model.update()
jill_topics2 = model.get_author_topics('jill')
jill_topics2 = matutils.sparse2full(jill_topics2, model.num_topics)
# Did we learn something?
self.assertFalse(all(np.equal(jill_topics, jill_topics2)))
    def testUpdateNewDataOldAuthor(self):
        # Check that calling update with new documents and/or authors after the model already has
        # been trained works.
        # Test an author ('jill') that already existed in the old dataset.
        model = self.class_(corpus, author2doc=author2doc, id2word=dictionary, num_topics=2)
        jill_topics = model.get_author_topics('jill')
        jill_topics = matutils.sparse2full(jill_topics, model.num_topics)
        model.update(corpus_new, author2doc_new)
        jill_topics2 = model.get_author_topics('jill')
        jill_topics2 = matutils.sparse2full(jill_topics2, model.num_topics)
        # Did we learn more about Jill?
        self.assertFalse(all(np.equal(jill_topics, jill_topics2)))
def testUpdateNewDataNewAuthor(self):
# Check that calling update with new documents and/or authors after the model already has
# been trained works.
# Test a new author, that didn't exist in the old dataset.
model = self.class_(corpus, author2doc=author2doc, id2word=dictionary, num_topics=2)
model.update(corpus_new, author2doc_new)
# Did we learn something about Sally?
sally_topics = model.get_author_topics('sally')
sally_topics = matutils.sparse2full(sally_topics, model.num_topics)
self.assertTrue(all(sally_topics > 0))
def testSerialized(self):
# Test the model using serialized corpora. Basic tests, plus test of update functionality.
model = self.class_(
self.corpus, author2doc=author2doc, id2word=dictionary, num_topics=2,
serialized=True, serialization_path=datapath('testcorpus_serialization.mm')
)
jill_topics = model.get_author_topics('jill')
jill_topics = matutils.sparse2full(jill_topics, model.num_topics)
self.assertTrue(all(jill_topics > 0))
model.update()
jill_topics2 = model.get_author_topics('jill')
jill_topics2 = matutils.sparse2full(jill_topics2, model.num_topics)
# Did we learn more about Jill?
self.assertFalse(all(np.equal(jill_topics, jill_topics2)))
model.update(corpus_new, author2doc_new)
# Did we learn something about Sally?
sally_topics = model.get_author_topics('sally')
sally_topics = matutils.sparse2full(sally_topics, model.num_topics)
self.assertTrue(all(sally_topics > 0))
# Delete the MmCorpus used for serialization inside the author-topic model.
remove(datapath('testcorpus_serialization.mm'))
    def testTransformSerialized(self):
        # Same as testTransform, using serialized corpora.
        passed = False
        # sometimes, training gets stuck at a local minimum
        # in that case try re-training the model from scratch, hoping for a
        # better random initialization
        for i in range(25):  # restart at most 25 times
            # create the transformation model
            model = self.class_(
                id2word=dictionary, num_topics=2, passes=100, random_state=0,
                serialized=True, serialization_path=datapath('testcorpus_serialization.mm')
            )
            model.update(self.corpus, author2doc)
            jill_topics = model.get_author_topics('jill')
            # NOTE: this test may easily fail if the author-topic model is altered in any way. The model's
            # output is sensitive to a lot of things, like the scheduling of the updates, or like the
            # author2id (because the random initialization changes when author2id changes). If it does
            # fail, simply be aware of whether we broke something, or if it just naturally changed the
            # output of the model slightly.
            vec = matutils.sparse2full(jill_topics, 2)  # convert to dense vector, for easier equality tests
            expected = [0.91, 0.08]
            # must contain the same values, up to re-ordering
            passed = np.allclose(sorted(vec), sorted(expected), atol=1e-1)
            # Delete the MmCorpus used for serialization inside the author-topic model,
            # before the next iteration re-creates it.
            remove(datapath('testcorpus_serialization.mm'))
            if passed:
                break
            logging.warning(
                "Author-topic model failed to converge on attempt %i (got %s, expected %s)",
                i, sorted(vec), sorted(expected)
            )
        self.assertTrue(passed)
def testAlphaAuto(self):
model1 = self.class_(
corpus, author2doc=author2doc, id2word=dictionary,
alpha='symmetric', passes=10, num_topics=2
)
modelauto = self.class_(
corpus, author2doc=author2doc, id2word=dictionary,
alpha='auto', passes=10, num_topics=2
)
# did we learn something?
self.assertFalse(all(np.equal(model1.alpha, modelauto.alpha)))
    def testAlpha(self):
        """Exercise every accepted form of the `alpha` prior, plus the error cases."""
        kwargs = dict(
            author2doc=author2doc,
            id2word=dictionary,
            num_topics=2,
            alpha=None
        )
        expected_shape = (2,)
        # should not raise anything
        self.class_(**kwargs)
        # string presets
        kwargs['alpha'] = 'symmetric'
        model = self.class_(**kwargs)
        self.assertEqual(model.alpha.shape, expected_shape)
        self.assertTrue(all(model.alpha == np.array([0.5, 0.5])))
        kwargs['alpha'] = 'asymmetric'
        model = self.class_(**kwargs)
        self.assertEqual(model.alpha.shape, expected_shape)
        self.assertTrue(np.allclose(model.alpha, [0.630602, 0.369398]))
        # scalars are broadcast across all topics
        kwargs['alpha'] = 0.3
        model = self.class_(**kwargs)
        self.assertEqual(model.alpha.shape, expected_shape)
        self.assertTrue(all(model.alpha == np.array([0.3, 0.3])))
        kwargs['alpha'] = 3
        model = self.class_(**kwargs)
        self.assertEqual(model.alpha.shape, expected_shape)
        self.assertTrue(all(model.alpha == np.array([3, 3])))
        # explicit per-topic values, as list or ndarray
        kwargs['alpha'] = [0.3, 0.3]
        model = self.class_(**kwargs)
        self.assertEqual(model.alpha.shape, expected_shape)
        self.assertTrue(all(model.alpha == np.array([0.3, 0.3])))
        kwargs['alpha'] = np.array([0.3, 0.3])
        model = self.class_(**kwargs)
        self.assertEqual(model.alpha.shape, expected_shape)
        self.assertTrue(all(model.alpha == np.array([0.3, 0.3])))
        # all should raise an exception for being wrong shape
        kwargs['alpha'] = [0.3, 0.3, 0.3]
        self.assertRaises(AssertionError, self.class_, **kwargs)
        kwargs['alpha'] = [[0.3], [0.3]]
        self.assertRaises(AssertionError, self.class_, **kwargs)
        kwargs['alpha'] = [0.3]
        self.assertRaises(AssertionError, self.class_, **kwargs)
        # unrecognized string presets raise ValueError
        kwargs['alpha'] = "gensim is cool"
        self.assertRaises(ValueError, self.class_, **kwargs)
def testEtaAuto(self):
model1 = self.class_(
corpus, author2doc=author2doc, id2word=dictionary,
eta='symmetric', passes=10, num_topics=2
)
modelauto = self.class_(
corpus, author2doc=author2doc, id2word=dictionary,
eta='auto', passes=10, num_topics=2
)
# did we learn something?
self.assertFalse(all(np.equal(model1.eta, modelauto.eta)))
    def testEta(self):
        """Exercise every accepted form of the `eta` prior, plus the error cases."""
        kwargs = dict(
            author2doc=author2doc,
            id2word=dictionary,
            num_topics=2,
            eta=None
        )
        num_terms = len(dictionary)
        expected_shape = (num_terms,)
        # should not raise anything; None defaults to a symmetric 0.5 prior
        model = self.class_(**kwargs)
        self.assertEqual(model.eta.shape, expected_shape)
        self.assertTrue(all(model.eta == np.array([0.5] * num_terms)))
        kwargs['eta'] = 'symmetric'
        model = self.class_(**kwargs)
        self.assertEqual(model.eta.shape, expected_shape)
        self.assertTrue(all(model.eta == np.array([0.5] * num_terms)))
        # scalars are broadcast across all terms
        kwargs['eta'] = 0.3
        model = self.class_(**kwargs)
        self.assertEqual(model.eta.shape, expected_shape)
        self.assertTrue(all(model.eta == np.array([0.3] * num_terms)))
        kwargs['eta'] = 3
        model = self.class_(**kwargs)
        self.assertEqual(model.eta.shape, expected_shape)
        self.assertTrue(all(model.eta == np.array([3] * num_terms)))
        # explicit per-term values, as list or ndarray
        kwargs['eta'] = [0.3] * num_terms
        model = self.class_(**kwargs)
        self.assertEqual(model.eta.shape, expected_shape)
        self.assertTrue(all(model.eta == np.array([0.3] * num_terms)))
        kwargs['eta'] = np.array([0.3] * num_terms)
        model = self.class_(**kwargs)
        self.assertEqual(model.eta.shape, expected_shape)
        self.assertTrue(all(model.eta == np.array([0.3] * num_terms)))
        # should be ok with num_topics x num_terms
        testeta = np.array([[0.5] * len(dictionary)] * 2)
        kwargs['eta'] = testeta
        self.class_(**kwargs)
        # all should raise an exception for being wrong shape
        kwargs['eta'] = testeta.reshape(tuple(reversed(testeta.shape)))
        self.assertRaises(AssertionError, self.class_, **kwargs)
        kwargs['eta'] = [0.3]
        self.assertRaises(AssertionError, self.class_, **kwargs)
        kwargs['eta'] = [0.3] * (num_terms + 1)
        self.assertRaises(AssertionError, self.class_, **kwargs)
        # unrecognized string presets raise ValueError; unlike alpha,
        # 'asymmetric' is not accepted for eta
        kwargs['eta'] = "gensim is cool"
        self.assertRaises(ValueError, self.class_, **kwargs)
        kwargs['eta'] = "asymmetric"
        self.assertRaises(ValueError, self.class_, **kwargs)
def testTopTopics(self):
top_topics = self.model.top_topics(corpus)
for topic, score in top_topics:
self.assertTrue(isinstance(topic, list))
self.assertTrue(isinstance(score, float))
for v, k in topic:
self.assertTrue(isinstance(k, six.string_types))
self.assertTrue(isinstance(v, float))
def testGetTopicTerms(self):
topic_terms = self.model.get_topic_terms(1)
for k, v in topic_terms:
self.assertTrue(isinstance(k, numbers.Integral))
self.assertTrue(isinstance(v, float))
    def testGetAuthorTopics(self):
        # NOTE(review): `author2doc` is not defined or imported anywhere visible
        # in this file, and plain LdaModel takes no author mapping -- this looks
        # copy-pasted from the author-topic model test suite; confirm it belongs here.
        model = self.class_(
            corpus, author2doc=author2doc, id2word=dictionary, num_topics=2,
            passes=100, random_state=np.random.seed(0)
        )
        author_topics = []
        for a in model.id2author.values():
            author_topics.append(model.get_author_topics(a))
        # every author distribution is a sparse list of (topic_id, probability)
        for topic in author_topics:
            self.assertTrue(isinstance(topic, list))
            for k, v in topic:
                self.assertTrue(isinstance(k, int))
                self.assertTrue(isinstance(v, float))
    def testTermTopics(self):
        # NOTE(review): `author2doc` is undefined in this file's visible scope --
        # likely copied from the author-topic tests; confirm.
        model = self.class_(
            corpus, author2doc=author2doc, id2word=dictionary, num_topics=2,
            passes=100, random_state=np.random.seed(0)
        )
        # check with word_type
        result = model.get_term_topics(2)
        for topic_no, probability in result:
            self.assertTrue(isinstance(topic_no, int))
            self.assertTrue(isinstance(probability, float))
        # if user has entered word instead, check with word
        result = model.get_term_topics(str(model.id2word[2]))
        for topic_no, probability in result:
            self.assertTrue(isinstance(topic_no, int))
            self.assertTrue(isinstance(probability, float))
    def testPasses(self):
        # Verifies that rho (the online-learning step size) evolves identically
        # regardless of how many passes each update makes over the corpus.
        # long message includes the original error message with a custom one
        self.longMessage = True
        # construct what we expect when passes aren't involved
        test_rhots = []
        model = self.class_(id2word=dictionary, chunksize=1, num_topics=2)
        def final_rhot(model):
            # rho after all updates: (offset + num_updates/chunksize) ** -decay
            return pow(model.offset + (1 * model.num_updates) / model.chunksize, -model.decay)
        # generate 5 updates to test rhot on
        # NOTE(review): `author2doc` is undefined here; plain LdaModel.update()
        # takes only the corpus -- looks copied from the author-topic tests; confirm.
        for _ in range(5):
            model.update(corpus, author2doc)
            test_rhots.append(final_rhot(model))
        for passes in [1, 5, 10, 50, 100]:
            model = self.class_(id2word=dictionary, chunksize=1, num_topics=2, passes=passes)
            self.assertEqual(final_rhot(model), 1.0)
            # make sure the rhot matches the test after each update
            for test_rhot in test_rhots:
                model.update(corpus, author2doc)
                msg = "{}, {}, {}".format(passes, model.num_updates, model.state.numdocs)
                self.assertAlmostEqual(final_rhot(model), test_rhot, msg=msg)
            self.assertEqual(model.state.numdocs, len(corpus) * len(test_rhots))
            self.assertEqual(model.num_updates, len(corpus) * len(test_rhots))
def testPersistence(self):
fname = get_tmpfile('gensim_models_atmodel.tst')
model = self.model
model.save(fname)
model2 = self.class_.load(fname)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(np.allclose(model.expElogbeta, model2.expElogbeta))
self.assertTrue(np.allclose(model.state.gamma, model2.state.gamma))
    def testPersistenceIgnore(self):
        # `ignore` must work both as a bare string and as a list of attribute names.
        # NOTE(review): `atmodel` and `author2doc` are not imported/defined in this
        # file's visible scope -- appears copied from test_atmodel.py; confirm.
        fname = get_tmpfile('gensim_models_atmodel_testPersistenceIgnore.tst')
        model = atmodel.AuthorTopicModel(corpus, author2doc=author2doc, num_topics=2)
        model.save(fname, ignore='id2word')
        model2 = atmodel.AuthorTopicModel.load(fname)
        self.assertTrue(model2.id2word is None)
        model.save(fname, ignore=['id2word'])
        model2 = atmodel.AuthorTopicModel.load(fname)
        self.assertTrue(model2.id2word is None)
    def testPersistenceCompressed(self):
        # save/load round-trip through a gzip-compressed file
        fname = get_tmpfile('gensim_models_atmodel.tst.gz')
        model = self.model
        model.save(fname)
        model2 = self.class_.load(fname, mmap=None)
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(np.allclose(model.expElogbeta, model2.expElogbeta))
        # Compare Jill's topics before and after save/load.
        # NOTE(review): get_author_topics() is an AuthorTopicModel API, not
        # LdaModel -- this block looks copied from test_atmodel.py; confirm.
        jill_topics = model.get_author_topics('jill')
        jill_topics2 = model2.get_author_topics('jill')
        jill_topics = matutils.sparse2full(jill_topics, model.num_topics)
        jill_topics2 = matutils.sparse2full(jill_topics2, model.num_topics)
        self.assertTrue(np.allclose(jill_topics, jill_topics2))
    def testLargeMmap(self):
        fname = get_tmpfile('gensim_models_atmodel.tst')
        model = self.model
        # simulate storing large arrays separately
        model.save(fname, sep_limit=0)
        # test loading the large model arrays with mmap
        model2 = self.class_.load(fname, mmap='r')
        self.assertEqual(model.num_topics, model2.num_topics)
        # separately-stored arrays come back as memory-mapped views
        self.assertTrue(isinstance(model2.expElogbeta, np.memmap))
        self.assertTrue(np.allclose(model.expElogbeta, model2.expElogbeta))
        # Compare Jill's topics before and after save/load.
        # NOTE(review): get_author_topics() belongs to AuthorTopicModel, not
        # LdaModel -- looks copied from test_atmodel.py; confirm.
        jill_topics = model.get_author_topics('jill')
        jill_topics2 = model2.get_author_topics('jill')
        jill_topics = matutils.sparse2full(jill_topics, model.num_topics)
        jill_topics2 = matutils.sparse2full(jill_topics2, model.num_topics)
        self.assertTrue(np.allclose(jill_topics, jill_topics2))
def testLargeMmapCompressed(self):
fname = get_tmpfile('gensim_models_atmodel.tst.gz')
model = self.model
# simulate storing large arrays separately
model.save(fname, sep_limit=0)
# test loading the large model arrays with mmap
self.assertRaises(IOError, self.class_.load, fname, mmap='r')
    def testDtypeBackwardCompatibility(self):
        # Models persisted by gensim 3.0.1 must still load and score identically.
        # NOTE(review): the fixture name and the `model['jane']` author lookup are
        # author-topic specific -- this looks copied from test_atmodel.py; confirm.
        atmodel_3_0_1_fname = datapath('atmodel_3_0_1_model')
        expected_topics = [(0, 0.068200842977296727), (1, 0.93179915702270333)]
        # save model to use in test
        # self.model.save(atmodel_3_0_1_fname)
        # load a model saved using a 3.0.1 version of Gensim
        model = self.class_.load(atmodel_3_0_1_fname)
        # and test it on a predefined document
        topics = model['jane']
        self.assertTrue(np.allclose(expected_topics, topics))
if __name__ == '__main__':
    # DEBUG-level logging helps diagnose nondeterministic LDA test failures.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 22,939 | 39.673759 | 109 | py |
poincare_glove | poincare_glove-master/gensim/test/test_corpora.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking corpus I/O formats (the corpora package).
"""
from __future__ import unicode_literals
import codecs
import itertools
import logging
import os.path
import tempfile
import unittest
import numpy as np
from gensim.corpora import (bleicorpus, mmcorpus, lowcorpus, svmlightcorpus,
ucicorpus, malletcorpus, textcorpus, indexedcorpus, wikicorpus)
from gensim.interfaces import TransformedCorpus
from gensim.utils import to_unicode
from gensim.test.utils import datapath, get_tmpfile
class DummyTransformer(object):
    """Transformer stub for TransformedCorpus tests: bumps every count by one.

    Heuristically distinguishes a single bag-of-words (first element is a
    2-tuple) from a slice of the corpus (first element is a whole document).
    """

    def __getitem__(self, bow):
        first = next(iter(bow))
        if len(first) == 2:
            # single document: list of (termid, count) pairs
            return [(term, cnt + 1) for term, cnt in bow]
        # corpus slice: list of documents
        return [[(term, cnt + 1) for term, cnt in doc] for doc in bow]
class CorpusTestCase(unittest.TestCase):
    """Shared scaffolding for all corpus-format tests.

    Subclasses assign `corpus_class` and `file_extension` in setUp; the base
    class itself never executes (see run()).
    """
    TEST_CORPUS = [[(1, 1.0)], [], [(0, 0.5), (2, 1.0)], []]
    def setUp(self):
        self.corpus_class = None
        self.file_extension = None
    def run(self, result=None):
        # skip execution on the abstract base class; only concrete subclasses run
        if type(self) is not CorpusTestCase:
            super(CorpusTestCase, self).run(result)
    def tearDown(self):
        # remove all temporary test files
        fname = get_tmpfile('gensim_corpus.tst')
        # the duplicate '' makes permutations include ('', ''), i.e. bare fname
        extensions = ['', '', '.bz2', '.gz', '.index', '.vocab']
        for ext in itertools.permutations(extensions, 2):
            try:
                os.remove(fname + ext[0] + ext[1])
            except OSError:
                pass
    def test_load(self):
        fname = datapath('testcorpus.' + self.file_extension.lstrip('.'))
        corpus = self.corpus_class(fname)
        docs = list(corpus)
        # the deerwester corpus always has nine documents
        self.assertEqual(len(docs), 9)
    def test_len(self):
        fname = datapath('testcorpus.' + self.file_extension.lstrip('.'))
        corpus = self.corpus_class(fname)
        # make sure corpus.index works, too
        corpus = self.corpus_class(fname)
        self.assertEqual(len(corpus), 9)
        # for subclasses of IndexedCorpus, we need to nuke this so we don't
        # test length on the index, but just testcorpus contents
        if hasattr(corpus, 'index'):
            corpus.index = None
        self.assertEqual(len(corpus), 9)
    def test_empty_input(self):
        # an empty corpus file (and empty .vocab) must load as a zero-length corpus
        tmpf = get_tmpfile('gensim_corpus.tst')
        with open(tmpf, 'w') as f:
            f.write('')
        with open(tmpf + '.vocab', 'w') as f:
            f.write('')
        corpus = self.corpus_class(tmpf)
        self.assertEqual(len(corpus), 0)
        docs = list(corpus)
        self.assertEqual(len(docs), 0)
    def test_save(self):
        corpus = self.TEST_CORPUS
        tmpf = get_tmpfile('gensim_corpus.tst')
        # make sure the corpus can be saved
        self.corpus_class.save_corpus(tmpf, corpus)
        # and loaded back, resulting in exactly the same corpus
        corpus2 = list(self.corpus_class(tmpf))
        self.assertEqual(corpus, corpus2)
    def test_serialize(self):
        corpus = self.TEST_CORPUS
        tmpf = get_tmpfile('gensim_corpus.tst')
        # make sure the corpus can be saved
        self.corpus_class.serialize(tmpf, corpus)
        # and loaded back, resulting in exactly the same corpus
        corpus2 = self.corpus_class(tmpf)
        self.assertEqual(corpus, list(corpus2))
        # make sure the indexing corpus[i] works
        for i in range(len(corpus)):
            self.assertEqual(corpus[i], corpus2[i])
        # make sure that subclasses of IndexedCorpus support fancy indexing
        # after deserialisation
        # NOTE(review): `corpus` is a plain list here, so this branch can never
        # run -- probably `corpus2` was intended; confirm before changing.
        if isinstance(corpus, indexedcorpus.IndexedCorpus):
            idx = [1, 3, 5, 7]
            self.assertEqual(corpus[idx], corpus2[idx])
    def test_serialize_compressed(self):
        corpus = self.TEST_CORPUS
        tmpf = get_tmpfile('gensim_corpus.tst')
        for extension in ['.gz', '.bz2']:
            fname = tmpf + extension
            # make sure the corpus can be saved
            self.corpus_class.serialize(fname, corpus)
            # and loaded back, resulting in exactly the same corpus
            corpus2 = self.corpus_class(fname)
            self.assertEqual(corpus, list(corpus2))
            # make sure the indexing `corpus[i]` syntax works
            for i in range(len(corpus)):
                self.assertEqual(corpus[i], corpus2[i])
    def test_switch_id2word(self):
        fname = datapath('testcorpus.' + self.file_extension.lstrip('.'))
        corpus = self.corpus_class(fname)
        if hasattr(corpus, 'id2word'):
            firstdoc = next(iter(corpus))
            testdoc = set((to_unicode(corpus.id2word[x]), y) for x, y in firstdoc)
            self.assertEqual(testdoc, {('computer', 1), ('human', 1), ('interface', 1)})
            # swap ids 0 and 1; the first document must still decode to the same words
            d = corpus.id2word
            d[0], d[1] = d[1], d[0]
            corpus.id2word = d
            firstdoc2 = next(iter(corpus))
            testdoc2 = set((to_unicode(corpus.id2word[x]), y) for x, y in firstdoc2)
            self.assertEqual(testdoc2, {('computer', 1), ('human', 1), ('interface', 1)})
    def test_indexing(self):
        fname = datapath('testcorpus.' + self.file_extension.lstrip('.'))
        corpus = self.corpus_class(fname)
        docs = list(corpus)
        # integer indexing, including numpy integer types
        for idx, doc in enumerate(docs):
            self.assertEqual(doc, corpus[idx])
            self.assertEqual(doc, corpus[np.int64(idx)])
        self.assertEqual(docs, list(corpus[:]))
        self.assertEqual(docs[0:], list(corpus[0:]))
        self.assertEqual(docs[0:-1], list(corpus[0:-1]))
        self.assertEqual(docs[2:4], list(corpus[2:4]))
        self.assertEqual(docs[::2], list(corpus[::2]))
        self.assertEqual(docs[::-1], list(corpus[::-1]))
        # make sure sliced corpora can be iterated over multiple times
        c = corpus[:]
        self.assertEqual(docs, list(c))
        self.assertEqual(docs, list(c))
        self.assertEqual(len(docs), len(corpus))
        self.assertEqual(len(docs), len(corpus[:]))
        self.assertEqual(len(docs[::2]), len(corpus[::2]))
        def _get_slice(corpus, slice_):
            # assertRaises for python 2.6 takes a callable
            return corpus[slice_]
        # make sure proper input validation for sliced corpora is done
        self.assertRaises(ValueError, _get_slice, corpus, {1})
        self.assertRaises(ValueError, _get_slice, corpus, 1.0)
        # check sliced corpora that use fancy indexing
        c = corpus[[1, 3, 4]]
        self.assertEqual([d for i, d in enumerate(docs) if i in [1, 3, 4]], list(c))
        self.assertEqual([d for i, d in enumerate(docs) if i in [1, 3, 4]], list(c))
        self.assertEqual(len(corpus[[0, 1, -1]]), 3)
        self.assertEqual(len(corpus[np.asarray([0, 1, -1])]), 3)
        # check that TransformedCorpus supports indexing when the underlying
        # corpus does, and throws an error otherwise
        corpus_ = TransformedCorpus(DummyTransformer(), corpus)
        if hasattr(corpus, 'index') and corpus.index is not None:
            # DummyTransformer increments every count by one
            self.assertEqual(corpus_[0][0][1], docs[0][0][1] + 1)
            self.assertRaises(ValueError, _get_slice, corpus_, {1})
            transformed_docs = [val + 1 for i, d in enumerate(docs) for _, val in d if i in [1, 3, 4]]
            self.assertEqual(transformed_docs, list(v for doc in corpus_[[1, 3, 4]] for _, v in doc))
            self.assertEqual(3, len(corpus_[[1, 3, 4]]))
        else:
            self.assertRaises(RuntimeError, _get_slice, corpus_, [1, 3, 4])
            self.assertRaises(RuntimeError, _get_slice, corpus_, {1})
            self.assertRaises(RuntimeError, _get_slice, corpus_, 1.0)
class TestMmCorpusWithIndex(CorpusTestCase):
    """Matrix-market corpus backed by a .index file: random access works."""

    def setUp(self):
        self.corpus_class = mmcorpus.MmCorpus
        self.corpus = self.corpus_class(datapath('test_mmcorpus_with_index.mm'))
        self.file_extension = '.mm'

    def test_serialize_compressed(self):
        # MmCorpus needs file write with seek => doesn't support compressed output (only input)
        pass

    def test_closed_file_object(self):
        """MmCorpus must not close a file object owned by the caller."""
        file_obj = open(datapath('testcorpus.mm'))
        try:
            before = file_obj.closed
            mmcorpus.MmCorpus(file_obj)
            after = file_obj.closed
        finally:
            # fix: close the handle ourselves -- the original test leaked it
            file_obj.close()
        self.assertEqual(before, 0)
        self.assertEqual(after, 0)

    def test_load(self):
        self.assertEqual(self.corpus.num_docs, 9)
        self.assertEqual(self.corpus.num_terms, 12)
        self.assertEqual(self.corpus.num_nnz, 28)
        # confirm we can iterate and that document values match expected for first three docs
        it = iter(self.corpus)
        self.assertEqual(next(it), [(0, 1.0), (1, 1.0), (2, 1.0)])
        self.assertEqual(next(it), [(0, 1.0), (3, 1.0), (4, 1.0), (5, 1.0), (6, 1.0), (7, 1.0)])
        self.assertEqual(next(it), [(2, 1.0), (5, 1.0), (7, 1.0), (8, 1.0)])
        # confirm that accessing document by index works (byte offsets in .index)
        self.assertEqual(self.corpus[3], [(1, 1.0), (5, 2.0), (8, 1.0)])
        self.assertEqual(tuple(self.corpus.index), (97, 121, 169, 201, 225, 249, 258, 276, 303))
class TestMmCorpusNoIndex(CorpusTestCase):
    # Same corpus without a .index file: iteration works, random access must fail.
    def setUp(self):
        self.corpus_class = mmcorpus.MmCorpus
        self.corpus = self.corpus_class(datapath('test_mmcorpus_no_index.mm'))
        self.file_extension = '.mm'
    def test_serialize_compressed(self):
        # MmCorpus needs file write with seek => doesn't support compressed output (only input)
        pass
    def test_load(self):
        self.assertEqual(self.corpus.num_docs, 9)
        self.assertEqual(self.corpus.num_terms, 12)
        self.assertEqual(self.corpus.num_nnz, 28)
        # confirm we can iterate and that document values match expected for first three docs
        it = iter(self.corpus)
        self.assertEqual(next(it), [(0, 1.0), (1, 1.0), (2, 1.0)])
        self.assertEqual(next(it), [])
        self.assertEqual(next(it), [(2, 0.42371910849), (5, 0.6625174), (7, 1.0), (8, 1.0)])
        # confirm that accessing document by index fails
        self.assertRaises(RuntimeError, lambda: self.corpus[3])
class TestMmCorpusNoIndexGzip(CorpusTestCase):
    # gzip-compressed input is readable; no index => no random access.
    def setUp(self):
        self.corpus_class = mmcorpus.MmCorpus
        self.corpus = self.corpus_class(datapath('test_mmcorpus_no_index.mm.gz'))
        self.file_extension = '.mm'
    def test_serialize_compressed(self):
        # MmCorpus needs file write with seek => doesn't support compressed output (only input)
        pass
    def test_load(self):
        self.assertEqual(self.corpus.num_docs, 9)
        self.assertEqual(self.corpus.num_terms, 12)
        self.assertEqual(self.corpus.num_nnz, 28)
        # confirm we can iterate and that document values match expected for first three docs
        it = iter(self.corpus)
        self.assertEqual(next(it), [(0, 1.0), (1, 1.0), (2, 1.0)])
        self.assertEqual(next(it), [])
        self.assertEqual(next(it), [(2, 0.42371910849), (5, 0.6625174), (7, 1.0), (8, 1.0)])
        # confirm that accessing document by index fails
        self.assertRaises(RuntimeError, lambda: self.corpus[3])
class TestMmCorpusNoIndexBzip(CorpusTestCase):
    # bzip2-compressed input is readable; no index => no random access.
    def setUp(self):
        self.corpus_class = mmcorpus.MmCorpus
        self.corpus = self.corpus_class(datapath('test_mmcorpus_no_index.mm.bz2'))
        self.file_extension = '.mm'
    def test_serialize_compressed(self):
        # MmCorpus needs file write with seek => doesn't support compressed output (only input)
        pass
    def test_load(self):
        self.assertEqual(self.corpus.num_docs, 9)
        self.assertEqual(self.corpus.num_terms, 12)
        self.assertEqual(self.corpus.num_nnz, 28)
        # confirm we can iterate and that document values match expected for first three docs
        it = iter(self.corpus)
        self.assertEqual(next(it), [(0, 1.0), (1, 1.0), (2, 1.0)])
        self.assertEqual(next(it), [])
        self.assertEqual(next(it), [(2, 0.42371910849), (5, 0.6625174), (7, 1.0), (8, 1.0)])
        # confirm that accessing document by index fails
        self.assertRaises(RuntimeError, lambda: self.corpus[3])
class TestMmCorpusCorrupt(CorpusTestCase):
    """A corrupt matrix-market file must fail loudly during iteration."""

    def setUp(self):
        self.corpus_class = mmcorpus.MmCorpus
        self.corpus = self.corpus_class(datapath('test_mmcorpus_corrupt.mm'))
        self.file_extension = '.mm'

    def test_serialize_compressed(self):
        # MmCorpus needs file write with seek => doesn't support compressed output (only input)
        pass

    def test_load(self):
        # idiom fix: pass `list` directly instead of wrapping a list
        # comprehension in a lambda -- same exhaustive iteration, clearer intent
        self.assertRaises(ValueError, list, self.corpus)
class TestSvmLightCorpus(CorpusTestCase):
    # Inherits every test from CorpusTestCase; only the format differs.
    def setUp(self):
        self.corpus_class = svmlightcorpus.SvmLightCorpus
        self.file_extension = '.svmlight'
class TestBleiCorpus(CorpusTestCase):
    def setUp(self):
        self.corpus_class = bleicorpus.BleiCorpus
        self.file_extension = '.blei'
    def test_save_format_for_dtm(self):
        # DTM requires each line as: unique_word_count index1:count1 ... indexN:countN
        corpus = [[(1, 1.0)], [], [(0, 5.0), (2, 1.0)], []]
        test_file = get_tmpfile('gensim_corpus.tst')
        self.corpus_class.save_corpus(test_file, corpus)
        with open(test_file) as f:
            for line in f:
                # unique_word_count index1:count1 index2:count2 ... indexn:counnt
                tokens = line.split()
                words_len = int(tokens[0])
                if words_len > 0:
                    tokens = tokens[1:]
                else:
                    tokens = []
                self.assertEqual(words_len, len(tokens))
                for token in tokens:
                    word, count = token.split(':')
                    # counts must serialize as plain integers for DTM compatibility
                    self.assertEqual(count, str(int(count)))
class TestLowCorpus(CorpusTestCase):
    # LowCorpus stores integer counts, so override the float TEST_CORPUS.
    TEST_CORPUS = [[(1, 1)], [], [(0, 2), (2, 1)], []]
    def setUp(self):
        self.corpus_class = lowcorpus.LowCorpus
        self.file_extension = '.low'
class TestUciCorpus(CorpusTestCase):
    # UciCorpus stores integer counts, so override the float TEST_CORPUS.
    TEST_CORPUS = [[(1, 1)], [], [(0, 2), (2, 1)], []]
    def setUp(self):
        self.corpus_class = ucicorpus.UciCorpus
        self.file_extension = '.uci'
    def test_serialize_compressed(self):
        # UciCorpus needs file write with seek => doesn't support compressed output (only input)
        pass
class TestMalletCorpus(CorpusTestCase):
    # Mallet format carries (document id, language) metadata per document.
    TEST_CORPUS = [[(1, 1)], [], [(0, 2), (2, 1)], []]
    def setUp(self):
        self.corpus_class = malletcorpus.MalletCorpus
        self.file_extension = '.mallet'
    def test_load_with_metadata(self):
        fname = datapath('testcorpus.' + self.file_extension.lstrip('.'))
        corpus = self.corpus_class(fname)
        corpus.metadata = True
        self.assertEqual(len(corpus), 9)
        docs = list(corpus)
        self.assertEqual(len(docs), 9)
        for i, docmeta in enumerate(docs):
            doc, metadata = docmeta
            # Mallet numbers document ids from 1 and stores the language tag
            self.assertEqual(metadata[0], str(i + 1))
            self.assertEqual(metadata[1], 'en')
class TestTextCorpus(CorpusTestCase):
    """Tests for the plain-text, line-per-document corpus."""

    def setUp(self):
        self.corpus_class = textcorpus.TextCorpus
        self.file_extension = '.txt'

    def test_load_with_metadata(self):
        fname = datapath('testcorpus.' + self.file_extension.lstrip('.'))
        corpus = self.corpus_class(fname)
        corpus.metadata = True
        self.assertEqual(len(corpus), 9)
        docs = list(corpus)
        self.assertEqual(len(docs), 9)
        for i, docmeta in enumerate(docs):
            doc, metadata = docmeta
            # TextCorpus numbers documents from 0
            self.assertEqual(metadata[0], i)

    def test_default_preprocessing(self):
        # deaccenting, stopword removal and short-token removal by default
        lines = [
            "Šéf chomutovských komunistů dostal poštou bílý prášek",
            "this is a test for stopwords",
            "zf tooth spaces "
        ]
        expected = [
            ['Sef', 'chomutovskych', 'komunistu', 'dostal', 'postou', 'bily', 'prasek'],
            ['test', 'stopwords'],
            ['tooth', 'spaces']
        ]
        corpus = self.corpus_from_lines(lines)
        texts = list(corpus.get_texts())
        self.assertEqual(expected, texts)

    def corpus_from_lines(self, lines):
        # fix: tempfile.mktemp() is race-prone and deprecated; mkstemp() creates
        # the file atomically -- close its descriptor and reopen via codecs.
        fd, fpath = tempfile.mkstemp()
        os.close(fd)
        with codecs.open(fpath, 'w', encoding='utf8') as f:
            f.write('\n'.join(lines))
        return self.corpus_class(fpath)

    def test_sample_text(self):
        lines = ["document%d" % i for i in range(10)]
        corpus = self.corpus_from_lines(lines)
        corpus.tokenizer = lambda text: text.split()
        docs = [doc for doc in corpus.get_texts()]
        # a sample of one must come from the corpus
        sample1 = list(corpus.sample_texts(1))
        self.assertEqual(len(sample1), 1)
        self.assertIn(sample1[0], docs)
        # sampling everything returns the full corpus in order
        sample2 = list(corpus.sample_texts(len(lines)))
        self.assertEqual(len(sample2), len(corpus))
        for i in range(len(corpus)):
            self.assertEqual(sample2[i], ["document%s" % i])
        # out-of-range sample sizes must be rejected
        with self.assertRaises(ValueError):
            list(corpus.sample_texts(len(corpus) + 1))
        with self.assertRaises(ValueError):
            list(corpus.sample_texts(-1))

    def test_sample_text_length(self):
        # `length` overrides the corpus length used for sampling
        lines = ["document%d" % i for i in range(10)]
        corpus = self.corpus_from_lines(lines)
        corpus.tokenizer = lambda text: text.split()
        sample1 = list(corpus.sample_texts(1, length=1))
        self.assertEqual(sample1[0], ["document0"])
        sample2 = list(corpus.sample_texts(2, length=2))
        self.assertEqual(sample2[0], ["document0"])
        self.assertEqual(sample2[1], ["document1"])

    def test_sample_text_seed(self):
        # identical seeds must yield identical samples
        lines = ["document%d" % i for i in range(10)]
        corpus = self.corpus_from_lines(lines)
        sample1 = list(corpus.sample_texts(5, seed=42))
        sample2 = list(corpus.sample_texts(5, seed=42))
        self.assertEqual(sample1, sample2)

    def test_save(self):
        # TextCorpus has no save_corpus round-trip; inherited test not applicable
        pass

    def test_serialize(self):
        pass

    def test_serialize_compressed(self):
        pass

    def test_indexing(self):
        pass
# Needed by TestWikiCorpus.test_custom_tokenizer.
# Must live at module level (not nested) so it can be pickled for serialization.
def custom_tokenizer(content, token_min_len=2, token_max_len=15, lower=True):
    """Whitespace tokenizer keeping tokens within the length bounds that do
    not start with an underscore; lowercases when `lower` is True."""
    tokens = []
    for token in content.split():
        if token.startswith('_') or not (token_min_len <= len(token) <= token_max_len):
            continue
        tokens.append(to_unicode(token.lower() if lower else token))
    return tokens
class TestWikiCorpus(TestTextCorpus):
    # Exercises WikiCorpus on small bundled wiki dumps; overrides the
    # line-based tests that cannot apply to XML input.
    def setUp(self):
        self.corpus_class = wikicorpus.WikiCorpus
        self.file_extension = '.xml.bz2'
        self.fname = datapath('testcorpus.' + self.file_extension.lstrip('.'))
        self.enwiki = datapath('enwiki-latest-pages-articles1.xml-p000000010p000030302-shortened.bz2')
    def test_default_preprocessing(self):
        expected = ['computer', 'human', 'interface']
        corpus = self.corpus_class(self.fname, article_min_tokens=0)
        first_text = next(corpus.get_texts())
        self.assertEqual(expected, first_text)
    def test_len(self):
        # When there is no min_token limit all 9 articles must be registered.
        corpus = self.corpus_class(self.fname, article_min_tokens=0)
        all_articles = corpus.get_texts()
        assert (len(list(all_articles)) == 9)
        # With a huge min_token limit, all articles should be filtered out.
        corpus = self.corpus_class(self.fname, article_min_tokens=100000)
        all_articles = corpus.get_texts()
        assert (len(list(all_articles)) == 0)
    def test_load_with_metadata(self):
        corpus = self.corpus_class(self.fname, article_min_tokens=0)
        corpus.metadata = True
        self.assertEqual(len(corpus), 9)
        docs = list(corpus)
        self.assertEqual(len(docs), 9)
        for i, docmeta in enumerate(docs):
            doc, metadata = docmeta
            article_no = i + 1  # Counting IDs from 1
            self.assertEqual(metadata[0], str(article_no))
            self.assertEqual(metadata[1], 'Article%d' % article_no)
    def test_load(self):
        corpus = self.corpus_class(self.fname, article_min_tokens=0)
        docs = list(corpus)
        # the deerwester corpus always has nine documents
        self.assertEqual(len(docs), 9)
    def test_first_element(self):
        """
        First two articles in this sample are
        1) anarchism
        2) autism
        """
        corpus = self.corpus_class(self.enwiki, processes=1)
        texts = corpus.get_texts()
        self.assertTrue(u'anarchism' in next(texts))
        self.assertTrue(u'autism' in next(texts))
    def test_unicode_element(self):
        """
        First unicode article in this sample is
        1) папа
        """
        bgwiki = datapath('bgwiki-latest-pages-articles-shortened.xml.bz2')
        corpus = self.corpus_class(bgwiki)
        texts = corpus.get_texts()
        self.assertTrue(u'папа' in next(texts))
    def test_custom_tokenizer(self):
        """
        define a custom tokenizer function and use it
        """
        # custom_tokenizer keeps 1..16-char tokens, no lowercasing
        wc = self.corpus_class(self.enwiki, processes=1, lemmatize=False, tokenizer_func=custom_tokenizer,
                               token_max_len=16, token_min_len=1, lower=False)
        row = wc.get_texts()
        list_tokens = next(row)
        self.assertTrue(u'Anarchism' in list_tokens)
        self.assertTrue(u'collectivization' in list_tokens)
        self.assertTrue(u'a' in list_tokens)
        self.assertTrue(u'i.e.' in list_tokens)
    def test_lower_case_set_true(self):
        """
        Set the parameter lower to True and check that upper case 'Anarchism' token doesnt exist
        """
        corpus = self.corpus_class(self.enwiki, processes=1, lower=True, lemmatize=False)
        row = corpus.get_texts()
        list_tokens = next(row)
        self.assertTrue(u'Anarchism' not in list_tokens)
        self.assertTrue(u'anarchism' in list_tokens)
    def test_lower_case_set_false(self):
        """
        Set the parameter lower to False and check that upper case Anarchism' token exists
        """
        corpus = self.corpus_class(self.enwiki, processes=1, lower=False, lemmatize=False)
        row = corpus.get_texts()
        list_tokens = next(row)
        self.assertTrue(u'Anarchism' in list_tokens)
        self.assertTrue(u'anarchism' in list_tokens)
    def test_min_token_len_not_set(self):
        """
        Don't set the parameter token_min_len and check that 'a' as a token doesn't exist
        Default token_min_len=2
        """
        corpus = self.corpus_class(self.enwiki, processes=1, lemmatize=False)
        self.assertTrue(u'a' not in next(corpus.get_texts()))
    def test_min_token_len_set(self):
        """
        Set the parameter token_min_len to 1 and check that 'a' as a token exists
        """
        corpus = self.corpus_class(self.enwiki, processes=1, token_min_len=1, lemmatize=False)
        self.assertTrue(u'a' in next(corpus.get_texts()))
    def test_max_token_len_not_set(self):
        """
        Don't set the parameter token_max_len and check that 'collectivisation' as a token doesn't exist
        Default token_max_len=15
        """
        corpus = self.corpus_class(self.enwiki, processes=1, lemmatize=False)
        self.assertTrue(u'collectivization' not in next(corpus.get_texts()))
    def test_max_token_len_set(self):
        """
        Set the parameter token_max_len to 16 and check that 'collectivisation' as a token exists
        """
        corpus = self.corpus_class(self.enwiki, processes=1, token_max_len=16, lemmatize=False)
        self.assertTrue(u'collectivization' in next(corpus.get_texts()))
    # #TODO: sporadic failure to be investigated
    # def test_get_texts_returns_generator_of_lists(self):
    #     corpus = self.corpus_class(self.enwiki)
    #     l = corpus.get_texts()
    #     self.assertEqual(type(l), types.GeneratorType)
    #     first = next(l)
    #     self.assertEqual(type(first), list)
    #     self.assertTrue(isinstance(first[0], bytes) or isinstance(first[0], str))
    def test_sample_text(self):
        # Cannot instantiate WikiCorpus from lines
        pass
    def test_sample_text_length(self):
        # Cannot instantiate WikiCorpus from lines
        pass
    def test_sample_text_seed(self):
        # Cannot instantiate WikiCorpus from lines
        pass
    def test_empty_input(self):
        # An empty file is not legit XML
        pass
class TestTextDirectoryCorpus(unittest.TestCase):
    """Tests for TextDirectoryCorpus: walking a directory tree of text files."""

    def write_one_level(self, *args):
        # Create a flat temp directory containing the named documents.
        if not args:
            args = ('doc1', 'doc2')
        dirpath = tempfile.mkdtemp()
        self.write_docs_to_directory(dirpath, *args)
        return dirpath

    def write_docs_to_directory(self, dirpath, *args):
        # One file per name; content encodes the document's ordinal.
        for doc_num, name in enumerate(args):
            with open(os.path.join(dirpath, name), 'w') as f:
                f.write('document %d content' % doc_num)

    def test_one_level_directory(self):
        dirpath = self.write_one_level()
        corpus = textcorpus.TextDirectoryCorpus(dirpath)
        self.assertEqual(len(corpus), 2)
        docs = list(corpus)
        self.assertEqual(len(docs), 2)

    def write_two_levels(self):
        dirpath = self.write_one_level()
        next_level = os.path.join(dirpath, 'level_two')
        os.mkdir(next_level)
        self.write_docs_to_directory(next_level, 'doc1', 'doc2')
        return dirpath, next_level

    def test_two_level_directory(self):
        # depth filters: min_depth skips the root, max_depth=0 skips subdirs
        dirpath, next_level = self.write_two_levels()
        corpus = textcorpus.TextDirectoryCorpus(dirpath)
        self.assertEqual(len(corpus), 4)
        docs = list(corpus)
        self.assertEqual(len(docs), 4)
        corpus = textcorpus.TextDirectoryCorpus(dirpath, min_depth=1)
        self.assertEqual(len(corpus), 2)
        docs = list(corpus)
        self.assertEqual(len(docs), 2)
        corpus = textcorpus.TextDirectoryCorpus(dirpath, max_depth=0)
        self.assertEqual(len(corpus), 2)
        docs = list(corpus)
        self.assertEqual(len(docs), 2)

    def test_filename_filtering(self):
        dirpath = self.write_one_level('test1.log', 'test1.txt', 'test2.log', 'other1.log')
        # fix: raw string -- "\." in a plain string literal is an invalid
        # escape sequence (DeprecationWarning since Python 3.6)
        corpus = textcorpus.TextDirectoryCorpus(dirpath, pattern=r"test.*\.log")
        filenames = list(corpus.iter_filepaths())
        expected = [os.path.join(dirpath, name) for name in ('test1.log', 'test2.log')]
        self.assertEqual(sorted(expected), sorted(filenames))
        corpus.pattern = ".*.txt"
        filenames = list(corpus.iter_filepaths())
        expected = [os.path.join(dirpath, 'test1.txt')]
        self.assertEqual(expected, filenames)
        corpus.pattern = None
        corpus.exclude_pattern = ".*.log"
        filenames = list(corpus.iter_filepaths())
        self.assertEqual(expected, filenames)

    def test_lines_are_documents(self):
        dirpath = tempfile.mkdtemp()
        lines = ['doc%d text' % i for i in range(5)]
        fpath = os.path.join(dirpath, 'test_file.txt')
        with open(fpath, 'w') as f:
            f.write('\n'.join(lines))
        corpus = textcorpus.TextDirectoryCorpus(dirpath, lines_are_documents=True)
        docs = [doc for doc in corpus.getstream()]
        self.assertEqual(len(lines), corpus.length)  # should have cached
        self.assertEqual(lines, docs)
        # toggling the flag re-interprets the whole file as one document
        corpus.lines_are_documents = False
        docs = [doc for doc in corpus.getstream()]
        self.assertEqual(1, corpus.length)
        self.assertEqual('\n'.join(lines), docs[0])

    def test_non_trivial_structure(self):
        """Test with non-trivial directory structure, shown below:
        .
        ├── 0.txt
        ├── a_folder
        │   └── 1.txt
        └── b_folder
            ├── 2.txt
            ├── 3.txt
            └── c_folder
                └── 4.txt
        """
        dirpath = tempfile.mkdtemp()
        self.write_docs_to_directory(dirpath, '0.txt')
        a_folder = os.path.join(dirpath, 'a_folder')
        os.mkdir(a_folder)
        self.write_docs_to_directory(a_folder, '1.txt')
        b_folder = os.path.join(dirpath, 'b_folder')
        os.mkdir(b_folder)
        self.write_docs_to_directory(b_folder, '2.txt', '3.txt')
        c_folder = os.path.join(b_folder, 'c_folder')
        os.mkdir(c_folder)
        self.write_docs_to_directory(c_folder, '4.txt')
        corpus = textcorpus.TextDirectoryCorpus(dirpath)
        filenames = list(corpus.iter_filepaths())
        base_names = sorted([name[len(dirpath) + 1:] for name in filenames])
        expected = sorted([
            '0.txt',
            'a_folder/1.txt',
            'b_folder/2.txt',
            'b_folder/3.txt',
            'b_folder/c_folder/4.txt'
        ])
        # normalize separators for Windows
        expected = [os.path.normpath(path) for path in expected]
        self.assertEqual(expected, base_names)
        corpus.max_depth = 1
        self.assertEqual(expected[:-1], base_names[:-1])
        corpus.min_depth = 1
        self.assertEqual(expected[2:-1], base_names[2:-1])
        corpus.max_depth = 0
        self.assertEqual(expected[2:], base_names[2:])
        corpus.pattern = "4.*"
        self.assertEqual(expected[-1], base_names[-1])
if __name__ == '__main__':
    # verbose output helps debug corpus I/O failures
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
| 29,456 | 35.321825 | 106 | py |
poincare_glove | poincare_glove-master/gensim/test/test_glove2word2vec.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Test for gensim.scripts.glove2word2vec.py."""
import logging
import unittest
import os
import sys
import numpy
import gensim
from gensim.utils import check_output
from gensim.test.utils import datapath, get_tmpfile
class TestGlove2Word2Vec(unittest.TestCase):
    # End-to-end test: runs the conversion script in a subprocess, exactly as a
    # user would, then loads the result as a word2vec-format model.
    def setUp(self):
        self.datapath = datapath('test_glove.txt')
        self.output_file = get_tmpfile('glove2word2vec.test')
    def testConversion(self):
        check_output(args=[
            sys.executable, '-m', 'gensim.scripts.glove2word2vec',
            '--input', self.datapath, '--output', self.output_file
        ])
        # test that the converted model loads successfully
        try:
            self.test_model = gensim.models.KeyedVectors.load_word2vec_format(self.output_file)
            # similarity of a word set with itself must be 1.0
            self.assertTrue(numpy.allclose(self.test_model.n_similarity(['the', 'and'], ['and', 'the']), 1.0))
        except Exception:
            # distinguish "file written but unreadable" from "conversion failed"
            if os.path.isfile(os.path.join(self.output_file)):
                self.fail('model file %s was created but could not be loaded.' % self.output_file)
            else:
                self.fail(
                    'model file %s creation failed, check the parameters and input file format.' % self.output_file
                )
if __name__ == '__main__':
    # DEBUG logging surfaces the subprocess command line on failure
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 1,582 | 32.680851 | 115 | py |
poincare_glove | poincare_glove-master/gensim/test/__init__.py | """
This package contains automated code tests for all other gensim packages.
"""
| 82 | 19.75 | 73 | py |
poincare_glove | poincare_glove-master/gensim/test/test_fasttext_wrapper.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import os
import numpy
from gensim.models.wrappers import fasttext
from gensim.models import keyedvectors
from gensim.test.utils import datapath, get_tmpfile
logger = logging.getLogger(__name__)
class TestFastText(unittest.TestCase):
    def setUp(self):
        """Resolve the fastText binary from FT_HOME (if set) and load the bundled model fixtures."""
        ft_home = os.environ.get('FT_HOME', None)
        # self.ft_path stays None when FT_HOME is unset; training tests check for this and bail out.
        self.ft_path = os.path.join(ft_home, 'fasttext') if ft_home else None
        self.corpus_file = datapath('lee_background.cor')
        self.test_model_file = datapath('lee_fasttext')
        self.test_new_model_file = datapath('lee_fasttext_new')
        # Load pre-trained model to perform tests in case FastText binary isn't available in test environment
        self.test_model = fasttext.FastText.load_fasttext_format(self.test_model_file)
    def model_sanity(self, model):
        """Even tiny models trained on any corpus should pass these sanity checks"""
        # Vocab vector matrix and ngram vector matrix must both match the declared vector size.
        self.assertEqual(model.wv.syn0.shape, (len(model.wv.vocab), model.vector_size))
        self.assertEqual(model.wv.syn0_ngrams.shape, (model.num_ngram_vectors, model.vector_size))
    def models_equal(self, model1, model2):
        """Assert two models have identical vocabularies and (numerically) identical weights."""
        self.assertEqual(len(model1.wv.vocab), len(model2.wv.vocab))
        self.assertEqual(set(model1.wv.vocab.keys()), set(model2.wv.vocab.keys()))
        self.assertTrue(numpy.allclose(model1.wv.syn0, model2.wv.syn0))
        self.assertTrue(numpy.allclose(model1.wv.syn0_ngrams, model2.wv.syn0_ngrams))
    def testTraining(self):
        """Test self.test_model successfully trained, parameters and weights correctly loaded"""
        if self.ft_path is None:
            logger.info("FT_HOME env variable not set, skipping test")
            return # Use self.skipTest once python < 2.7 is no longer supported
        vocab_size, model_size = 1763, 10
        tmpf = get_tmpfile('gensim_fasttext_wrapper.tst')
        trained_model = fasttext.FastText.train(
            self.ft_path, self.corpus_file, size=model_size, output_file=tmpf
        )
        self.assertEqual(trained_model.wv.syn0.shape, (vocab_size, model_size))
        self.assertEqual(len(trained_model.wv.vocab), vocab_size)
        self.assertEqual(trained_model.wv.syn0_ngrams.shape[1], model_size)
        self.model_sanity(trained_model)
        # Tests temporary training files deleted
        self.assertFalse(os.path.exists('%s.bin' % tmpf))
    def testMinCount(self):
        """Tests words with frequency less than `min_count` absent from vocab"""
        if self.ft_path is None:
            logger.info("FT_HOME env variable not set, skipping test")
            return # Use self.skipTest once python < 2.7 is no longer supported
        tmpf = get_tmpfile('gensim_fasttext_wrapper.tst')
        test_model_min_count_5 = fasttext.FastText.train(
            self.ft_path, self.corpus_file, output_file=tmpf, size=10, min_count=5
        )
        # 'forests' is expected to fall below the min_count=5 frequency threshold ...
        self.assertTrue('forests' not in test_model_min_count_5.wv.vocab)
        test_model_min_count_1 = fasttext.FastText.train(
            self.ft_path, self.corpus_file, output_file=tmpf, size=10, min_count=1
        )
        # ... but to survive with min_count=1.
        self.assertTrue('forests' in test_model_min_count_1.wv.vocab)
    def testModelSize(self):
        """Tests output vector dimensions are the same as the value for `size` param"""
        if self.ft_path is None:
            logger.info("FT_HOME env variable not set, skipping test")
            return # Use self.skipTest once python < 2.7 is no longer supported
        tmpf = get_tmpfile('gensim_fasttext_wrapper.tst')
        test_model_size_20 = fasttext.FastText.train(
            self.ft_path, self.corpus_file, output_file=tmpf, size=20
        )
        self.assertEqual(test_model_size_20.vector_size, 20)
        self.assertEqual(test_model_size_20.wv.syn0.shape[1], 20)
        self.assertEqual(test_model_size_20.wv.syn0_ngrams.shape[1], 20)
    def testPersistence(self):
        """Test storing/loading the entire model."""
        tmpf = get_tmpfile('gensim_fasttext_wrapper.tst')
        self.test_model.save(tmpf)
        loaded = fasttext.FastText.load(tmpf)
        self.models_equal(self.test_model, loaded)
        # sep_limit=0 forces the large arrays into separate files; round-trip must still match.
        self.test_model.save(tmpf, sep_limit=0)
        self.models_equal(self.test_model, fasttext.FastText.load(tmpf))
    def testNormalizedVectorsNotSaved(self):
        """Test syn0norm/syn0_ngrams_norm aren't saved in model file"""
        tmpf = get_tmpfile('gensim_fasttext_wrapper.tst')
        # init_sims populates the normalized matrices before saving ...
        self.test_model.init_sims()
        self.test_model.save(tmpf)
        loaded = fasttext.FastText.load(tmpf)
        # ... but they must not be persisted, neither on the full model ...
        self.assertTrue(loaded.wv.syn0norm is None)
        self.assertTrue(loaded.wv.syn0_ngrams_norm is None)
        wv = self.test_model.wv
        wv.save(tmpf)
        loaded_kv = keyedvectors.KeyedVectors.load(tmpf)
        # ... nor on the standalone KeyedVectors.
        self.assertTrue(loaded_kv.syn0norm is None)
        self.assertTrue(loaded_kv.syn0_ngrams_norm is None)
def testLoadFastTextFormat(self):
"""Test model successfully loaded from fastText .bin file"""
try:
model = fasttext.FastText.load_fasttext_format(self.test_model_file)
except Exception as exc:
self.fail('Unable to load FastText model from file %s: %s' % (self.test_model_file, exc))
vocab_size, model_size = 1762, 10
self.assertEqual(model.wv.syn0.shape, (vocab_size, model_size))
self.assertEqual(len(model.wv.vocab), vocab_size, model_size)
self.assertEqual(model.wv.syn0_ngrams.shape, (model.num_ngram_vectors, model_size))
expected_vec = [
-0.57144,
-0.0085561,
0.15748,
-0.67855,
-0.25459,
-0.58077,
-0.09913,
1.1447,
0.23418,
0.060007
] # obtained using ./fasttext print-word-vectors lee_fasttext_new.bin
self.assertTrue(numpy.allclose(model["hundred"], expected_vec, atol=1e-4))
# vector for oov words are slightly different from original FastText due to discarding unused ngrams
# obtained using a modified version of ./fasttext print-word-vectors lee_fasttext_new.bin
expected_vec_oov = [
-0.23825,
-0.58482,
-0.22276,
-0.41215,
0.91015,
-1.6786,
-0.26724,
0.58818,
0.57828,
0.75801
]
self.assertTrue(numpy.allclose(model["rejection"], expected_vec_oov, atol=1e-4))
self.assertEqual(model.min_count, 5)
self.assertEqual(model.window, 5)
self.assertEqual(model.iter, 5)
self.assertEqual(model.negative, 5)
self.assertEqual(model.sample, 0.0001)
self.assertEqual(model.bucket, 1000)
self.assertEqual(model.wv.max_n, 6)
self.assertEqual(model.wv.min_n, 3)
self.model_sanity(model)
def testLoadFastTextNewFormat(self):
""" Test model successfully loaded from fastText (new format) .bin file """
try:
new_model = fasttext.FastText.load_fasttext_format(self.test_new_model_file)
except Exception as exc:
self.fail('Unable to load FastText model from file %s: %s' % (self.test_new_model_file, exc))
vocab_size, model_size = 1763, 10
self.assertEqual(new_model.wv.syn0.shape, (vocab_size, model_size))
self.assertEqual(len(new_model.wv.vocab), vocab_size, model_size)
self.assertEqual(new_model.wv.syn0_ngrams.shape, (new_model.num_ngram_vectors, model_size))
expected_vec = [
-0.025627,
-0.11448,
0.18116,
-0.96779,
0.2532,
-0.93224,
0.3929,
0.12679,
-0.19685,
-0.13179
] # obtained using ./fasttext print-word-vectors lee_fasttext_new.bin
self.assertTrue(numpy.allclose(new_model["hundred"], expected_vec, atol=1e-4))
# vector for oov words are slightly different from original FastText due to discarding unused ngrams
# obtained using a modified version of ./fasttext print-word-vectors lee_fasttext_new.bin
expected_vec_oov = [
-0.53378,
-0.19,
0.013482,
-0.86767,
-0.21684,
-0.89928,
0.45124,
0.18025,
-0.14128,
0.22508
]
self.assertTrue(numpy.allclose(new_model["rejection"], expected_vec_oov, atol=1e-4))
self.assertEqual(new_model.min_count, 5)
self.assertEqual(new_model.window, 5)
self.assertEqual(new_model.iter, 5)
self.assertEqual(new_model.negative, 5)
self.assertEqual(new_model.sample, 0.0001)
self.assertEqual(new_model.bucket, 1000)
self.assertEqual(new_model.wv.max_n, 6)
self.assertEqual(new_model.wv.min_n, 3)
self.model_sanity(new_model)
    def testLoadFileName(self):
        """ Test model accepts input as both `/path/to/model` or `/path/to/model.bin` """
        self.assertTrue(fasttext.FastText.load_fasttext_format(datapath('lee_fasttext_new')))
        self.assertTrue(fasttext.FastText.load_fasttext_format(datapath('lee_fasttext_new.bin')))
    def testLoadModelSupervised(self):
        """Test loading model with supervised learning labels"""
        # Supervised fastText models are unsupported; loading one must raise.
        with self.assertRaises(NotImplementedError):
            fasttext.FastText.load_fasttext_format(datapath('pang_lee_polarity_fasttext'))
    def testLoadModelWithNonAsciiVocab(self):
        """Test loading model with non-ascii words in vocab"""
        model = fasttext.FastText.load_fasttext_format(datapath('non_ascii_fasttext'))
        self.assertTrue(u'který' in model)
        try:
            vector = model[u'který'] # noqa:F841
        except UnicodeDecodeError:
            self.fail('Unable to access vector for utf8 encoded non-ascii word')
    def testLoadModelNonUtf8Encoding(self):
        """Test loading model with words in user-specified encoding"""
        # Fixture was trained on cp852-encoded text; the encoding kwarg must decode its vocab.
        model = fasttext.FastText.load_fasttext_format(datapath('cp852_fasttext'), encoding='cp852')
        self.assertTrue(u'který' in model)
        try:
            vector = model[u'který'] # noqa:F841
        except KeyError:
            self.fail('Unable to access vector for cp-852 word')
    def testNSimilarity(self):
        """Test n_similarity for in-vocab and out-of-vocab words"""
        # 'night' is in the vocab, 'nights' is not (see testContains); both paths are covered.
        # In vocab, sanity check
        self.assertTrue(numpy.allclose(self.test_model.n_similarity(['the', 'and'], ['and', 'the']), 1.0))
        self.assertEqual(self.test_model.n_similarity(['the'], ['and']), self.test_model.n_similarity(['and'], ['the']))
        # Out of vocab check
        self.assertTrue(numpy.allclose(self.test_model.n_similarity(['night', 'nights'], ['nights', 'night']), 1.0))
        self.assertEqual(
            self.test_model.n_similarity(['night'], ['nights']),
            self.test_model.n_similarity(['nights'], ['night'])
        )
    def testSimilarity(self):
        """Test similarity for in-vocab and out-of-vocab words"""
        # In vocab, sanity check
        self.assertTrue(numpy.allclose(self.test_model.similarity('the', 'the'), 1.0))
        self.assertEqual(self.test_model.similarity('the', 'and'), self.test_model.similarity('and', 'the'))
        # Out of vocab check
        self.assertTrue(numpy.allclose(self.test_model.similarity('nights', 'nights'), 1.0))
        self.assertEqual(self.test_model.similarity('night', 'nights'), self.test_model.similarity('nights', 'night'))
    def testMostSimilar(self):
        """Test most_similar for in-vocab and out-of-vocab words"""
        # In vocab, sanity check
        self.assertEqual(len(self.test_model.most_similar(positive=['the', 'and'], topn=5)), 5)
        self.assertEqual(self.test_model.most_similar('the'), self.test_model.most_similar(positive=['the']))
        # Out of vocab check
        self.assertEqual(len(self.test_model.most_similar(['night', 'nights'], topn=5)), 5)
        self.assertEqual(self.test_model.most_similar('nights'), self.test_model.most_similar(positive=['nights']))
    def testMostSimilarCosmul(self):
        """Test most_similar_cosmul for in-vocab and out-of-vocab words"""
        # In vocab, sanity check
        self.assertEqual(len(self.test_model.most_similar_cosmul(positive=['the', 'and'], topn=5)), 5)
        self.assertEqual(
            self.test_model.most_similar_cosmul('the'),
            self.test_model.most_similar_cosmul(positive=['the']))
        # Out of vocab check
        self.assertEqual(len(self.test_model.most_similar_cosmul(['night', 'nights'], topn=5)), 5)
        self.assertEqual(
            self.test_model.most_similar_cosmul('nights'),
            self.test_model.most_similar_cosmul(positive=['nights']))
    def testLookup(self):
        """Tests word vector lookup for in-vocab and out-of-vocab words"""
        # In vocab, sanity check
        self.assertTrue('night' in self.test_model.wv.vocab)
        self.assertTrue(numpy.allclose(self.test_model['night'], self.test_model[['night']]))
        # Out of vocab check
        self.assertFalse('nights' in self.test_model.wv.vocab)
        self.assertTrue(numpy.allclose(self.test_model['nights'], self.test_model[['nights']]))
        # Word with no ngrams in model
        self.assertRaises(KeyError, lambda: self.test_model['a!@'])
    def testContains(self):
        """Tests __contains__ for in-vocab and out-of-vocab words"""
        # In vocab, sanity check
        self.assertTrue('night' in self.test_model.wv.vocab)
        self.assertTrue('night' in self.test_model)
        # Out of vocab check: absent from vocab, but still reachable via its ngrams
        self.assertFalse('nights' in self.test_model.wv.vocab)
        self.assertTrue('nights' in self.test_model)
        # Word with no ngrams in model
        self.assertFalse('a!@' in self.test_model.wv.vocab)
        self.assertFalse('a!@' in self.test_model)
    def testWmdistance(self):
        """Tests wmdistance for docs with in-vocab and out-of-vocab words"""
        doc = ['night', 'payment']
        oov_doc = ['nights', 'forests', 'payments']
        ngrams_absent_doc = ['a!@', 'b#$']
        # OOV words with known ngrams still yield a finite distance ...
        dist = self.test_model.wmdistance(doc, oov_doc)
        self.assertNotEqual(float('inf'), dist)
        # ... but words with no ngrams at all give an infinite distance.
        dist = self.test_model.wmdistance(doc, ngrams_absent_doc)
        self.assertEqual(float('inf'), dist)
    def testDoesntMatch(self):
        """Tests doesnt_match for list of out-of-vocab words"""
        oov_words = ['nights', 'forests', 'payments']
        # Out of vocab check
        for word in oov_words:
            self.assertFalse(word in self.test_model.wv.vocab)
        try:
            self.test_model.doesnt_match(oov_words)
        except Exception:
            self.fail('model.doesnt_match raises exception for oov words')
    def testHash(self):
        # Tests FastText.ft_hash method return values to those obtained from original C implementation
        ft_hash = fasttext.ft_hash('test')
        self.assertEqual(ft_hash, 2949673445)
        ft_hash = fasttext.ft_hash('word')
        self.assertEqual(ft_hash, 1788406269)
    def testConsistentDtype(self):
        """Test that the same dtype is returned for OOV words as for words in the vocabulary"""
        vocab_word = 'night'
        oov_word = 'wordnotpresentinvocabulary'
        self.assertIn(vocab_word, self.test_model.wv.vocab)
        self.assertNotIn(oov_word, self.test_model.wv.vocab)
        vocab_embedding = self.test_model[vocab_word]
        oov_embedding = self.test_model[oov_word]
        self.assertEqual(vocab_embedding.dtype, oov_embedding.dtype)
    def testPersistenceForOldVersions(self):
        """Test backward compatibility for models saved with versions < 3.0.0"""
        old_model_path = datapath('ft_model_2.3.0')
        loaded_model = fasttext.FastText.load(old_model_path)
        self.assertEqual(loaded_model.vector_size, 10)
        self.assertEqual(loaded_model.wv.syn0.shape[1], 10)
        self.assertEqual(loaded_model.wv.syn0_ngrams.shape[1], 10)
        # in-vocab word
        in_expected_vec = numpy.array([-2.44566941, -1.54802394, -2.61103821, -1.88549316, 1.02860415,
                                       1.19031894, 2.01627707, 1.98942184, -1.39095843, -0.65036952])
        self.assertTrue(numpy.allclose(loaded_model["the"], in_expected_vec, atol=1e-4))
        # out-of-vocab word
        out_expected_vec = numpy.array([-1.34948218, -0.8686831, -1.51483142, -1.0164026, 0.56272298,
                                        0.66228276, 1.06477463, 1.1355902, -0.80972326, -0.39845538])
        self.assertTrue(numpy.allclose(loaded_model["random_word"], out_expected_vec, atol=1e-4))
if __name__ == '__main__':
    # Verbose logging aids debugging of the wrapped fastText subprocess calls.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 17,041 | 44.566845 | 120 | py |
poincare_glove | poincare_glove-master/gensim/test/test_segmentation.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for segmentation algorithms in the segmentation module.
"""
import logging
import unittest
import numpy as np
from gensim.topic_coherence import segmentation
from numpy import array
class TestSegmentation(unittest.TestCase):
    """Checks for the s_one_pre / s_one_one / s_one_set topic segmentations."""

    def setUp(self):
        # Three toy "topics", each a triple of term ids.
        self.topics = [
            array([9, 4, 6]),
            array([9, 10, 7]),
            array([5, 2, 7])
        ]

    def testSOnePre(self):
        """Each term should be paired with every term that precedes it in the topic."""
        expected = [
            [(4, 9), (6, 9), (6, 4)],
            [(10, 9), (7, 9), (7, 10)],
            [(2, 5), (7, 5), (7, 2)]
        ]
        self.assertTrue(np.allclose(segmentation.s_one_pre(self.topics), expected))

    def testSOneOne(self):
        """Each term should be paired with every other term of the same topic."""
        expected = [
            [(9, 4), (9, 6), (4, 9), (4, 6), (6, 9), (6, 4)],
            [(9, 10), (9, 7), (10, 9), (10, 7), (7, 9), (7, 10)],
            [(5, 2), (5, 7), (2, 5), (2, 7), (7, 5), (7, 2)]
        ]
        self.assertTrue(np.allclose(segmentation.s_one_one(self.topics), expected))

    def testSOneSet(self):
        """Each term should be paired with its entire topic."""
        actual = segmentation.s_one_set(self.topics)
        expected = [
            [(9, array([9, 4, 6])), (4, array([9, 4, 6])), (6, array([9, 4, 6]))],
            [(9, array([9, 10, 7])), (10, array([9, 10, 7])), (7, array([9, 10, 7]))],
            [(5, array([5, 2, 7])), (2, array([5, 2, 7])), (7, array([5, 2, 7]))]
        ]
        # Compare element-wise: the term ids exactly, the topic arrays numerically.
        for i in range(len(actual)):
            for j in range(len(actual[i])):
                self.assertEqual(actual[i][j][0], expected[i][j][0])
                self.assertTrue(np.allclose(actual[i][j][1], expected[i][j][1]))
if __name__ == '__main__':
    # Segmentation tests are deterministic and quiet; warnings-only logging suffices.
    logging.root.setLevel(logging.WARNING)
    unittest.main()
| 2,046 | 30.015152 | 86 | py |
poincare_glove | poincare_glove-master/gensim/test/simspeed.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s CORPUS_DENSE.mm CORPUS_SPARSE.mm [NUMDOCS]
Run speed test of similarity queries. Only use the first NUMDOCS documents of \
each corpus for testing (or use all if no NUMDOCS is given).
The two sample corpora can be downloaded from http://nlp.fi.muni.cz/projekty/gensim/wikismall.tgz
Example: ./simspeed.py wikismall.dense.mm wikismall.sparse.mm 5000
"""
import logging
import sys
import itertools
import os
import math
from time import time
import numpy as np
import gensim
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    logging.info("running %s", " ".join(sys.argv))
    # check and process cmdline input
    program = os.path.basename(sys.argv[0])
    if len(sys.argv) < 3:
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    corpus_dense = gensim.corpora.MmCorpus(sys.argv[1])
    corpus_sparse = gensim.corpora.MmCorpus(sys.argv[2])
    NUMTERMS = corpus_sparse.num_terms
    # optional third argument limits both corpora to their first NUMDOCS documents
    if len(sys.argv) > 3:
        NUMDOCS = int(sys.argv[3])
        corpus_dense = list(itertools.islice(corpus_dense, NUMDOCS))
        corpus_sparse = list(itertools.islice(corpus_sparse, NUMDOCS))
    # create the query index to be tested (one for dense input, one for sparse)
    index_dense = gensim.similarities.MatrixSimilarity(corpus_dense)
    index_sparse = gensim.similarities.SparseMatrixSimilarity(corpus_sparse, num_terms=NUMTERMS)
    density = 100.0 * index_sparse.index.nnz / (index_sparse.index.shape[0] * index_sparse.index.shape[1])
    # Difference between test #1 and test #3 is that the query in #1 is a gensim iterable
    # corpus, while in #3, the index is used directly (np arrays). So #1 is slower,
    # because it needs to convert sparse vecs to np arrays and normalize them to
    # unit length=extra work, which #3 avoids.
    query = list(itertools.islice(corpus_dense, 1000))
    logging.info(
        "test 1 (dense): dense corpus of %i docs vs. index (%i documents, %i dense features)",
        len(query), len(index_dense), index_dense.num_features
    )
    for chunksize in [1, 4, 8, 16, 64, 128, 256, 512, 1024]:
        start = time()
        if chunksize > 1:
            sims = []
            for chunk in gensim.utils.chunkize_serial(query, chunksize):
                sim = index_dense[chunk]
                sims.extend(sim)
        else:
            sims = [index_dense[vec] for vec in query]
        assert len(sims) == len(query) # make sure we have one result for each query document
        taken = time() - start
        queries = math.ceil(1.0 * len(query) / chunksize)
        logging.info(
            "chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s)",
            chunksize, taken, len(query) / taken, queries / taken
        )
    # Same comment as for test #1 but vs. test #4.
    query = list(itertools.islice(corpus_sparse, 1000))
    logging.info(
        "test 2 (sparse): sparse corpus of %i docs vs. sparse index (%i documents, %i features, %.2f%% density)",
        len(query), len(corpus_sparse), index_sparse.index.shape[1], density
    )
    for chunksize in [1, 5, 10, 100, 500, 1000]:
        start = time()
        if chunksize > 1:
            sims = []
            for chunk in gensim.utils.chunkize_serial(query, chunksize):
                sim = index_sparse[chunk]
                sims.extend(sim)
        else:
            sims = [index_sparse[vec] for vec in query]
        assert len(sims) == len(query) # make sure we have one result for each query document
        taken = time() - start
        queries = math.ceil(1.0 * len(query) / chunksize)
        logging.info(
            "chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s)",
            chunksize, taken, len(query) / taken, queries / taken
        )
    logging.info(
        "test 3 (dense): similarity of all vs. all (%i documents, %i dense features)",
        len(corpus_dense), index_dense.num_features
    )
    # chunksize=0 runs first and serves as the unchunked baseline; later iterations
    # report their mean absolute difference against it ("meandiff").
    for chunksize in [0, 1, 4, 8, 16, 64, 128, 256, 512, 1024]:
        index_dense.chunksize = chunksize
        start = time()
        # `sims` stores the entire N x N sim matrix in memory!
        # this is not necessary, but i added it to test the accuracy of the result
        # (=report mean diff below)
        sims = list(index_dense)
        taken = time() - start
        sims = np.asarray(sims)
        if chunksize == 0:
            logging.info(
                "chunksize=%i, time=%.4fs (%.2f docs/s)",
                chunksize, taken, len(corpus_dense) / taken
            )
            unchunksizeed = sims
        else:
            queries = math.ceil(1.0 * len(corpus_dense) / chunksize)
            diff = np.mean(np.abs(unchunksizeed - sims))
            logging.info(
                "chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s), meandiff=%.3e",
                chunksize, taken, len(corpus_dense) / taken, queries / taken, diff
            )
        del sims
    # switch the index to return only the top-10 hits per query
    index_dense.num_best = 10
    logging.info("test 4 (dense): as above, but only ask for the top-10 most similar for each document")
    for chunksize in [0, 1, 4, 8, 16, 64, 128, 256, 512, 1024]:
        index_dense.chunksize = chunksize
        start = time()
        sims = list(index_dense)
        taken = time() - start
        if chunksize == 0:
            queries = len(corpus_dense)
        else:
            queries = math.ceil(1.0 * len(corpus_dense) / chunksize)
        logging.info(
            "chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s)",
            chunksize, taken, len(corpus_dense) / taken, queries / taken
        )
    # restore full-results mode
    index_dense.num_best = None
    logging.info(
        "test 5 (sparse): similarity of all vs. all (%i documents, %i features, %.2f%% density)",
        len(corpus_sparse), index_sparse.index.shape[1], density
    )
    for chunksize in [0, 5, 10, 100, 500, 1000, 5000]:
        index_sparse.chunksize = chunksize
        start = time()
        sims = list(index_sparse)
        taken = time() - start
        sims = np.asarray(sims)
        if chunksize == 0:
            logging.info(
                "chunksize=%i, time=%.4fs (%.2f docs/s)",
                chunksize, taken, len(corpus_sparse) / taken
            )
            unchunksizeed = sims
        else:
            queries = math.ceil(1.0 * len(corpus_sparse) / chunksize)
            diff = np.mean(np.abs(unchunksizeed - sims))
            logging.info(
                "chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s), meandiff=%.3e",
                chunksize, taken, len(corpus_sparse) / taken, queries / taken, diff
            )
        del sims
    index_sparse.num_best = 10
    logging.info("test 6 (sparse): as above, but only ask for the top-10 most similar for each document")
    for chunksize in [0, 5, 10, 100, 500, 1000, 5000]:
        index_sparse.chunksize = chunksize
        start = time()
        sims = list(index_sparse)
        taken = time() - start
        if chunksize == 0:
            queries = len(corpus_sparse)
        else:
            queries = math.ceil(1.0 * len(corpus_sparse) / chunksize)
        logging.info(
            "chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s)",
            chunksize, taken, len(corpus_sparse) / taken, queries / taken
        )
    index_sparse.num_best = None
    logging.info("finished running %s", program)
| 7,581 | 39.116402 | 113 | py |
poincare_glove | poincare_glove-master/gensim/test/test_doc2vec.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
from __future__ import with_statement
import logging
import unittest
import os
from six.moves import zip as izip
from collections import namedtuple
from testfixtures import log_capture
import numpy as np
from gensim import utils
from gensim.models import doc2vec, keyedvectors
from gensim.test.utils import datapath, get_tmpfile, common_texts as raw_sentences
class DocsLeeCorpus(object):
    """Iterable of TaggedDocument built from the Lee background corpus.

    Tags are plain document ints by default; `string_tags` / `unicode_tags`
    switch to string (resp. non-ascii unicode) tag variants.
    """

    def __init__(self, string_tags=False, unicode_tags=False):
        self.string_tags = string_tags
        self.unicode_tags = unicode_tags

    def _tag(self, i):
        # unicode takes precedence over string when both flags are set
        if self.unicode_tags:
            return u'_\xa1_%d' % i
        if self.string_tags:
            return '_*%d' % i
        return i

    def __iter__(self):
        with open(datapath('lee_background.cor')) as source:
            for doc_no, doc_line in enumerate(source):
                words = utils.simple_preprocess(doc_line)
                yield doc2vec.TaggedDocument(words, [self._tag(doc_no)])
# Shared fixtures: the fully materialized Lee corpus (int tags) and a tiny
# in-memory corpus built from the common test texts.
list_corpus = list(DocsLeeCorpus())
sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(raw_sentences)]
def load_on_instance():
    """Save a trained Doc2Vec model, then reload it via an *instance* rather than the class."""
    # Save and load a Doc2Vec Model on instance for test
    tmpf = get_tmpfile('gensim_doc2vec.tst')
    model = doc2vec.Doc2Vec(DocsLeeCorpus(), min_count=1)
    model.save(tmpf)
    model = doc2vec.Doc2Vec() # should fail at this point
    return model.load(tmpf)
class TestDoc2VecModel(unittest.TestCase):
    def test_persistence(self):
        """Test storing/loading the entire model."""
        tmpf = get_tmpfile('gensim_doc2vec.tst')
        model = doc2vec.Doc2Vec(DocsLeeCorpus(), min_count=1)
        model.save(tmpf)
        # the reloaded model must match the in-memory one exactly
        self.models_equal(model, doc2vec.Doc2Vec.load(tmpf))
    def testPersistenceWord2VecFormat(self):
        """Test storing the entire model in word2vec format."""
        model = doc2vec.Doc2Vec(DocsLeeCorpus(), min_count=1)
        # test saving both document and word embedding
        test_doc_word = get_tmpfile('gensim_doc2vec.dw')
        model.save_word2vec_format(test_doc_word, doctag_vec=True, word_vec=True, binary=True)
        binary_model_dv = keyedvectors.KeyedVectors.load_word2vec_format(test_doc_word, binary=True)
        self.assertEqual(len(model.wv.vocab) + len(model.docvecs), len(binary_model_dv.vocab))
        # test saving document embedding only
        test_doc = get_tmpfile('gensim_doc2vec.d')
        model.save_word2vec_format(test_doc, doctag_vec=True, word_vec=False, binary=True)
        binary_model_dv = keyedvectors.KeyedVectors.load_word2vec_format(test_doc, binary=True)
        self.assertEqual(len(model.docvecs), len(binary_model_dv.vocab))
        # test saving word embedding only
        test_word = get_tmpfile('gensim_doc2vec.w')
        model.save_word2vec_format(test_word, doctag_vec=False, word_vec=True, binary=True)
        binary_model_dv = keyedvectors.KeyedVectors.load_word2vec_format(test_word, binary=True)
        self.assertEqual(len(model.wv.vocab), len(binary_model_dv.vocab))
    def testLoadOldModel(self):
        """Test loading doc2vec models from previous version"""
        # single-file old model
        model_file = 'doc2vec_old'
        model = doc2vec.Doc2Vec.load(datapath(model_file))
        self.assertTrue(model.wv.vectors.shape == (3955, 100))
        self.assertTrue(len(model.wv.vocab) == 3955)
        self.assertTrue(len(model.wv.index2word) == 3955)
        self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), model.vector_size))
        self.assertTrue(model.trainables.vectors_lockf.shape == (3955, ))
        self.assertTrue(model.vocabulary.cum_table.shape == (3955, ))
        self.assertTrue(model.docvecs.vectors_docs.shape == (300, 100))
        self.assertTrue(model.trainables.vectors_docs_lockf.shape == (300, ))
        self.assertTrue(model.docvecs.max_rawint == 299)
        self.assertTrue(model.docvecs.count == 300)
        # Model stored in multiple files
        model_file = 'doc2vec_old_sep'
        model = doc2vec.Doc2Vec.load(datapath(model_file))
        self.assertTrue(model.wv.vectors.shape == (3955, 100))
        self.assertTrue(len(model.wv.vocab) == 3955)
        self.assertTrue(len(model.wv.index2word) == 3955)
        self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), model.vector_size))
        self.assertTrue(model.trainables.vectors_lockf.shape == (3955, ))
        self.assertTrue(model.vocabulary.cum_table.shape == (3955, ))
        self.assertTrue(model.docvecs.vectors_docs.shape == (300, 100))
        self.assertTrue(model.trainables.vectors_docs_lockf.shape == (300, ))
        self.assertTrue(model.docvecs.max_rawint == 299)
        self.assertTrue(model.docvecs.count == 300)
    def test_unicode_in_doctag(self):
        """Test storing document vectors of a model with unicode titles."""
        model = doc2vec.Doc2Vec(DocsLeeCorpus(unicode_tags=True), min_count=1)
        tmpf = get_tmpfile('gensim_doc2vec.tst')
        try:
            model.save_word2vec_format(tmpf, doctag_vec=True, word_vec=True, binary=True)
        except UnicodeEncodeError:
            self.fail('Failed storing unicode title.')
    def test_load_mmap(self):
        """Test storing/loading the entire model."""
        model = doc2vec.Doc2Vec(sentences, min_count=1)
        tmpf = get_tmpfile('gensim_doc2vec.tst')
        # test storing the internal arrays into separate files
        model.save(tmpf, sep_limit=0)
        self.models_equal(model, doc2vec.Doc2Vec.load(tmpf))
        # make sure mmaping the arrays back works, too
        self.models_equal(model, doc2vec.Doc2Vec.load(tmpf, mmap='r'))
    def test_int_doctags(self):
        """Test doc2vec doctag alternatives"""
        corpus = DocsLeeCorpus()
        model = doc2vec.Doc2Vec(min_count=1)
        model.build_vocab(corpus)
        self.assertEqual(len(model.docvecs.doctag_syn0), 300)
        # plain ints and numpy ints both index a doc vector; string tags were never registered
        self.assertEqual(model.docvecs[0].shape, (100,))
        self.assertEqual(model.docvecs[np.int64(0)].shape, (100,))
        self.assertRaises(KeyError, model.__getitem__, '_*0')
    def test_missing_string_doctag(self):
        """Test doc2vec doctag alternatives"""
        corpus = list(DocsLeeCorpus(True))
        # force duplicated tags
        corpus = corpus[0:10] + corpus
        model = doc2vec.Doc2Vec(min_count=1)
        model.build_vocab(corpus)
        self.assertRaises(KeyError, model.docvecs.__getitem__, 'not_a_tag')
    def test_string_doctags(self):
        """Test doc2vec doctag alternatives"""
        corpus = list(DocsLeeCorpus(True))
        # force duplicated tags
        corpus = corpus[0:10] + corpus
        model = doc2vec.Doc2Vec(min_count=1)
        model.build_vocab(corpus)
        self.assertEqual(len(model.docvecs.doctag_syn0), 300)
        self.assertEqual(model.docvecs[0].shape, (100,))
        self.assertEqual(model.docvecs['_*0'].shape, (100,))
        # string tag and its int offset must resolve to the same vector
        self.assertTrue(all(model.docvecs['_*0'] == model.docvecs[0]))
        self.assertTrue(max(d.offset for d in model.docvecs.doctags.values()) < len(model.docvecs.doctags))
        self.assertTrue(
            max(
                model.docvecs._int_index(str_key, model.docvecs.doctags, model.docvecs.max_rawint)
                for str_key in model.docvecs.doctags.keys())
            < len(model.docvecs.doctag_syn0)
        )
        # verify docvecs.most_similar() returns string doctags rather than indexes
        self.assertEqual(model.docvecs.offset2doctag[0], model.docvecs.most_similar([model.docvecs[0]])[0][0])
    def test_empty_errors(self):
        """Empty or fully-filtered input must raise, not silently train nothing."""
        # no input => "RuntimeError: you must first build vocabulary before training the model"
        self.assertRaises(RuntimeError, doc2vec.Doc2Vec, [])
        # input not empty, but rather completely filtered out
        self.assertRaises(RuntimeError, doc2vec.Doc2Vec, list_corpus, min_count=10000)
    def test_similarity_unseen_docs(self):
        """Test similarity of out of training sentences"""
        rome_str = ['rome', 'italy']
        car_str = ['car']
        corpus = list(DocsLeeCorpus(True))
        model = doc2vec.Doc2Vec(min_count=1)
        model.build_vocab(corpus)
        # a doc should be more similar to itself than to an unrelated doc
        self.assertTrue(
            model.docvecs.similarity_unseen_docs(model, rome_str, rome_str) >
            model.docvecs.similarity_unseen_docs(model, rome_str, car_str)
        )
    def model_sanity(self, model, keep_training=True):
        """Any non-trivial model on DocsLeeCorpus can pass these sanity checks.

        When `keep_training` is True, also verifies the model can continue
        training after a save/load round-trip.
        """
        fire1 = 0 # doc 0 sydney fires
        fire2 = np.int64(8) # doc 8 sydney fires
        tennis1 = 6 # doc 6 tennis
        # inferred vector should be top10 close to bulk-trained one
        doc0_inferred = model.infer_vector(list(DocsLeeCorpus())[0].words)
        sims_to_infer = model.docvecs.most_similar([doc0_inferred], topn=len(model.docvecs))
        f_rank = [docid for docid, sim in sims_to_infer].index(fire1)
        self.assertLess(f_rank, 10)
        # fire2 should be top30 close to fire1
        sims = model.docvecs.most_similar(fire1, topn=len(model.docvecs))
        f2_rank = [docid for docid, sim in sims].index(fire2)
        self.assertLess(f2_rank, 30)
        # same sims should appear in lookup by vec as by index
        doc0_vec = model.docvecs[fire1]
        sims2 = model.docvecs.most_similar(positive=[doc0_vec], topn=21)
        sims2 = [(id, sim) for id, sim in sims2 if id != fire1] # ignore the doc itself
        sims = sims[:20]
        self.assertEqual(list(zip(*sims))[0], list(zip(*sims2))[0]) # same doc ids
        self.assertTrue(np.allclose(list(zip(*sims))[1], list(zip(*sims2))[1])) # close-enough dists
        # sim results should be in clip range if given
        clip_sims = \
            model.docvecs.most_similar(fire1, clip_start=len(model.docvecs) // 2, clip_end=len(model.docvecs) * 2 // 3)
        sims_doc_id = [docid for docid, sim in clip_sims]
        for s_id in sims_doc_id:
            self.assertTrue(len(model.docvecs) // 2 <= s_id <= len(model.docvecs) * 2 // 3)
        # tennis doc should be out-of-place among fire news
        self.assertEqual(model.docvecs.doesnt_match([fire1, tennis1, fire2]), tennis1)
        # fire docs should be closer than fire-tennis
        self.assertTrue(model.docvecs.similarity(fire1, fire2) > model.docvecs.similarity(fire1, tennis1))
        # keep training after save
        if keep_training:
            tmpf = get_tmpfile('gensim_doc2vec.tst')
            model.save(tmpf)
            loaded = doc2vec.Doc2Vec.load(tmpf)
            loaded.train(sentences, total_examples=loaded.corpus_count, epochs=loaded.iter)
    def test_training(self):
        """Test doc2vec training."""
        corpus = DocsLeeCorpus()
        model = doc2vec.Doc2Vec(size=100, min_count=2, iter=20, workers=1)
        model.build_vocab(corpus)
        self.assertEqual(model.docvecs.doctag_syn0.shape, (300, 100))
        model.train(corpus, total_examples=model.corpus_count, epochs=model.iter)
        self.model_sanity(model)
        # build vocab and train in one step; must be the same as above
        model2 = doc2vec.Doc2Vec(corpus, size=100, min_count=2, iter=20, workers=1)
        self.models_equal(model, model2)
    # The eight tests below cover the full training-mode matrix:
    # {DBOW, DM-mean, DM-sum, DM-concat} x {hierarchical softmax, negative sampling}.
    def test_dbow_hs(self):
        """Test DBOW doc2vec training."""
        model = doc2vec.Doc2Vec(list_corpus, dm=0, hs=1, negative=0, min_count=2, iter=20)
        self.model_sanity(model)
    def test_dmm_hs(self):
        """Test DM/mean doc2vec training."""
        model = doc2vec.Doc2Vec(
            list_corpus, dm=1, dm_mean=1, size=24, window=4,
            hs=1, negative=0, alpha=0.05, min_count=2, iter=20
        )
        self.model_sanity(model)
    def test_dms_hs(self):
        """Test DM/sum doc2vec training."""
        model = doc2vec.Doc2Vec(
            list_corpus, dm=1, dm_mean=0, size=24, window=4, hs=1,
            negative=0, alpha=0.05, min_count=2, iter=20
        )
        self.model_sanity(model)
    def test_dmc_hs(self):
        """Test DM/concatenate doc2vec training."""
        model = doc2vec.Doc2Vec(
            list_corpus, dm=1, dm_concat=1, size=24, window=4,
            hs=1, negative=0, alpha=0.05, min_count=2, iter=20
        )
        self.model_sanity(model)
    def test_dbow_neg(self):
        """Test DBOW doc2vec training."""
        model = doc2vec.Doc2Vec(list_corpus, dm=0, hs=0, negative=10, min_count=2, iter=20)
        self.model_sanity(model)
    def test_dmm_neg(self):
        """Test DM/mean doc2vec training."""
        model = doc2vec.Doc2Vec(
            list_corpus, dm=1, dm_mean=1, size=24, window=4, hs=0,
            negative=10, alpha=0.05, min_count=2, iter=20
        )
        self.model_sanity(model)
    def test_dms_neg(self):
        """Test DM/sum doc2vec training."""
        model = doc2vec.Doc2Vec(
            list_corpus, dm=1, dm_mean=0, size=24, window=4, hs=0,
            negative=10, alpha=0.05, min_count=2, iter=20
        )
        self.model_sanity(model)
    def test_dmc_neg(self):
        """Test DM/concatenate doc2vec training."""
        model = doc2vec.Doc2Vec(
            list_corpus, dm=1, dm_concat=1, size=24, window=4, hs=0,
            negative=10, alpha=0.05, min_count=2, iter=20
        )
        self.model_sanity(model)
def test_parallel(self):
"""Test doc2vec parallel training."""
if doc2vec.FAST_VERSION < 0: # don't test the plain NumPy version for parallelism (too slow)
return
corpus = utils.RepeatCorpus(DocsLeeCorpus(), 10000)
for workers in [2, 4]:
model = doc2vec.Doc2Vec(corpus, workers=workers)
self.model_sanity(model)
def test_deterministic_hs(self):
"""Test doc2vec results identical with identical RNG seed."""
# hs
model = doc2vec.Doc2Vec(DocsLeeCorpus(), seed=42, workers=1)
model2 = doc2vec.Doc2Vec(DocsLeeCorpus(), seed=42, workers=1)
self.models_equal(model, model2)
def test_deterministic_neg(self):
"""Test doc2vec results identical with identical RNG seed."""
# neg
model = doc2vec.Doc2Vec(DocsLeeCorpus(), hs=0, negative=3, seed=42, workers=1)
model2 = doc2vec.Doc2Vec(DocsLeeCorpus(), hs=0, negative=3, seed=42, workers=1)
self.models_equal(model, model2)
def test_deterministic_dmc(self):
"""Test doc2vec results identical with identical RNG seed."""
# bigger, dmc
model = doc2vec.Doc2Vec(
DocsLeeCorpus(), dm=1, dm_concat=1, size=24,
window=4, hs=1, negative=3, seed=42, workers=1
)
model2 = doc2vec.Doc2Vec(
DocsLeeCorpus(), dm=1, dm_concat=1, size=24,
window=4, hs=1, negative=3, seed=42, workers=1
)
self.models_equal(model, model2)
    def test_mixed_tag_types(self):
        """Ensure alternating int/string tags don't share indexes in doctag_syn0"""
        # every document gets an int tag (its position) AND a string tag (its first word)
        mixed_tag_corpus = [doc2vec.TaggedDocument(words, [i, words[0]]) for i, words in enumerate(raw_sentences)]
        model = doc2vec.Doc2Vec()
        model.build_vocab(mixed_tag_corpus)
        # expected rows: one per int tag plus one per distinct string doctag.
        # NOTE(review): uses len(sentences), not len(raw_sentences) -- presumably the
        # same length here; confirm against the module-level fixtures.
        expected_length = len(sentences) + len(model.docvecs.doctags) # 9 sentences, 7 unique first tokens
        self.assertEqual(len(model.docvecs.doctag_syn0), expected_length)
    def models_equal(self, model, model2):
        """Assert two Doc2Vec models have identical vocab sizes, weights and doctag mappings."""
        # check words/hidden-weights
        self.assertEqual(len(model.wv.vocab), len(model2.wv.vocab))
        self.assertTrue(np.allclose(model.wv.syn0, model2.wv.syn0))
        if model.hs:
            # hierarchical-softmax output weights
            self.assertTrue(np.allclose(model.syn1, model2.syn1))
        if model.negative:
            # negative-sampling output weights
            self.assertTrue(np.allclose(model.syn1neg, model2.syn1neg))
        # check docvecs
        self.assertEqual(len(model.docvecs.doctags), len(model2.docvecs.doctags))
        self.assertEqual(len(model.docvecs.offset2doctag), len(model2.docvecs.offset2doctag))
        self.assertTrue(np.allclose(model.docvecs.doctag_syn0, model2.docvecs.doctag_syn0))
def test_delete_temporary_training_data(self):
"""Test doc2vec model after delete_temporary_training_data"""
for i in [0, 1]:
for j in [0, 1]:
model = doc2vec.Doc2Vec(sentences, size=5, min_count=1, window=4, hs=i, negative=j)
if i:
self.assertTrue(hasattr(model, 'syn1'))
if j:
self.assertTrue(hasattr(model, 'syn1neg'))
self.assertTrue(hasattr(model, 'syn0_lockf'))
model.delete_temporary_training_data(keep_doctags_vectors=False, keep_inference=False)
self.assertTrue(len(model['human']), 10)
self.assertTrue(model.wv.vocab['graph'].count, 5)
self.assertTrue(not hasattr(model, 'syn1'))
self.assertTrue(not hasattr(model, 'syn1neg'))
self.assertTrue(not hasattr(model, 'syn0_lockf'))
self.assertTrue(model.docvecs and not hasattr(model.docvecs, 'doctag_syn0'))
self.assertTrue(model.docvecs and not hasattr(model.docvecs, 'doctag_syn0_lockf'))
model = doc2vec.Doc2Vec(
list_corpus, dm=1, dm_mean=1, size=24, window=4, hs=1,
negative=0, alpha=0.05, min_count=2, iter=20
)
model.delete_temporary_training_data(keep_doctags_vectors=True, keep_inference=True)
self.assertTrue(model.docvecs and hasattr(model.docvecs, 'doctag_syn0'))
self.assertTrue(hasattr(model, 'syn1'))
self.model_sanity(model, keep_training=False)
model = doc2vec.Doc2Vec(
list_corpus, dm=1, dm_mean=1, size=24, window=4, hs=0,
negative=1, alpha=0.05, min_count=2, iter=20
)
model.delete_temporary_training_data(keep_doctags_vectors=True, keep_inference=True)
self.model_sanity(model, keep_training=False)
self.assertTrue(hasattr(model, 'syn1neg'))
def test_word_vec_non_writeable(self):
model = keyedvectors.KeyedVectors.load_word2vec_format(datapath('word2vec_pre_kv_c'))
vector = model['says']
with self.assertRaises(ValueError):
vector *= 0
    @log_capture()
    def testBuildVocabWarning(self, l):
        """Test if logger warning is raised on non-ideal input to a doc2vec model"""
        # each 'words' here is a plain string rather than a list of tokens,
        # which should make build_vocab emit the warning checked below
        raw_sentences = ['human', 'machine']
        sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(raw_sentences)]
        model = doc2vec.Doc2Vec()
        model.build_vocab(sentences)
        warning = "Each 'words' should be a list of words (usually unicode strings)."
        self.assertTrue(warning in str(l))
    @log_capture()
    def testTrainWarning(self, l):
        """Test if warning is raised if alpha rises during subsequent calls to train()"""
        raw_sentences = [['human'],
                         ['graph', 'trees']]
        sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(raw_sentences)]
        model = doc2vec.Doc2Vec(alpha=0.025, min_alpha=0.025, min_count=1, workers=8, size=5)
        model.build_vocab(sentences)
        for epoch in range(10):
            model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
            model.alpha -= 0.002
            model.min_alpha = model.alpha
            if epoch == 5:
                # deliberately raise alpha mid-run; this should trigger the warning below
                model.alpha += 0.05
        warning = "Effective 'alpha' higher than previous training cycles"
        self.assertTrue(warning in str(l))
def testLoadOnClassError(self):
"""Test if exception is raised when loading doc2vec model on instance"""
self.assertRaises(AttributeError, load_on_instance)
# endclass TestDoc2VecModel
if not hasattr(TestDoc2VecModel, 'assertLess'):
    # workaround for python 2.6
    def assertLess(self, a, b, msg=None):
        # minimal stand-in for unittest's assertLess (only added in python 2.7)
        self.assertTrue(a < b, msg="%s is not less than %s" % (a, b))
    setattr(TestDoc2VecModel, 'assertLess', assertLess)
# following code is useful for reproducing paragraph-vectors paper sentiment experiments
class ConcatenatedDoc2Vec(object):
    """
    Wrapper that concatenates the outputs of several models, for reproducing
    the Paragraph Vectors paper. All wrapped models must have exactly-matching
    vocabulary and document IDs; train each model separately beforehand, as
    this wrapper only merges their results.
    """

    def __init__(self, models):
        self.models = models
        if hasattr(models[0], 'docvecs'):
            self.docvecs = ConcatenatedDocvecs([model.docvecs for model in models])

    def __getitem__(self, token):
        pieces = [submodel[token] for submodel in self.models]
        return np.concatenate(pieces)

    def infer_vector(self, document, alpha=0.1, min_alpha=0.0001, steps=5):
        inferred = [submodel.infer_vector(document, alpha, min_alpha, steps) for submodel in self.models]
        return np.concatenate(inferred)

    def train(self, *ignore_args, **ignore_kwargs):
        pass  # train subcomponents individually
class ConcatenatedDocvecs(object):
    """Lookup wrapper returning the concatenation of a tag's vectors across models."""

    def __init__(self, models):
        self.models = models

    def __getitem__(self, token):
        vectors = [submodel[token] for submodel in self.models]
        return np.concatenate(vectors)
SentimentDocument = namedtuple('SentimentDocument', 'words tags split sentiment')
def read_su_sentiment_rotten_tomatoes(dirname, lowercase=True):
    """
    Read and return documents from the Stanford Sentiment Treebank
    corpus (Rotten Tomatoes reviews), from http://nlp.Stanford.edu/sentiment/

    Initialize the corpus from a given directory, where
    http://nlp.stanford.edu/~socherr/stanfordSentimentTreebank.zip
    has been expanded. It's not too big, so compose entirely into memory.

    Parameters: `dirname` - directory with the expanded treebank files;
    `lowercase` - if True, lowercase all tokens.
    Returns a list, indexed by phrase id, of SentimentPhrase namedtuples
    (SentimentDocument fields plus `sentence_id`, which is None for
    sub-sentence phrases).
    """
    logging.info("loading corpus from %s", dirname)
    # many mangled chars in sentences (datasetSentences.txt)
    chars_sst_mangled = [
        'à', 'á', 'â', 'ã', 'æ', 'ç', 'è', 'é', 'í',
        'í', 'ï', 'ñ', 'ó', 'ô', 'ö', 'û', 'ü'
    ]
    # each fixup maps the utf-8-bytes-read-as-latin1 mojibake back to the real char
    sentence_fixups = [(char.encode('utf-8').decode('latin1'), char) for char in chars_sst_mangled]
    # more junk, and the replace necessary for sentence-phrase consistency
    sentence_fixups.extend([
        ('Â', ''),
        ('\xa0', ' '),
        ('-LRB-', '('),
        ('-RRB-', ')'),
    ])
    # only this junk in phrases (dictionary.txt)
    phrase_fixups = [('\xa0', ' ')]
    # sentence_id and split are only positive for the full sentences
    # read sentences to temp {sentence -> (id,split) dict, to correlate with dictionary.txt
    info_by_sentence = {}
    with open(os.path.join(dirname, 'datasetSentences.txt'), 'r') as sentences:
        with open(os.path.join(dirname, 'datasetSplit.txt'), 'r') as splits:
            next(sentences)  # legend
            next(splits)  # legend
            # the two files are line-aligned: row i of each describes sentence i
            for sentence_line, split_line in izip(sentences, splits):
                # NOTE: 'id' shadows the builtin within this loop
                (id, text) = sentence_line.split('\t')
                id = int(id)
                text = text.rstrip()
                for junk, fix in sentence_fixups:
                    text = text.replace(junk, fix)
                (id2, split_i) = split_line.split(',')
                assert id == int(id2)
                if text not in info_by_sentence:  # discard duplicates
                    info_by_sentence[text] = (id, int(split_i))
    # read all phrase text
    phrases = [None] * 239232  # known size of phrases
    with open(os.path.join(dirname, 'dictionary.txt'), 'r') as phrase_lines:
        for line in phrase_lines:
            (text, id) = line.split('|')
            for junk, fix in phrase_fixups:
                text = text.replace(junk, fix)
            phrases[int(id)] = text.rstrip()  # for 1st pass just string
    SentimentPhrase = namedtuple('SentimentPhrase', SentimentDocument._fields + ('sentence_id',))
    # add sentiment labels, correlate with sentences
    with open(os.path.join(dirname, 'sentiment_labels.txt'), 'r') as sentiments:
        next(sentiments)  # legend
        for line in sentiments:
            (id, sentiment) = line.split('|')
            id = int(id)
            sentiment = float(sentiment)
            text = phrases[id]
            words = text.split()
            if lowercase:
                words = [word.lower() for word in words]
            # full sentences get their sentence id/split; bare phrases get (None, 0)
            (sentence_id, split_i) = info_by_sentence.get(text, (None, 0))
            split = [None, 'train', 'test', 'dev'][split_i]
            phrases[id] = SentimentPhrase(words, [id], split, sentiment, sentence_id)
    assert len([phrase for phrase in phrases if phrase.sentence_id is not None]) == len(info_by_sentence)  # all
    # counts don't match 8544, 2210, 1101 because 13 TRAIN and 1 DEV sentences are duplicates
    assert len([phrase for phrase in phrases if phrase.split == 'train']) == 8531  # 'train'
    assert len([phrase for phrase in phrases if phrase.split == 'test']) == 2210  # 'test'
    assert len([phrase for phrase in phrases if phrase.split == 'dev']) == 1100  # 'dev'
    logging.info(
        "loaded corpus with %i sentences and %i phrases from %s",
        len(info_by_sentence), len(phrases), dirname
    )
    return phrases
if __name__ == '__main__':
    # run the doc2vec test suite with verbose logging
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    logging.info("using optimization %s", doc2vec.FAST_VERSION)
    unittest.main()
| 25,377 | 42.23339 | 119 | py |
poincare_glove | poincare_glove-master/gensim/test/test_keras_integration.py | import unittest
import numpy as np
from gensim.models import word2vec
try:
from sklearn.datasets import fetch_20newsgroups
except ImportError:
raise unittest.SkipTest("Test requires sklearn to be installed, which is not available")
try:
import keras
from keras.engine import Input
from keras.models import Model
from keras.layers.merge import dot
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Dense, Flatten
from keras.layers import Conv1D, MaxPooling1D
except ImportError:
raise unittest.SkipTest("Test requires Keras to be installed, which is not available")
from gensim.test.utils import common_texts
class TestKerasWord2VecWrapper(unittest.TestCase):
    """Tests for exposing gensim word2vec embeddings as a Keras Embedding layer."""
    def setUp(self):
        # small trained model (hierarchical softmax, 100 dims) for the similarity tests
        self.model_cos_sim = word2vec.Word2Vec(common_texts, size=100, min_count=1, hs=1)
        # untrained model; vocab building and training happen in the 20-newsgroups test
        self.model_twenty_ng = word2vec.Word2Vec(min_count=1)
    def testWord2VecTraining(self):
        """
        Test word2vec training.
        """
        model = self.model_cos_sim
        self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 100))
        self.assertTrue(model.syn1.shape == (len(model.wv.vocab), 100))
        sims = model.most_similar('graph', topn=10)
        # self.assertTrue(sims[0][0] == 'trees', sims) # most similar
        # test querying for "most similar" by vector
        graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
        sims2 = model.most_similar(positive=[graph_vector], topn=11)
        sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself
        self.assertEqual(sims, sims2)
    def testEmbeddingLayerCosineSim(self):
        """
        Test Keras 'Embedding' layer returned by 'get_embedding_layer' function for a simple word similarity task.
        """
        keras_w2v_model = self.model_cos_sim
        keras_w2v_model_wv = keras_w2v_model.wv
        embedding_layer = keras_w2v_model_wv.get_keras_embedding()
        # two single-token inputs whose embeddings are compared by cosine similarity
        input_a = Input(shape=(1,), dtype='int32', name='input_a')
        input_b = Input(shape=(1,), dtype='int32', name='input_b')
        embedding_a = embedding_layer(input_a)
        embedding_b = embedding_layer(input_b)
        similarity = dot([embedding_a, embedding_b], axes=2, normalize=True)
        model = Model(input=[input_a, input_b], output=similarity)
        model.compile(optimizer='sgd', loss='mse')
        word_a = 'graph'
        word_b = 'trees'
        output = model.predict([
            np.asarray([keras_w2v_model.wv.vocab[word_a].index]),
            np.asarray([keras_w2v_model.wv.vocab[word_b].index])
        ])
        # output is the cosine distance between the two words (as a similarity measure)
        self.assertTrue(type(output[0][0][0]) == np.float32) # verify that a float is returned
    def testEmbeddingLayer20NewsGroup(self):
        """
        Test Keras 'Embedding' layer returned by 'get_embedding_layer' function
        for a smaller version of the 20NewsGroup classification problem.
        """
        MAX_SEQUENCE_LENGTH = 1000
        # Prepare text samples and their labels
        # Processing text dataset
        texts = [] # list of text samples
        texts_w2v = [] # used to train the word embeddings
        labels = [] # list of label ids
        data = fetch_20newsgroups(subset='train', categories=['alt.atheism', 'comp.graphics', 'sci.space'])
        # NOTE(review): len() of the sklearn Bunch counts its attributes, not the
        # posts -- len(data.data) was probably intended here; confirm.
        for index in range(len(data)):
            label_id = data.target[index]
            file_data = data.data[index]
            i = file_data.find('\n\n') # skip header
            if i > 0:
                file_data = file_data[i:]
            try:
                curr_str = str(file_data)
                sentence_list = curr_str.split('\n')
                for sentence in sentence_list:
                    sentence = (sentence.strip()).lower()
                    texts.append(sentence)
                    texts_w2v.append(sentence.split(' '))
                    labels.append(label_id)
            except Exception:
                pass
        # Vectorize the text samples into a 2D integer tensor
        tokenizer = Tokenizer()
        tokenizer.fit_on_texts(texts)
        sequences = tokenizer.texts_to_sequences(texts)
        # word_index = tokenizer.word_index
        data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
        labels = to_categorical(np.asarray(labels))
        x_train = data
        y_train = labels
        # prepare the embedding layer using the wrapper
        keras_w2v = self.model_twenty_ng
        keras_w2v.build_vocab(texts_w2v)
        keras_w2v.train(texts, total_examples=keras_w2v.corpus_count, epochs=keras_w2v.iter)
        keras_w2v_wv = keras_w2v.wv
        embedding_layer = keras_w2v_wv.get_keras_embedding()
        # create a 1D convnet to solve our classification task
        sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
        embedded_sequences = embedding_layer(sequence_input)
        x = Conv1D(128, 5, activation='relu')(embedded_sequences)
        x = MaxPooling1D(5)(x)
        x = Conv1D(128, 5, activation='relu')(x)
        x = MaxPooling1D(5)(x)
        x = Conv1D(128, 5, activation='relu')(x)
        x = MaxPooling1D(35)(x) # global max pooling
        x = Flatten()(x)
        x = Dense(128, activation='relu')(x)
        preds = Dense(y_train.shape[1], activation='softmax')(x)
        model = Model(sequence_input, preds)
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
        fit_ret_val = model.fit(x_train, y_train, epochs=1)
        # verify the type of the object returned after training
        # value returned is a `History` instance.
        # Its `history` attribute contains all information collected during training.
        self.assertTrue(type(fit_ret_val) == keras.callbacks.History)
if __name__ == '__main__':
    # run the Keras-integration test suite
    unittest.main()
| 6,028 | 38.927152 | 114 | py |
poincare_glove | poincare_glove-master/gensim/test/test_scripts.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Manos Stergiadis <em.stergiadis@gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking the output of gensim.scripts.
"""
from __future__ import unicode_literals
import json
import logging
import os.path
import unittest
from gensim.scripts.segment_wiki import segment_all_articles, segment_and_write_all_articles
from smart_open import smart_open
from gensim.test.utils import datapath, get_tmpfile
class TestSegmentWiki(unittest.TestCase):
    """End-to-end checks for gensim.scripts.segment_wiki on a shortened enwiki dump."""
    def setUp(self):
        self.fname = datapath('enwiki-latest-pages-articles1.xml-p000000010p000030302-shortened.bz2')
        # the first article in the shortened dump, used by the content checks below
        self.expected_title = 'Anarchism'
        self.expected_section_titles = [
            'Introduction',
            'Etymology and terminology',
            'History',
            'Anarchist schools of thought',
            'Internal issues and debates',
            'Topics of interest',
            'Criticisms',
            'References',
            'Further reading',
            'External links'
        ]
    def tearDown(self):
        # remove all temporary test files
        fname = get_tmpfile('script.tst')
        extensions = ['', '.json']
        for ext in extensions:
            try:
                os.remove(fname + ext)
            except OSError:
                # file was never created by the test; nothing to clean up
                pass
    def test_segment_all_articles(self):
        # only the first yielded article is inspected
        title, sections, interlinks = next(segment_all_articles(self.fname, include_interlinks=True))
        # Check title
        self.assertEqual(title, self.expected_title)
        # Check section titles
        section_titles = [s[0] for s in sections]
        self.assertEqual(section_titles, self.expected_section_titles)
        # Check text
        first_section_text = sections[0][1]
        first_sentence = "'''Anarchism''' is a political philosophy that advocates self-governed societies"
        self.assertTrue(first_sentence in first_section_text)
        # Check interlinks
        self.assertTrue(interlinks['self-governance'] == 'self-governed')
        self.assertTrue(interlinks['Hierarchy'] == 'hierarchical')
        self.assertTrue(interlinks['Pierre-Joseph Proudhon'] == 'Proudhon')
    def test_generator_len(self):
        expected_num_articles = 106
        num_articles = sum(1 for x in segment_all_articles(self.fname))
        self.assertEqual(num_articles, expected_num_articles)
    def test_json_len(self):
        # one JSON line is written per article
        tmpf = get_tmpfile('script.tst.json')
        segment_and_write_all_articles(self.fname, tmpf, workers=1)
        expected_num_articles = 106
        num_articles = sum(1 for line in smart_open(tmpf))
        self.assertEqual(num_articles, expected_num_articles)
    def test_segment_and_write_all_articles(self):
        tmpf = get_tmpfile('script.tst.json')
        segment_and_write_all_articles(self.fname, tmpf, workers=1, include_interlinks=True)
        # Get the first line from the text file we created.
        with open(tmpf) as f:
            first = next(f)
        # decode JSON line into a Python dictionary object
        article = json.loads(first)
        title, section_titles, interlinks = article['title'], article['section_titles'], article['interlinks']
        self.assertEqual(title, self.expected_title)
        self.assertEqual(section_titles, self.expected_section_titles)
        # Check interlinks
        self.assertTrue(interlinks['self-governance'] == 'self-governed')
        self.assertTrue(interlinks['Hierarchy'] == 'hierarchical')
        self.assertTrue(interlinks['Pierre-Joseph Proudhon'] == 'Proudhon')
if __name__ == '__main__':
    # run the segment_wiki script tests with verbose logging
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
| 3,744 | 33.357798 | 110 | py |
poincare_glove | poincare_glove-master/gensim/test/test_logentropy_model.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import numpy as np
from gensim.corpora.mmcorpus import MmCorpus
from gensim.models import logentropy_model
from gensim.test.utils import datapath, get_tmpfile
class TestLogEntropyModel(unittest.TestCase):
    """Unit tests for the log-entropy transformation model."""

    def setUp(self):
        # two small matrix-market corpora shipped with the test data
        self.corpus_small = MmCorpus(datapath('test_corpus_small.mm'))
        self.corpus_ok = MmCorpus(datapath('test_corpus_ok.mm'))

    def testTransform(self):
        # create the transformation model (no normalization)
        model = logentropy_model.LogEntropyModel(self.corpus_ok, normalize=False)
        # transform the first document and compare against known values
        first_doc = list(self.corpus_ok)[0]
        got = model[first_doc]
        want = [
            (0, 0.3748900964125389),
            (1, 0.30730215324230725),
            (3, 1.20941755462856)
        ]
        self.assertTrue(np.allclose(got, want))

    def testPersistence(self):
        fname = get_tmpfile('gensim_models_logentry.tst')
        original = logentropy_model.LogEntropyModel(self.corpus_ok, normalize=True)
        original.save(fname)
        reloaded = logentropy_model.LogEntropyModel.load(fname)
        # entropy table and transformation output must survive a save/load round-trip
        self.assertTrue(original.entr == reloaded.entr)
        empty_doc = []
        self.assertTrue(np.allclose(original[empty_doc], reloaded[empty_doc]))

    def testPersistenceCompressed(self):
        fname = get_tmpfile('gensim_models_logentry.tst.gz')
        original = logentropy_model.LogEntropyModel(self.corpus_ok, normalize=True)
        original.save(fname)
        reloaded = logentropy_model.LogEntropyModel.load(fname, mmap=None)
        # same round-trip check, but through a gzip-compressed file
        self.assertTrue(original.entr == reloaded.entr)
        empty_doc = []
        self.assertTrue(np.allclose(original[empty_doc], reloaded[empty_doc]))
if __name__ == '__main__':
    # run the log-entropy model tests with verbose logging
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 2,093 | 32.238095 | 96 | py |
poincare_glove | poincare_glove-master/gensim/test/test_ldaseqmodel.py | """
Tests to check DTM math functions and Topic-Word, Doc-Topic proportions.
"""
import unittest
import logging
import numpy as np # for arrays, array broadcasting etc.
from gensim.models import ldaseqmodel
from gensim.corpora import Dictionary
from gensim.test.utils import datapath
class TestLdaSeq(unittest.TestCase):
# we are setting up a DTM model and fitting it, and checking topic-word and doc-topic results.
def setUp(self):
texts = [
[u'senior', u'studios', u'studios', u'studios', u'creators', u'award', u'mobile', u'currently',
u'challenges', u'senior', u'summary', u'senior', u'motivated', u'creative', u'senior'],
[u'performs', u'engineering', u'tasks', u'infrastructure', u'focusing', u'primarily',
u'programming', u'interaction', u'designers', u'engineers', u'leadership', u'teams',
u'teams', u'crews', u'responsibilities', u'engineering', u'quality', u'functional',
u'functional', u'teams', u'organizing', u'prioritizing', u'technical', u'decisions',
u'engineering', u'participates', u'participates', u'reviews', u'participates',
u'hiring', u'conducting', u'interviews'],
[u'feedback', u'departments', u'define', u'focusing', u'engineering', u'teams', u'crews',
u'facilitate', u'engineering', u'departments', u'deadlines', u'milestones', u'typically',
u'spends', u'designing', u'developing', u'updating', u'bugs', u'mentoring', u'engineers',
u'define', u'schedules', u'milestones', u'participating'],
[u'reviews', u'interviews', u'sized', u'teams', u'interacts', u'disciplines', u'knowledge',
u'skills', u'knowledge', u'knowledge', u'xcode', u'scripting', u'debugging', u'skills',
u'skills', u'knowledge', u'disciplines', u'animation', u'networking', u'expertise',
u'competencies', u'oral', u'skills', u'management', u'skills', u'proven', u'effectively',
u'teams', u'deadline', u'environment', u'bachelor', u'minimum', u'shipped', u'leadership',
u'teams', u'location', u'resumes', u'jobs', u'candidates', u'openings', u'jobs'],
[u'maryland', u'client', u'producers', u'electricity', u'operates', u'storage', u'utility',
u'retail', u'customers', u'engineering', u'consultant', u'maryland', u'summary', u'technical',
u'technology', u'departments', u'expertise', u'maximizing', u'output', u'reduces', u'operating',
u'participates', u'areas', u'engineering', u'conducts', u'testing', u'solve', u'supports',
u'environmental', u'understands', u'objectives', u'operates', u'responsibilities', u'handles',
u'complex', u'engineering', u'aspects', u'monitors', u'quality', u'proficiency', u'optimization',
u'recommendations', u'supports', u'personnel', u'troubleshooting', u'commissioning', u'startup',
u'shutdown', u'supports', u'procedure', u'operating', u'units', u'develops', u'simulations',
u'troubleshooting', u'tests', u'enhancing', u'solving', u'develops', u'estimates', u'schedules',
u'scopes', u'understands', u'technical', u'management', u'utilize', u'routine', u'conducts',
u'hazards', u'utilizing', u'hazard', u'operability', u'methodologies', u'participates', u'startup',
u'reviews', u'pssr', u'participate', u'teams', u'participate', u'regulatory', u'audits', u'define',
u'scopes', u'budgets', u'schedules', u'technical', u'management', u'environmental', u'awareness',
u'interfacing', u'personnel', u'interacts', u'regulatory', u'departments', u'input', u'objectives',
u'identifying', u'introducing', u'concepts', u'solutions', u'peers', u'customers', u'coworkers',
u'knowledge', u'skills', u'engineering', u'quality', u'engineering'],
[u'commissioning', u'startup', u'knowledge', u'simulators', u'technologies', u'knowledge',
u'engineering', u'techniques', u'disciplines', u'leadership', u'skills', u'proven',
u'engineers', u'oral', u'skills', u'technical', u'skills', u'analytically', u'solve',
u'complex', u'interpret', u'proficiency', u'simulation', u'knowledge', u'applications',
u'manipulate', u'applications', u'engineering'],
[u'calculations', u'programs', u'matlab', u'excel', u'independently', u'environment',
u'proven', u'skills', u'effectively', u'multiple', u'tasks', u'planning', u'organizational',
u'management', u'skills', u'rigzone', u'jobs', u'developer', u'exceptional', u'strategies',
u'junction', u'exceptional', u'strategies', u'solutions', u'solutions', u'biggest',
u'insurers', u'operates', u'investment'],
[u'vegas', u'tasks', u'electrical', u'contracting', u'expertise', u'virtually', u'electrical',
u'developments', u'institutional', u'utilities', u'technical', u'experts', u'relationships',
u'credibility', u'contractors', u'utility', u'customers', u'customer', u'relationships',
u'consistently', u'innovations', u'profile', u'construct', u'envision', u'dynamic', u'complex',
u'electrical', u'management', u'grad', u'internship', u'electrical', u'engineering',
u'infrastructures', u'engineers', u'documented', u'management', u'engineering',
u'quality', u'engineering', u'electrical', u'engineers', u'complex', u'distribution',
u'grounding', u'estimation', u'testing', u'procedures', u'voltage', u'engineering'],
[u'troubleshooting', u'installation', u'documentation', u'bsee', u'certification',
u'electrical', u'voltage', u'cabling', u'electrical', u'engineering', u'candidates',
u'electrical', u'internships', u'oral', u'skills', u'organizational', u'prioritization',
u'skills', u'skills', u'excel', u'cadd', u'calculation', u'autocad', u'mathcad',
u'skills', u'skills', u'customer', u'relationships', u'solving', u'ethic', u'motivation',
u'tasks', u'budget', u'affirmative', u'diversity', u'workforce', u'gender', u'orientation',
u'disability', u'disabled', u'veteran', u'vietnam', u'veteran', u'qualifying', u'veteran',
u'diverse', u'candidates', u'respond', u'developing', u'workplace', u'reflects', u'diversity',
u'communities', u'reviews', u'electrical', u'contracting', u'southwest', u'electrical', u'contractors'],
[u'intern', u'electrical', u'engineering', u'idexx', u'laboratories', u'validating', u'idexx',
u'integrated', u'hardware', u'entails', u'planning', u'debug', u'validation', u'engineers',
u'validation', u'methodologies', u'healthcare', u'platforms', u'brightest', u'solve',
u'challenges', u'innovation', u'technology', u'idexx', u'intern', u'idexx', u'interns',
u'supplement', u'interns', u'teams', u'roles', u'competitive', u'interns', u'idexx',
u'interns', u'participate', u'internships', u'mentors', u'seminars', u'topics', u'leadership',
u'workshops', u'relevant', u'planning', u'topics', u'intern', u'presentations', u'mixers',
u'applicants', u'ineligible', u'laboratory', u'compliant', u'idexx', u'laboratories', u'healthcare',
u'innovation', u'practicing', u'veterinarians', u'diagnostic', u'technology', u'idexx', u'enhance',
u'veterinarians', u'efficiency', u'economically', u'idexx', u'worldwide', u'diagnostic', u'tests',
u'tests', u'quality', u'headquartered', u'idexx', u'laboratories', u'employs', u'customers',
u'qualifications', u'applicants', u'idexx', u'interns', u'potential', u'demonstrated', u'portfolio',
u'recommendation', u'resumes', u'marketing', u'location', u'americas', u'verification', u'validation',
u'schedule', u'overtime', u'idexx', u'laboratories', u'reviews', u'idexx', u'laboratories',
u'nasdaq', u'healthcare', u'innovation', u'practicing', u'veterinarians'],
[u'location', u'duration', u'temp', u'verification', u'validation', u'tester', u'verification',
u'validation', u'middleware', u'specifically', u'testing', u'applications', u'clinical',
u'laboratory', u'regulated', u'environment', u'responsibilities', u'complex', u'hardware',
u'testing', u'clinical', u'analyzers', u'laboratory', u'graphical', u'interfaces', u'complex',
u'sample', u'sequencing', u'protocols', u'developers', u'correction', u'tracking',
u'tool', u'timely', u'troubleshoot', u'testing', u'functional', u'manual',
u'automated', u'participate', u'ongoing'],
[u'testing', u'coverage', u'planning', u'documentation', u'testing', u'validation',
u'corrections', u'monitor', u'implementation', u'recurrence', u'operating', u'statistical',
u'quality', u'testing', u'global', u'multi', u'teams', u'travel', u'skills', u'concepts',
u'waterfall', u'agile', u'methodologies', u'debugging', u'skills', u'complex', u'automated',
u'instrumentation', u'environment', u'hardware', u'mechanical', u'components', u'tracking',
u'lifecycle', u'management', u'quality', u'organize', u'define', u'priorities', u'organize',
u'supervision', u'aggressive', u'deadlines', u'ambiguity', u'analyze', u'complex', u'situations',
u'concepts', u'technologies', u'verbal', u'skills', u'effectively', u'technical', u'clinical',
u'diverse', u'strategy', u'clinical', u'chemistry', u'analyzer', u'laboratory', u'middleware',
u'basic', u'automated', u'testing', u'biomedical', u'engineering', u'technologists',
u'laboratory', u'technology', u'availability', u'click', u'attach'],
[u'scientist', u'linux', u'asrc', u'scientist', u'linux', u'asrc', u'technology',
u'solutions', u'subsidiary', u'asrc', u'engineering', u'technology', u'contracts'],
[u'multiple', u'agencies', u'scientists', u'engineers', u'management', u'personnel',
u'allows', u'solutions', u'complex', u'aeronautics', u'aviation', u'management', u'aviation',
u'engineering', u'hughes', u'technical', u'technical', u'aviation', u'evaluation',
u'engineering', u'management', u'technical', u'terminal', u'surveillance', u'programs',
u'currently', u'scientist', u'travel', u'responsibilities', u'develops', u'technology',
u'modifies', u'technical', u'complex', u'reviews', u'draft', u'conformity', u'completeness',
u'testing', u'interface', u'hardware', u'regression', u'impact', u'reliability',
u'maintainability', u'factors', u'standardization', u'skills', u'travel', u'programming',
u'linux', u'environment', u'cisco', u'knowledge', u'terminal', u'environment', u'clearance',
u'clearance', u'input', u'output', u'digital', u'automatic', u'terminal', u'management',
u'controller', u'termination', u'testing', u'evaluating', u'policies', u'procedure', u'interface',
u'installation', u'verification', u'certification', u'core', u'avionic', u'programs', u'knowledge',
u'procedural', u'testing', u'interfacing', u'hardware', u'regression', u'impact',
u'reliability', u'maintainability', u'factors', u'standardization', u'missions', u'asrc', u'subsidiaries',
u'affirmative', u'employers', u'applicants', u'disability', u'veteran', u'technology', u'location',
u'airport', u'bachelor', u'schedule', u'travel', u'contributor', u'management', u'asrc', u'reviews'],
[u'technical', u'solarcity', u'niche', u'vegas', u'overview', u'resolving', u'customer',
u'clients', u'expanding', u'engineers', u'developers', u'responsibilities', u'knowledge',
u'planning', u'adapt', u'dynamic', u'environment', u'inventive', u'creative', u'solarcity',
u'lifecycle', u'responsibilities', u'technical', u'analyzing', u'diagnosing', u'troubleshooting',
u'customers', u'ticketing', u'console', u'escalate', u'knowledge', u'engineering', u'timely',
u'basic', u'phone', u'functionality', u'customer', u'tracking', u'knowledgebase', u'rotation',
u'configure', u'deployment', u'sccm', u'technical', u'deployment', u'deploy', u'hardware',
u'solarcity', u'bachelor', u'knowledge', u'dell', u'laptops', u'analytical', u'troubleshooting',
u'solving', u'skills', u'knowledge', u'databases', u'preferably', u'server', u'preferably',
u'monitoring', u'suites', u'documentation', u'procedures', u'knowledge', u'entries', u'verbal',
u'skills', u'customer', u'skills', u'competitive', u'solar', u'package', u'insurance', u'vacation',
u'savings', u'referral', u'eligibility', u'equity', u'performers', u'solarcity', u'affirmative',
u'diversity', u'workplace', u'applicants', u'orientation', u'disability', u'veteran', u'careerrookie'],
[u'embedded', u'exelis', u'junction', u'exelis', u'embedded', u'acquisition', u'networking',
u'capabilities', u'classified', u'customer', u'motivated', u'develops', u'tests',
u'innovative', u'solutions', u'minimal', u'supervision', u'paced', u'environment', u'enjoys',
u'assignments', u'interact', u'multi', u'disciplined', u'challenging', u'focused', u'embedded',
u'developments', u'spanning', u'engineering', u'lifecycle', u'specification', u'enhancement',
u'applications', u'embedded', u'freescale', u'applications', u'android', u'platforms',
u'interface', u'customers', u'developers', u'refine', u'specifications', u'architectures'],
[u'java', u'programming', u'scripts', u'python', u'debug', u'debugging', u'emulators',
u'regression', u'revisions', u'specialized', u'setups', u'capabilities', u'subversion',
u'technical', u'documentation', u'multiple', u'engineering', u'techexpousa', u'reviews'],
[u'modeler', u'semantic', u'modeling', u'models', u'skills', u'ontology', u'resource',
u'framework', u'schema', u'technologies', u'hadoop', u'warehouse', u'oracle', u'relational',
u'artifacts', u'models', u'dictionaries', u'models', u'interface', u'specifications',
u'documentation', u'harmonization', u'mappings', u'aligned', u'coordinate', u'technical',
u'peer', u'reviews', u'stakeholder', u'communities', u'impact', u'domains', u'relationships',
u'interdependencies', u'models', u'define', u'analyze', u'legacy', u'models', u'corporate',
u'databases', u'architectural', u'alignment', u'customer', u'expertise', u'harmonization',
u'modeling', u'modeling', u'consulting', u'stakeholders', u'quality', u'models', u'storage',
u'agile', u'specifically', u'focus', u'modeling', u'qualifications', u'bachelors', u'accredited',
u'modeler', u'encompass', u'evaluation', u'skills', u'knowledge', u'modeling', u'techniques',
u'resource', u'framework', u'schema', u'technologies', u'unified', u'modeling', u'technologies',
u'schemas', u'ontologies', u'sybase', u'knowledge', u'skills', u'interpersonal', u'skills',
u'customers', u'clearance', u'applicants', u'eligibility', u'classified', u'clearance',
u'polygraph', u'techexpousa', u'solutions', u'partnership', u'solutions', u'integration'],
[u'technologies', u'junction', u'develops', u'maintains', u'enhances', u'complex', u'diverse',
u'intensive', u'analytics', u'algorithm', u'manipulation', u'management', u'documented',
u'individually', u'reviews', u'tests', u'components', u'adherence', u'resolves', u'utilizes',
u'methodologies', u'environment', u'input', u'components', u'hardware', u'offs', u'reuse', u'cots',
u'gots', u'synthesis', u'components', u'tasks', u'individually', u'analyzes', u'modifies',
u'debugs', u'corrects', u'integrates', u'operating', u'environments', u'develops', u'queries',
u'databases', u'repositories', u'recommendations', u'improving', u'documentation', u'develops',
u'implements', u'algorithms', u'functional', u'assists', u'developing', u'executing', u'procedures',
u'components', u'reviews', u'documentation', u'solutions', u'analyzing', u'conferring',
u'users', u'engineers', u'analyzing', u'investigating', u'areas', u'adapt', u'hardware',
u'mathematical', u'models', u'predict', u'outcome', u'implement', u'complex', u'database',
u'repository', u'interfaces', u'queries', u'bachelors', u'accredited', u'substituted',
u'bachelors', u'firewalls', u'ipsec', u'vpns', u'technology', u'administering', u'servers',
u'apache', u'jboss', u'tomcat', u'developing', u'interfaces', u'firefox', u'internet',
u'explorer', u'operating', u'mainframe', u'linux', u'solaris', u'virtual', u'scripting',
u'programming', u'oriented', u'programming', u'ajax', u'script', u'procedures', u'cobol',
u'cognos', u'fusion', u'focus', u'html', u'java', u'java', u'script', u'jquery', u'perl',
u'visual', u'basic', u'powershell', u'cots', u'cots', u'oracle', u'apex', u'integration',
u'competitive', u'package', u'bonus', u'corporate', u'equity', u'tuition', u'reimbursement',
u'referral', u'bonus', u'holidays', u'insurance', u'flexible', u'disability', u'insurance'],
[u'technologies', u'disability', u'accommodation', u'recruiter', u'techexpousa'],
['bank', 'river', 'shore', 'water'],
['river', 'water', 'flow', 'fast', 'tree'],
['bank', 'water', 'fall', 'flow'],
['bank', 'bank', 'water', 'rain', 'river'],
['river', 'water', 'mud', 'tree'],
['money', 'transaction', 'bank', 'finance'],
['bank', 'borrow', 'money'],
['bank', 'finance'],
['finance', 'money', 'sell', 'bank'],
['borrow', 'sell'],
['bank', 'loan', 'sell']
]
# initializing using own LDA sufficient statistics so that we get same results each time.
sstats = np.loadtxt(datapath('DTM/sstats_test.txt'))
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
self.ldaseq = ldaseqmodel.LdaSeqModel(
corpus=corpus, id2word=dictionary, num_topics=2,
time_slice=[10, 10, 11], initialize='own', sstats=sstats
)
# testing topic word proportions
def testTopicWord(self):
topics = self.ldaseq.print_topics(0)
expected_topic_word = [('skills', 0.035999999999999997)]
self.assertEqual(topics[0][0][0], expected_topic_word[0][0])
self.assertAlmostEqual(topics[0][0][1], expected_topic_word[0][1], places=2)
# testing document-topic proportions
def testDocTopic(self):
doc_topic = self.ldaseq.doc_topics(0)
expected_doc_topic = 0.00066577896138482028
self.assertAlmostEqual(doc_topic[0], expected_doc_topic, places=2)
    def testDtypeBackwardCompatibility(self):
        """A model pickled by gensim 3.0.1 must still load and score documents."""
        ldaseq_3_0_1_fname = datapath('DTM/ldaseq_3_0_1_model')
        # Bag-of-words document: (token_id, count) pairs known to the saved model.
        test_doc = [(547, 1), (549, 1), (552, 1), (555, 1)]
        expected_topics = [0.99751244, 0.00248756]
        # save model to use in test
        # self.ldaseq.save(ldaseq_3_0_1_fname)
        # load a model saved using a 3.0.1 version of Gensim
        model = ldaseqmodel.LdaSeqModel.load(ldaseq_3_0_1_fname)
        # and test it on a predefined document
        topics = model[test_doc]
        self.assertTrue(np.allclose(expected_topics, topics))
if __name__ == '__main__':
    # Run the suite with verbose logging when executed as a script.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 20,187 | 82.078189 | 119 | py |
poincare_glove | poincare_glove-master/gensim/test/test_similarities.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for similarity algorithms (the similarities package).
"""
import logging
import unittest
import os
import numpy
import scipy
from smart_open import smart_open
from gensim.corpora import Dictionary
from gensim.models import word2vec
from gensim.models import doc2vec
from gensim.models import KeyedVectors
from gensim import matutils, similarities
from gensim.models import Word2Vec, FastText
from gensim.test.utils import (datapath, get_tmpfile,
common_texts as texts, common_dictionary as dictionary, common_corpus as corpus)
# Optional dependency probe: Word Mover's Distance tests need pyemd; the
# WMD tests below become no-ops when it is missing.
try:
    from pyemd import emd # noqa:F401
    PYEMD_EXT = True
except ImportError:
    PYEMD_EXT = False
# Tagged documents for the Doc2Vec-based Annoy indexer tests.
sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(texts)]
class _TestSimilarityABC(object):
    """
    Base class for SparseMatrixSimilarity and MatrixSimilarity unit tests.
    """
    # Subclasses set self.cls in setUp; the tests dispatch on it because
    # similarities.Similarity takes an extra output-prefix argument and
    # shards its index on disk (hence the destroy() calls).
    def factoryMethod(self):
        """Creates a SimilarityABC instance."""
        return self.cls(corpus, num_features=len(dictionary))
    def testFull(self, num_best=None, shardsize=100):
        """Query one document against the whole index; check the similarity vector."""
        if self.cls == similarities.Similarity:
            index = self.cls(None, corpus, num_features=len(dictionary), shardsize=shardsize)
        else:
            index = self.cls(corpus, num_features=len(dictionary))
        if isinstance(index, similarities.MatrixSimilarity):
            expected = numpy.array([
                [0.57735026, 0.57735026, 0.57735026, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.40824831, 0.0, 0.40824831, 0.40824831, 0.40824831, 0.40824831, 0.40824831, 0.0, 0.0, 0.0, 0.0],
                [0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.40824831, 0.0, 0.0, 0.0, 0.81649661, 0.0, 0.40824831, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.57735026, 0.57735026, 0.0, 0.0, 0.57735026, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1., 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.70710677, 0.70710677, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.57735026, 0.57735026],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.57735026],
            ], dtype=numpy.float32)
            # HACK: dictionary can be in different order, so compare in sorted order
            self.assertTrue(numpy.allclose(sorted(expected.flat), sorted(index.index.flat)))
        index.num_best = num_best
        query = corpus[0]
        sims = index[query]
        expected = [(0, 0.99999994), (2, 0.28867513), (3, 0.23570226), (1, 0.23570226)][: num_best]
        # convert sims to full numpy arrays, so we can use allclose() and ignore
        # ordering of items with the same similarity value
        expected = matutils.sparse2full(expected, len(index))
        if num_best is not None: # when num_best is None, sims is already a numpy array
            sims = matutils.sparse2full(sims, len(index))
        self.assertTrue(numpy.allclose(expected, sims))
        if self.cls == similarities.Similarity:
            index.destroy()
    def testNumBest(self):
        """Run testFull over several num_best settings, including edge values."""
        if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
            return
        for num_best in [None, 0, 1, 9, 1000]:
            self.testFull(num_best=num_best)
    def test_full2sparse_clipped(self):
        """full2sparse_clipped keeps the topn entries by absolute value."""
        vec = [0.8, 0.2, 0.0, 0.0, -0.1, -0.15]
        expected = [(0, 0.80000000000000004), (1, 0.20000000000000001), (5, -0.14999999999999999)]
        self.assertTrue(matutils.full2sparse_clipped(vec, topn=3), expected)
    def test_scipy2scipy_clipped(self):
        """scipy2scipy_clipped clips scipy vectors and matrices, staying sparse."""
        # Test for scipy vector/row
        vec = [0.8, 0.2, 0.0, 0.0, -0.1, -0.15]
        expected = [(0, 0.80000000000000004), (1, 0.20000000000000001), (5, -0.14999999999999999)]
        vec_scipy = scipy.sparse.csr_matrix(vec)
        vec_scipy_clipped = matutils.scipy2scipy_clipped(vec_scipy, topn=3)
        self.assertTrue(scipy.sparse.issparse(vec_scipy_clipped))
        self.assertTrue(matutils.scipy2sparse(vec_scipy_clipped), expected)
        # Test for scipy matrix
        vec = [0.8, 0.2, 0.0, 0.0, -0.1, -0.15]
        expected = [(0, 0.80000000000000004), (1, 0.20000000000000001), (5, -0.14999999999999999)]
        matrix_scipy = scipy.sparse.csr_matrix([vec] * 3)
        matrix_scipy_clipped = matutils.scipy2scipy_clipped(matrix_scipy, topn=3)
        self.assertTrue(scipy.sparse.issparse(matrix_scipy_clipped))
        self.assertTrue([matutils.scipy2sparse(x) for x in matrix_scipy_clipped], [expected] * 3)
    def testEmptyQuery(self):
        """Querying with an empty document must not raise IndexError."""
        index = self.factoryMethod()
        query = []
        try:
            sims = index[query]
            self.assertTrue(sims is not None)
        except IndexError:
            self.assertTrue(False)
    def testChunking(self):
        """Query several documents at once; check both dense and num_best output."""
        if self.cls == similarities.Similarity:
            index = self.cls(None, corpus, num_features=len(dictionary), shardsize=5)
        else:
            index = self.cls(corpus, num_features=len(dictionary))
        query = corpus[:3]
        sims = index[query]
        expected = numpy.array([
            [0.99999994, 0.23570226, 0.28867513, 0.23570226, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.23570226, 1.0, 0.40824831, 0.33333334, 0.70710677, 0.0, 0.0, 0.0, 0.23570226],
            [0.28867513, 0.40824831, 1.0, 0.61237246, 0.28867513, 0.0, 0.0, 0.0, 0.0]
        ], dtype=numpy.float32)
        self.assertTrue(numpy.allclose(expected, sims))
        # test the same thing but with num_best
        index.num_best = 3
        sims = index[query]
        expected = [
            [(0, 0.99999994), (2, 0.28867513), (1, 0.23570226)],
            [(1, 1.0), (4, 0.70710677), (2, 0.40824831)],
            [(2, 1.0), (3, 0.61237246), (1, 0.40824831)]
        ]
        self.assertTrue(numpy.allclose(expected, sims))
        if self.cls == similarities.Similarity:
            index.destroy()
    def testIter(self):
        """Iterating the index yields one similarity row per indexed document."""
        if self.cls == similarities.Similarity:
            index = self.cls(None, corpus, num_features=len(dictionary), shardsize=5)
        else:
            index = self.cls(corpus, num_features=len(dictionary))
        sims = [sim for sim in index]
        expected = numpy.array([
            [0.99999994, 0.23570226, 0.28867513, 0.23570226, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.23570226, 1.0, 0.40824831, 0.33333334, 0.70710677, 0.0, 0.0, 0.0, 0.23570226],
            [0.28867513, 0.40824831, 1.0, 0.61237246, 0.28867513, 0.0, 0.0, 0.0, 0.0],
            [0.23570226, 0.33333334, 0.61237246, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.70710677, 0.28867513, 0.0, 0.99999994, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.70710677, 0.57735026, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.70710677, 0.99999994, 0.81649655, 0.40824828],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.81649655, 0.99999994, 0.66666663],
            [0.0, 0.23570226, 0.0, 0.0, 0.0, 0.0, 0.40824828, 0.66666663, 0.99999994]
        ], dtype=numpy.float32)
        self.assertTrue(numpy.allclose(expected, sims))
        if self.cls == similarities.Similarity:
            index.destroy()
    def testPersistency(self):
        """save()/load() round-trip preserves the index contents."""
        if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
            return
        fname = get_tmpfile('gensim_similarities.tst.pkl')
        index = self.factoryMethod()
        index.save(fname)
        index2 = self.cls.load(fname)
        if self.cls == similarities.Similarity:
            # for Similarity, only do a basic check
            self.assertTrue(len(index.shards) == len(index2.shards))
            index.destroy()
        else:
            if isinstance(index, similarities.SparseMatrixSimilarity):
                # hack SparseMatrixSim indexes so they're easy to compare
                index.index = index.index.todense()
                index2.index = index2.index.todense()
            self.assertTrue(numpy.allclose(index.index, index2.index))
            self.assertEqual(index.num_best, index2.num_best)
    def testPersistencyCompressed(self):
        """Round-trip through a gzip-compressed file (.gz suffix triggers compression)."""
        if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
            return
        fname = get_tmpfile('gensim_similarities.tst.pkl.gz')
        index = self.factoryMethod()
        index.save(fname)
        index2 = self.cls.load(fname)
        if self.cls == similarities.Similarity:
            # for Similarity, only do a basic check
            self.assertTrue(len(index.shards) == len(index2.shards))
            index.destroy()
        else:
            if isinstance(index, similarities.SparseMatrixSimilarity):
                # hack SparseMatrixSim indexes so they're easy to compare
                index.index = index.index.todense()
                index2.index = index2.index.todense()
            self.assertTrue(numpy.allclose(index.index, index2.index))
            self.assertEqual(index.num_best, index2.num_best)
    def testLarge(self):
        """Round-trip with sep_limit=0, forcing every array into a separate file."""
        if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
            return
        fname = get_tmpfile('gensim_similarities.tst.pkl')
        index = self.factoryMethod()
        # store all arrays separately
        index.save(fname, sep_limit=0)
        index2 = self.cls.load(fname)
        if self.cls == similarities.Similarity:
            # for Similarity, only do a basic check
            self.assertTrue(len(index.shards) == len(index2.shards))
            index.destroy()
        else:
            if isinstance(index, similarities.SparseMatrixSimilarity):
                # hack SparseMatrixSim indexes so they're easy to compare
                index.index = index.index.todense()
                index2.index = index2.index.todense()
            self.assertTrue(numpy.allclose(index.index, index2.index))
            self.assertEqual(index.num_best, index2.num_best)
    def testLargeCompressed(self):
        """Separately-stored arrays combined with gzip compression."""
        if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
            return
        fname = get_tmpfile('gensim_similarities.tst.pkl.gz')
        index = self.factoryMethod()
        # store all arrays separately
        index.save(fname, sep_limit=0)
        index2 = self.cls.load(fname, mmap=None)
        if self.cls == similarities.Similarity:
            # for Similarity, only do a basic check
            self.assertTrue(len(index.shards) == len(index2.shards))
            index.destroy()
        else:
            if isinstance(index, similarities.SparseMatrixSimilarity):
                # hack SparseMatrixSim indexes so they're easy to compare
                index.index = index.index.todense()
                index2.index = index2.index.todense()
            self.assertTrue(numpy.allclose(index.index, index2.index))
            self.assertEqual(index.num_best, index2.num_best)
    def testMmap(self):
        """Loading separately-stored arrays via mmap must reproduce the index."""
        if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
            return
        fname = get_tmpfile('gensim_similarities.tst.pkl')
        index = self.factoryMethod()
        # store all arrays separately
        index.save(fname, sep_limit=0)
        # same thing, but use mmap to load arrays
        index2 = self.cls.load(fname, mmap='r')
        if self.cls == similarities.Similarity:
            # for Similarity, only do a basic check
            self.assertTrue(len(index.shards) == len(index2.shards))
            index.destroy()
        else:
            if isinstance(index, similarities.SparseMatrixSimilarity):
                # hack SparseMatrixSim indexes so they're easy to compare
                index.index = index.index.todense()
                index2.index = index2.index.todense()
            self.assertTrue(numpy.allclose(index.index, index2.index))
            self.assertEqual(index.num_best, index2.num_best)
    def testMmapCompressed(self):
        """mmap cannot be combined with compressed storage: loading must raise IOError."""
        if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
            return
        fname = get_tmpfile('gensim_similarities.tst.pkl.gz')
        index = self.factoryMethod()
        # store all arrays separately
        index.save(fname, sep_limit=0)
        # same thing, but use mmap to load arrays
        self.assertRaises(IOError, self.cls.load, fname, mmap='r')
class TestMatrixSimilarity(unittest.TestCase, _TestSimilarityABC):
    # Runs the shared _TestSimilarityABC suite against MatrixSimilarity.
    def setUp(self):
        self.cls = similarities.MatrixSimilarity
class TestWmdSimilarity(unittest.TestCase, _TestSimilarityABC):
    """Word Mover's Distance similarity tests; each test no-ops without pyemd."""
    def setUp(self):
        self.cls = similarities.WmdSimilarity
        self.w2v_model = Word2Vec(texts, min_count=1)
    def factoryMethod(self):
        # Override factoryMethod.
        return self.cls(texts, self.w2v_model)
    def testFull(self, num_best=None):
        # Override testFull.
        if not PYEMD_EXT:
            return
        index = self.cls(texts, self.w2v_model)
        index.num_best = num_best
        query = texts[0]
        sims = index[query]
        if num_best is not None:
            # Sparse array.
            for i, sim in sims:
                # Note that similarities are bigger than zero, as they are the 1/ 1 + distances.
                self.assertTrue(numpy.alltrue(sim > 0.0))
        else:
            self.assertTrue(sims[0] == 1.0) # Similarity of a document with itself is 0.0.
            self.assertTrue(numpy.alltrue(sims[1:] > 0.0))
            self.assertTrue(numpy.alltrue(sims[1:] < 1.0))
    def testNonIncreasing(self):
        ''' Check that similarities are non-increasing when `num_best` is not
        `None`.'''
        # NOTE: this could be implemented for other similarities as well (i.e.
        # in _TestSimilarityABC).
        if not PYEMD_EXT:
            return
        index = self.cls(texts, self.w2v_model, num_best=3)
        query = texts[0]
        sims = index[query]
        sims2 = numpy.asarray(sims)[:, 1] # Just the similarities themselves.
        # The difference of adjacent elements should be negative.
        cond = sum(numpy.diff(sims2) < 0) == len(sims2) - 1
        self.assertTrue(cond)
    def testChunking(self):
        # Override testChunking.
        if not PYEMD_EXT:
            return
        index = self.cls(texts, self.w2v_model)
        query = texts[:3]
        sims = index[query]
        for i in range(3):
            self.assertTrue(numpy.alltrue(sims[i, i] == 1.0)) # Similarity of a document with itself is 0.0.
        # test the same thing but with num_best
        index.num_best = 3
        sims = index[query]
        for sims_temp in sims:
            for i, sim in sims_temp:
                self.assertTrue(numpy.alltrue(sim > 0.0))
                self.assertTrue(numpy.alltrue(sim <= 1.0))
    def testIter(self):
        # Override testIter. Every pairwise WMD similarity must lie in [0, 1].
        if not PYEMD_EXT:
            return
        index = self.cls(texts, self.w2v_model)
        for sims in index:
            self.assertTrue(numpy.alltrue(sims >= 0.0))
            self.assertTrue(numpy.alltrue(sims <= 1.0))
class TestSoftCosineSimilarity(unittest.TestCase, _TestSimilarityABC):
    """Soft cosine similarity tests using an identity term-similarity matrix
    with a single off-diagonal entry ("user" ~ "human", similarity 0.5).

    Fixes vs. original: setUp now consistently uses ``self.dictionary``
    (it previously built ``self.dictionary`` but then read the module-level
    ``dictionary``), and the deprecated ``assertAlmostEquals`` alias is
    replaced with ``assertAlmostEqual`` (the alias was removed in
    Python 3.12). Behavior is unchanged: both dictionaries are built from
    the same ``texts``.
    """
    def setUp(self):
        self.cls = similarities.SoftCosineSimilarity
        self.dictionary = Dictionary(texts)
        self.corpus = [self.dictionary.doc2bow(document) for document in texts]
        # Identity matrix plus one symmetric off-diagonal similarity.
        similarity_matrix = scipy.sparse.identity(12, format="lil")
        similarity_matrix[self.dictionary.token2id["user"], self.dictionary.token2id["human"]] = 0.5
        similarity_matrix[self.dictionary.token2id["human"], self.dictionary.token2id["user"]] = 0.5
        self.similarity_matrix = similarity_matrix.tocsc()
    def factoryMethod(self):
        # Override factoryMethod.
        return self.cls(self.corpus, self.similarity_matrix)
    def testFull(self, num_best=None):
        # Override testFull: soft cosine similarities must lie in [0, 1].
        index = self.cls(self.corpus, self.similarity_matrix, num_best=num_best)
        query = self.dictionary.doc2bow(texts[0])
        sims = index[query]
        if num_best is not None:
            # Sparse array.
            for i, sim in sims:
                self.assertTrue(numpy.alltrue(sim <= 1.0))
                self.assertTrue(numpy.alltrue(sim >= 0.0))
        else:
            self.assertTrue(sims[0] == 1.0) # Similarity of a document with itself is 1.0.
            self.assertTrue(numpy.alltrue(sims[1:] >= 0.0))
            self.assertTrue(numpy.alltrue(sims[1:] < 1.0))
            expected = 2.1889350195476758
            self.assertAlmostEqual(expected, numpy.sum(sims))
    def testNonIncreasing(self):
        """ Check that similarities are non-increasing when `num_best` is not `None`."""
        # NOTE: this could be implemented for other similarities as well (i.e. in _TestSimilarityABC).
        index = self.cls(self.corpus, self.similarity_matrix, num_best=5)
        query = self.dictionary.doc2bow(texts[0])
        sims = index[query]
        sims2 = numpy.asarray(sims)[:, 1] # Just the similarities themselves.
        # The difference of adjacent elements should be negative.
        cond = sum(numpy.diff(sims2) < 0) == len(sims2) - 1
        self.assertTrue(cond)
    def testChunking(self):
        # Override testChunking.
        index = self.cls(self.corpus, self.similarity_matrix)
        query = [self.dictionary.doc2bow(document) for document in texts[:3]]
        sims = index[query]
        for i in range(3):
            self.assertTrue(numpy.alltrue(sims[i, i] == 1.0)) # Similarity of a document with itself is 1.0.
        # test the same thing but with num_best
        index.num_best = 5
        sims = index[query]
        for i, chunk in enumerate(sims):
            # The best match of document i must be document i itself, sim ~ 1.0.
            expected = i
            self.assertAlmostEqual(expected, chunk[0][0], places=2)
            expected = 1.0
            self.assertAlmostEqual(expected, chunk[0][1], places=2)
    def testIter(self):
        # Override testIter.
        index = self.cls(self.corpus, self.similarity_matrix)
        for sims in index:
            self.assertTrue(numpy.alltrue(sims >= 0.0))
            self.assertTrue(numpy.alltrue(sims <= 1.0))
class TestSparseMatrixSimilarity(unittest.TestCase, _TestSimilarityABC):
    # Runs the shared suite against SparseMatrixSimilarity, plus sparsity checks.
    def setUp(self):
        self.cls = similarities.SparseMatrixSimilarity
    def testMaintainSparsity(self):
        """Sparsity is correctly maintained when maintain_sparsity=True"""
        num_features = len(dictionary)
        index = self.cls(corpus, num_features=num_features)
        dense_sims = index[corpus]
        index = self.cls(corpus, num_features=num_features, maintain_sparsity=True)
        sparse_sims = index[corpus]
        # Dense and sparse results must agree element-wise.
        self.assertFalse(scipy.sparse.issparse(dense_sims))
        self.assertTrue(scipy.sparse.issparse(sparse_sims))
        numpy.testing.assert_array_equal(dense_sims, sparse_sims.todense())
    def testMaintainSparsityWithNumBest(self):
        """Tests that sparsity is correctly maintained when maintain_sparsity=True and num_best is not None"""
        num_features = len(dictionary)
        index = self.cls(corpus, num_features=num_features, maintain_sparsity=False, num_best=3)
        dense_topn_sims = index[corpus]
        index = self.cls(corpus, num_features=num_features, maintain_sparsity=True, num_best=3)
        scipy_topn_sims = index[corpus]
        self.assertFalse(scipy.sparse.issparse(dense_topn_sims))
        self.assertTrue(scipy.sparse.issparse(scipy_topn_sims))
        self.assertEqual(dense_topn_sims, [matutils.scipy2sparse(v) for v in scipy_topn_sims])
class TestSimilarity(unittest.TestCase, _TestSimilarityABC):
    """Tests for the sharded, disk-backed Similarity index."""
    def setUp(self):
        self.cls = similarities.Similarity
    def factoryMethod(self):
        # Override factoryMethod.
        return self.cls(None, corpus, num_features=len(dictionary), shardsize=5)
    def testSharding(self):
        """testFull must hold across shard sizes smaller/larger than the corpus."""
        for num_best in [None, 0, 1, 9, 1000]:
            for shardsize in [1, 2, 9, 1000]:
                self.testFull(num_best=num_best, shardsize=shardsize)
    def testReopen(self):
        """test re-opening partially full shards"""
        index = similarities.Similarity(None, corpus[:5], num_features=len(dictionary), shardsize=9)
        _ = index[corpus[0]] # noqa:F841 forces shard close
        index.add_documents(corpus[5:])
        query = corpus[0]
        sims = index[query]
        expected = [(0, 0.99999994), (2, 0.28867513), (3, 0.23570226), (1, 0.23570226)]
        expected = matutils.sparse2full(expected, len(index))
        self.assertTrue(numpy.allclose(expected, sims))
        index.destroy()
    def testMmapCompressed(self):
        pass
        # turns out this test doesn't exercise this because there are no arrays
        # to be mmaped!
    def testChunksize(self):
        """Iteration results must not depend on the chunksize setting."""
        index = self.cls(None, corpus, num_features=len(dictionary), shardsize=5)
        expected = [sim for sim in index]
        index.chunksize = len(index) - 1
        sims = [sim for sim in index]
        self.assertTrue(numpy.allclose(expected, sims))
        index.destroy()
class TestWord2VecAnnoyIndexer(unittest.TestCase):
    """Annoy approximate-nearest-neighbor indexing over word vectors.

    Skipped entirely when the optional `annoy` package is unavailable.
    """
    def setUp(self):
        try:
            import annoy # noqa:F401
        except ImportError:
            raise unittest.SkipTest("Annoy library is not available")
        from gensim.similarities.index import AnnoyIndexer
        self.indexer = AnnoyIndexer
    def testWord2Vec(self):
        # End-to-end check on a Word2Vec model: self-similarity, exact-vs-approx
        # neighbor agreement, and save/load round-trip.
        model = word2vec.Word2Vec(texts, min_count=1)
        model.init_sims()
        index = self.indexer(model, 10)
        self.assertVectorIsSimilarToItself(model.wv, index)
        self.assertApproxNeighborsMatchExact(model, model.wv, index)
        self.assertIndexSaved(index)
        self.assertLoadedIndexEqual(index, model)
    def testFastText(self):
        # Same end-to-end check on a FastText model trained from the Lee corpus.
        class LeeReader(object):
            def __init__(self, fn):
                self.fn = fn
            def __iter__(self):
                with smart_open(self.fn, 'r', encoding="latin_1") as infile:
                    for line in infile:
                        yield line.lower().strip().split()
        model = FastText(LeeReader(datapath('lee.cor')))
        model.init_sims()
        index = self.indexer(model, 10)
        self.assertVectorIsSimilarToItself(model.wv, index)
        self.assertApproxNeighborsMatchExact(model, model.wv, index)
        self.assertIndexSaved(index)
        self.assertLoadedIndexEqual(index, model)
    def testAnnoyIndexingOfKeyedVectors(self):
        # AnnoyIndexer must also accept bare KeyedVectors (no training model).
        from gensim.similarities.index import AnnoyIndexer
        keyVectors_file = datapath('lee_fasttext.vec')
        model = KeyedVectors.load_word2vec_format(keyVectors_file)
        index = AnnoyIndexer(model, 10)
        self.assertEqual(index.num_trees, 10)
        self.assertVectorIsSimilarToItself(model, index)
        self.assertApproxNeighborsMatchExact(model, model, index)
    def testLoadMissingRaisesError(self):
        from gensim.similarities.index import AnnoyIndexer
        test_index = AnnoyIndexer()
        self.assertRaises(IOError, test_index.load, fname='test-index')
    def assertVectorIsSimilarToItself(self, wv, index):
        # Helper: the nearest neighbor of a word's own vector is that word.
        vector = wv.syn0norm[0]
        label = wv.index2word[0]
        approx_neighbors = index.most_similar(vector, 1)
        word, similarity = approx_neighbors[0]
        self.assertEqual(word, label)
        self.assertAlmostEqual(similarity, 1.0, places=2)
    def assertApproxNeighborsMatchExact(self, model, wv, index):
        # Helper: Annoy's top-5 neighbors must match exact brute-force top-5.
        vector = wv.syn0norm[0]
        approx_neighbors = model.most_similar([vector], topn=5, indexer=index)
        exact_neighbors = model.most_similar(positive=[vector], topn=5)
        approx_words = [neighbor[0] for neighbor in approx_neighbors]
        exact_words = [neighbor[0] for neighbor in exact_neighbors]
        self.assertEqual(approx_words, exact_words)
    def assertIndexSaved(self, index):
        # Helper: saving writes both the pickle and the Annoy '.d' data file.
        fname = get_tmpfile('gensim_similarities.tst.pkl')
        index.save(fname)
        self.assertTrue(os.path.exists(fname))
        self.assertTrue(os.path.exists(fname + '.d'))
    def assertLoadedIndexEqual(self, index, model):
        # Helper: a reloaded index preserves dimensionality, labels and tree count.
        from gensim.similarities.index import AnnoyIndexer
        fname = get_tmpfile('gensim_similarities.tst.pkl')
        index.save(fname)
        index2 = AnnoyIndexer()
        index2.load(fname)
        index2.model = model
        self.assertEqual(index.index.f, index2.index.f)
        self.assertEqual(index.labels, index2.labels)
        self.assertEqual(index.num_trees, index2.num_trees)
class TestDoc2VecAnnoyIndexer(unittest.TestCase):
    """Annoy approximate-nearest-neighbor indexing over Doc2Vec document vectors.

    Skipped entirely when the optional `annoy` package is unavailable.
    """
    def setUp(self):
        try:
            import annoy # noqa:F401
        except ImportError:
            raise unittest.SkipTest("Annoy library is not available")
        from gensim.similarities.index import AnnoyIndexer
        self.model = doc2vec.Doc2Vec(sentences, min_count=1)
        self.model.init_sims()
        self.index = AnnoyIndexer(self.model, 300)
        # Normalized vector of document 0, reused by the tests below.
        self.vector = self.model.docvecs.doctag_syn0norm[0]
    def testDocumentIsSimilarToItself(self):
        approx_neighbors = self.index.most_similar(self.vector, 1)
        doc, similarity = approx_neighbors[0]
        self.assertEqual(doc, 0)
        self.assertAlmostEqual(similarity, 1.0, places=2)
    def testApproxNeighborsMatchExact(self):
        # Annoy's top-5 document neighbors must match exact brute-force top-5.
        approx_neighbors = self.model.docvecs.most_similar([self.vector], topn=5, indexer=self.index)
        exact_neighbors = self.model.docvecs.most_similar(
            positive=[self.vector], topn=5)
        approx_words = [neighbor[0] for neighbor in approx_neighbors]
        exact_words = [neighbor[0] for neighbor in exact_neighbors]
        self.assertEqual(approx_words, exact_words)
    def testSave(self):
        # Saving writes both the pickle and the Annoy '.d' data file.
        fname = get_tmpfile('gensim_similarities.tst.pkl')
        self.index.save(fname)
        self.assertTrue(os.path.exists(fname))
        self.assertTrue(os.path.exists(fname + '.d'))
    def testLoadNotExist(self):
        from gensim.similarities.index import AnnoyIndexer
        self.test_index = AnnoyIndexer()
        self.assertRaises(IOError, self.test_index.load, fname='test-index')
    def testSaveLoad(self):
        # A reloaded index preserves dimensionality, labels and tree count.
        from gensim.similarities.index import AnnoyIndexer
        fname = get_tmpfile('gensim_similarities.tst.pkl')
        self.index.save(fname)
        self.index2 = AnnoyIndexer()
        self.index2.load(fname)
        self.index2.model = self.model
        self.assertEqual(self.index.index.f, self.index2.index.f)
        self.assertEqual(self.index.labels, self.index2.labels)
        self.assertEqual(self.index.num_trees, self.index2.num_trees)
if __name__ == '__main__':
    # Run the suite with verbose logging when executed as a script.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 26,799 | 38.586411 | 119 | py |
poincare_glove | poincare_glove-master/gensim/test/test_corpora_dictionary.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Unit tests for the `corpora.Dictionary` class.
"""
from collections import Mapping
import logging
import unittest
import codecs
import os
import os.path
import scipy
import gensim
from gensim.corpora import Dictionary
from gensim.utils import to_utf8
from gensim.test.utils import get_tmpfile, common_texts
from six import PY3
from six.moves import zip
class TestDictionary(unittest.TestCase):
    def setUp(self):
        # Shared toy corpus (gensim's common_texts) used by the tests below.
        self.texts = common_texts
def testDocFreqOneDoc(self):
texts = [['human', 'interface', 'computer']]
d = Dictionary(texts)
expected = {0: 1, 1: 1, 2: 1}
self.assertEqual(d.dfs, expected)
def testDocFreqAndToken2IdForSeveralDocsWithOneWord(self):
# two docs
texts = [['human'], ['human']]
d = Dictionary(texts)
expected = {0: 2}
self.assertEqual(d.dfs, expected)
# only one token (human) should exist
expected = {'human': 0}
self.assertEqual(d.token2id, expected)
# three docs
texts = [['human'], ['human'], ['human']]
d = Dictionary(texts)
expected = {0: 3}
self.assertEqual(d.dfs, expected)
# only one token (human) should exist
expected = {'human': 0}
self.assertEqual(d.token2id, expected)
# four docs
texts = [['human'], ['human'], ['human'], ['human']]
d = Dictionary(texts)
expected = {0: 4}
self.assertEqual(d.dfs, expected)
# only one token (human) should exist
expected = {'human': 0}
self.assertEqual(d.token2id, expected)
def testDocFreqForOneDocWithSeveralWord(self):
# two words
texts = [['human', 'cat']]
d = Dictionary(texts)
expected = {0: 1, 1: 1}
self.assertEqual(d.dfs, expected)
# three words
texts = [['human', 'cat', 'minors']]
d = Dictionary(texts)
expected = {0: 1, 1: 1, 2: 1}
self.assertEqual(d.dfs, expected)
    def testBuild(self):
        """Building from the toy corpus yields the expected ids, df values and tokens."""
        d = Dictionary(self.texts)
        # Since we don't specify the order in which dictionaries are built,
        # we cannot reliably test for the mapping; only the keys and values.
        expected_keys = list(range(12))
        expected_values = [2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
        self.assertEqual(sorted(d.dfs.keys()), expected_keys)
        self.assertEqual(sorted(d.dfs.values()), expected_values)
        expected_keys = sorted([
            'computer', 'eps', 'graph', 'human', 'interface',
            'minors', 'response', 'survey', 'system', 'time', 'trees', 'user'
        ])
        expected_values = list(range(12))
        self.assertEqual(sorted(d.token2id.keys()), expected_keys)
        self.assertEqual(sorted(d.token2id.values()), expected_values)
def testMerge(self):
d = Dictionary(self.texts)
f = Dictionary(self.texts[:3])
g = Dictionary(self.texts[3:])
f.merge_with(g)
self.assertEqual(sorted(d.token2id.keys()), sorted(f.token2id.keys()))
def testFilter(self):
d = Dictionary(self.texts)
d.filter_extremes(no_below=2, no_above=1.0, keep_n=4)
expected = {0: 3, 1: 3, 2: 3, 3: 3}
self.assertEqual(d.dfs, expected)
def testFilterKeepTokens_keepTokens(self):
# provide keep_tokens argument, keep the tokens given
d = Dictionary(self.texts)
d.filter_extremes(no_below=3, no_above=1.0, keep_tokens=['human', 'survey'])
expected = {'graph', 'trees', 'human', 'system', 'user', 'survey'}
self.assertEqual(set(d.token2id.keys()), expected)
def testFilterKeepTokens_unchangedFunctionality(self):
# do not provide keep_tokens argument, filter_extremes functionality is unchanged
d = Dictionary(self.texts)
d.filter_extremes(no_below=3, no_above=1.0)
expected = {'graph', 'trees', 'system', 'user'}
self.assertEqual(set(d.token2id.keys()), expected)
def testFilterKeepTokens_unseenToken(self):
# do provide keep_tokens argument with unseen tokens, filter_extremes functionality is unchanged
d = Dictionary(self.texts)
d.filter_extremes(no_below=3, no_above=1.0, keep_tokens=['unknown_token'])
expected = {'graph', 'trees', 'system', 'user'}
self.assertEqual(set(d.token2id.keys()), expected)
def testFilterMostFrequent(self):
d = Dictionary(self.texts)
d.filter_n_most_frequent(4)
expected = {0: 2, 1: 2, 2: 2, 3: 2, 4: 2, 5: 2, 6: 2, 7: 2}
self.assertEqual(d.dfs, expected)
def testFilterTokens(self):
self.maxDiff = 10000
d = Dictionary(self.texts)
removed_word = d[0]
d.filter_tokens([0])
expected = {
'computer': 0, 'eps': 8, 'graph': 10, 'human': 1,
'interface': 2, 'minors': 11, 'response': 3, 'survey': 4,
'system': 5, 'time': 6, 'trees': 9, 'user': 7
}
del expected[removed_word]
self.assertEqual(sorted(d.token2id.keys()), sorted(expected.keys()))
expected[removed_word] = len(expected)
d.add_documents([[removed_word]])
self.assertEqual(sorted(d.token2id.keys()), sorted(expected.keys()))
def test_doc2bow(self):
d = Dictionary([["žluťoučký"], ["žluťoučký"]])
# pass a utf8 string
self.assertEqual(d.doc2bow(["žluťoučký"]), [(0, 1)])
# doc2bow must raise a TypeError if passed a string instead of array of strings by accident
self.assertRaises(TypeError, d.doc2bow, "žluťoučký")
# unicode must be converted to utf8
self.assertEqual(d.doc2bow([u'\u017elu\u0165ou\u010dk\xfd']), [(0, 1)])
def test_saveAsText(self):
"""`Dictionary` can be saved as textfile. """
tmpf = get_tmpfile('save_dict_test.txt')
small_text = [
["prvé", "slovo"],
["slovo", "druhé"],
["druhé", "slovo"]
]
d = Dictionary(small_text)
d.save_as_text(tmpf)
with codecs.open(tmpf, 'r', encoding='utf-8') as file:
serialized_lines = file.readlines()
self.assertEqual(serialized_lines[0], u"3\n")
self.assertEqual(len(serialized_lines), 4)
# We do not know, which word will have which index
self.assertEqual(serialized_lines[1][1:], u"\tdruhé\t2\n")
self.assertEqual(serialized_lines[2][1:], u"\tprvé\t1\n")
self.assertEqual(serialized_lines[3][1:], u"\tslovo\t3\n")
d.save_as_text(tmpf, sort_by_word=False)
with codecs.open(tmpf, 'r', encoding='utf-8') as file:
serialized_lines = file.readlines()
self.assertEqual(serialized_lines[0], u"3\n")
self.assertEqual(len(serialized_lines), 4)
self.assertEqual(serialized_lines[1][1:], u"\tslovo\t3\n")
self.assertEqual(serialized_lines[2][1:], u"\tdruhé\t2\n")
self.assertEqual(serialized_lines[3][1:], u"\tprvé\t1\n")
def test_loadFromText_legacy(self):
"""
`Dictionary` can be loaded from textfile in legacy format.
Legacy format does not have num_docs on the first line.
"""
tmpf = get_tmpfile('load_dict_test_legacy.txt')
no_num_docs_serialization = to_utf8("1\tprvé\t1\n2\tslovo\t2\n")
with open(tmpf, "wb") as file:
file.write(no_num_docs_serialization)
d = Dictionary.load_from_text(tmpf)
self.assertEqual(d.token2id[u"prvé"], 1)
self.assertEqual(d.token2id[u"slovo"], 2)
self.assertEqual(d.dfs[1], 1)
self.assertEqual(d.dfs[2], 2)
self.assertEqual(d.num_docs, 0)
def test_loadFromText(self):
"""`Dictionary` can be loaded from textfile."""
tmpf = get_tmpfile('load_dict_test.txt')
no_num_docs_serialization = to_utf8("2\n1\tprvé\t1\n2\tslovo\t2\n")
with open(tmpf, "wb") as file:
file.write(no_num_docs_serialization)
d = Dictionary.load_from_text(tmpf)
self.assertEqual(d.token2id[u"prvé"], 1)
self.assertEqual(d.token2id[u"slovo"], 2)
self.assertEqual(d.dfs[1], 1)
self.assertEqual(d.dfs[2], 2)
self.assertEqual(d.num_docs, 2)
def test_saveAsText_and_loadFromText(self):
"""`Dictionary` can be saved as textfile and loaded again from textfile. """
tmpf = get_tmpfile('dict_test.txt')
for sort_by_word in [True, False]:
d = Dictionary(self.texts)
d.save_as_text(tmpf, sort_by_word=sort_by_word)
self.assertTrue(os.path.exists(tmpf))
d_loaded = Dictionary.load_from_text(tmpf)
self.assertNotEqual(d_loaded, None)
self.assertEqual(d_loaded.token2id, d.token2id)
def test_from_corpus(self):
"""build `Dictionary` from an existing corpus"""
documents = [
"Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS user interface management system",
"System and human system engineering testing of EPS",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey"
]
stoplist = set('for a of the and to in'.split())
texts = [
[word for word in document.lower().split() if word not in stoplist]
for document in documents]
# remove words that appear only once
all_tokens = sum(texts, [])
tokens_once = set(word for word in set(all_tokens) if all_tokens.count(word) == 1)
texts = [[word for word in text if word not in tokens_once] for text in texts]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
# Create dictionary from corpus without a token map
dictionary_from_corpus = Dictionary.from_corpus(corpus)
dict_token2id_vals = sorted(dictionary.token2id.values())
dict_from_corpus_vals = sorted(dictionary_from_corpus.token2id.values())
self.assertEqual(dict_token2id_vals, dict_from_corpus_vals)
self.assertEqual(dictionary.dfs, dictionary_from_corpus.dfs)
self.assertEqual(dictionary.num_docs, dictionary_from_corpus.num_docs)
self.assertEqual(dictionary.num_pos, dictionary_from_corpus.num_pos)
self.assertEqual(dictionary.num_nnz, dictionary_from_corpus.num_nnz)
# Create dictionary from corpus with an id=>token map
dictionary_from_corpus_2 = Dictionary.from_corpus(corpus, id2word=dictionary)
self.assertEqual(dictionary.token2id, dictionary_from_corpus_2.token2id)
self.assertEqual(dictionary.dfs, dictionary_from_corpus_2.dfs)
self.assertEqual(dictionary.num_docs, dictionary_from_corpus_2.num_docs)
self.assertEqual(dictionary.num_pos, dictionary_from_corpus_2.num_pos)
self.assertEqual(dictionary.num_nnz, dictionary_from_corpus_2.num_nnz)
# Ensure Sparse2Corpus is compatible with from_corpus
bow = gensim.matutils.Sparse2Corpus(scipy.sparse.rand(10, 100))
dictionary = Dictionary.from_corpus(bow)
self.assertEqual(dictionary.num_docs, 100)
def test_dict_interface(self):
"""Test Python 2 dict-like interface in both Python 2 and 3."""
d = Dictionary(self.texts)
self.assertTrue(isinstance(d, Mapping))
self.assertEqual(list(zip(d.keys(), d.values())), list(d.items()))
# Even in Py3, we want the iter* members.
self.assertEqual(list(d.items()), list(d.iteritems()))
self.assertEqual(list(d.keys()), list(d.iterkeys()))
self.assertEqual(list(d.values()), list(d.itervalues()))
# XXX Do we want list results from the dict members in Py3 too?
if not PY3:
self.assertTrue(isinstance(d.items(), list))
self.assertTrue(isinstance(d.keys(), list))
self.assertTrue(isinstance(d.values(), list))
# endclass TestDictionary
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    logging.basicConfig(level=logging.WARNING)
    unittest.main()
| 12,467 | 38.207547 | 104 | py |
poincare_glove | poincare_glove-master/gensim/test/test_tmdiff.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
import logging
import unittest
import numpy as np
from gensim.models import LdaModel
from gensim.test.utils import common_dictionary, common_corpus
class TestLdaDiff(unittest.TestCase):
    """Tests for ``LdaModel.diff``, which compares topics across two models."""

    def setUp(self):
        self.dictionary = common_dictionary
        self.corpus = common_corpus
        self.num_topics = 5
        # number of annotation terms requested from diff()
        self.n_ann_terms = 10
        self.model = LdaModel(corpus=self.corpus, id2word=self.dictionary, num_topics=self.num_topics, passes=10)

    def testBasic(self):
        # test for matrix case: full num_topics x num_topics difference matrix
        mdiff, annotation = self.model.diff(self.model, n_ann_terms=self.n_ann_terms)

        self.assertEqual(mdiff.shape, (self.num_topics, self.num_topics))
        self.assertEqual(len(annotation), self.num_topics)
        self.assertEqual(len(annotation[0]), self.num_topics)

        # test for diagonal case: only topic-to-same-topic distances
        mdiff, annotation = self.model.diff(self.model, n_ann_terms=self.n_ann_terms, diagonal=True)

        self.assertEqual(mdiff.shape, (self.num_topics,))
        self.assertEqual(len(annotation), self.num_topics)

    def testIdentity(self):
        # diffing a model against itself must yield zero distances and no
        # differing tokens, for every supported distance metric
        for dist_name in ["hellinger", "kullback_leibler", "jaccard"]:
            # test for matrix case
            mdiff, annotation = self.model.diff(self.model, n_ann_terms=self.n_ann_terms, distance=dist_name)

            for row in annotation:
                for (int_tokens, diff_tokens) in row:
                    self.assertEqual(diff_tokens, [])
                    self.assertEqual(len(int_tokens), self.n_ann_terms)

            # diagonal of the matrix must be all zeros
            self.assertTrue(np.allclose(np.diag(mdiff), np.zeros(mdiff.shape[0], dtype=mdiff.dtype)))

            if dist_name == "jaccard":
                # jaccard distance between identical topics is zero everywhere
                self.assertTrue(np.allclose(mdiff, np.zeros(mdiff.shape, dtype=mdiff.dtype)))

            # test for diagonal case
            mdiff, annotation = \
                self.model.diff(self.model, n_ann_terms=self.n_ann_terms, distance=dist_name, diagonal=True)

            for (int_tokens, diff_tokens) in annotation:
                self.assertEqual(diff_tokens, [])
                self.assertEqual(len(int_tokens), self.n_ann_terms)

            self.assertTrue(np.allclose(mdiff, np.zeros(mdiff.shape, dtype=mdiff.dtype)))

            if dist_name == "jaccard":
                self.assertTrue(np.allclose(mdiff, np.zeros(mdiff.shape, dtype=mdiff.dtype)))

    def testInput(self):
        # unknown distance name and non-model argument must both raise
        self.assertRaises(ValueError, self.model.diff, self.model, n_ann_terms=self.n_ann_terms, distance='something')
        self.assertRaises(ValueError, self.model.diff, [], n_ann_terms=self.n_ann_terms, distance='something')
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 2,920 | 39.013699 | 118 | py |
poincare_glove | poincare_glove-master/gensim/test/test_varembed_wrapper.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Anmol Gulati <anmol01gulati@gmail.com>
# Copyright (C) 2017 Radim Rehurek <radimrehurek@seznam.cz>
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for VarEmbed wrapper.
"""
import logging
import sys
import numpy as np
import unittest
from gensim.models.wrappers import varembed
from gensim.test.utils import datapath
try:
import morfessor # noqa: F401
except ImportError:
raise unittest.SkipTest("Test requires Morfessor to be installed, which is not available")
varembed_model_vector_file = datapath('varembed_vectors.pkl')
varembed_model_morfessor_file = datapath('varembed_morfessor.bin')
class TestVarembed(unittest.TestCase):
    """Tests for the VarEmbed wrapper (requires the ``morfessor`` package)."""

    def testLoadVarembedFormat(self):
        """Test storing/loading the entire model."""
        model = varembed.VarEmbed.load_varembed_format(vectors=varembed_model_vector_file)
        self.model_sanity(model)

    def testSimilarity(self):
        """Test n_similarity for vocab words"""
        model = varembed.VarEmbed.load_varembed_format(vectors=varembed_model_vector_file)
        # n_similarity on singleton lists must match pairwise similarity
        self.assertTrue(model.n_similarity(['result'], ['targets']) == model.similarity('result', 'targets'))

    def model_sanity(self, model):
        """Check vocabulary and vector size"""
        # helper (not auto-discovered): shared consistency checks
        self.assertEqual(model.syn0.shape, (model.vocab_size, model.vector_size))
        self.assertTrue(model.syn0.shape[0] == len(model.vocab))

    @unittest.skipIf(sys.version_info < (2, 7), 'Supported only on Python 2.7 and above')
    def testAddMorphemesToEmbeddings(self):
        """Test add morphemes to Embeddings
        Test only in Python 2.7 and above. Add Morphemes is not supported in earlier versions.
        """
        model = varembed.VarEmbed.load_varembed_format(vectors=varembed_model_vector_file)
        model_with_morphemes = varembed.VarEmbed.load_varembed_format(
            vectors=varembed_model_vector_file, morfessor_model=varembed_model_morfessor_file)
        self.model_sanity(model_with_morphemes)
        # Check syn0 is different for both models.
        self.assertFalse(np.allclose(model.syn0, model_with_morphemes.syn0))

    def testLookup(self):
        """Test lookup of vector for a particular word and list"""
        model = varembed.VarEmbed.load_varembed_format(vectors=varembed_model_vector_file)
        # single-word lookup and one-element list lookup must agree
        self.assertTrue(np.allclose(model['language'], model[['language']]))
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 2,623 | 36.485714 | 109 | py |
poincare_glove | poincare_glove-master/gensim/test/test_keyedvectors.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Jayant Jain <jayantjain1992@gmail.com>
# Copyright (C) 2017 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking the poincare module from the models package.
"""
import logging
import unittest
import numpy as np
from gensim.corpora import Dictionary
from gensim.models import KeyedVectors as EuclideanKeyedVectors
from gensim.test.utils import datapath
logger = logging.getLogger(__name__)
class TestEuclideanKeyedVectors(unittest.TestCase):
    """Tests for `KeyedVectors` loaded from a binary word2vec fixture file."""

    def setUp(self):
        # Load a small fixture of pre-trained vectors in float64 precision.
        self.vectors = EuclideanKeyedVectors.load_word2vec_format(
            datapath('euclidean_vectors.bin'), binary=True, datatype=np.float64)

    def similarity_matrix(self):
        """Test similarity_matrix returns expected results.

        NOTE(review): this method lacks the ``test_`` prefix, so unittest
        discovery never runs it; renaming it to ``test_similarity_matrix``
        would enable it. Kept under its original name for compatibility.
        """
        corpus = [["government", "denied", "holiday"], ["holiday", "slowing", "hollingworth"]]
        dictionary = Dictionary(corpus)
        corpus = [dictionary.doc2bow(document) for document in corpus]

        # checking symmetry and the existence of ones on the diagonal
        # (fixed: the original called self.similarity_matrix recursively,
        # which would raise TypeError; the model method is the intended target)
        similarity_matrix = self.vectors.similarity_matrix(dictionary).todense()
        self.assertTrue((similarity_matrix.T == similarity_matrix).all())
        self.assertTrue((np.diag(similarity_matrix) == similarity_matrix).all())

        # checking that thresholding works as expected
        similarity_matrix = self.vectors.similarity_matrix(dictionary, threshold=0.45).todense()
        self.assertEqual(18, np.sum(similarity_matrix == 0))

        # checking that exponent works as expected
        similarity_matrix = self.vectors.similarity_matrix(dictionary, exponent=1.0).todense()
        self.assertAlmostEqual(9.5788956, np.sum(similarity_matrix))

        # checking that nonzero_limit works as expected
        similarity_matrix = self.vectors.similarity_matrix(dictionary, nonzero_limit=4).todense()
        self.assertEqual(4, np.sum(similarity_matrix == 0))
        similarity_matrix = self.vectors.similarity_matrix(dictionary, nonzero_limit=3).todense()
        self.assertEqual(20, np.sum(similarity_matrix == 0))

    def test_most_similar(self):
        """Test most_similar returns expected results."""
        expected = [
            'conflict',
            'administration',
            'terrorism',
            'call',
            'israel'
        ]
        predicted = [result[0] for result in self.vectors.most_similar('war', topn=5)]
        self.assertEqual(expected, predicted)

    def test_most_similar_topn(self):
        """Test most_similar returns correct results when `topn` is specified."""
        self.assertEqual(len(self.vectors.most_similar('war', topn=5)), 5)
        self.assertEqual(len(self.vectors.most_similar('war', topn=10)), 10)

        # topn=None means "return the full ranking over the vocabulary"
        predicted = self.vectors.most_similar('war', topn=None)
        self.assertEqual(len(predicted), len(self.vectors.vocab))

    def test_most_similar_raises_keyerror(self):
        """Test most_similar raises KeyError when input is out of vocab."""
        with self.assertRaises(KeyError):
            self.vectors.most_similar('not_in_vocab')

    def test_most_similar_restrict_vocab(self):
        """Test most_similar returns handles restrict_vocab correctly."""
        expected = set(self.vectors.index2word[:5])
        predicted = set(result[0] for result in self.vectors.most_similar('war', topn=5, restrict_vocab=5))
        self.assertEqual(expected, predicted)

    def test_most_similar_with_vector_input(self):
        """Test most_similar returns expected results with an input vector instead of an input word."""
        expected = [
            'war',
            'conflict',
            'administration',
            'terrorism',
            'call',
        ]
        input_vector = self.vectors['war']
        predicted = [result[0] for result in self.vectors.most_similar([input_vector], topn=5)]
        self.assertEqual(expected, predicted)

    def test_most_similar_to_given(self):
        """Test most_similar_to_given returns correct results."""
        predicted = self.vectors.most_similar_to_given('war', ['terrorism', 'call', 'waging'])
        self.assertEqual(predicted, 'terrorism')

    def test_similar_by_word(self):
        """Test similar_by_word returns expected results."""
        expected = [
            'conflict',
            'administration',
            'terrorism',
            'call',
            'israel'
        ]
        predicted = [result[0] for result in self.vectors.similar_by_word('war', topn=5)]
        self.assertEqual(expected, predicted)

    def test_similar_by_vector(self):
        """Test similar_by_word returns expected results."""
        expected = [
            'war',
            'conflict',
            'administration',
            'terrorism',
            'call',
        ]
        input_vector = self.vectors['war']
        predicted = [result[0] for result in self.vectors.similar_by_vector(input_vector, topn=5)]
        self.assertEqual(expected, predicted)

    def test_distance(self):
        """Test that distance returns expected values."""
        self.assertTrue(np.allclose(self.vectors.distance('war', 'conflict'), 0.06694602))
        self.assertEqual(self.vectors.distance('war', 'war'), 0)

    def test_similarity(self):
        """Test similarity returns expected value for two words, and for identical words."""
        self.assertTrue(np.allclose(self.vectors.similarity('war', 'war'), 1))
        self.assertTrue(np.allclose(self.vectors.similarity('war', 'conflict'), 0.93305397))

    def test_words_closer_than(self):
        """Test words_closer_than returns expected value for distinct and identical nodes."""
        self.assertEqual(self.vectors.words_closer_than('war', 'war'), [])
        expected = set(['conflict', 'administration'])
        self.assertEqual(set(self.vectors.words_closer_than('war', 'terrorism')), expected)

    def test_rank(self):
        """Test rank returns expected value for distinct and identical nodes."""
        self.assertEqual(self.vectors.rank('war', 'war'), 1)
        self.assertEqual(self.vectors.rank('war', 'terrorism'), 3)

    def test_wv_property(self):
        """Test that the deprecated `wv` property returns `self`. To be removed in v4.0.0."""
        self.assertTrue(self.vectors is self.vectors.wv)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 6,544 | 39.90625 | 107 | py |
poincare_glove | poincare_glove-master/gensim/test/test_direct_confirmation.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for direct confirmation measures in the direct_confirmation_measure module.
"""
import logging
import unittest
from collections import namedtuple
from gensim.topic_coherence import direct_confirmation_measure
from gensim.topic_coherence import text_analysis
class TestDirectConfirmationMeasure(unittest.TestCase):
    """Tests for the direct confirmation measures on a hand-computed toy example.

    The expected constants below follow directly from the formulas in
    `direct_confirmation_measure`, evaluated on the fixture built in setUp().
    """

    def setUp(self):
        # Toy fixture: one segmentation pair (word 1, word 2) over 5 documents.
        self.segmentation = [[(1, 2)]]
        self.posting_list = {1: {2, 3, 4}, 2: {3, 5}}
        self.num_docs = 5

        # Minimal stand-in for a gensim Dictionary: just the two mappings.
        id2token = {1: 'test', 2: 'doc'}
        token2id = {token: token_id for token_id, token in id2token.items()}
        dictionary = namedtuple('Dictionary', 'token2id, id2token')(token2id, id2token)

        # Accumulator with a hand-built inverted index and document count.
        self.accumulator = text_analysis.InvertedIndexAccumulator({1, 2}, dictionary)
        self.accumulator._inverted_index = {0: {2, 3, 4}, 1: {3, 5}}
        self.accumulator._num_docs = self.num_docs

    def testLogConditionalProbability(self):
        """Test log_conditional_probability()"""
        # Answer should be ~ ln(1 / 2) = -0.693147181
        expected = -0.693147181

        actual = direct_confirmation_measure.log_conditional_probability(
            self.segmentation, self.accumulator)[0]
        self.assertAlmostEqual(expected, actual)

        # With with_std=True each result is a (mean, std) pair; the toy
        # example has a single segment, so std must be exactly zero.
        mean, std = direct_confirmation_measure.log_conditional_probability(
            self.segmentation, self.accumulator, with_std=True)[0]
        self.assertAlmostEqual(expected, mean)
        self.assertEqual(0.0, std)

    def testLogRatioMeasure(self):
        """Test log_ratio_measure()"""
        # Answer should be ~ ln{(1 / 5) / [(3 / 5) * (2 / 5)]} = -0.182321557
        expected = -0.182321557

        actual = direct_confirmation_measure.log_ratio_measure(
            self.segmentation, self.accumulator)[0]
        self.assertAlmostEqual(expected, actual)

        mean, std = direct_confirmation_measure.log_ratio_measure(
            self.segmentation, self.accumulator, with_std=True)[0]
        self.assertAlmostEqual(expected, mean)
        self.assertEqual(0.0, std)

    def testNormalizedLogRatioMeasure(self):
        """Test normalized_log_ratio_measure()"""
        # Answer should be ~ -0.182321557 / -ln(1 / 5) = -0.113282753
        expected = -0.113282753

        actual = direct_confirmation_measure.log_ratio_measure(
            self.segmentation, self.accumulator, normalize=True)[0]
        self.assertAlmostEqual(expected, actual)

        mean, std = direct_confirmation_measure.log_ratio_measure(
            self.segmentation, self.accumulator, normalize=True, with_std=True)[0]
        self.assertAlmostEqual(expected, mean)
        self.assertEqual(0.0, std)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    logging.root.setLevel(logging.WARNING)
    unittest.main()
| 3,057 | 38.714286 | 91 | py |
poincare_glove | poincare_glove-master/gensim/test/test_lsimodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import numpy as np
import scipy.linalg
from gensim import matutils
from gensim.corpora.mmcorpus import MmCorpus
from gensim.models import lsimodel
from gensim.test import basetmtests
from gensim.test.utils import datapath, get_tmpfile
class TestLsiModel(unittest.TestCase, basetmtests.TestBaseTopicModel):
    """Tests for `LsiModel`: transformation, online updates and persistence."""

    def setUp(self):
        self.corpus = MmCorpus(datapath('testcorpus.mm'))
        self.model = lsimodel.LsiModel(self.corpus, num_topics=2)

    def testTransform(self):
        """Test lsi[vector] transformation."""
        # create the transformation model
        model = self.model

        # make sure the decomposition is enough accurate
        u, s, vt = scipy.linalg.svd(matutils.corpus2dense(self.corpus, self.corpus.num_terms), full_matrices=False)
        self.assertTrue(np.allclose(s[:2], model.projection.s))  # singular values must match

        # transform one document
        doc = list(self.corpus)[0]
        transformed = model[doc]
        vec = matutils.sparse2full(transformed, 2)  # convert to dense vector, for easier equality tests

        expected = np.array([-0.6594664, 0.142115444])  # scaled LSI version
        # expected = np.array([-0.1973928, 0.05591352])  # non-scaled LSI version
        self.assertTrue(np.allclose(abs(vec), abs(expected)))  # transformed entries must be equal up to sign

    def testTransformFloat32(self):
        """Test lsi[vector] transformation."""
        # create the transformation model
        model = lsimodel.LsiModel(self.corpus, num_topics=2, dtype=np.float32)

        # make sure the decomposition is enough accurate
        u, s, vt = scipy.linalg.svd(matutils.corpus2dense(self.corpus, self.corpus.num_terms), full_matrices=False)
        self.assertTrue(np.allclose(s[:2], model.projection.s))  # singular values must match
        self.assertEqual(model.projection.u.dtype, np.float32)
        self.assertEqual(model.projection.s.dtype, np.float32)

        # transform one document
        doc = list(self.corpus)[0]
        transformed = model[doc]
        vec = matutils.sparse2full(transformed, 2)  # convert to dense vector, for easier equality tests
        expected = np.array([-0.6594664, 0.142115444])  # scaled LSI version
        # transformed entries must be equal up to sign
        self.assertTrue(np.allclose(abs(vec), abs(expected), atol=1.e-5))

    def testCorpusTransform(self):
        """Test lsi[corpus] transformation."""
        model = self.model
        # fixed: np.vstack requires a sequence, not a generator (deprecated in
        # older NumPy, a TypeError on modern NumPy) — build a list first
        got = np.vstack([matutils.sparse2full(doc, 2) for doc in model[self.corpus]])
        expected = np.array([
            [0.65946639, 0.14211544],
            [2.02454305, -0.42088759],
            [1.54655361, 0.32358921],
            [1.81114125, 0.5890525],
            [0.9336738, -0.27138939],
            [0.01274618, -0.49016181],
            [0.04888203, -1.11294699],
            [0.08063836, -1.56345594],
            [0.27381003, -1.34694159]
        ])
        self.assertTrue(np.allclose(abs(got), abs(expected)))  # must equal up to sign

    def testOnlineTransform(self):
        corpus = list(self.corpus)
        doc = corpus[0]  # use the corpus' first document for testing

        # create the transformation model
        model2 = lsimodel.LsiModel(corpus=corpus, num_topics=5)  # compute everything at once
        # start with no documents, we will add them later
        model = lsimodel.LsiModel(corpus=None, id2word=model2.id2word, num_topics=5)

        # train model on a single document
        model.add_documents([corpus[0]])

        # transform the testing document with this partial transformation
        transformed = model[doc]
        vec = matutils.sparse2full(transformed, model.num_topics)  # convert to dense vector, for easier equality tests
        expected = np.array([-1.73205078, 0.0, 0.0, 0.0, 0.0])  # scaled LSI version
        self.assertTrue(np.allclose(abs(vec), abs(expected), atol=1e-6))  # transformed entries must be equal up to sign

        # train on another 4 documents
        model.add_documents(corpus[1:5], chunksize=2)  # train on 4 extra docs, in chunks of 2 documents, for the lols

        # transform a document with this partial transformation
        transformed = model[doc]
        vec = matutils.sparse2full(transformed, model.num_topics)  # convert to dense vector, for easier equality tests
        expected = np.array([-0.66493785, -0.28314203, -1.56376302, 0.05488682, 0.17123269])  # scaled LSI version
        self.assertTrue(np.allclose(abs(vec), abs(expected), atol=1e-6))  # transformed entries must be equal up to sign

        # train on the rest of documents
        model.add_documents(corpus[5:])

        # make sure the final transformation is the same as if we had decomposed the whole corpus at once
        vec1 = matutils.sparse2full(model[doc], model.num_topics)
        vec2 = matutils.sparse2full(model2[doc], model2.num_topics)
        # the two LSI representations must equal up to sign
        self.assertTrue(np.allclose(abs(vec1), abs(vec2), atol=1e-5))

    def testPersistence(self):
        fname = get_tmpfile('gensim_models_lsi.tst')
        model = self.model
        model.save(fname)
        model2 = lsimodel.LsiModel.load(fname)
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(np.allclose(model.projection.u, model2.projection.u))
        self.assertTrue(np.allclose(model.projection.s, model2.projection.s))
        tstvec = []
        self.assertTrue(np.allclose(model[tstvec], model2[tstvec]))  # try projecting an empty vector

    def testPersistenceCompressed(self):
        fname = get_tmpfile('gensim_models_lsi.tst.gz')
        model = self.model
        model.save(fname)
        model2 = lsimodel.LsiModel.load(fname, mmap=None)
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(np.allclose(model.projection.u, model2.projection.u))
        self.assertTrue(np.allclose(model.projection.s, model2.projection.s))
        tstvec = []
        self.assertTrue(np.allclose(model[tstvec], model2[tstvec]))  # try projecting an empty vector

    def testLargeMmap(self):
        fname = get_tmpfile('gensim_models_lsi.tst')
        model = self.model

        # test storing the internal arrays into separate files
        model.save(fname, sep_limit=0)

        # now load the external arrays via mmap
        model2 = lsimodel.LsiModel.load(fname, mmap='r')
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(isinstance(model2.projection.u, np.memmap))
        self.assertTrue(isinstance(model2.projection.s, np.memmap))
        self.assertTrue(np.allclose(model.projection.u, model2.projection.u))
        self.assertTrue(np.allclose(model.projection.s, model2.projection.s))
        tstvec = []
        self.assertTrue(np.allclose(model[tstvec], model2[tstvec]))  # try projecting an empty vector

    def testLargeMmapCompressed(self):
        fname = get_tmpfile('gensim_models_lsi.tst.gz')
        model = self.model

        # test storing the internal arrays into separate files
        model.save(fname, sep_limit=0)

        # now load the external arrays via mmap
        return

        # turns out this test doesn't exercise this because there are no arrays
        # to be mmaped!
        self.assertRaises(IOError, lsimodel.LsiModel.load, fname, mmap='r')

    def testDocsProcessed(self):
        self.assertEqual(self.model.docs_processed, 9)
        self.assertEqual(self.model.docs_processed, self.corpus.num_docs)

    def test_get_topics(self):
        topics = self.model.get_topics()
        vocab_size = len(self.model.id2word)
        for topic in topics:
            self.assertTrue(isinstance(topic, np.ndarray))
            self.assertEqual(topic.dtype, np.float64)
            self.assertEqual(vocab_size, topic.shape[0])
            # LSI topics are not probability distributions
            # self.assertAlmostEqual(np.sum(topic), 1.0, 5)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 8,417 | 43.305263 | 120 | py |
poincare_glove | poincare_glove-master/gensim/test/test_phrases.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import six
from gensim.utils import to_unicode
from gensim.models.phrases import SentenceAnalyzer, Phrases, Phraser
from gensim.models.phrases import pseudocorpus, original_scorer
from gensim.test.utils import common_texts, temporary_file, datapath
class TestUtils(unittest.TestCase):
    """Tests for the `pseudocorpus` helper from `gensim.models.phrases`."""

    def test_pseudocorpus_no_common_terms(self):
        # Each joined vocabulary entry is split at every delimiter position.
        vocab = [
            "prime_minister",
            "gold",
            "chief_technical_officer",
            "effective",
        ]
        expected = [
            ["prime", "minister"],
            ["chief", "technical_officer"],
            ["chief_technical", "officer"],
        ]
        self.assertEqual(list(pseudocorpus(vocab, "_")), expected)

    def test_pseudocorpus_with_common_terms(self):
        # Common terms ("stop words" glue) are kept as separate tokens and
        # never used as split points on their own.
        vocab = [
            "hall_of_fame",
            "gold",
            "chief_of_political_bureau",
            "effective",
            "beware_of_the_dog_in_the_yard",
        ]
        glue_words = frozenset(["in", "the", "of"])
        expected = [
            ["hall", "of", "fame"],
            ["chief", "of", "political_bureau"],
            ["chief_of_political", "bureau"],
            ["beware", "of", "the", "dog_in_the_yard"],
            ["beware_of_the_dog", "in", "the", "yard"],
        ]
        self.assertEqual(list(pseudocorpus(vocab, "_", common_terms=glue_words)), expected)
class TestPhraseAnalysis(unittest.TestCase):
    """Tests for `SentenceAnalyzer.analyze_sentence` using a stub scorer.

    Bigram scores are injected through a dict keyed by the joined bytes of
    the candidate components, so the tests control exactly which bigrams
    pass the threshold.
    """

    class AnalysisTester(SentenceAnalyzer):
        # Test double: looks scores up in a fixed dict instead of computing them.

        def __init__(self, scores):
            self.scores = scores

        def score_item(self, worda, wordb, components, scorer):
            """Override for test purpose"""
            if worda is not None and wordb is not None:
                bigram_word = b"_".join(components)
                return self.scores.get(bigram_word, -1)
            else:
                # incomplete pair (e.g. trailing common term): never a bigram
                return -1

    def analyze(self, scores, sentence):
        # Run the analyzer with a fixed threshold and common-term set.
        analyzer = self.AnalysisTester(scores)
        return list(analyzer.analyze_sentence(
            sentence,
            threshold=1,
            common_terms={b"a", b"the", b"with", b"of"},
            scorer=None))

    def analyze_words(self, scores, sentence):
        # Flatten the analyzer output into joined utf-8 token strings.
        result = (
            w if isinstance(w, (tuple, list)) else [w]
            for w, score in self.analyze(scores, sentence))
        return [b"_".join(w).decode("utf-8") for w in result]

    def test_simple_analysis(self):
        # With no scored bigrams the sentence passes through unchanged.
        s = ["simple", "sentence", "should", "pass"]
        result = self.analyze_words({}, s)
        self.assertEqual(result, s)
        s = ["a", "simple", "sentence", "with", "no", "bigram", "but", "common", "terms"]
        result = self.analyze_words({}, s)
        self.assertEqual(result, s)

    def test_analysis_bigrams(self):
        scores = {
            b"simple_sentence": 2, b"sentence_many": 2,
            b"many_possible": 2, b"possible_bigrams": 2}
        s = ["simple", "sentence", "many", "possible", "bigrams"]
        result = self.analyze_words(scores, s)
        # greedy left-to-right pairing: each token joins at most one bigram
        self.assertEqual(result, ["simple_sentence", "many_possible", "bigrams"])

        s = ["some", "simple", "sentence", "many", "bigrams"]
        result = self.analyze_words(scores, s)
        self.assertEqual(result, ["some", "simple_sentence", "many", "bigrams"])

        s = ["some", "unrelated", "simple", "words"]
        result = self.analyze_words(scores, s)
        self.assertEqual(result, s)

    def test_analysis_common_terms(self):
        # common terms at sentence edges stay single tokens
        scores = {
            b"simple_sentence": 2, b"sentence_many": 2,
            b"many_possible": 2, b"possible_bigrams": 2}
        s = ["a", "simple", "sentence", "many", "the", "possible", "bigrams"]
        result = self.analyze_words(scores, s)
        self.assertEqual(result, ["a", "simple_sentence", "many", "the", "possible_bigrams"])

        s = ["simple", "the", "sentence", "and", "many", "possible", "bigrams", "with", "a"]
        result = self.analyze_words(scores, s)
        self.assertEqual(result, [
            "simple", "the", "sentence", "and", "many_possible", "bigrams", "with", "a"])

    def test_analysis_common_terms_in_between(self):
        # common terms between content words are absorbed into the phrase
        scores = {
            b"simple_sentence": 2, b"sentence_with_many": 2,
            b"many_possible": 2, b"many_of_the_possible": 2, b"possible_bigrams": 2}
        s = ["sentence", "with", "many", "possible", "bigrams"]
        result = self.analyze_words(scores, s)
        self.assertEqual(result, ["sentence_with_many", "possible_bigrams"])

        s = ["a", "simple", "sentence", "with", "many", "of", "the", "possible", "bigrams", "with"]
        result = self.analyze_words(scores, s)
        self.assertEqual(
            result, ["a", "simple_sentence", "with", "many_of_the_possible", "bigrams", "with"])
class PhrasesData:
    """Shared fixture data for the Phrases/Phraser test mixins (no common terms)."""

    sentences = common_texts + [
        ['graph', 'minors', 'survey', 'human', 'interface']
    ]
    unicode_sentences = [[to_unicode(token) for token in sentence] for sentence in sentences]
    common_terms = frozenset()

    # Bigrams the tests expect the models to detect in the fixture corpus.
    bigram1 = u'response_time'
    bigram2 = u'graph_minors'
    bigram3 = u'human_interface'

    def gen_sentences(self):
        """Return the fixture sentences as single-pass generators of generators."""
        return (iter(sentence) for sentence in self.sentences)
class PhrasesCommon:
    """Tests that need to be run for both Phrases and Phraser classes."""

    def setUp(self):
        # Low min_count/threshold so bigrams form even on the tiny fixture corpus;
        # subclasses (TestPhraserModel) override this to wrap the models in Phraser.
        self.bigram = Phrases(
            self.sentences, min_count=1, threshold=1, common_terms=self.common_terms)
        self.bigram_default = Phrases(
            self.sentences, common_terms=self.common_terms)
        self.bigram_utf8 = Phrases(
            self.sentences, min_count=1, threshold=1, common_terms=self.common_terms)
        self.bigram_unicode = Phrases(
            self.unicode_sentences, min_count=1, threshold=1, common_terms=self.common_terms)

    def testEmptyPhrasifiedSentencesIterator(self):
        # Applying a phraser to a list input must be re-iterable (not a one-shot iterator).
        bigram_phrases = Phrases(self.sentences)
        bigram_phraser = Phraser(bigram_phrases)
        trigram_phrases = Phrases(bigram_phraser[self.sentences])
        trigram_phraser = Phraser(trigram_phrases)
        trigrams = trigram_phraser[bigram_phraser[self.sentences]]
        fst, snd = list(trigrams), list(trigrams)
        self.assertEqual(fst, snd)
        self.assertNotEqual(snd, [])

    def testEmptyInputsOnBigramConstruction(self):
        """Test that empty inputs don't throw errors and return the expected result."""
        # Empty list -> empty list
        self.assertEqual(list(self.bigram_default[[]]), [])
        # Empty iterator -> empty list
        self.assertEqual(list(self.bigram_default[iter(())]), [])
        # List of empty list -> list of empty list
        self.assertEqual(list(self.bigram_default[[[], []]]), [[], []])
        # Iterator of empty list -> list of empty list
        self.assertEqual(list(self.bigram_default[iter([[], []])]), [[], []])
        # Iterator of empty iterator -> list of empty list
        self.assertEqual(list(self.bigram_default[(iter(()) for i in range(2))]), [[], []])

    def testSentenceGeneration(self):
        """Test basic bigram using a dummy corpus."""
        # test that we generate the same amount of sentences as the input
        self.assertEqual(len(self.sentences), len(list(self.bigram_default[self.sentences])))

    def testSentenceGenerationWithGenerator(self):
        """Test basic bigram production when corpus is a generator."""
        self.assertEqual(len(list(self.gen_sentences())),
                         len(list(self.bigram_default[self.gen_sentences()])))

    def testBigramConstruction(self):
        """Test Phrases bigram construction building."""
        # with this setting we should get response_time and graph_minors
        bigram1_seen = False
        bigram2_seen = False
        for s in self.bigram[self.sentences]:
            if not bigram1_seen and self.bigram1 in s:
                bigram1_seen = True
            if not bigram2_seen and self.bigram2 in s:
                bigram2_seen = True
            if bigram1_seen and bigram2_seen:
                break
        self.assertTrue(bigram1_seen and bigram2_seen)
        # check the same thing, this time using single doc transformation
        # last sentence should contain both graph_minors and human_interface
        self.assertTrue(self.bigram1 in self.bigram[self.sentences[1]])
        self.assertTrue(self.bigram1 in self.bigram[self.sentences[4]])
        self.assertTrue(self.bigram2 in self.bigram[self.sentences[-2]])
        self.assertTrue(self.bigram2 in self.bigram[self.sentences[-1]])
        self.assertTrue(self.bigram3 in self.bigram[self.sentences[-1]])

    def testBigramConstructionFromGenerator(self):
        """Test Phrases bigram construction building when corpus is a generator"""
        bigram1_seen = False
        bigram2_seen = False
        for s in self.bigram[self.gen_sentences()]:
            if not bigram1_seen and self.bigram1 in s:
                bigram1_seen = True
            if not bigram2_seen and self.bigram2 in s:
                bigram2_seen = True
            if bigram1_seen and bigram2_seen:
                break
        self.assertTrue(bigram1_seen and bigram2_seen)

    def testEncoding(self):
        """Test that both utf8 and unicode input work; output must be unicode."""
        expected = [u'survey', u'user', u'computer', u'system', u'response_time']
        self.assertEqual(self.bigram_utf8[self.sentences[1]], expected)
        self.assertEqual(self.bigram_unicode[self.sentences[1]], expected)
        transformed = ' '.join(self.bigram_utf8[self.sentences[1]])
        self.assertTrue(isinstance(transformed, six.text_type))
# Custom scorer used by testCustomScorer.  It lives at module level (rather
# than inside the test) so that Phrases instances holding a reference to it
# remain picklable, which the persistence tests rely on.
def dumb_scorer(worda_count, wordb_count, bigram_count, len_vocab, min_count, corpus_word_count):
    """Trivial scoring function: every candidate bigram gets a score of 1."""
    return 1
class TestPhrasesModel(PhrasesData, PhrasesCommon, unittest.TestCase):
    """Tests specific to the Phrases model, on top of the shared PhrasesCommon suite."""

    def testExportPhrases(self):
        """Test Phrases bigram export_phrases functionality."""
        bigram = Phrases(self.sentences, min_count=1, threshold=1)
        seen_bigrams = set()
        for phrase, score in bigram.export_phrases(self.sentences):
            seen_bigrams.add(phrase)
        # export_phrases yields detected bigrams as space-separated byte strings
        assert seen_bigrams == {
            b'response time',
            b'graph minors',
            b'human interface',
        }

    def testMultipleBigramsSingleEntry(self):
        """ a single entry should produce multiple bigrams. """
        bigram = Phrases(self.sentences, min_count=1, threshold=1)
        seen_bigrams = set()
        test_sentences = [['graph', 'minors', 'survey', 'human', 'interface']]
        for phrase, score in bigram.export_phrases(test_sentences):
            seen_bigrams.add(phrase)
        assert seen_bigrams == {b'graph minors', b'human interface'}

    def testScoringDefault(self):
        """ test the default scoring, from the mikolov word2vec paper """
        bigram = Phrases(self.sentences, min_count=1, threshold=1)
        seen_scores = set()
        test_sentences = [['graph', 'minors', 'survey', 'human', 'interface']]
        for phrase, score in bigram.export_phrases(test_sentences):
            # round to make the floating-point scores comparable against literals
            seen_scores.add(round(score, 3))
        assert seen_scores == {
            5.167,  # score for graph minors
            3.444  # score for human interface
        }

    def test__getitem__(self):
        """ test Phrases[sentences] with a single sentence"""
        bigram = Phrases(self.sentences, min_count=1, threshold=1)
        # pdb.set_trace()
        test_sentences = [['graph', 'minors', 'survey', 'human', 'interface']]
        phrased_sentence = next(bigram[test_sentences].__iter__())
        assert phrased_sentence == ['graph_minors', 'survey', 'human_interface']

    def testScoringNpmi(self):
        """ test normalized pointwise mutual information scoring """
        bigram = Phrases(self.sentences, min_count=1, threshold=.5, scoring='npmi')
        seen_scores = set()
        test_sentences = [['graph', 'minors', 'survey', 'human', 'interface']]
        for phrase, score in bigram.export_phrases(test_sentences):
            seen_scores.add(round(score, 3))
        # npmi scores are normalized to [-1, 1]
        assert seen_scores == {
            .882,  # score for graph minors
            .714  # score for human interface
        }

    def testCustomScorer(self):
        """ test using a custom scoring function """
        bigram = Phrases(self.sentences, min_count=1, threshold=.001, scoring=dumb_scorer)
        seen_scores = []
        test_sentences = [['graph', 'minors', 'survey', 'human', 'interface', 'system']]
        for phrase, score in bigram.export_phrases(test_sentences):
            seen_scores.append(score)
        assert all(seen_scores)  # all scores 1
        assert len(seen_scores) == 3  # 'graph minors' and 'survey human' and 'interface system'

    def testBadParameters(self):
        """Test the phrases module with bad parameters."""
        # should fail with something less or equal than 0
        self.assertRaises(ValueError, Phrases, self.sentences, min_count=0)
        # threshold should be positive
        self.assertRaises(ValueError, Phrases, self.sentences, threshold=-1)

    def testPruning(self):
        """Test that max_vocab_size parameter is respected."""
        bigram = Phrases(self.sentences, max_vocab_size=5)
        self.assertTrue(len(bigram.vocab) <= 5)
# endclass TestPhrasesModel
class TestPhrasesPersistence(PhrasesData, unittest.TestCase):
    """Save/load round-trip tests for Phrases, including backward compatibility."""

    def testSaveLoadCustomScorer(self):
        """ saving and loading a Phrases object with a custom scorer """
        with temporary_file("test.pkl") as fpath:
            bigram = Phrases(self.sentences, min_count=1, threshold=.001, scoring=dumb_scorer)
            bigram.save(fpath)
            bigram_loaded = Phrases.load(fpath)
            seen_scores = []
            test_sentences = [['graph', 'minors', 'survey', 'human', 'interface', 'system']]
            for phrase, score in bigram_loaded.export_phrases(test_sentences):
                seen_scores.append(score)
            assert all(seen_scores)  # all scores 1
            assert len(seen_scores) == 3  # 'graph minors' and 'survey human' and 'interface system'

    def testSaveLoad(self):
        """ Saving and loading a Phrases object."""
        with temporary_file("test.pkl") as fpath:
            bigram = Phrases(self.sentences, min_count=1, threshold=1)
            bigram.save(fpath)
            bigram_loaded = Phrases.load(fpath)
            seen_scores = set()
            test_sentences = [['graph', 'minors', 'survey', 'human', 'interface', 'system']]
            for phrase, score in bigram_loaded.export_phrases(test_sentences):
                seen_scores.add(round(score, 3))
            assert seen_scores == set([
                5.167,  # score for graph minors
                3.444  # score for human interface
            ])

    def testSaveLoadStringScoring(self):
        """ Saving and loading a Phrases object with a string scoring parameter.
        This should ensure backwards compatibility with the previous version of Phrases"""
        # fixture pickled by an older gensim that stored `scoring` as a string
        bigram_loaded = Phrases.load(datapath("phrases-scoring-str.pkl"))
        seen_scores = set()
        test_sentences = [['graph', 'minors', 'survey', 'human', 'interface', 'system']]
        for phrase, score in bigram_loaded.export_phrases(test_sentences):
            seen_scores.add(round(score, 3))
        assert seen_scores == set([
            5.167,  # score for graph minors
            3.444  # score for human interface
        ])

    def testSaveLoadNoScoring(self):
        """ Saving and loading a Phrases object with no scoring parameter.
        This should ensure backwards compatibility with old versions of Phrases"""
        # fixture pickled by an older gensim that had no `scoring` attribute at all
        bigram_loaded = Phrases.load(datapath("phrases-no-scoring.pkl"))
        seen_scores = set()
        test_sentences = [['graph', 'minors', 'survey', 'human', 'interface', 'system']]
        for phrase, score in bigram_loaded.export_phrases(test_sentences):
            seen_scores.add(round(score, 3))
        assert seen_scores == set([
            5.167,  # score for graph minors
            3.444  # score for human interface
        ])

    def testSaveLoadNoCommonTerms(self):
        """ Ensure backwards compatibility with old versions of Phrases, before common_terms"""
        bigram_loaded = Phrases.load(datapath("phrases-no-common-terms.pkl"))
        self.assertEqual(bigram_loaded.common_terms, frozenset())
        # can make a phraser, cf #1751
        phraser = Phraser(bigram_loaded)  # does not raise
        phraser[["human", "interface", "survey"]]  # does not raise
class TestPhraserPersistence(PhrasesData, unittest.TestCase):
    """Save/load round-trip tests for Phraser, including backward compatibility."""

    def testSaveLoadCustomScorer(self):
        """Saving and loading a Phraser object with a custom scorer """
        with temporary_file("test.pkl") as fpath:
            bigram = Phraser(
                Phrases(self.sentences, min_count=1, threshold=.001, scoring=dumb_scorer))
            bigram.save(fpath)
            bigram_loaded = Phraser.load(fpath)
            # we do not much with scoring, just verify its the one expected
            self.assertEqual(bigram_loaded.scoring, dumb_scorer)

    def testSaveLoad(self):
        """ Saving and loading a Phraser object."""
        with temporary_file("test.pkl") as fpath:
            bigram = Phraser(Phrases(self.sentences, min_count=1, threshold=1))
            bigram.save(fpath)
            bigram_loaded = Phraser.load(fpath)
            # the loaded phraser must still detect both fixture bigrams
            self.assertEqual(
                bigram_loaded[['graph', 'minors', 'survey', 'human', 'interface', 'system']],
                ['graph_minors', 'survey', 'human_interface', 'system'])

    def testSaveLoadStringScoring(self):
        """ Saving and loading a Phraser object with a string scoring parameter.
        This should ensure backwards compatibility with the previous version of Phraser"""
        # fixture pickled by an older gensim that stored `scoring` as a string
        bigram_loaded = Phraser.load(datapath("phraser-scoring-str.pkl"))
        # we do not much with scoring, just verify its the one expected
        self.assertEqual(bigram_loaded.scoring, original_scorer)

    def testSaveLoadNoScoring(self):
        """ Saving and loading a Phraser object with no scoring parameter.
        This should ensure backwards compatibility with old versions of Phraser"""
        # fixture pickled by an older gensim that had no `scoring` attribute at all
        bigram_loaded = Phraser.load(datapath("phraser-no-scoring.pkl"))
        # we do not much with scoring, just verify its the one expected
        self.assertEqual(bigram_loaded.scoring, original_scorer)

    def testSaveLoadNoCommonTerms(self):
        """ Ensure backwards compatibility with old versions of Phraser, before common_terms"""
        bigram_loaded = Phraser.load(datapath("phraser-no-common-terms.pkl"))
        self.assertEqual(bigram_loaded.common_terms, frozenset())
class TestPhraserModel(PhrasesData, PhrasesCommon, unittest.TestCase):
    """Run the shared PhrasesCommon suite against Phraser models."""

    def setUp(self):
        """Build the Phraser instances exercised by the shared tests."""
        def make_phraser(corpus, **kwargs):
            # each Phraser is distilled from a freshly trained Phrases model
            return Phraser(Phrases(corpus, common_terms=self.common_terms, **kwargs))

        self.bigram = make_phraser(self.sentences, min_count=1, threshold=1)
        self.bigram_default = make_phraser(self.sentences)
        self.bigram_utf8 = make_phraser(self.sentences, min_count=1, threshold=1)
        self.bigram_unicode = make_phraser(self.unicode_sentences, min_count=1, threshold=1)
class CommonTermsPhrasesData:
    """Fixture mixin that re-runs the phrase tests with the ``common_terms`` option."""

    sentences = [
        ['human', 'interface', 'with', 'computer'],
        ['survey', 'of', 'user', 'computer', 'system', 'lack', 'of', 'interest'],
        ['eps', 'user', 'interface', 'system'],
        ['system', 'and', 'human', 'system', 'eps'],
        ['user', 'lack', 'of', 'interest'],
        ['trees'],
        ['graph', 'of', 'trees'],
        ['data', 'and', 'graph', 'of', 'trees'],
        ['data', 'and', 'graph', 'survey'],
        # last sentence exercises two bigrams within the same sentence
        ['data', 'and', 'graph', 'survey', 'for', 'human', 'interface'],
    ]
    unicode_sentences = [[to_unicode(token) for token in sentence] for sentence in sentences]
    common_terms = ['of', 'and', 'for']

    # underscore-joined phrases the models should produce...
    bigram1 = u'lack_of_interest'
    bigram2 = u'data_and_graph'
    bigram3 = u'human_interface'
    # ...and their space-separated forms as reported by export_phrases
    expression1 = u'lack of interest'
    expression2 = u'data and graph'
    expression3 = u'human interface'

    def gen_sentences(self):
        """Return the fixture sentences as single-pass generators of generators."""
        return (iter(sentence) for sentence in self.sentences)
class TestPhrasesModelCommonTerms(CommonTermsPhrasesData, TestPhrasesModel):
    """Test Phrases models with common terms"""

    def testEncoding(self):
        """Test that both utf8 and unicode input work; output must be unicode."""
        expected = [u'survey', u'of', u'user', u'computer', u'system', u'lack_of_interest']
        self.assertEqual(self.bigram_utf8[self.sentences[1]], expected)
        self.assertEqual(self.bigram_unicode[self.sentences[1]], expected)
        transformed = ' '.join(self.bigram_utf8[self.sentences[1]])
        self.assertTrue(isinstance(transformed, six.text_type))

    def testMultipleBigramsSingleEntry(self):
        """ a single entry should produce multiple bigrams. """
        bigram = Phrases(self.sentences, min_count=1, threshold=1, common_terms=self.common_terms)
        seen_bigrams = set()
        test_sentences = [['data', 'and', 'graph', 'survey', 'for', 'human', 'interface']]
        for phrase, score in bigram.export_phrases(test_sentences):
            seen_bigrams.add(phrase)
        assert seen_bigrams == set([
            b'data and graph',
            b'human interface',
        ])

    def testExportPhrases(self):
        """Test Phrases bigram export_phrases functionality."""
        bigram = Phrases(self.sentences, min_count=1, threshold=1, common_terms=self.common_terms)
        seen_bigrams = set()
        for phrase, score in bigram.export_phrases(self.sentences):
            seen_bigrams.add(phrase)
        # exported phrases may span common terms ('of', 'and') in the middle
        assert seen_bigrams == set([
            b'human interface',
            b'graph of trees',
            b'data and graph',
            b'lack of interest',
        ])

    def testScoringDefault(self):
        """ test the default scoring, from the mikolov word2vec paper """
        bigram = Phrases(self.sentences, min_count=1, threshold=1, common_terms=self.common_terms)
        seen_scores = set()
        test_sentences = [['data', 'and', 'graph', 'survey', 'for', 'human', 'interface']]
        for phrase, score in bigram.export_phrases(test_sentences):
            seen_scores.add(round(score, 3))
        min_count = float(bigram.min_count)
        len_vocab = float(len(bigram.vocab))
        graph = float(bigram.vocab[b"graph"])
        data = float(bigram.vocab[b"data"])
        data_and_graph = float(bigram.vocab[b"data_and_graph"])
        human = float(bigram.vocab[b"human"])
        interface = float(bigram.vocab[b"interface"])
        human_interface = float(bigram.vocab[b"human_interface"])
        # expected values recompute the word2vec-paper formula from the model's
        # own counts: (bigram_count - min_count) / worda_count / wordb_count * vocab_size
        assert seen_scores == set([
            # score for data and graph
            round((data_and_graph - min_count) / data / graph * len_vocab, 3),
            # score for human interface
            round((human_interface - min_count) / human / interface * len_vocab, 3),
        ])

    def testScoringNpmi(self):
        """ test normalized pointwise mutual information scoring """
        bigram = Phrases(self.sentences, min_count=1, threshold=.5,
                         scoring='npmi', common_terms=self.common_terms)
        seen_scores = set()
        test_sentences = [['data', 'and', 'graph', 'survey', 'for', 'human', 'interface']]
        for phrase, score in bigram.export_phrases(test_sentences):
            seen_scores.add(round(score, 3))
        assert seen_scores == set([
            .74,  # score for data and graph
            .894  # score for human interface
        ])

    def testCustomScorer(self):
        """ test using a custom scoring function """
        bigram = Phrases(self.sentences, min_count=1, threshold=.001,
                         scoring=dumb_scorer, common_terms=self.common_terms)
        seen_scores = []
        test_sentences = [['data', 'and', 'graph', 'survey', 'for', 'human', 'interface']]
        for phrase, score in bigram.export_phrases(test_sentences):
            seen_scores.append(score)
        assert all(seen_scores)  # all scores 1
        assert len(seen_scores) == 2  # 'data and graph' 'survey for human'

    def test__getitem__(self):
        """ test Phrases[sentences] with a single sentence"""
        bigram = Phrases(self.sentences, min_count=1, threshold=1, common_terms=self.common_terms)
        # pdb.set_trace()
        test_sentences = [['data', 'and', 'graph', 'survey', 'for', 'human', 'interface']]
        phrased_sentence = next(bigram[test_sentences].__iter__())
        assert phrased_sentence == ['data_and_graph', 'survey', 'for', 'human_interface']
class TestPhraserModelCommonTerms(CommonTermsPhrasesData, TestPhraserModel):
    """Re-run the Phraser test suite with the common_terms fixture data."""

    def testEncoding(self):
        """Test that both utf8 and unicode input work; output must be unicode."""
        expected = [u'survey', u'of', u'user', u'computer', u'system', u'lack_of_interest']
        for model in (self.bigram_utf8, self.bigram_unicode):
            self.assertEqual(model[self.sentences[1]], expected)
        joined = ' '.join(self.bigram_utf8[self.sentences[1]])
        self.assertTrue(isinstance(joined, six.text_type))
if __name__ == '__main__':
    # verbose logging makes failures easier to diagnose when run directly
    log_format = '%(asctime)s : %(levelname)s : %(message)s'
    logging.basicConfig(format=log_format, level=logging.DEBUG)
    unittest.main()
| 26,276 | 40.316038 | 108 | py |
poincare_glove | poincare_glove-master/gensim/test/test_utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking various utils functions.
"""
import logging
import unittest
import numpy as np
from six import iteritems
from gensim import utils
from gensim.test.utils import datapath
class TestIsCorpus(unittest.TestCase):
    """Checks ``utils.is_corpus`` over valid and invalid corpus shapes."""

    def _assert_is_corpus(self, candidate):
        # is_corpus must report True and hand back an equivalent corpus object
        self.assertEqual((True, candidate), utils.is_corpus(candidate))

    def test_None(self):
        # None is not a corpus
        self.assertEqual((False, None), utils.is_corpus(None))

    def test_simple_lists_of_tuples(self):
        # lists of (int, float) 2-tuples are corpora, whatever the
        # document/word counts
        candidates = [
            [[(0, 4.)]],                             # one document, one word
            [[(0, 4.), (1, 2.)]],                    # one document, several words
            [[(0, 4.), (1, 2.), (2, 5.), (3, 8.)]],
            [[(0, 4.)], [(1, 2.)]],                  # several documents, one word each
            [[(0, 4.)], [(1, 2.)], [(2, 5.)], [(3, 8.)]],
        ]
        for candidate in candidates:
            self._assert_is_corpus(candidate)

    def test_int_tuples(self):
        # integer weights are accepted as well as floats
        self._assert_is_corpus([[(0, 4)]])

    def test_invalid_formats(self):
        # these are no corpus, because they do not consist of 2-tuples
        # of the form (int, float)
        non_corpora = [
            ["human"],
            "human",
            ["human", "star"],
            [1, 2, 3, 4, 5, 5],
            [[(0, 'string')]],
        ]
        for bad in non_corpora:
            self.assertEqual((False, bad), utils.is_corpus(bad))
class TestUtils(unittest.TestCase):
    """Tests for assorted helpers in ``gensim.utils``."""

    def test_decode_entities(self):
        """decode_htmlentities must decode numeric entities, including non-BMP ones."""
        # create a string that fails to decode with unichr on narrow python builds;
        # the input must contain the *encoded* entities (&#146; -> \x92,
        # &#128588; -> U+1F64C, &#128175; -> U+1F4AF) for the assertion to hold
        body = u'It&#146;s the Year of the Horse. YES VIN DIESEL &#128588; &#128175;'
        expected = u'It\x92s the Year of the Horse. YES VIN DIESEL \U0001f64c \U0001f4af'
        self.assertEqual(utils.decode_htmlentities(body), expected)

    def test_open_file_existent_file(self):
        """open_file on a path yields a readable handle and closes it on exit."""
        number_of_lines_in_file = 30
        with utils.open_file(datapath('testcorpus.mm')) as infile:
            self.assertEqual(sum(1 for _ in infile), number_of_lines_in_file)

    def test_open_file_non_existent_file(self):
        """open_file on a missing path must raise."""
        with self.assertRaises(Exception):
            with utils.open_file('non_existent_file.txt'):
                pass

    def test_open_file_existent_file_object(self):
        """open_file also accepts an already-open file object."""
        number_of_lines_in_file = 30
        # open via a context manager so the handle is closed even on failure
        # (the original left the handle dangling, triggering ResourceWarning)
        with open(datapath('testcorpus.mm')) as file_obj:
            with utils.open_file(file_obj) as infile:
                self.assertEqual(sum(1 for _ in infile), number_of_lines_in_file)

    def test_open_file_non_existent_file_object(self):
        """open_file on an invalid file object must raise."""
        file_obj = None
        with self.assertRaises(Exception):
            with utils.open_file(file_obj):
                pass
class TestSampleDict(unittest.TestCase):
    """Tests for ``utils.sample_dict``."""

    def test_sample_dict(self):
        """Deterministic sampling returns the first items; random sampling returns valid items."""
        d = {1: 2, 2: 3, 3: 4, 4: 5}

        # with use_random=False the first `n` items (in iteration order) come back
        expected_dict = [(1, 2), (2, 3)]
        sampled_dict = utils.sample_dict(d, 2, False)
        self.assertEqual(sampled_dict, expected_dict)

        # random sampling must return exactly `n` items, each drawn from the dict
        # (the original version only did a vacuous `if ...: assertTrue(True)` check)
        expected_dict_random = [(k, v) for k, v in iteritems(d)]
        sampled_dict_random = utils.sample_dict(d, 2)
        self.assertEqual(len(sampled_dict_random), 2)
        for item in sampled_dict_random:
            self.assertIn(item, expected_dict_random)
class TestWindowing(unittest.TestCase):
    """Tests for ``utils.strided_windows`` / ``utils.iter_windows`` and ``utils.flatten``."""

    # 6 overlapping windows of size 5 over range(10); reused as expected output
    # in the strided_windows tests below.
    arr10_5 = np.array([
        [0, 1, 2, 3, 4],
        [1, 2, 3, 4, 5],
        [2, 3, 4, 5, 6],
        [3, 4, 5, 6, 7],
        [4, 5, 6, 7, 8],
        [5, 6, 7, 8, 9]
    ])

    def _assert_arrays_equal(self, expected, actual):
        # compare shapes first for a clearer failure message, then element-wise
        self.assertEqual(expected.shape, actual.shape)
        self.assertTrue((actual == expected).all())

    def test_strided_windows1(self):
        # window size 2 over a 5-element range yields 4 overlapping pairs
        out = utils.strided_windows(range(5), 2)
        expected = np.array([
            [0, 1],
            [1, 2],
            [2, 3],
            [3, 4]
        ])
        self._assert_arrays_equal(expected, out)

    def test_strided_windows2(self):
        input_arr = np.arange(10)
        out = utils.strided_windows(input_arr, 5)
        expected = self.arr10_5.copy()
        self._assert_arrays_equal(expected, out)
        # writing through the windows must modify the backing array:
        # the result is expected to be a view, not a copy
        out[0, 0] = 10
        self.assertEqual(10, input_arr[0], "should make view rather than copy")

    def test_strided_windows_window_size_exceeds_size(self):
        # a window larger than the input yields an empty (0, 0) result
        input_arr = np.array(['this', 'is', 'test'], dtype='object')
        out = utils.strided_windows(input_arr, 4)
        expected = np.ndarray((0, 0))
        self._assert_arrays_equal(expected, out)

    def test_strided_windows_window_size_equals_size(self):
        # a window exactly the input size yields a single window: the input itself
        input_arr = np.array(['this', 'is', 'test'], dtype='object')
        out = utils.strided_windows(input_arr, 3)
        expected = np.array([input_arr.copy()])
        self._assert_arrays_equal(expected, out)

    def test_iter_windows_include_below_window_size(self):
        # with ignore_below_size=False short documents are yielded whole;
        # by default they are skipped
        texts = [['this', 'is', 'a'], ['test', 'document']]
        out = utils.iter_windows(texts, 3, ignore_below_size=False)
        windows = [list(w) for w in out]
        self.assertEqual(texts, windows)
        out = utils.iter_windows(texts, 3)
        windows = [list(w) for w in out]
        self.assertEqual([texts[0]], windows)

    def test_iter_windows_list_texts(self):
        texts = [['this', 'is', 'a'], ['test', 'document']]
        windows = list(utils.iter_windows(texts, 2))
        list_windows = [list(iterable) for iterable in windows]
        expected = [['this', 'is'], ['is', 'a'], ['test', 'document']]
        self.assertListEqual(list_windows, expected)

    def test_iter_windows_uses_views(self):
        texts = [np.array(['this', 'is', 'a'], dtype='object'), ['test', 'document']]
        windows = list(utils.iter_windows(texts, 2))
        list_windows = [list(iterable) for iterable in windows]
        expected = [['this', 'is'], ['is', 'a'], ['test', 'document']]
        self.assertListEqual(list_windows, expected)
        # without copy=True windows over ndarray inputs are views:
        # mutating a window mutates the original input
        windows[0][0] = 'modified'
        self.assertEqual('modified', texts[0][0])

    def test_iter_windows_with_copy(self):
        texts = [
            np.array(['this', 'is', 'a'], dtype='object'),
            np.array(['test', 'document'], dtype='object')
        ]
        windows = list(utils.iter_windows(texts, 2, copy=True))
        # with copy=True, mutating a window must leave the inputs untouched
        windows[0][0] = 'modified'
        self.assertEqual('this', texts[0][0])
        windows[2][0] = 'modified'
        self.assertEqual('test', texts[1][0])

    def test_flatten_nested(self):
        # flatten recurses into arbitrarily nested lists
        nested_list = [[[1, 2, 3], [4, 5]], 6]
        expected = [1, 2, 3, 4, 5, 6]
        self.assertEqual(utils.flatten(nested_list), expected)

    def test_flatten_not_nested(self):
        # an already-flat list is returned unchanged
        not_nested = [1, 2, 3, 4, 5, 6]
        expected = [1, 2, 3, 4, 5, 6]
        self.assertEqual(utils.flatten(not_nested), expected)
if __name__ == '__main__':
    # keep logging quiet unless something actually goes wrong
    logging.getLogger().setLevel(logging.WARNING)
    unittest.main()
| 7,758 | 34.108597 | 89 | py |
poincare_glove | poincare_glove-master/gensim/test/test_translation_matrix.py | #!/usr/bin/env python
# encoding: utf-8
from collections import namedtuple
import unittest
import math
import numpy as np
from scipy.spatial.distance import cosine
from gensim.models.doc2vec import Doc2Vec
from gensim import utils
from gensim.models import translation_matrix
from gensim.models import KeyedVectors
from gensim.test.utils import datapath, get_tmpfile
class TestTranslationMatrix(unittest.TestCase):
    """Tests for ``translation_matrix.TranslationMatrix`` between EN and IT word vectors."""

    def setUp(self):
        # small pre-trained 300-d word2vec vectors for English and Italian
        self.source_word_vec_file = datapath("EN.1-10.cbow1_wind5_hs0_neg10_size300_smpl1e-05.txt")
        self.target_word_vec_file = datapath("IT.1-10.cbow1_wind5_hs0_neg10_size300_smpl1e-05.txt")

        # EN -> IT seed dictionary used to fit the linear mapping.
        # NOTE(review): ("fish", "cavallo") looks mismatched -- "cavallo" is
        # Italian for "horse", not "fish"; confirm this pair is intentional.
        self.word_pairs = [("one", "uno"), ("two", "due"), ("three", "tre"),
            ("four", "quattro"), ("five", "cinque"), ("seven", "sette"), ("eight", "otto"),
            ("dog", "cane"), ("pig", "maiale"), ("fish", "cavallo"), ("birds", "uccelli"),
            ("apple", "mela"), ("orange", "arancione"), ("grape", "acino"), ("banana", "banana")
        ]

        # held-out pairs used to evaluate translation quality
        self.test_word_pairs = [("ten", "dieci"), ("cat", "gatto")]

        self.source_word_vec = KeyedVectors.load_word2vec_format(self.source_word_vec_file, binary=False)
        self.target_word_vec = KeyedVectors.load_word2vec_format(self.target_word_vec_file, binary=False)

    def test_translation_matrix(self):
        # fitting 300-d vectors must produce a 300x300 mapping matrix
        model = translation_matrix.TranslationMatrix(self.source_word_vec, self.target_word_vec, self.word_pairs)
        model.train(self.word_pairs)
        self.assertEqual(model.translation_matrix.shape, (300, 300))

    def testPersistence(self):
        """Test storing/loading the entire model."""
        tmpf = get_tmpfile('transmat-en-it.pkl')
        model = translation_matrix.TranslationMatrix(self.source_word_vec, self.target_word_vec, self.word_pairs)
        model.train(self.word_pairs)
        model.save(tmpf)
        loaded_model = translation_matrix.TranslationMatrix.load(tmpf)
        # the fitted matrix must survive a save/load round trip unchanged
        self.assertTrue(np.allclose(model.translation_matrix, loaded_model.translation_matrix))

    def test_translate_nn(self):
        # Test the nearest neighbor retrieval method
        model = translation_matrix.TranslationMatrix(self.source_word_vec, self.target_word_vec, self.word_pairs)
        model.train(self.word_pairs)
        test_source_word, test_target_word = zip(*self.test_word_pairs)
        translated_words = model.translate(
            test_source_word, topn=5, source_lang_vec=self.source_word_vec, target_lang_vec=self.target_word_vec
        )
        # the expected translation must appear among the top-5 candidates
        for idx, item in enumerate(self.test_word_pairs):
            self.assertTrue(item[1] in translated_words[item[0]])

    def test_translate_gc(self):
        # Test globally corrected neighbour retrieval method
        model = translation_matrix.TranslationMatrix(self.source_word_vec, self.target_word_vec, self.word_pairs)
        model.train(self.word_pairs)
        test_source_word, test_target_word = zip(*self.test_word_pairs)
        translated_words = model.translate(
            test_source_word, topn=5, gc=1, sample_num=3,
            source_lang_vec=self.source_word_vec, target_lang_vec=self.target_word_vec
        )
        # the expected translation must appear among the top-5 candidates
        for idx, item in enumerate(self.test_word_pairs):
            self.assertTrue(item[1] in translated_words[item[0]])
def read_sentiment_docs(filename):
    """Read `filename` and return one ``SentimentDocument(words, tags)`` per line, in file order."""
    SentimentDocument = namedtuple('SentimentDocument', 'words tags')
    documents = []
    with utils.smart_open(filename, encoding='utf-8') as fin:
        for line_no, line in enumerate(fin):
            # tag each document with its (stringified) line number
            tokens = utils.to_unicode(line).split()
            documents.append(SentimentDocument(tokens, str(line_no)))
    return documents
class TestBackMappingTranslationMatrix(unittest.TestCase):
    """Tests for ``BackMappingTranslationMatrix`` between two Doc2Vec spaces."""

    def setUp(self):
        corpus_path = datapath("alldata-id-10.txt")
        self.train_docs = read_sentiment_docs(corpus_path)
        self.source_doc_vec_file = datapath("small_tag_doc_5_iter50")
        self.target_doc_vec_file = datapath("large_tag_doc_10_iter50")
        self.source_doc_vec = Doc2Vec.load(self.source_doc_vec_file)
        self.target_doc_vec = Doc2Vec.load(self.target_doc_vec_file)

    def test_translation_matrix(self):
        """Training on five documents yields a 100x100 mapping matrix."""
        model = translation_matrix.BackMappingTranslationMatrix(
            self.source_doc_vec, self.target_doc_vec, self.train_docs[:5]
        )
        transmat = model.train(self.train_docs[:5])
        self.assertEqual((100, 100), transmat.shape)

    def test_infer_vector(self):
        """An inferred vector has the expected shape and stays close to the target vector."""
        model = translation_matrix.BackMappingTranslationMatrix(
            self.source_doc_vec, self.target_doc_vec, self.train_docs[:5]
        )
        model.train(self.train_docs[:5])
        target_vector = self.target_doc_vec.docvecs[self.train_docs[5].tags]
        inferred_vector = model.infer_vector(target_vector)
        self.assertEqual((100,), inferred_vector.shape)

        # cosine distance to the target document vector should match the
        # previously observed value up to a small tolerance
        expected = 0.6453547135
        eps = 1e-6
        calculated = cosine(target_vector, inferred_vector)
        self.assertLessEqual(math.fabs(calculated - expected), eps)
| 5,123 | 42.058824 | 113 | py |
poincare_glove | poincare_glove-master/gensim/scripts/segment_wiki.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Jayant Jain <jayant@rare-technologies.com>
# Copyright (C) 2016 RaRe Technologies
"""This script using for extracting plain text out of a raw Wikipedia dump. Input is an xml.bz2 file provided
by MediaWiki that looks like <LANG>wiki-<YYYYMMDD>-pages-articles.xml.bz2 or <LANG>wiki-latest-pages-articles.xml.bz2
(e.g. 14 GB of https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2).
It streams through all the XML articles using multiple cores (#cores - 1, by default),
decompressing on the fly and extracting plain text from the articles and their sections.
For each extracted article, it prints its title, section names and plain text section contents, in json-line format.
How to use
----------
#. Process Wikipedia dump with this script ::
python -m gensim.scripts.segment_wiki -i -f enwiki-latest-pages-articles.xml.bz2 -o enwiki-latest.json.gz
#. Read output in simple way
>>> from smart_open import smart_open
>>> import json
>>>
>>> # iterate over the plain text data we just created
>>> for line in smart_open('enwiki-latest.json.gz'):
>>> # decode each JSON line into a Python dictionary object
>>> article = json.loads(line)
>>>
>>> # each article has a "title", a mapping of interlinks and a list of "section_titles" and "section_texts".
>>> print("Article title: %s" % article['title'])
>>> print("Interlinks: %s" + article['interlinks'])
>>> for section_title, section_text in zip(article['section_titles'], article['section_texts']):
>>> print("Section title: %s" % section_title)
>>> print("Section text: %s" % section_text)
Notes
-----
Processing the entire English Wikipedia dump takes 1.7 hours (about 3 million articles per hour,
or 10 MB of XML per second) on an 8 core Intel i7-7700 @3.60GHz.
Command line arguments
----------------------
.. program-output:: python -m gensim.scripts.segment_wiki --help
:ellipsis: 0, -10
"""
import argparse
import json
import logging
import multiprocessing
import re
import sys
from xml.etree import cElementTree
from functools import partial
from gensim.corpora.wikicorpus import IGNORED_NAMESPACES, WikiCorpus, filter_wiki, find_interlinks, get_namespace, utils
from smart_open import smart_open
logger = logging.getLogger(__name__)
def segment_all_articles(file_path, min_article_character=200, workers=None, include_interlinks=False):
    """Stream article titles and sections out of a MediaWiki bz2 database dump.

    Parameters
    ----------
    file_path : str
        Path to the MediaWiki dump, typically named
        <LANG>wiki-<YYYYMMDD>-pages-articles.xml.bz2 or <LANG>wiki-latest-pages-articles.xml.bz2.
    min_article_character : int, optional
        Articles whose section bodies total fewer characters than this are skipped.
    workers : int or None
        Number of parallel worker processes; defaults to max(1, cpu_count() - 1) when None.
    include_interlinks : bool
        If True, each yielded tuple also carries the article's interlink mapping.

    Yields
    ------
    (str, list of (str, str)) or (str, list of (str, str), dict of (str: str))
        (title, [(section_heading, section_content), ...]) plus, optionally, the interlinks.
    """
    with smart_open(file_path, 'rb') as dump_fileobj:
        corpus = _WikiSectionsCorpus(
            dump_fileobj,
            min_article_character=min_article_character,
            processes=workers,
            include_interlinks=include_interlinks,
        )
        # metadata=True makes the corpus yield (title, sections, ...) tuples instead of bare texts.
        corpus.metadata = True
        for article in corpus.get_texts_with_sections():
            yield article
def segment_and_write_all_articles(file_path, output_file, min_article_character=200, workers=None,
                                   include_interlinks=False):
    """Segment a MediaWiki dump and write one JSON object per article to `output_file`.

    Each output line is a JSON document with the fields 'title', 'section_titles',
    'section_texts' and, when `include_interlinks` is set, 'interlinks'.

    Parameters
    ----------
    file_path : str
        Path to the MediaWiki dump (see :func:`segment_all_articles`).
    output_file : str or None
        Destination path for the json-lines output; None writes to stdout.
    min_article_character : int, optional
        Articles shorter than this (in total section characters) are dropped.
    workers : int or None
        Number of parallel workers; max(1, cpu_count() - 1) when None.
    include_interlinks : bool
        Whether the interlink mapping is added to each output record.
    """
    if output_file is None:
        # We emit bytes, so on py3 we must target sys.stdout.buffer rather than the text wrapper.
        sink = getattr(sys.stdout, 'buffer', sys.stdout)
    else:
        sink = smart_open(output_file, 'wb')
    try:
        articles = segment_all_articles(file_path, min_article_character, workers=workers,
                                        include_interlinks=include_interlinks)
        for idx, article in enumerate(articles):
            title = article[0]
            record = {
                "title": title,
                "section_titles": [],
                "section_texts": [],
            }
            if include_interlinks:
                record["interlinks"] = article[2]
            for heading, body in article[1]:
                record["section_titles"].append(heading)
                record["section_texts"].append(body)
            # Periodic progress report so long runs show signs of life.
            if (idx + 1) % 100000 == 0:
                logger.info("processed #%d articles (at %r now)", idx + 1, title)
            sink.write((json.dumps(record) + "\n").encode('utf-8'))
    finally:
        # Close only files we opened ourselves; stdout must stay open for the caller.
        if output_file is not None:
            sink.close()
def extract_page_xmls(f):
    """Yield the serialized XML of every <page> element in a MediaWiki dump.

    Parameters
    ----------
    f : file
        File object positioned at the start of the MediaWiki XML dump.

    Yields
    ------
    str
        XML string of one <page> tag.
    """
    end_events = cElementTree.iterparse(f, events=("end",))
    nodes = (node for _, node in end_events)
    # The first completed element tells us the XML namespace used throughout the dump.
    first = next(nodes)
    page_tag = "{%s}page" % get_namespace(first.tag)
    for node in nodes:
        if node.tag != page_tag:
            continue
        yield cElementTree.tostring(node)
        # Prune the element tree, as per
        # http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
        # except that we don't need to prune backlinks from the parent
        # because we don't use LXML.
        # We do this only for <page>s, since we need to inspect the
        # ./revision/text element. The pages comprise the bulk of the
        # file, so in practice we prune away enough.
        node.clear()
def segment(page_xml, include_interlinks=False):
    """Parse one <page> XML blob into a title and its top-level sections.

    Parameters
    ----------
    page_xml : str
        Serialized content of a single page tag.
    include_interlinks : bool
        If True, interlinks are extracted and returned as a third element.

    Returns
    -------
    (str, list of (str, str)) or (str, list of (str, str), dict of (str: str))
        (title, [(section_heading, section_content), ...]) plus, optionally, the interlinks.
    """
    elem = cElementTree.fromstring(page_xml)
    filter_namespaces = ('0',)
    ns_mapping = {"ns": get_namespace(elem.tag)}

    title = elem.find("./{%(ns)s}title" % ns_mapping).text
    text = elem.find("./{%(ns)s}revision/{%(ns)s}text" % ns_mapping).text
    # Pages outside the main article namespace ('0') are treated as if they had no text.
    if elem.find("./{%(ns)s}ns" % ns_mapping).text not in filter_namespaces:
        text = None

    interlinks = []
    section_headings = []
    section_contents = []
    if text is not None:
        if include_interlinks:
            interlinks = find_interlinks(text)
        # Top-level wiki headings look like "\n== Heading ==\n" (exactly two '=' on each side).
        section_contents = re.split(r"\n==[^=].*[^=]==\n", text)
        raw_headings = ["Introduction"] + re.findall(r"\n==([^=].*[^=])==\n", text)
        section_headings = [heading.strip() for heading in raw_headings]
        assert len(section_contents) == len(section_headings)

    section_contents = [filter_wiki(body) for body in section_contents]
    sections = list(zip(section_headings, section_contents))
    if include_interlinks:
        return title, sections, interlinks
    return title, sections
class _WikiSectionsCorpus(WikiCorpus):
    """Treat a wikipedia articles dump (<LANG>wiki-<YYYYMMDD>-pages-articles.xml.bz2
    or <LANG>wiki-latest-pages-articles.xml.bz2) as a (read-only) corpus.
    The documents are extracted on-the-fly, so that the whole (massive) dump can stay compressed on disk.
    """
    def __init__(self, fileobj, min_article_character=200, processes=None,
                 lemmatize=utils.has_pattern(), filter_namespaces=('0',), include_interlinks=False):
        """
        Parameters
        ----------
        fileobj : file
            File descriptor of MediaWiki dump.
        min_article_character : int, optional
            Minimal number of character for article (except titles and leading gaps).
        processes : int, optional
            Number of processes, max(1, multiprocessing.cpu_count() - 1) if None.
        lemmatize : bool, optional
            If `pattern` package is installed, use fancier shallow parsing to get token lemmas.
            Otherwise, use simple regexp tokenization.
            NOTE(review): the default is evaluated once at class-definition time, not per call.
        filter_namespaces : tuple of int, optional
            Enumeration of namespaces that will be ignored.
        include_interlinks: bool
            Whether or not interlinks should be included in the output
        """
        # NOTE(review): WikiCorpus.__init__ is deliberately NOT called — this subclass only
        # reuses the parent's interface, and sets the attributes it needs itself.
        self.fileobj = fileobj
        self.filter_namespaces = filter_namespaces
        # metadata=False yields plain texts; callers flip it to True to get (title, sections) tuples.
        self.metadata = False
        if processes is None:
            processes = max(1, multiprocessing.cpu_count() - 1)
        self.processes = processes
        self.lemmatize = lemmatize
        self.min_article_character = min_article_character
        self.include_interlinks = include_interlinks
    def get_texts_with_sections(self):
        """Iterate over the dump, returning titles and text versions of all sections of articles.
        Notes
        -----
        Only articles of sufficient length are returned (short articles & redirects
        etc are ignored).
        Note that this iterates over the **texts**; if you want vectors, just use
        the standard corpus interface instead of this function::
        >>> for vec in wiki_corpus:
        >>>     print(vec)
        Yields
        ------
        (str, list of (str, str), dict of (str: str))
            Structure contains (title, [(section_heading, section_content), ...], (Optionally){interlinks}).
        """
        # Counters for the summary log line emitted at the end.
        skipped_namespace, skipped_length, skipped_redirect = 0, 0, 0
        total_articles, total_sections = 0, 0
        page_xmls = extract_page_xmls(self.fileobj)
        pool = multiprocessing.Pool(self.processes)
        # process the corpus in smaller chunks of docs, because multiprocessing.Pool
        # is dumb and would load the entire input into RAM at once...
        for group in utils.chunkize(page_xmls, chunksize=10 * self.processes, maxsize=1):
            # Each worker runs `segment` on one raw page XML string.
            for article in pool.imap(partial(segment, include_interlinks=self.include_interlinks),
                                     group):
                article_title, sections = article[0], article[1]
                # article redirects are pruned here
                if any(article_title.startswith(ignore + ':') for ignore in IGNORED_NAMESPACES):  # filter non-articles
                    skipped_namespace += 1
                    continue
                if not sections or sections[0][1].lstrip().lower().startswith("#redirect"):  # filter redirect
                    skipped_redirect += 1
                    continue
                if sum(len(body.strip()) for (_, body) in sections) < self.min_article_character:
                    # filter stubs (incomplete, very short articles)
                    skipped_length += 1
                    continue
                total_articles += 1
                total_sections += len(sections)
                if self.include_interlinks:
                    interlinks = article[2]
                    yield (article_title, sections, interlinks)
                else:
                    yield (article_title, sections)
        logger.info(
            "finished processing %i articles with %i sections (skipped %i redirects, %i stubs, %i ignored namespaces)",
            total_articles, total_sections, skipped_redirect, skipped_length, skipped_namespace)
        pool.terminate()
        self.length = total_articles  # cache corpus length
if __name__ == "__main__":
    logging.basicConfig(format='%(asctime)s - %(module)s - %(levelname)s - %(message)s', level=logging.INFO)
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description=__doc__[:-136])
    default_workers = max(1, multiprocessing.cpu_count() - 1)
    parser.add_argument('-f', '--file', help='Path to MediaWiki database dump (read-only).', required=True)
    parser.add_argument(
        '-o', '--output',
        help='Path to output file (stdout if not specified). If ends in .gz or .bz2, '
             'the output file will be automatically compressed (recommended!).')
    parser.add_argument(
        '-w', '--workers',
        help='Number of parallel workers for multi-core systems. Default: %(default)s.',
        type=int,
        default=default_workers
    )
    parser.add_argument(
        '-m', '--min-article-character',
        help="Ignore articles with fewer characters than this (article stubs). Default: %(default)s.",
        # BUG FIX: without type=int a CLI-supplied value stays a str, which later breaks the
        # `sum(...) < self.min_article_character` comparison (TypeError on Python 3).
        type=int,
        default=200
    )
    parser.add_argument(
        '-i', '--include-interlinks',
        help='Include a mapping for interlinks to other articles in the dump. The mappings format is: '
             '"interlinks": {"article_title_1": "interlink_text_1", "article_title_2": "interlink_text_2", ...}',
        action='store_true'
    )
    args = parser.parse_args()
    logger.info("running %s", " ".join(sys.argv))
    segment_and_write_all_articles(
        args.file, args.output,
        min_article_character=args.min_article_character,
        workers=args.workers,
        include_interlinks=args.include_interlinks
    )
    logger.info("finished running %s", sys.argv[0])
| 15,257 | 38.22365 | 120 | py |
poincare_glove | poincare_glove-master/gensim/scripts/package_info.py | """Get basic information about gensim & dependencies (useful for bug-reporting).
Examples
--------
You can use it through python
>>> from gensim.scripts.package_info import package_info
>>>
>>> info = package_info()
or using CLI interface
::
python -m gensim.scripts.package_info --info
.. program-output:: python -m gensim.scripts.package_info --help
:ellipsis: 0, -4
"""
import argparse
import platform
import sys
import os
import numpy
import scipy
import gensim
from gensim.models.word2vec import FAST_VERSION
def package_info():
    """Report the versions of Gensim and its dependencies, plus install location and platform.

    Returns
    -------
    dict of (str, str)
        Versions of Gensim, Python, NumPy, SciPy, the word2vec FAST_VERSION flag,
        the absolute path of this module, and platform information.
    """
    # sys.version contains a newline; flatten it so the value prints on one line.
    python_version = sys.version.replace("\n", ', ')
    info = {}
    info["Platform"] = platform.platform()
    info["Python"] = python_version
    info["NumPy"] = numpy.__version__
    info["SciPy"] = scipy.__version__
    info["Gensim"] = gensim.__version__
    info["Location"] = os.path.abspath(__file__)
    info["FAST_VERSION"] = FAST_VERSION
    return info
if __name__ == "__main__":
    # Strip the CLI usage appendix from the module docstring when building the help text.
    parser = argparse.ArgumentParser(
        description=__doc__[:-65], formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("--info", help="Information about Gensim package", action="store_true")
    args = parser.parse_args()
    if args.info:
        print("Gensim installation information\n")
        for key, value in sorted(package_info().items()):
            print("{}: {}".format(key, value))
| 1,596 | 25.180328 | 117 | py |
poincare_glove | poincare_glove-master/gensim/scripts/make_wiki.py | make_wikicorpus.py | 18 | 18 | 18 | py |
poincare_glove | poincare_glove-master/gensim/scripts/word2vec_main.py | #!/usr/local/bin/python3
import argparse
import gensim
from gensim.models.callbacks import LossLogger, LossSetter, VectorNormLogger, WordEmbCheckpointSaver
from gensim.models.word2vec import InitializationConfig
from util_scripts.get_model_eval_and_stats import *
import logging
from nltk.corpus import brown
import numpy as np
from numpy.linalg import norm
import os
import time
WIKI_PATH = '/media/hofmann-scratch/other-data/Wikipedia/WikipediaPlainText/textFromAllWikipedia2014Feb.txt_one_doc_per_line'
LEVY_PATH = 'data/levy_dataset/levy_wikipedia_dataset'
TEXT8_FILE = "data/text8/text8"
MODEL_FILENAME_PATTERN = "models/{}/w2v_{}_{}_{}_{}_A{}_a{}_n{}_w{}_c{}_{}"
INITIALIZATION_MODEL_FILENAME_PATTERN = "data/pretrained_models/word2vec_pretrained_{}_{}"
# IMPORTANT!!!!!!!!!!! First one for each embedding type should be the default.
SUPPORTED_OPTIMIZERS = {
"cosine": ["sgd", "wsgd", "rmsprop"],
"euclid": ["sgd"],
"poincare": ["rsgd", "wrsgd", "fullrsgd", "wfullrsgd", "rmsprop"],
}
# IMPORTANT!! First one is the default.
# This refers to the similarity function used in the NLL loss during training.
SUPPORTED_SIM_FUNCTIONS = [
"dist-sq", "cosh-dist-sq", "cosh-dist-pow-*", "cosh-dist", "log-dist", "log-dist-sq", "exp-dist"
]
GOOGLE_SIZE = 19544
MSR_SIZE = 8000
logging.basicConfig(level=logging.INFO)
def precision(eval_result):
    """Fraction of correctly answered items in an analogy evaluation result.

    Parameters
    ----------
    eval_result : dict
        Must contain sized collections under the keys 'correct' and 'incorrect'.

    Returns
    -------
    float
        correct / (correct + incorrect).
    """
    n_correct = len(eval_result['correct'])
    n_incorrect = len(eval_result['incorrect'])
    return n_correct / (n_correct + n_incorrect)
def get_sentences(dataset_name):
    """Return a sentence iterable for the named training corpus.

    NOTE(review): for 'text8' and 'levy' this reads the module-global `args.root`,
    so it only works after the CLI arguments have been parsed — confirm callers.

    Parameters
    ----------
    dataset_name : str
        One of 'brown', 'text8', 'wiki', 'levy'.

    Raises
    ------
    RuntimeError
        If `dataset_name` is not one of the supported corpora.
    """
    if dataset_name == 'brown':
        return brown.sents()
    if dataset_name == 'text8':
        return gensim.models.word2vec.Text8Corpus(os.path.join(args.root, TEXT8_FILE))
    if dataset_name == 'wiki':
        return gensim.models.word2vec.LineSentence(WIKI_PATH)
    if dataset_name == 'levy':
        return gensim.models.word2vec.LineSentence(os.path.join(args.root, LEVY_PATH))
    raise RuntimeError('Invalid dataset name')
def split_filename(basename):
    """Split a model filename into its underscore-separated hyperparameter fields.

    Old-format filenames are missing some fields; defaults are spliced in so the
    positional layout matches the current naming scheme
    (w2v_<ds>_<model>_<epochs>_<size>_A<alpha>_a<min_alpha>_n<neg>_w<win>_c<min_count>_<sim>...).

    Parameters
    ----------
    basename : str
        Basename of a saved model file.

    Returns
    -------
    list of str
        The padded field list.
    """
    fields = basename.split('_')
    # Ugly fix: old names lack alpha/min_alpha/negative/window — insert their defaults
    # and prepend the 'c' tag to the bare min_count field.
    if len(fields) < 9:
        for position, default in ((5, "A025"), (6, "a0001"), (7, "n5"), (8, "w5")):
            fields.insert(position, default)
        fields[9] = "c" + str(fields[9])
    # Names that have the other fields but lack min_alpha get its default spliced in.
    if fields[6][0] != 'a':
        fields.insert(6, "a0001")
    return fields
def compute_poincare_aggregate(model):
    """Replace Poincare word vectors with the mid-point aggregate of target and context vectors.

    The aggregate of target vector w and context vector c is the point halfway along the
    geodesic connecting them (see page 89 in Ungar's book): a gamma-weighted combination
    followed by Moebius scalar multiplication by 1/2. No-op for non-Poincare models or
    when aggregate vectors were already computed.
    """
    if not model.poincare or getattr(model.wv, 'agg_vectors', None) is not None:
        return
    print("precomputing aggregated vectors w+c for Poincare embeddings")
    w = model.wv.vectors
    c = model.trainables.syn1neg
    # Squared Lorentz gamma factors, 1 / (1 - ||x||^2), per row.
    gamma_w_sq = 1 / (1 - np.sum(w * w, axis=1))
    gamma_c_sq = 1 / (1 - np.sum(c * c, axis=1))
    denominator = gamma_w_sq + gamma_c_sq - 1
    weighted = w * (gamma_w_sq / denominator)[:, None] + c * (gamma_c_sq / denominator)[:, None]
    model.wv.vectors = model.wv.moebius_mul_mat(weighted, 0.5)
def parse_model_filename(model_filename):
    """Decode the training hyperparameters that were encoded into a model filename.

    Parameters
    ----------
    model_filename : str
        Path to a saved model whose basename follows MODEL_FILENAME_PATTERN.

    Returns
    -------
    (dict, str)
        Mapping of hyperparameter name -> value, and the filename's basename.
    """
    info_dict = {}
    basename = os.path.basename(model_filename)
    # split_filename pads old-style names so the positional fields below always line up.
    info = split_filename(basename)
    info_dict["dataset"] = info[1]
    info_dict["w2v_model"] = info[2]
    info_dict["epochs"] = int(info[3])
    info_dict["emb_size"] = int(info[4])
    # Learning rates are stored with the leading "0." dropped (e.g. "A025" -> 0.025).
    info_dict["alpha"] = float("0." + info[5][1:])
    info_dict["min_alpha"] = float("0." + info[6][1:])
    # Single-letter tags prefix the numeric fields: n=negative, w=window, c=min_count.
    info_dict["negative"] = int(info[7][1:])
    info_dict["window"] = int(info[8][1:])
    info_dict["min_count"] = int(info[9][1:])
    info_dict["similarity"] = info[10]
    # Optional "l<coef>" field carries the L2 regularization coefficient.
    info_dict["l2reg_coef"] = float(info[11][1:]) if len(info) > 11 and info[11][0] == "l" else 0.0
    info_dict["with_bias"] = True if "_bias" in basename else False
    info_dict["init_near_border"] = True if "_border-init" in basename else False
    info_dict["normalized"] = True if "_norm" in basename else False
    # "burnin<N>" is an optional suffix field; absent means no burn-in epochs.
    burnin_info = list(filter(lambda x: x.startswith("burnin"), info))
    info_dict["burnin_epochs"] = int(burnin_info[0][6:]) if len(burnin_info) == 1 else 0
    # Tagged fields (OPTxxx, INITxxx, SIMxxx) may appear anywhere after the positional ones.
    for s in info:
        if "OPT" in s:
            info_dict["optimizer"] = s[3:]
        elif "INIT" in s:
            info_dict["init_config"] = s[4:]
        elif "SIM" in s:
            info_dict["sim_func"] = s[3:]
    # Fall back to the per-embedding-type defaults when a tag is absent.
    if "optimizer" not in info_dict:
        info_dict["optimizer"] = SUPPORTED_OPTIMIZERS[info_dict["similarity"]][0]
    if "init_config" not in info_dict:
        info_dict["init_config"] = None
    if "sim_func" not in info_dict:
        info_dict["sim_func"] = SUPPORTED_SIM_FUNCTIONS[0]
    return info_dict, basename
class Logger:
    """Mirror evaluation output to the `logging` stream and, optionally, to a file."""

    def __init__(self, fout=None):
        # Open file object that duplicates every logged line; None disables file output.
        self.fout = fout

    def log(self, log_str='', end='\n'):
        """Log `log_str` via logging.info and append it (plus `end`) to the file, if any."""
        logging.info(log_str)
        if self.fout is None:
            return
        suffix = '' if end == '' else end
        self.fout.write(log_str + suffix)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--ds', type=str, default='brown',
help='Dataset on which to train the model.')
parser.add_argument('--root', type=str,
default='~/Documents/Master/Thesis',
help='Path to the root folder that contains msc_tifreaa, data etc.')
parser.add_argument('--train', dest='train', action='store_true',
help='Train a new model.')
parser.add_argument('--eval', dest='train', action='store_false',
help='Eval an existing model.')
parser.add_argument('--restrict_vocab', type=int, default=100000,
help='Size of vocab. Only used for evaluating analogy.')
parser.add_argument('--cosadd', dest='cosadd', action='store_true',
help='Use 3COSADD when evaluating word analogy.')
parser.add_argument('--cosmul', dest='cosmul', action='store_true',
help='Use 3COSMUL when evaluating word analogy.')
parser.add_argument('--sg', type=int, default=1, help='Choose W2V model. 1 = skip-gram; 0 = CBOW')
parser.add_argument('--alpha', type=float, default=0.025, help='Initial learning rate')
parser.add_argument('--min_alpha', type=float, default=0.0001, help='Min learning rate')
parser.add_argument('--l2reg', type=float, default=0.0, help='The coefficient of L2 regularization')
parser.add_argument('--optimizer', type=str, default='', help='What optimizer to use.')
parser.add_argument('--init_config', type=str, default='',
help='The initialization configuration to use. Should be in the format '
'<euclid2hyp_method><scaling_factor> e.g. exp0.1 or id0.01')
parser.add_argument('--sim_func', type=str, default="", help='Similarity function used by Poincare model.')
parser.add_argument('--size', type=int, default=100, help='Embedding size')
parser.add_argument('--negative', type=int, default=5, help='Number of negative samples that are considered')
parser.add_argument('--window', type=int, default=5, help='Sliding window size')
parser.add_argument('--nll', dest='is_nll', action='store_true', help='Use NLL loss instead of NegSampling')
parser.add_argument('--burnin_epochs', type=int, default=0, help='Number of burn-in epochs, before training')
parser.add_argument('--normalized', dest='normalized', action='store_true',
help='Normalize word vectors to unit norm after each update.')
parser.add_argument('--euclid', type=int, default=0, help='Whether it uses Euclidean distance for training or not.')
parser.add_argument('--poincare', type=int, default=0, help='Whether it uses Poincare embeddings or not.')
parser.add_argument('--epochs', type=int, default=5, help='Number of epochs')
parser.add_argument('--min_count', type=int, default=5,
help='Ignores all words with total frequency lower than this.')
parser.add_argument('--model_filename', type=str, default='', help='Path to saved model.')
parser.add_argument('--train_log_filename', type=str, default='', help='Path to the training log.')
parser.add_argument('--workers', type=int, default=3, help='Number of concurrent workers.')
parser.add_argument('--bias', dest='with_bias', action='store_true', help='Use a model with biases.')
parser.add_argument('--init_near_border', dest='init_near_border', action='store_true',
help='If set, initialize embeddings near the Poincare ball border, instead of near the origin.')
parser.add_argument('--agg_eval', dest='agg_eval', action='store_true',
help='Use w+c during evaluation, instead of just w. Only works for Poincare embeddings.')
parser.add_argument('--ctx_eval', dest='ctx_eval', action='store_true',
help='Use c during evaluation, instead of w.')
parser.add_argument('--shift_origin', dest='shift_origin', action='store_true',
help='Shift the origin of the points before evaluation.')
parser.add_argument('--cosine_eval', dest='cosine_eval', action='store_true',
help='Use cosine distance during evaluation, instead of the Poincare distance.')
parser.add_argument('--ckpt_emb', dest='ckpt_emb', action='store_true',
help='Store checkpoints during training with the value of the embedding for certain words')
parser.add_argument('--debug', dest='is_debug', action='store_true',
help='Run model in debug mode')
parser.set_defaults(train=False, cosadd=False, cosmul=False, is_nll=False, normalized=False,
with_bias=False, init_near_border=False, agg_eval=False, ctx_eval=False, shift_origin=False,
cosine_dist=False, ckpt_emb=False, is_debug=False)
args = parser.parse_args()
if args.size > 4 and args.size % 4 != 0:
raise RuntimeError("Choose an embedding size that is a multiple of 4 (it speeds up computation)")
model = None
if args.train:
callbacks = [
LossSetter(),
LossLogger(log_file=args.train_log_filename),
VectorNormLogger(log_file=args.train_log_filename)]
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
if args.model_filename:
# Continue training an existing model. All hyperparameters will be extracted from the filename. The
# number of additional epochs for which we want to train needs to be provided as parameter. The new
# model will be stored in a file with the same name, but with a different number of epochs in the name.
# Extract model info from the model filename.
config, basename = parse_model_filename(args.model_filename)
print("[Training] Continue training model {} for an additional {} epochs".format(basename, args.epochs))
sentences = get_sentences(config["dataset"])
# Load and train model.
model = gensim.models.Word2Vec.load(args.model_filename)
if abs(model.min_alpha_yet_reached - args.min_alpha) < 0.0001:
print("[WARNING] The learning rate will be constant throughout training!")
model.train(
sentences, total_examples=model.corpus_count, epochs=args.epochs,
start_alpha=args.alpha, end_alpha=args.min_alpha, compute_loss=True,
callbacks=callbacks)
# Update filename with the new number of epochs.
filename = MODEL_FILENAME_PATTERN.format(
"word2vec_baseline" if config["similarity"] == "cosine" else "geometric_emb",
config["dataset"], config["w2v_model"], (config["epochs"] + args.epochs), config["emb_size"], str(args.alpha)[2:], str(args.min_alpha)[2:], config["negative"],
config["window"], config["min_count"], config["similarity"])
if config["l2reg_coef"]:
filename = filename + "_l" + str(config["l2reg_coef"])
if config["with_bias"]:
filename = filename + "_bias"
if config["init_near_border"]:
filename = filename + "_border-init"
if config["normalized"]:
filename = filename + "_norm"
if config["burnin_epochs"] != 0:
filename = filename + "_burnin" + str(config["burnin_epochs"])
filename += "_cont"
new_model_filename = os.path.join(args.root, filename)
else:
sentences = get_sentences(args.ds)
emb_type = None
if args.euclid == 1:
emb_type = 'euclid'
elif args.poincare == 1:
emb_type = 'poincare'
else:
emb_type = 'cosine'
optimizer = args.optimizer
if optimizer == "":
optimizer = SUPPORTED_OPTIMIZERS[emb_type][0] # Set the default
else:
if optimizer not in SUPPORTED_OPTIMIZERS[emb_type]:
raise RuntimeError("Unsupported optimizer {} for embedding type {}".format(optimizer, emb_type))
sim_func = args.sim_func
cosh_dist_pow = 0.0
if sim_func != "" and emb_type != "poincare":
raise RuntimeError("Choosing a different similarity function is only supported for poincare embeddings")
if emb_type == "poincare":
if sim_func == "":
sim_func = SUPPORTED_SIM_FUNCTIONS[0] # Set the default
elif "cosh-dist-pow" in sim_func:
cosh_dist_pow = int(sim_func.rsplit("-", 1)[1])
else:
if sim_func not in SUPPORTED_SIM_FUNCTIONS:
raise RuntimeError("Unsupported similarity function {}".format(sim_func))
model_type = None
if args.sg == 1:
if args.is_nll:
model_type = "nll"
else:
model_type = "sg"
else:
model_type = "cbow"
# Check if folders exist and create them otherwise.
directory = os.path.join(args.root, "eval_logs")
if not os.path.exists(directory):
os.makedirs(directory)
directory = os.path.join(args.root, "models")
if not os.path.exists(directory):
os.makedirs(directory)
directory = os.path.join(args.root, "word_emb_checkpoints")
if not os.path.exists(directory):
os.makedirs(directory)
directory = os.path.join(args.root, "models/word2vec_baseline")
if not os.path.exists(directory):
os.makedirs(directory)
directory = os.path.join(args.root, "models/geometric_emb")
if not os.path.exists(directory):
os.makedirs(directory)
filename = MODEL_FILENAME_PATTERN.format(
"word2vec_baseline" if args.euclid == 0 and args.poincare == 0 else "geometric_emb",
args.ds, model_type, args.epochs, args.size, str(args.alpha)[2:],
str(args.min_alpha)[2:], args.negative, args.window, args.min_count, emb_type)
filename = filename + "_OPT" + optimizer
if emb_type == "poincare":
filename = filename + "_SIM" + sim_func
initialization_config = None
if args.init_config:
pretrained_model_filename = INITIALIZATION_MODEL_FILENAME_PATTERN.format(args.ds, args.size)
print("Initializing embeddings from pretrained model", pretrained_model_filename)
initialization_config = InitializationConfig(
pretrained_model_filename=os.path.join(args.root, pretrained_model_filename),
config_str=args.init_config
)
filename = filename + "_INIT" + args.init_config
if args.l2reg:
filename = filename + "_l" + str(args.l2reg)
if args.with_bias:
filename = filename + "_bias"
if args.init_near_border:
filename = filename + "_border-init"
if args.normalized:
filename = filename + "_norm"
if args.burnin_epochs != 0:
filename = filename + "_burnin" + str(args.burnin_epochs)
new_model_filename = os.path.join(args.root, filename)
ckpt_word_list = None
if args.ckpt_emb:
with open(os.path.join(args.root, "data/google_analogy_vocab.txt"), "r") as f:
ckpt_word_list = [word.strip() for word in f.readlines()]
ckpt_filename = "word_emb_checkpoints/emb_ckpt_" + os.path.basename(new_model_filename).split("_", 1)[1]
ckpt_filename = os.path.join(args.root, ckpt_filename)
callbacks.append(WordEmbCheckpointSaver(ckpt_filename=ckpt_filename))
print("[Training] Train new model {} using {}".format(new_model_filename, optimizer.upper()), end="")
if emb_type == "poincare":
print(" and similarity function {}".format(sim_func.upper()))
else:
print("")
# The first input to Word2Vec is a list of lists of strings. Each item in
# the top-level list is a list of the words and special punctuation
# (e.g. . or "). One such item corresponds to one sentence.
model = gensim.models.Word2Vec(
sentences,
sg=args.sg,
is_nll=args.is_nll,
normalized=args.normalized,
burnin_epochs=args.burnin_epochs,
euclid=args.euclid,
poincare=args.poincare,
size=args.size,
alpha=args.alpha,
min_alpha=args.min_alpha,
l2reg_coef=args.l2reg,
optimizer=optimizer,
sim_func=sim_func,
cosh_dist_pow=cosh_dist_pow,
negative=args.negative,
window=args.window,
min_count=args.min_count,
iter=args.epochs,
workers=args.workers,
compute_loss=True,
with_bias=args.with_bias,
init_near_border=args.init_near_border,
initialization_config=initialization_config,
ckpt_word_list=ckpt_word_list,
debug=args.is_debug,
callbacks=callbacks)
# Save model.
with open(new_model_filename, "wb") as f:
model.save(f)
# Sanity check the norm of some random words if the word embeddings need to be normalized.
if args.normalized:
logging.info("")
logging.info("Sanity check-normalized word embeddings (word: norm):")
words = ["dog", "man", "king", "usa", "something"]
for w in words:
logging.info("\t {}: {}".format(w, norm(model.wv[w])))
if args.with_bias:
logging.info("")
logging.info("Sanity check-sum of: input biases {}; output biases {}".format(
np.sum(model.trainables.b0), np.sum(model.trainables.b1)))
if optimizer == "wfullrsgd" or optimizer == "fullrsgd":
logging.info("")
logging.info("Number of projections back to the Poincare ball: {}".format(model.num_projections))
else:
model = gensim.models.Word2Vec.load(args.model_filename)
wv = model.wv
wv.trainables = model.trainables
# XXX: uncomment to evaluate the model with the scaled and projected pretrained embeddings used for initialization
# wv.vectors = model.trainables.initialization_config.init_vectors
directory = os.path.join(args.root, "eval_logs")
if not os.path.exists(directory):
os.makedirs(directory)
# Extract model info from the model filename.
basename = os.path.basename(args.model_filename)
config, basename = parse_model_filename(args.model_filename)
analogy_type = None
if args.cosadd:
analogy_type = "cosadd"
elif args.cosmul:
analogy_type = "cosmul"
elif config["similarity"] == "poincare":
analogy_type = "hyp_pt"
else:
analogy_type = "cosadd" # The default for dot product and Euclidean embeddings is 3COSADD
if config["similarity"] == "poincare" and args.agg_eval:
compute_poincare_aggregate(model)
if args.ctx_eval:
model.wv.vectors = model.trainables.syn1neg
if args.shift_origin:
left_offset = -np.average(model.wv.vectors, axis=0)
# right_offset = -np.average(model.wv.vectors, axis=0)
left_offset_mat = np.tile(left_offset.reshape(1, -1), (model.wv.vectors.shape[0], 1))
# right_offset_mat = np.tile(right_offset.reshape(1, -1), (model.wv.vectors.shape[0], 1))
model.wv.vectors = gensim.models.keyedvectors.PoincareWordEmbeddingsKeyedVectors.moebius_add_mat(
left_offset_mat, model.wv.vectors)
# model.wv.vectors = gensim.models.keyedvectors.PoincareWordEmbeddingsKeyedVectors.moebius_add_mat(
# model.wv.vectors, right_offset_mat)
if config["similarity"] == "poincare":
if args.cosine_eval:
model.wv.use_poincare_distance = False
else:
model.wv.use_poincare_distance = True
# Create name for file that will store the logs.
eval_log_filename = "eval_logs/eval_" + basename.split("_", 1)[1] + "_" + analogy_type + \
("_agg" if config["similarity"] == "poincare" and args.agg_eval else ("_ctx" if args.ctx_eval else ""))
eval_log_filename = eval_log_filename + ("_cosdist" if config["similarity"] == "poincare" and args.cosine_eval else "")
eval_log_filename = os.path.join(args.root, eval_log_filename)
feval = None
if args.restrict_vocab != 0:
feval = open(eval_log_filename, "w+")
logger = Logger(feval)
else:
# Don't save the output to file if we are not running the word analogy benchmarks.
logger = Logger()
l2reg_coef = str(config["l2reg_coef"])
logger.log('MODEL: (Dataset, {}), (W2V model, {}), (Epochs, {}), (Emb size, {}), (Alpha, {}), (Min alpha, {}), (Negative, {}), (Window, {}), (Similarity, {}), (Optimizer, {}), (Sim. func, {}), (Init config, {}), (With Bias, {}), (Normalized, {}), (L2Reg Coeff, {}), (Burn-in, {})'.format(
config["dataset"], config["w2v_model"], config["epochs"], config["emb_size"], config["alpha"],
config["min_alpha"], config["negative"], config["window"], config["similarity"],
config["optimizer"].upper(), config["sim_func"].upper(), config["init_config"],
"yes" if config["with_bias"] else "no", "yes" if config["normalized"] else "no",
config["l2reg_coef"], "yes" if config["burnin_epochs"] else "no"
))
if args.restrict_vocab != 0:
logger.log('EVALUATION: (Analogy type, {}), (Restrict vocab, {}), (Vectors used, {}), (Distance, {})'.format(
analogy_type, args.restrict_vocab, ("W+C" if args.agg_eval else ("C" if args.ctx_eval else "W")),
("cosine" if config["similarity"] != "poincare" or args.cosine_eval else "Poincare")))
else:
logger.log()
sim_debug_file = None
if args.is_debug:
sim_debug_file = os.path.join(args.root, "eval_logs/debug_similarity.csv")
logger.log("========= Various statistics =========")
norms_distribution(model)
wordnet_level_rank_vector_norm_correlation(model, args.root)
logger.log("========= Similarity evaluation =========")
pearson, spearman, ratio = wv.evaluate_word_pairs(
os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'rare_word.txt'),
dummy4unknown=False
)
logger.log("Stanford Rare World: {:.4f} {:.4f} {:.4f}".format(pearson[0], spearman[0], ratio))
pearson, spearman, ratio = wv.evaluate_word_pairs(
os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'wordsim353.tsv'),
dummy4unknown=False,
debug_file=sim_debug_file
)
logger.log("WordSim353: {:.4f} {:.4f} {:.4f}".format(pearson[0], spearman[0], ratio))
pearson, spearman, ratio = wv.evaluate_word_pairs(
os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'simlex999.txt'),
dummy4unknown=False
)
logger.log("SimLex999: {:.4f} {:.4f} {:.4f}".format(pearson[0], spearman[0], ratio))
if args.restrict_vocab != 0:
logger.log("=========== Analogy evaluation ==========")
most_similar = None
if analogy_type == "cosadd":
most_similar = gensim.models.keyedvectors.VanillaWordEmbeddingsKeyedVectors.batch_most_similar_analogy
elif analogy_type == "cosmul":
most_similar = gensim.models.keyedvectors.VanillaWordEmbeddingsKeyedVectors.batch_most_similar_cosmul_analogy
elif analogy_type == "hyp_pt":
most_similar = gensim.models.keyedvectors.PoincareWordEmbeddingsKeyedVectors.batch_most_similar_hyperbolic_analogy
else:
raise RuntimeError("Unknown analogy type.")
start = time.time()
analogy_eval = wv.accuracy(
os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'questions-words.txt'),
restrict_vocab=args.restrict_vocab,
most_similar=most_similar,
debug=args.is_debug)
# Now, instead of adding the correct/incorrect words to a list, I am just counting the number of correct/incorrect answers.
logger.log("Google: {} {} {} {}".format(analogy_eval[-1]['correct'][0],
analogy_eval[-1]['correct'][0] + analogy_eval[-1]['incorrect'][0],
analogy_eval[-1]['correct'][0] / (analogy_eval[-1]['correct'][0] + analogy_eval[-1]['incorrect'][0]),
analogy_eval[-1]['correct'][0] / GOOGLE_SIZE))
if not args.is_debug:
analogy_eval = wv.accuracy(
os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data/', 'msr_word_relationship.processed'),
restrict_vocab=args.restrict_vocab,
most_similar=most_similar)
# Now, instead of adding the correct/incorrect words to a list, I am just counting the number of correct/incorrect answers.
logger.log("Microsoft: {} {} {} {}".format(analogy_eval[-1]['correct'][0],
analogy_eval[-1]['correct'][0] + analogy_eval[-1]['incorrect'][0],
analogy_eval[-1]['correct'][0] / (analogy_eval[-1]['correct'][0] + analogy_eval[-1]['incorrect'][0]),
analogy_eval[-1]['correct'][0] / MSR_SIZE))
logging.info("")
logging.info("Analogy task took {} seconds to perform.".format(time.time() - start))
if feval:
feval.close()
| 28,081 | 50.811808 | 296 | py |
poincare_glove | poincare_glove-master/gensim/scripts/glove2word2vec.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <radimrehurek@seznam.cz>
# Copyright (C) 2016 Manas Ranjan Kar <manasrkar91@gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This script allows to convert GloVe vectors into the word2vec. Both files are
presented in text format and almost identical except that word2vec includes
number of vectors and its dimension which is only difference regard to GloVe.
Notes
-----
GloVe format (real example can be found `on the Stanford site <https://nlp.stanford.edu/projects/glove/>`_) ::
word1 0.123 0.134 0.532 0.152
word2 0.934 0.412 0.532 0.159
word3 0.334 0.241 0.324 0.188
...
word9 0.334 0.241 0.324 0.188
Word2Vec format (real example can be found `on the old w2v repository <https://code.google.com/archive/p/word2vec/>`_) ::
9 4
word1 0.123 0.134 0.532 0.152
word2 0.934 0.412 0.532 0.159
word3 0.334 0.241 0.324 0.188
...
word9 0.334 0.241 0.324 0.188
How to use
----------
>>> from gensim.test.utils import datapath, get_tmpfile
>>> from gensim.models import KeyedVectors
>>>
>>> glove_file = datapath('test_glove.txt')
>>> tmp_file = get_tmpfile("test_word2vec.txt")
>>>
>>> # call glove2word2vec script
>>> # default way (through CLI): python -m gensim.scripts.glove2word2vec --input <glove_file> --output <w2v_file>
>>> from gensim.scripts.glove2word2vec import glove2word2vec
>>> glove2word2vec(glove_file, tmp_file)
>>>
>>> model = KeyedVectors.load_word2vec_format(tmp_file)
Command line arguments
----------------------
.. program-output:: python -m gensim.scripts.glove2word2vec --help
:ellipsis: 0, -5
"""
import sys
import logging
import argparse
from smart_open import smart_open
logger = logging.getLogger(__name__)
def get_glove_info(glove_file_name):
    """Get number of vectors in provided `glove_file_name` and dimension of vectors.
    Parameters
    ----------
    glove_file_name : str
        Path to file in GloVe format.
    Returns
    -------
    (int, int)
        Number of vectors (lines) of input file and its dimension.
    """
    # Single pass over the file: the dimensionality comes from the first line
    # (token count minus the leading word), the vector count from the total
    # line count. The previous implementation opened and scanned the file
    # twice, doubling I/O on what is typically a multi-GB file.
    with smart_open(glove_file_name) as f:
        first_line = f.readline()
        num_dims = len(first_line.split()) - 1
        # Empty file: zero lines and num_dims == -1, matching the behavior
        # of the former two-pass implementation.
        num_lines = (1 if first_line else 0) + sum(1 for _ in f)
    return num_lines, num_dims
def glove2word2vec(glove_input_file, word2vec_output_file):
    """Write `glove_input_file` (GloVe text format) to `word2vec_output_file` in word2vec text format.
    Parameters
    ----------
    glove_input_file : str
        Path to the input file in GloVe format.
    word2vec_output_file : str
        Path to the converted output file.
    Returns
    -------
    (int, int)
        Number of vectors in the input file and their dimensionality.
    """
    vector_count, dimensions = get_glove_info(glove_input_file)
    logger.info("converting %i vectors from %s to %s", vector_count, glove_input_file, word2vec_output_file)
    # word2vec text format is GloVe text format preceded by a "<count> <dims>" header line.
    header = "{0} {1}\n".format(vector_count, dimensions).encode('utf-8')
    with smart_open(word2vec_output_file, 'wb') as destination, smart_open(glove_input_file, 'rb') as source:
        destination.write(header)
        for vector_line in source:
            destination.write(vector_line)
    return vector_count, dimensions
# CLI entry point: parse -i/-o and delegate the conversion to glove2word2vec().
if __name__ == "__main__":
    logging.basicConfig(format='%(asctime)s - %(module)s - %(levelname)s - %(message)s', level=logging.INFO)
    # __doc__[:-135] drops the trailing "Command line arguments" section of the
    # module docstring from the --help text; the constant is presumably the
    # length of that tail, kept in sync manually — fragile, verify if the end
    # of the docstring ever changes.
    parser = argparse.ArgumentParser(description=__doc__[:-135], formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("-i", "--input", required=True, help="Path to input file in GloVe format")
    parser.add_argument("-o", "--output", required=True, help="Path to output file")
    args = parser.parse_args()
    logger.info("running %s", ' '.join(sys.argv))
    num_lines, num_dims = glove2word2vec(args.input, args.output)
    logger.info('Converted model with %i vectors and %i dimensions', num_lines, num_dims)
| 3,920 | 30.620968 | 119 | py |
poincare_glove | poincare_glove-master/gensim/scripts/make_wiki_online_lemma.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Copyright (C) 2012 Lars Buitinck <larsmans@gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s WIKI_XML_DUMP OUTPUT_PREFIX [VOCABULARY_SIZE]
Convert articles from a Wikipedia dump to (sparse) vectors. The input is a
bz2-compressed dump of Wikipedia articles, in XML format.
This actually creates three files:
* `OUTPUT_PREFIX_wordids.txt`: mapping between words and their integer ids
* `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation, in
  Matrix Market format
* `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation
* `OUTPUT_PREFIX.tfidf_model`: TF-IDF model dump
The output Matrix Market files can then be compressed (e.g., by bzip2) to save
disk space; gensim's corpus iterators can work with compressed input, too.
`VOCABULARY_SIZE` controls how many of the most frequent words to keep (after
removing tokens that appear in more than 10%% of all documents). Defaults to
100,000.
If you have the `pattern` package installed, this script will use a fancy
lemmatization to get a lemma of each token (instead of plain alphabetic
tokenizer). The package is available at https://github.com/clips/pattern .
Example:
python -m gensim.scripts.make_wikicorpus ~/gensim/results/enwiki-latest-pages-articles.xml.bz2 ~/gensim/results/wiki
"""
import logging
import os.path
import sys
from gensim.corpora import Dictionary, HashDictionary, MmCorpus, WikiCorpus
from gensim.models import TfidfModel
# Wiki is first scanned for all distinct word types (~7M). The types that
# appear in more than 10% of articles are removed and from the rest, the
# DEFAULT_DICT_SIZE most frequent types are kept.
DEFAULT_DICT_SIZE = 100000
# Command-line entry point: build bag-of-words and TF-IDF Matrix Market
# corpora from a Wikipedia XML dump. NOTE: behavior is keyed off the *script
# file name* (`program`): an 'online' substring selects the streaming
# HashDictionary path, 'lemma' enables lemmatization, 'nodebug' turns off
# HashDictionary debug mode. The sibling make_wiki_* scripts in this package
# are identical copies that rely on this naming trick.
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s", ' '.join(sys.argv))
    # check and process input arguments
    if len(sys.argv) < 3:
        # The module docstring doubles as usage text; '% locals()' fills in
        # its %(program)s placeholder.
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    inp, outp = sys.argv[1:3]
    if not os.path.isdir(os.path.dirname(outp)):
        raise SystemExit("Error: The output directory does not exist. Create the directory and try again.")
    # Optional third argument caps the vocabulary size (default DEFAULT_DICT_SIZE).
    if len(sys.argv) > 3:
        keep_words = int(sys.argv[3])
    else:
        keep_words = DEFAULT_DICT_SIZE
    # Feature flags derived from the script's own file name (see header note).
    online = 'online' in program
    lemmatize = 'lemma' in program
    debug = 'nodebug' not in program
    if online:
        dictionary = HashDictionary(id_range=keep_words, debug=debug)
        dictionary.allow_update = True # start collecting document frequencies
        wiki = WikiCorpus(inp, lemmatize=lemmatize, dictionary=dictionary)
        # ~4h on my macbook pro without lemmatization, 3.1m articles (august 2012)
        MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000)
        # with HashDictionary, the token->id mapping is only fully instantiated now, after `serialize`
        dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
        dictionary.save_as_text(outp + '_wordids.txt.bz2')
        wiki.save(outp + '_corpus.pkl.bz2')
        dictionary.allow_update = False
    else:
        wiki = WikiCorpus(inp, lemmatize=lemmatize) # takes about 9h on a macbook pro, for 3.5m articles (june 2011)
        # only keep the most frequent words (out of total ~8.2m unique tokens)
        wiki.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
        # save dictionary and bag-of-words (term-document frequency matrix)
        MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000) # another ~9h
        wiki.dictionary.save_as_text(outp + '_wordids.txt.bz2')
        # load back the id->word mapping directly from file
        # this seems to save more memory, compared to keeping the wiki.dictionary object from above
        dictionary = Dictionary.load_from_text(outp + '_wordids.txt.bz2')
    del wiki
    # initialize corpus reader and word->id mapping
    mm = MmCorpus(outp + '_bow.mm')
    # build tfidf, ~50min
    tfidf = TfidfModel(mm, id2word=dictionary, normalize=True)
    tfidf.save(outp + '.tfidf_model')
    # save tfidf vectors in matrix market format
    # ~4h; result file is 15GB! bzip2'ed down to 4.5GB
    MmCorpus.serialize(outp + '_tfidf.mm', tfidf[mm], progress_cnt=10000)
    logger.info("finished running %s", program)
| 4,582 | 39.557522 | 118 | py |
poincare_glove | poincare_glove-master/gensim/scripts/make_wiki_online.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Copyright (C) 2012 Lars Buitinck <larsmans@gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s WIKI_XML_DUMP OUTPUT_PREFIX [VOCABULARY_SIZE]
Convert articles from a Wikipedia dump to (sparse) vectors. The input is a
bz2-compressed dump of Wikipedia articles, in XML format.
This actually creates three files:
* `OUTPUT_PREFIX_wordids.txt`: mapping between words and their integer ids
* `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation, in
  Matrix Market format
* `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation
* `OUTPUT_PREFIX.tfidf_model`: TF-IDF model dump
The output Matrix Market files can then be compressed (e.g., by bzip2) to save
disk space; gensim's corpus iterators can work with compressed input, too.
`VOCABULARY_SIZE` controls how many of the most frequent words to keep (after
removing tokens that appear in more than 10%% of all documents). Defaults to
100,000.
If you have the `pattern` package installed, this script will use a fancy
lemmatization to get a lemma of each token (instead of plain alphabetic
tokenizer). The package is available at https://github.com/clips/pattern .
Example:
python -m gensim.scripts.make_wikicorpus ~/gensim/results/enwiki-latest-pages-articles.xml.bz2 ~/gensim/results/wiki
"""
import logging
import os.path
import sys
from gensim.corpora import Dictionary, HashDictionary, MmCorpus, WikiCorpus
from gensim.models import TfidfModel
# Wiki is first scanned for all distinct word types (~7M). The types that
# appear in more than 10% of articles are removed and from the rest, the
# DEFAULT_DICT_SIZE most frequent types are kept.
DEFAULT_DICT_SIZE = 100000
# Command-line entry point: build bag-of-words and TF-IDF Matrix Market
# corpora from a Wikipedia XML dump. NOTE: behavior is keyed off the *script
# file name* (`program`): an 'online' substring selects the streaming
# HashDictionary path, 'lemma' enables lemmatization, 'nodebug' turns off
# HashDictionary debug mode. The sibling make_wiki_* scripts in this package
# are identical copies that rely on this naming trick.
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s", ' '.join(sys.argv))
    # check and process input arguments
    if len(sys.argv) < 3:
        # The module docstring doubles as usage text; '% locals()' fills in
        # its %(program)s placeholder.
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    inp, outp = sys.argv[1:3]
    if not os.path.isdir(os.path.dirname(outp)):
        raise SystemExit("Error: The output directory does not exist. Create the directory and try again.")
    # Optional third argument caps the vocabulary size (default DEFAULT_DICT_SIZE).
    if len(sys.argv) > 3:
        keep_words = int(sys.argv[3])
    else:
        keep_words = DEFAULT_DICT_SIZE
    # Feature flags derived from the script's own file name (see header note).
    online = 'online' in program
    lemmatize = 'lemma' in program
    debug = 'nodebug' not in program
    if online:
        dictionary = HashDictionary(id_range=keep_words, debug=debug)
        dictionary.allow_update = True # start collecting document frequencies
        wiki = WikiCorpus(inp, lemmatize=lemmatize, dictionary=dictionary)
        # ~4h on my macbook pro without lemmatization, 3.1m articles (august 2012)
        MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000)
        # with HashDictionary, the token->id mapping is only fully instantiated now, after `serialize`
        dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
        dictionary.save_as_text(outp + '_wordids.txt.bz2')
        wiki.save(outp + '_corpus.pkl.bz2')
        dictionary.allow_update = False
    else:
        wiki = WikiCorpus(inp, lemmatize=lemmatize) # takes about 9h on a macbook pro, for 3.5m articles (june 2011)
        # only keep the most frequent words (out of total ~8.2m unique tokens)
        wiki.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
        # save dictionary and bag-of-words (term-document frequency matrix)
        MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000) # another ~9h
        wiki.dictionary.save_as_text(outp + '_wordids.txt.bz2')
        # load back the id->word mapping directly from file
        # this seems to save more memory, compared to keeping the wiki.dictionary object from above
        dictionary = Dictionary.load_from_text(outp + '_wordids.txt.bz2')
    del wiki
    # initialize corpus reader and word->id mapping
    mm = MmCorpus(outp + '_bow.mm')
    # build tfidf, ~50min
    tfidf = TfidfModel(mm, id2word=dictionary, normalize=True)
    tfidf.save(outp + '.tfidf_model')
    # save tfidf vectors in matrix market format
    # ~4h; result file is 15GB! bzip2'ed down to 4.5GB
    MmCorpus.serialize(outp + '_tfidf.mm', tfidf[mm], progress_cnt=10000)
    logger.info("finished running %s", program)
| 4,582 | 39.557522 | 118 | py |
poincare_glove | poincare_glove-master/gensim/scripts/make_wiki_lemma.py | make_wikicorpus.py | 18 | 18 | 18 | py |
poincare_glove | poincare_glove-master/gensim/scripts/eval_pretrained_emb.py | import gensim
import os
import sys
# Question counts of the full Google (questions-words.txt) and Microsoft (MSR)
# analogy test sets; used below to report accuracy over *all* questions, not
# just the in-vocabulary ones. TODO(review): confirm counts match the files.
GOOGLE_SIZE = 19544
MSR_SIZE = 8000
# Path of the pretrained embedding file, taken from the command line.
filename = sys.argv[1]
# Only the `restrict_vocab` most frequent words take part in the evaluation.
restrict_vocab = 400000 # 189533
# Relative path from this script's working directory to the repository root.
root = "../../../"
def precision(eval_result):
    """Return the fraction of correctly answered analogy questions.
    Parameters
    ----------
    eval_result : dict
        One result section as produced by `accuracy()`, with 'correct' and
        'incorrect' lists of question tuples.
    Returns
    -------
    float
        correct / (correct + incorrect); 0.0 when the section contains no
        questions at all (the previous version raised ZeroDivisionError).
    """
    num_correct = len(eval_result['correct'])
    total = num_correct + len(eval_result['incorrect'])
    if total == 0:
        return 0.0
    return num_correct / total
# Load the pretrained embeddings named on the command line, then run the full
# evaluation suite: three word-similarity benchmarks followed by the Google
# and Microsoft word-analogy benchmarks with 3COSADD and 3COSMUL inference.
basename = os.path.basename(filename)
# word2vec convention: file names containing '.bin' hold binary-format vectors.
binary = ".bin" in basename
model = gensim.models.KeyedVectors.load_word2vec_format(filename, binary=binary, limit=restrict_vocab)
print('(Model, {}), (Restrict vocab, {})'.format(basename, restrict_vocab))
print("============== Similarity evaluation ==============")
pearson, spearman, ratio = model.evaluate_word_pairs(os.path.join(root, 'msc_tifreaa/gensim/test/test_data', 'rare_word.txt'))
print("Stanford Rare World: {} {} {}".format(pearson[0], spearman[0], ratio))
pearson, spearman, ratio = model.evaluate_word_pairs(os.path.join(root, 'msc_tifreaa/gensim/test/test_data', 'wordsim353.tsv'))
print("WordSim353: {} {} {}".format(pearson[0], spearman[0], ratio))
pearson, spearman, ratio = model.evaluate_word_pairs(os.path.join(root, 'msc_tifreaa/gensim/test/test_data', 'simlex999.txt'))
print("SimLex999: {} {} {}".format(pearson[0], spearman[0], ratio))
print("=========== Analogy evaluation (3COSADD) ==========")
# The batch analogy evaluator appears to store aggregate counters: element 0
# of 'correct'/'incorrect' is the number of (in)correctly answered questions.
most_similar = gensim.models.keyedvectors.VanillaWordEmbeddingsKeyedVectors.batch_most_similar_analogy
analogy_eval = model.accuracy(
    os.path.join(root, 'msc_tifreaa/gensim/test/test_data', 'questions-words.txt'),
    restrict_vocab=restrict_vocab,
    most_similar=most_similar)
num_correct = analogy_eval[-1]['correct'][0]
num_answered = num_correct + analogy_eval[-1]['incorrect'][0]
print("Google: {} {} {} {}".format(num_correct, num_answered, num_correct / num_answered, num_correct / GOOGLE_SIZE))
analogy_eval = model.accuracy(
    os.path.join(root, 'data/MSR-analogy-test-set', 'word_relationship.processed'),
    restrict_vocab=restrict_vocab,
    most_similar=most_similar)
num_correct = analogy_eval[-1]['correct'][0]
num_answered = num_correct + analogy_eval[-1]['incorrect'][0]
print("Microsoft: {} {} {} {}".format(num_correct, num_answered, num_correct / num_answered, num_correct / MSR_SIZE))
print("=========== Analogy evaluation (3COSMUL) ==========")
# most_similar_cosmul stores the question tuples themselves, so accuracy is
# derived from list lengths via precision().
most_similar = gensim.models.keyedvectors.VanillaWordEmbeddingsKeyedVectors.most_similar_cosmul
analogy_eval = model.accuracy(
    os.path.join(root, 'msc_tifreaa/gensim/test/test_data', 'questions-words.txt'),
    restrict_vocab=restrict_vocab,
    most_similar=most_similar)
num_correct = len(analogy_eval[-1]['correct'])
num_answered = num_correct + len(analogy_eval[-1]['incorrect'])
print("Google: {} {} {} {}".format(num_correct, num_answered, precision(analogy_eval[-1]), num_correct / GOOGLE_SIZE))
analogy_eval = model.accuracy(
    os.path.join(root, 'msc_tifreaa/gensim/test/test_data/', 'msr_word_relationship.processed'),
    restrict_vocab=restrict_vocab,
    most_similar=most_similar)
num_correct = len(analogy_eval[-1]['correct'])
num_answered = num_correct + len(analogy_eval[-1]['incorrect'])
print("Microsoft: {} {} {} {}".format(num_correct, num_answered, precision(analogy_eval[-1]), num_correct / MSR_SIZE))
| 3,718 | 51.380282 | 139 | py |
poincare_glove | poincare_glove-master/gensim/scripts/word2vec2tensor.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Loreto Parisi <loretoparisi@gmail.com>
# Copyright (C) 2016 Silvio Olivastri <silvio.olivastri@gmail.com>
# Copyright (C) 2016 Radim Rehurek <radim@rare-technologies.com>
"""This script allows converting word-vectors from word2vec format into Tensorflow 2D tensor and metadata format.
This script is used for word-vector visualization on `Embedding Visualization <http://projector.tensorflow.org/>`_.
How to use
----------
#. Convert your word-vector with this script (for example, we'll use model from
`gensim-data <https://rare-technologies.com/new-download-api-for-pretrained-nlp-models-and-datasets-in-gensim/>`_) ::
python -m gensim.downloader -d glove-wiki-gigaword-50 # download model in word2vec format
python -m gensim.scripts.word2vec2tensor -i ~/gensim-data/glove-wiki-gigaword-50/glove-wiki-gigaword-50.gz \
-o /tmp/my_model_prefix
#. Open http://projector.tensorflow.org/
#. Click "Load Data" button from the left menu.
#. Select "Choose file" in "Load a TSV file of vectors." and choose "/tmp/my_model_prefix_tensor.tsv" file.
#. Select "Choose file" in "Load a TSV file of metadata." and choose "/tmp/my_model_prefix_metadata.tsv" file.
#. ???
#. PROFIT!
For more information about TensorBoard TSV format please visit:
https://www.tensorflow.org/versions/master/how_tos/embedding_viz/
Command line arguments
----------------------
.. program-output:: python -m gensim.scripts.word2vec2tensor --help
:ellipsis: 0, -7
"""
import os
import sys
import logging
import argparse
import gensim
logger = logging.getLogger(__name__)
def word2vec2tensor(word2vec_model_path, tensor_filename, binary=False):
    """Convert a file in Word2Vec format into the two TSV files TensorBoard expects.
    "tensor_filename"_tensor.tsv receives the word-vectors,
    "tensor_filename"_metadata.tsv receives the words themselves.
    Parameters
    ----------
    word2vec_model_path : str
        Path to file in Word2Vec format.
    tensor_filename : str
        Prefix for output files.
    binary : bool, optional
        True if input file in binary format.
    """
    model = gensim.models.KeyedVectors.load_word2vec_format(word2vec_model_path, binary=binary)
    outfiletsv = tensor_filename + '_tensor.tsv'
    outfiletsvmeta = tensor_filename + '_metadata.tsv'
    # Open both outputs in *binary* mode and write UTF-8 encoded bytes
    # throughout. The previous version opened the files in text mode ('w+')
    # while writing bytes from gensim.utils.to_utf8(), which raises
    # "TypeError: write() argument must be str, not bytes" on Python 3.
    with open(outfiletsv, 'wb') as file_vector, open(outfiletsvmeta, 'wb') as file_metadata:
        for word in model.index2word:
            file_metadata.write(gensim.utils.to_utf8(word) + b'\n')
            vector_row = '\t'.join(str(x) for x in model[word])
            file_vector.write(gensim.utils.to_utf8(vector_row) + b'\n')
    logger.info("2D tensor file saved to %s", outfiletsv)
    logger.info("Tensor metadata file saved to %s", outfiletsvmeta)
# CLI entry point: parse -i/-o/-b and delegate to word2vec2tensor().
if __name__ == "__main__":
    logging.basicConfig(format='%(asctime)s - %(module)s - %(levelname)s - %(message)s', level=logging.INFO)
    # __doc__[:-138] drops the trailing "Command line arguments" section of the
    # module docstring from the --help text; the constant is presumably the
    # length of that tail, kept in sync manually — fragile, verify if the end
    # of the docstring ever changes.
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__[:-138])
    parser.add_argument("-i", "--input", required=True, help="Path to input file in word2vec format")
    parser.add_argument("-o", "--output", required=True, help="Prefix path for output files")
    parser.add_argument(
        "-b", "--binary", action='store_const', const=True, default=False,
        help="Set this flag if word2vec model in binary format (default: %(default)s)"
    )
    args = parser.parse_args()
    logger.info("running %s", ' '.join(sys.argv))
    word2vec2tensor(args.input, args.output, args.binary)
    logger.info("finished running %s", os.path.basename(sys.argv[0]))
| 3,735 | 38.326316 | 120 | py |
poincare_glove | poincare_glove-master/gensim/scripts/make_wiki_online_nodebug.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Copyright (C) 2012 Lars Buitinck <larsmans@gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s WIKI_XML_DUMP OUTPUT_PREFIX [VOCABULARY_SIZE]
Convert articles from a Wikipedia dump to (sparse) vectors. The input is a
bz2-compressed dump of Wikipedia articles, in XML format.
This actually creates three files:
* `OUTPUT_PREFIX_wordids.txt`: mapping between words and their integer ids
* `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation, in
  Matrix Market format
* `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation
* `OUTPUT_PREFIX.tfidf_model`: TF-IDF model dump
The output Matrix Market files can then be compressed (e.g., by bzip2) to save
disk space; gensim's corpus iterators can work with compressed input, too.
`VOCABULARY_SIZE` controls how many of the most frequent words to keep (after
removing tokens that appear in more than 10%% of all documents). Defaults to
100,000.
If you have the `pattern` package installed, this script will use a fancy
lemmatization to get a lemma of each token (instead of plain alphabetic
tokenizer). The package is available at https://github.com/clips/pattern .
Example:
python -m gensim.scripts.make_wikicorpus ~/gensim/results/enwiki-latest-pages-articles.xml.bz2 ~/gensim/results/wiki
"""
import logging
import os.path
import sys
from gensim.corpora import Dictionary, HashDictionary, MmCorpus, WikiCorpus
from gensim.models import TfidfModel
# Wiki is first scanned for all distinct word types (~7M). The types that
# appear in more than 10% of articles are removed and from the rest, the
# DEFAULT_DICT_SIZE most frequent types are kept.
DEFAULT_DICT_SIZE = 100000
# Command-line entry point: build bag-of-words and TF-IDF Matrix Market
# corpora from a Wikipedia XML dump. NOTE: behavior is keyed off the *script
# file name* (`program`): an 'online' substring selects the streaming
# HashDictionary path, 'lemma' enables lemmatization, 'nodebug' turns off
# HashDictionary debug mode. The sibling make_wiki_* scripts in this package
# are identical copies that rely on this naming trick.
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s", ' '.join(sys.argv))
    # check and process input arguments
    if len(sys.argv) < 3:
        # The module docstring doubles as usage text; '% locals()' fills in
        # its %(program)s placeholder.
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    inp, outp = sys.argv[1:3]
    if not os.path.isdir(os.path.dirname(outp)):
        raise SystemExit("Error: The output directory does not exist. Create the directory and try again.")
    # Optional third argument caps the vocabulary size (default DEFAULT_DICT_SIZE).
    if len(sys.argv) > 3:
        keep_words = int(sys.argv[3])
    else:
        keep_words = DEFAULT_DICT_SIZE
    # Feature flags derived from the script's own file name (see header note).
    online = 'online' in program
    lemmatize = 'lemma' in program
    debug = 'nodebug' not in program
    if online:
        dictionary = HashDictionary(id_range=keep_words, debug=debug)
        dictionary.allow_update = True # start collecting document frequencies
        wiki = WikiCorpus(inp, lemmatize=lemmatize, dictionary=dictionary)
        # ~4h on my macbook pro without lemmatization, 3.1m articles (august 2012)
        MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000)
        # with HashDictionary, the token->id mapping is only fully instantiated now, after `serialize`
        dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
        dictionary.save_as_text(outp + '_wordids.txt.bz2')
        wiki.save(outp + '_corpus.pkl.bz2')
        dictionary.allow_update = False
    else:
        wiki = WikiCorpus(inp, lemmatize=lemmatize) # takes about 9h on a macbook pro, for 3.5m articles (june 2011)
        # only keep the most frequent words (out of total ~8.2m unique tokens)
        wiki.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
        # save dictionary and bag-of-words (term-document frequency matrix)
        MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000) # another ~9h
        wiki.dictionary.save_as_text(outp + '_wordids.txt.bz2')
        # load back the id->word mapping directly from file
        # this seems to save more memory, compared to keeping the wiki.dictionary object from above
        dictionary = Dictionary.load_from_text(outp + '_wordids.txt.bz2')
    del wiki
    # initialize corpus reader and word->id mapping
    mm = MmCorpus(outp + '_bow.mm')
    # build tfidf, ~50min
    tfidf = TfidfModel(mm, id2word=dictionary, normalize=True)
    tfidf.save(outp + '.tfidf_model')
    # save tfidf vectors in matrix market format
    # ~4h; result file is 15GB! bzip2'ed down to 4.5GB
    MmCorpus.serialize(outp + '_tfidf.mm', tfidf[mm], progress_cnt=10000)
    logger.info("finished running %s", program)
| 4,582 | 39.557522 | 118 | py |
poincare_glove | poincare_glove-master/gensim/scripts/__init__.py | 0 | 0 | 0 | py | |
poincare_glove | poincare_glove-master/gensim/scripts/word2vec_standalone.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s -train CORPUS -output VECTORS -size SIZE -window WINDOW
-cbow CBOW -sample SAMPLE -hs HS -negative NEGATIVE -threads THREADS -iter ITER
-min_count MIN-COUNT -alpha ALPHA -binary BINARY -accuracy FILE
Trains a neural embedding model on text file CORPUS.
Parameters essentially reproduce those used by the original C tool
(see https://code.google.com/archive/p/word2vec/).
Parameters for training:
-train <file>
Use text data from <file> to train the model
-output <file>
Use <file> to save the resulting word vectors / word clusters
-size <int>
Set size of word vectors; default is 100
-window <int>
Set max skip length between words; default is 5
-sample <float>
Set threshold for occurrence of words. Those that appear with higher frequency in the training data
will be randomly down-sampled; default is 1e-3, useful range is (0, 1e-5)
-hs <int>
Use Hierarchical Softmax; default is 0 (not used)
-negative <int>
Number of negative examples; default is 5, common values are 3 - 10 (0 = not used)
-threads <int>
Use <int> threads (default 3)
-iter <int>
Run more training iterations (default 5)
-min_count <int>
This will discard words that appear less than <int> times; default is 5
-alpha <float>
Set the starting learning rate; default is 0.025 for skip-gram and 0.05 for CBOW
-binary <int>
        Save the resulting vectors in binary mode; default is 0 (off)
-cbow <int>
Use the continuous bag of words model; default is 1 (use 0 for skip-gram model)
-accuracy <file>
Compute accuracy of the resulting model analogical inference power on questions file <file>
See an example of questions file
at https://code.google.com/p/word2vec/source/browse/trunk/questions-words.txt
Example: python -m gensim.scripts.word2vec_standalone -train data.txt \
-output vec.txt -size 200 -sample 1e-4 -binary 0 -iter 3
"""
import logging
import os.path
import sys
import argparse
from numpy import seterr
from gensim.models.word2vec import Word2Vec, LineSentence # avoid referencing __main__ in pickle
logger = logging.getLogger(__name__)
# CLI entry point replicating the original C word2vec tool's flags: trains a
# Word2Vec model on -train and saves vectors/model according to -output/-binary.
if __name__ == "__main__":
    logging.basicConfig(format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s', level=logging.INFO)
    logger.info("running %s", " ".join(sys.argv))
    seterr(all='raise') # don't ignore numpy errors
    parser = argparse.ArgumentParser()
    parser.add_argument("-train", help="Use text data from file TRAIN to train the model", required=True)
    parser.add_argument("-output", help="Use file OUTPUT to save the resulting word vectors")
    parser.add_argument("-window", help="Set max skip length WINDOW between words; default is 5", type=int, default=5)
    parser.add_argument("-size", help="Set size of word vectors; default is 100", type=int, default=100)
    parser.add_argument(
        "-sample",
        help="Set threshold for occurrence of words. "
             "Those that appear with higher frequency in the training data will be randomly down-sampled; "
             "default is 1e-3, useful range is (0, 1e-5)",
        type=float, default=1e-3)
    parser.add_argument(
        "-hs", help="Use Hierarchical Softmax; default is 0 (not used)",
        type=int, default=0, choices=[0, 1]
    )
    parser.add_argument(
        "-negative", help="Number of negative examples; default is 5, common values are 3 - 10 (0 = not used)",
        type=int, default=5
    )
    parser.add_argument("-threads", help="Use THREADS threads (default 3)", type=int, default=3)
    parser.add_argument("-iter", help="Run more training iterations (default 5)", type=int, default=5)
    parser.add_argument(
        "-min_count", help="This will discard words that appear less than MIN_COUNT times; default is 5",
        type=int, default=5
    )
    parser.add_argument(
        "-alpha", help="Set the starting learning rate; default is 0.025 for skip-gram and 0.05 for CBOW",
        type=float
    )
    parser.add_argument(
        "-cbow", help="Use the continuous bag of words model; default is 1 (use 0 for skip-gram model)",
        type=int, default=1, choices=[0, 1]
    )
    parser.add_argument(
        "-binary", help="Save the resulting vectors in binary mode; default is 0 (off)",
        type=int, default=0, choices=[0, 1]
    )
    parser.add_argument("-accuracy", help="Use questions from file ACCURACY to evaluate the model")
    args = parser.parse_args()
    # Learning-rate defaults depend on the architecture: 0.025 for skip-gram,
    # 0.05 for CBOW, unless -alpha was given explicitly (matches the help text).
    if args.cbow == 0:
        skipgram = 1
        if not args.alpha:
            args.alpha = 0.025
    else:
        skipgram = 0
        if not args.alpha:
            args.alpha = 0.05
    corpus = LineSentence(args.train)
    model = Word2Vec(
        corpus, size=args.size, min_count=args.min_count, workers=args.threads,
        window=args.window, sample=args.sample, alpha=args.alpha, sg=skipgram,
        hs=args.hs, negative=args.negative, cbow_mean=1, iter=args.iter
    )
    if args.output:
        outfile = args.output
        model.wv.save_word2vec_format(outfile, binary=args.binary)
    else:
        # No -output given: derive an output prefix from the training file name
        # and save both the full model and the word vectors.
        outfile = args.train.split('.')[0]
        model.save(outfile + '.model')
        if args.binary == 1:
            model.wv.save_word2vec_format(outfile + '.model.bin', binary=True)
        else:
            model.wv.save_word2vec_format(outfile + '.model.txt', binary=False)
    if args.accuracy:
        # Optional word-analogy evaluation on the given questions file.
        questions_file = args.accuracy
        model.accuracy(questions_file)
    logger.info("finished running %s", os.path.basename(sys.argv[0]))
| 5,966 | 40.4375 | 118 | py |
poincare_glove | poincare_glove-master/gensim/scripts/make_wikicorpus.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Copyright (C) 2012 Lars Buitinck <larsmans@gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s WIKI_XML_DUMP OUTPUT_PREFIX [VOCABULARY_SIZE]
Convert articles from a Wikipedia dump to (sparse) vectors. The input is a
bz2-compressed dump of Wikipedia articles, in XML format.
This actually creates three files:
* `OUTPUT_PREFIX_wordids.txt`: mapping between words and their integer ids
* `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation, in
  Matrix Market format
* `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation
* `OUTPUT_PREFIX.tfidf_model`: TF-IDF model dump
The output Matrix Market files can then be compressed (e.g., by bzip2) to save
disk space; gensim's corpus iterators can work with compressed input, too.
`VOCABULARY_SIZE` controls how many of the most frequent words to keep (after
removing tokens that appear in more than 10%% of all documents). Defaults to
100,000.
If you have the `pattern` package installed, this script will use a fancy
lemmatization to get a lemma of each token (instead of plain alphabetic
tokenizer). The package is available at https://github.com/clips/pattern .
Example:
python -m gensim.scripts.make_wikicorpus ~/gensim/results/enwiki-latest-pages-articles.xml.bz2 ~/gensim/results/wiki
"""
import logging
import os.path
import sys
from gensim.corpora import Dictionary, HashDictionary, MmCorpus, WikiCorpus
from gensim.models import TfidfModel
# Wiki is first scanned for all distinct word types (~7M). The types that
# appear in more than 10% of articles are removed and from the rest, the
# DEFAULT_DICT_SIZE most frequent types are kept.
DEFAULT_DICT_SIZE = 100000
if __name__ == '__main__':
    # Command-line driver: WIKI_XML_DUMP OUTPUT_PREFIX [VOCABULARY_SIZE] (see module docstring).
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s", ' '.join(sys.argv))
    # check and process input arguments
    if len(sys.argv) < 3:
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    inp, outp = sys.argv[1:3]
    if not os.path.isdir(os.path.dirname(outp)):
        raise SystemExit("Error: The output directory does not exist. Create the directory and try again.")
    if len(sys.argv) > 3:
        keep_words = int(sys.argv[3])
    else:
        keep_words = DEFAULT_DICT_SIZE
    # The script *filename* selects the mode: a copy/symlink whose name contains
    # 'online' uses the single-pass HashDictionary variant, 'lemma' enables
    # lemmatization, and 'nodebug' disables HashDictionary debug mode.
    online = 'online' in program
    lemmatize = 'lemma' in program
    debug = 'nodebug' not in program
    if online:
        dictionary = HashDictionary(id_range=keep_words, debug=debug)
        dictionary.allow_update = True  # start collecting document frequencies
        wiki = WikiCorpus(inp, lemmatize=lemmatize, dictionary=dictionary)
        # ~4h on my macbook pro without lemmatization, 3.1m articles (august 2012)
        MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000)
        # with HashDictionary, the token->id mapping is only fully instantiated now, after `serialize`
        dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
        dictionary.save_as_text(outp + '_wordids.txt.bz2')
        wiki.save(outp + '_corpus.pkl.bz2')
        dictionary.allow_update = False  # freeze the mapping; no further updates
    else:
        wiki = WikiCorpus(inp, lemmatize=lemmatize)  # takes about 9h on a macbook pro, for 3.5m articles (june 2011)
        # only keep the most frequent words (out of total ~8.2m unique tokens)
        wiki.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
        # save dictionary and bag-of-words (term-document frequency matrix)
        MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000)  # another ~9h
        wiki.dictionary.save_as_text(outp + '_wordids.txt.bz2')
        # load back the id->word mapping directly from file
        # this seems to save more memory, compared to keeping the wiki.dictionary object from above
        dictionary = Dictionary.load_from_text(outp + '_wordids.txt.bz2')
    del wiki
    # initialize corpus reader and word->id mapping
    mm = MmCorpus(outp + '_bow.mm')
    # build tfidf, ~50min
    tfidf = TfidfModel(mm, id2word=dictionary, normalize=True)
    tfidf.save(outp + '.tfidf_model')
    # save tfidf vectors in matrix market format
    # ~4h; result file is 15GB! bzip2'ed down to 4.5GB
    MmCorpus.serialize(outp + '_tfidf.mm', tfidf[mm], progress_cnt=10000)
    logger.info("finished running %s", program)
| 4,582 | 39.557522 | 118 | py |
poincare_glove | poincare_glove-master/gensim/sklearn_api/ldamodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Chinmaya Pancholi <chinmayapancholi13@gmail.com>
# Copyright (C) 2017 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Scikit learn interface for gensim for easy use of gensim with scikit-learn
follows on scikit learn API conventions
"""
import numpy as np
from scipy import sparse
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
from gensim import matutils
class LdaTransformer(TransformerMixin, BaseEstimator):
    """
    Base LDA module.
    Scikit-learn style wrapper around :class:`gensim.models.LdaModel`:
    `fit` trains the model, `transform` maps bag-of-words documents to dense
    topic-distribution vectors, and `score` evaluates the fitted model.
    """
    def __init__(self, num_topics=100, id2word=None, chunksize=2000, passes=1, update_every=1, alpha='symmetric',
                 eta=None, decay=0.5, offset=1.0, eval_every=10, iterations=50, gamma_threshold=0.001,
                 minimum_probability=0.01, random_state=None, scorer='perplexity', dtype=np.float32):
        """
        Sklearn wrapper for LDA model. See gensim.model.LdaModel for parameter details.
        `scorer` specifies the metric used in the `score` function
        ('perplexity' or 'u_mass').
        See `gensim.models.LdaModel` class for description of the other parameters.
        """
        self.gensim_model = None  # trained LdaModel; set by fit()/partial_fit()
        self.num_topics = num_topics
        self.id2word = id2word
        self.chunksize = chunksize
        self.passes = passes
        self.update_every = update_every
        self.alpha = alpha
        self.eta = eta
        self.decay = decay
        self.offset = offset
        self.eval_every = eval_every
        self.iterations = iterations
        self.gamma_threshold = gamma_threshold
        self.minimum_probability = minimum_probability
        self.random_state = random_state
        self.scorer = scorer
        self.dtype = dtype
    def fit(self, X, y=None):
        """
        Fit the model according to the given training data.
        Calls gensim.models.LdaModel
        `X` may be a gensim BOW corpus or a scipy sparse matrix (documents as
        rows); `y` is ignored (scikit-learn API compatibility). Returns self.
        """
        if sparse.issparse(X):
            # Stream the sparse matrix rows as gensim BOW documents.
            corpus = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
        else:
            corpus = X
        self.gensim_model = models.LdaModel(
            corpus=corpus, num_topics=self.num_topics, id2word=self.id2word,
            chunksize=self.chunksize, passes=self.passes, update_every=self.update_every,
            alpha=self.alpha, eta=self.eta, decay=self.decay, offset=self.offset,
            eval_every=self.eval_every, iterations=self.iterations,
            gamma_threshold=self.gamma_threshold, minimum_probability=self.minimum_probability,
            random_state=self.random_state, dtype=self.dtype
        )
        return self
    def transform(self, docs):
        """
        Takes a list of documents as input ('docs').
        Returns a matrix of topic distribution for the given document bow, where a_ij
        indicates (topic_i, topic_probability_j).
        The input `docs` should be in BOW format and can be a list of documents like
        [[(4, 1), (7, 1)],
        [(9, 1), (13, 1)], [(2, 1), (6, 1)]]
        or a single document like : [(4, 1), (7, 1)]
        Raises NotFittedError when called before fit()/partial_fit().
        """
        if self.gensim_model is None:
            raise NotFittedError(
                "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
            )
        # The input as array of array: a lone document is promoted to a batch of one.
        if isinstance(docs[0], tuple):
            docs = [docs]
        # returning dense representation for compatibility with sklearn
        # but we should go back to sparse representation in the future
        distribution = [matutils.sparse2full(self.gensim_model[doc], self.num_topics) for doc in docs]
        return np.reshape(np.array(distribution), (len(docs), self.num_topics))
    def partial_fit(self, X):
        """
        Train model over X.
        By default, 'online (single-pass)' mode is used for training the LDA model.
        Configure `passes` and `update_every` params at init to choose the mode among :
            - online (single-pass): update_every != None and passes == 1
            - online (multi-pass): update_every != None and passes > 1
            - batch: update_every == None
        """
        if sparse.issparse(X):
            X = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
        if self.gensim_model is None:
            # First call: build an untrained model; the corpus is fed via update() below.
            self.gensim_model = models.LdaModel(
                num_topics=self.num_topics, id2word=self.id2word,
                chunksize=self.chunksize, passes=self.passes, update_every=self.update_every,
                alpha=self.alpha, eta=self.eta, decay=self.decay, offset=self.offset,
                eval_every=self.eval_every, iterations=self.iterations, gamma_threshold=self.gamma_threshold,
                minimum_probability=self.minimum_probability, random_state=self.random_state,
                dtype=self.dtype
            )
        self.gensim_model.update(corpus=X)
        return self
    def score(self, X, y=None):
        """
        Compute score reflecting how well the model has fit for the input data.
        With scorer='perplexity' returns the negated per-word perplexity bound
        (so sklearn model selection, which maximizes, picks the lowest
        perplexity); with scorer='u_mass' returns the u_mass topic coherence.
        """
        if self.scorer == 'perplexity':
            corpus_words = sum(cnt for document in X for _, cnt in document)
            subsample_ratio = 1.0
            perwordbound = \
                self.gensim_model.bound(X, subsample_ratio=subsample_ratio) / (subsample_ratio * corpus_words)
            return -1 * np.exp2(-perwordbound)  # returning (-1*perplexity) to select model with minimum value
        elif self.scorer == 'u_mass':
            goodcm = models.CoherenceModel(model=self.gensim_model, corpus=X, coherence=self.scorer, topn=3)
            return goodcm.get_coherence()
        else:
            raise ValueError("Invalid value of `scorer` param supplied")
| 5,783 | 40.314286 | 117 | py |
poincare_glove | poincare_glove-master/gensim/sklearn_api/d2vmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Scikit learn interface for gensim for easy use of gensim with scikit-learn
Follows scikit-learn API conventions
"""
import numpy as np
from six import string_types
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
from gensim.models import doc2vec
class D2VTransformer(TransformerMixin, BaseEstimator):
    """
    Base Doc2Vec module.
    Scikit-learn style wrapper around :class:`gensim.models.Doc2Vec`:
    `fit` trains document embeddings, `transform` infers a vector per document.
    """
    def __init__(self, dm_mean=None, dm=1, dbow_words=0, dm_concat=0, dm_tag_count=1, docvecs=None,
                 docvecs_mapfile=None, comment=None, trim_rule=None, size=100, alpha=0.025, window=5, min_count=5,
                 max_vocab_size=None, sample=1e-3, seed=1, workers=3, min_alpha=0.0001, hs=0, negative=5, cbow_mean=1,
                 hashfxn=hash, iter=5, sorted_vocab=1, batch_words=10000):
        """
        Sklearn api for Doc2Vec model. See gensim.models.Doc2Vec and gensim.models.Word2Vec for parameter details.
        """
        self.gensim_model = None  # trained Doc2Vec; set by fit()
        self.dm_mean = dm_mean
        self.dm = dm
        self.dbow_words = dbow_words
        self.dm_concat = dm_concat
        self.dm_tag_count = dm_tag_count
        self.docvecs = docvecs
        self.docvecs_mapfile = docvecs_mapfile
        self.comment = comment
        self.trim_rule = trim_rule
        # attributes associated with gensim.models.Word2Vec
        self.size = size
        self.alpha = alpha
        self.window = window
        self.min_count = min_count
        self.max_vocab_size = max_vocab_size
        self.sample = sample
        self.seed = seed
        self.workers = workers
        self.min_alpha = min_alpha
        self.hs = hs
        self.negative = negative
        self.cbow_mean = int(cbow_mean)
        self.hashfxn = hashfxn
        self.iter = iter
        self.sorted_vocab = sorted_vocab
        self.batch_words = batch_words
    def fit(self, X, y=None):
        """
        Fit the model according to the given training data.
        Calls gensim.models.Doc2Vec
        `X` is either a list of TaggedDocument or a list of token lists (which
        are then tagged with their positional index); `y` is ignored
        (scikit-learn API compatibility). Returns self.
        """
        if isinstance(X[0], doc2vec.TaggedDocument):
            d2v_sentences = X
        else:
            # Plain token lists: tag each document with its list index.
            d2v_sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(X)]
        self.gensim_model = models.Doc2Vec(
            documents=d2v_sentences, dm_mean=self.dm_mean, dm=self.dm,
            dbow_words=self.dbow_words, dm_concat=self.dm_concat, dm_tag_count=self.dm_tag_count,
            docvecs=self.docvecs, docvecs_mapfile=self.docvecs_mapfile, comment=self.comment,
            trim_rule=self.trim_rule, size=self.size, alpha=self.alpha, window=self.window,
            min_count=self.min_count, max_vocab_size=self.max_vocab_size, sample=self.sample,
            seed=self.seed, workers=self.workers, min_alpha=self.min_alpha, hs=self.hs,
            negative=self.negative, cbow_mean=self.cbow_mean, hashfxn=self.hashfxn,
            iter=self.iter, sorted_vocab=self.sorted_vocab, batch_words=self.batch_words
        )
        return self
    def transform(self, docs):
        """
        Return the vector representations for the input documents.
        The input `docs` should be a list of lists like
        [['calculus', 'mathematical'],
        ['geometry', 'operations', 'curves']]
        or a single document like : ['calculus', 'mathematical']
        Returns an array of shape (n_docs, vector_size). Raises NotFittedError
        when called before fit().
        """
        if self.gensim_model is None:
            raise NotFittedError(
                "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
            )
        # The input as array of array: a lone document is promoted to a batch of one.
        if isinstance(docs[0], string_types):
            docs = [docs]
        vectors = [self.gensim_model.infer_vector(doc) for doc in docs]
        return np.reshape(np.array(vectors), (len(docs), self.gensim_model.vector_size))
| 4,008 | 38.693069 | 118 | py |
poincare_glove | poincare_glove-master/gensim/sklearn_api/lsimodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Chinmaya Pancholi <chinmayapancholi13@gmail.com>
# Copyright (C) 2017 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Scikit learn interface for gensim for easy use of gensim with scikit-learn
Follows scikit-learn API conventions
"""
import numpy as np
from scipy import sparse
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
from gensim import matutils
class LsiTransformer(TransformerMixin, BaseEstimator):
    """Scikit-learn style wrapper around :class:`gensim.models.LsiModel`.

    `fit` builds the LSI projection from a corpus (or scipy sparse matrix),
    `transform` maps bag-of-words documents into the latent topic space, and
    `partial_fit` folds in additional documents incrementally.
    """
    def __init__(self, num_topics=200, id2word=None, chunksize=20000,
                 decay=1.0, onepass=True, power_iters=2, extra_samples=100):
        """Store the LSI hyper-parameters; see gensim.model.LsiModel for details."""
        self.gensim_model = None  # trained LsiModel; set by fit()/partial_fit()
        self.num_topics = num_topics
        self.id2word = id2word
        self.chunksize = chunksize
        self.decay = decay
        self.onepass = onepass
        self.extra_samples = extra_samples
        self.power_iters = power_iters
    def fit(self, X, y=None):
        """Train a gensim LsiModel on `X` (BOW corpus or scipy sparse matrix).

        `y` is ignored (scikit-learn API compatibility). Returns self.
        """
        # A scipy sparse matrix is streamed as a gensim BOW corpus first.
        corpus = matutils.Sparse2Corpus(sparse=X, documents_columns=False) if sparse.issparse(X) else X
        self.gensim_model = models.LsiModel(
            corpus=corpus, num_topics=self.num_topics, id2word=self.id2word, chunksize=self.chunksize,
            decay=self.decay, onepass=self.onepass, power_iters=self.power_iters, extra_samples=self.extra_samples
        )
        return self
    def transform(self, docs):
        """Project BOW document(s) into the LSI topic space.

        `docs` may be a single BOW document like [(4, 1), (7, 1)] or a list of
        such documents; returns an array of shape (n_docs, num_topics).
        Raises NotFittedError when called before fit()/partial_fit().
        """
        if self.gensim_model is None:
            raise NotFittedError(
                "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
            )
        # A lone document (list of 2-tuples) is promoted to a one-element batch.
        if isinstance(docs[0], tuple):
            docs = [docs]
        # Dense output for sklearn compatibility (sparse would be preferable eventually).
        dense_rows = [matutils.sparse2full(self.gensim_model[bow], self.num_topics) for bow in docs]
        return np.array(dense_rows).reshape(len(docs), self.num_topics)
    def partial_fit(self, X):
        """Incrementally train the model on an additional batch of documents."""
        if sparse.issparse(X):
            X = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
        if self.gensim_model is None:
            self.gensim_model = models.LsiModel(
                num_topics=self.num_topics, id2word=self.id2word, chunksize=self.chunksize, decay=self.decay,
                onepass=self.onepass, power_iters=self.power_iters, extra_samples=self.extra_samples
            )
        self.gensim_model.add_documents(corpus=X)
        return self
| 3,449 | 35.315789 | 117 | py |
poincare_glove | poincare_glove-master/gensim/sklearn_api/rpmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Chinmaya Pancholi <chinmayapancholi13@gmail.com>
# Copyright (C) 2017 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Scikit learn interface for gensim for easy use of gensim with scikit-learn
Follows scikit-learn API conventions
"""
import numpy as np
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
from gensim import matutils
class RpTransformer(TransformerMixin, BaseEstimator):
    """Scikit-learn style wrapper around :class:`gensim.models.RpModel` (Random Projections)."""
    def __init__(self, id2word=None, num_topics=300):
        """Store the RP hyper-parameters; see gensim.models.RpModel for details."""
        self.gensim_model = None  # trained RpModel; set by fit()
        self.id2word = id2word
        self.num_topics = num_topics
    def fit(self, X, y=None):
        """Build the random-projection model from the BOW corpus `X`.

        `y` is ignored (scikit-learn API compatibility). Returns self.
        """
        self.gensim_model = models.RpModel(corpus=X, id2word=self.id2word, num_topics=self.num_topics)
        return self
    def transform(self, docs):
        """Project BOW document(s) with the fitted RP model.

        `docs` may be a single BOW document like [(0, 1.0), (1, 1.0), (2, 1.0)]
        or a list of such documents; returns an array of shape
        (n_docs, num_topics). Raises NotFittedError when called before fit().
        """
        if self.gensim_model is None:
            raise NotFittedError(
                "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
            )
        # A lone document (list of 2-tuples) is promoted to a one-element batch.
        if isinstance(docs[0], tuple):
            docs = [docs]
        # Dense output for sklearn compatibility (sparse would be preferable eventually).
        dense_rows = [matutils.sparse2full(self.gensim_model[bow], self.num_topics) for bow in docs]
        return np.array(dense_rows).reshape(len(docs), self.num_topics)
| 2,212 | 34.126984 | 117 | py |
poincare_glove | poincare_glove-master/gensim/sklearn_api/tfidf.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Scikit learn interface for gensim for easy use of gensim with scikit-learn
Follows scikit-learn API conventions
"""
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim.models import TfidfModel
import gensim
class TfIdfTransformer(TransformerMixin, BaseEstimator):
    """Scikit-learn style wrapper around :class:`gensim.models.TfidfModel`."""
    def __init__(self, id2word=None, dictionary=None, wlocal=gensim.utils.identity,
                 wglobal=gensim.models.tfidfmodel.df2idf, normalize=True, smartirs="ntc"):
        """Store the tf-idf weighting options; see gensim.models.TfidfModel for details."""
        self.gensim_model = None  # trained TfidfModel; set by fit()
        self.id2word = id2word
        self.dictionary = dictionary
        self.wlocal = wlocal
        self.wglobal = wglobal
        self.normalize = normalize
        self.smartirs = smartirs
    def fit(self, X, y=None):
        """Collect document frequencies from the BOW corpus `X` and build the tf-idf model.

        `y` is ignored (scikit-learn API compatibility). Returns self.
        """
        self.gensim_model = TfidfModel(
            corpus=X, id2word=self.id2word, dictionary=self.dictionary, wlocal=self.wlocal,
            wglobal=self.wglobal, normalize=self.normalize, smartirs=self.smartirs,
        )
        return self
    def transform(self, docs):
        """Re-weight BOW document(s) by tf-idf.

        Accepts a single BOW document or a list of them; returns a list of
        sparse BOW vectors. Raises NotFittedError when called before fit().
        """
        if self.gensim_model is None:
            raise NotFittedError(
                "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
            )
        # A lone document (list of 2-tuples) is promoted to a one-element batch.
        if isinstance(docs[0], tuple):
            docs = [docs]
        return [self.gensim_model[bow] for bow in docs]
| 1,890 | 30.516667 | 117 | py |
poincare_glove | poincare_glove-master/gensim/sklearn_api/hdp.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Scikit learn interface for gensim for easy use of gensim with scikit-learn
Follows scikit-learn API conventions
"""
import numpy as np
from scipy import sparse
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
from gensim import matutils
class HdpTransformer(TransformerMixin, BaseEstimator):
    """
    Base HDP module.
    Scikit-learn style wrapper around :class:`gensim.models.HdpModel`:
    `fit` trains a Hierarchical Dirichlet Process model, `transform` maps BOW
    documents to dense topic-distribution vectors, `partial_fit` updates the
    model incrementally.
    """
    def __init__(self, id2word, max_chunks=None, max_time=None, chunksize=256, kappa=1.0, tau=64.0, K=15, T=150,
                 alpha=1, gamma=1, eta=0.01, scale=1.0, var_converge=0.0001, outputdir=None, random_state=None):
        """
        Sklearn api for HDP model. See gensim.models.HdpModel for parameter details.
        """
        self.gensim_model = None  # trained HdpModel; set by fit()/partial_fit()
        self.id2word = id2word
        self.max_chunks = max_chunks
        self.max_time = max_time
        self.chunksize = chunksize
        self.kappa = kappa
        self.tau = tau
        self.K = K
        self.T = T
        self.alpha = alpha
        self.gamma = gamma
        self.eta = eta
        self.scale = scale
        self.var_converge = var_converge
        self.outputdir = outputdir
        self.random_state = random_state
    def fit(self, X, y=None):
        """
        Fit the model according to the given training data.
        Calls gensim.models.HdpModel
        `X` may be a gensim BOW corpus or a scipy sparse matrix (documents as
        rows); `y` is ignored (scikit-learn API compatibility). Returns self.
        """
        if sparse.issparse(X):
            # Stream the sparse matrix rows as gensim BOW documents.
            corpus = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
        else:
            corpus = X
        self.gensim_model = models.HdpModel(
            corpus=corpus, id2word=self.id2word, max_chunks=self.max_chunks,
            max_time=self.max_time, chunksize=self.chunksize, kappa=self.kappa, tau=self.tau,
            K=self.K, T=self.T, alpha=self.alpha, gamma=self.gamma, eta=self.eta, scale=self.scale,
            var_converge=self.var_converge, outputdir=self.outputdir, random_state=self.random_state
        )
        return self
    def transform(self, docs):
        """
        Takes a list of documents as input ('docs').
        Returns a matrix of topic distribution for the given document bow, where a_ij
        indicates (topic_i, topic_probability_j).
        The input `docs` should be in BOW format and can be a list of documents like
        [[(4, 1), (7, 1)],
        [(9, 1), (13, 1)], [(2, 1), (6, 1)]]
        or a single document like : [(4, 1), (7, 1)]
        Raises NotFittedError when called before fit()/partial_fit().
        """
        if self.gensim_model is None:
            raise NotFittedError(
                "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
            )
        # The input as array of array: a lone document is promoted to a batch of one.
        if isinstance(docs[0], tuple):
            docs = [docs]
        distribution, max_num_topics = [], 0
        for doc in docs:
            topicd = self.gensim_model[doc]
            distribution.append(topicd)
            # Guard against an empty topic distribution for a document:
            # max() on an empty sequence raises ValueError.
            if topicd:
                max_num_topics = max(max_num_topics, max(topic[0] for topic in topicd) + 1)
        # returning dense representation for compatibility with sklearn
        # but we should go back to sparse representation in the future
        distribution = [matutils.sparse2full(t, max_num_topics) for t in distribution]
        return np.reshape(np.array(distribution), (len(docs), max_num_topics))
    def partial_fit(self, X):
        """
        Train model over X.
        """
        if sparse.issparse(X):
            X = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
        if self.gensim_model is None:
            # First call: build an untrained model; the corpus is fed via update() below.
            self.gensim_model = models.HdpModel(
                id2word=self.id2word, max_chunks=self.max_chunks,
                max_time=self.max_time, chunksize=self.chunksize, kappa=self.kappa, tau=self.tau,
                K=self.K, T=self.T, alpha=self.alpha, gamma=self.gamma, eta=self.eta, scale=self.scale,
                var_converge=self.var_converge, outputdir=self.outputdir, random_state=self.random_state
            )
        self.gensim_model.update(corpus=X)
        return self
| 4,204 | 36.212389 | 117 | py |
poincare_glove | poincare_glove-master/gensim/sklearn_api/atmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Chinmaya Pancholi <chinmayapancholi13@gmail.com>
# Copyright (C) 2017 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Scikit learn interface for gensim for easy use of gensim with scikit-learn
Follows scikit-learn API conventions
"""
import numpy as np
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
from gensim import matutils
class AuthorTopicTransformer(TransformerMixin, BaseEstimator):
    """
    Base AuthorTopic module.
    Scikit-learn style wrapper around :class:`gensim.models.AuthorTopicModel`:
    `fit` trains the model, `transform` maps author names to dense topic
    distributions, `partial_fit` updates the model incrementally.
    """
    def __init__(self, num_topics=100, id2word=None, author2doc=None, doc2author=None,
                 chunksize=2000, passes=1, iterations=50, decay=0.5, offset=1.0,
                 alpha='symmetric', eta='symmetric', update_every=1, eval_every=10,
                 gamma_threshold=0.001, serialized=False, serialization_path=None,
                 minimum_probability=0.01, random_state=None):
        """
        Sklearn wrapper for AuthorTopic model. See gensim.models.AuthorTopicModel for parameter details.
        """
        self.gensim_model = None  # trained AuthorTopicModel; set by fit()/partial_fit()
        self.num_topics = num_topics
        self.id2word = id2word
        self.author2doc = author2doc
        self.doc2author = doc2author
        self.chunksize = chunksize
        self.passes = passes
        self.iterations = iterations
        self.decay = decay
        self.offset = offset
        self.alpha = alpha
        self.eta = eta
        self.update_every = update_every
        self.eval_every = eval_every
        self.gamma_threshold = gamma_threshold
        self.serialized = serialized
        self.serialization_path = serialization_path
        self.minimum_probability = minimum_probability
        self.random_state = random_state
    def fit(self, X, y=None):
        """
        Fit the model according to the given training data.
        Calls gensim.models.AuthorTopicModel
        `X` is a gensim BOW corpus; `y` is ignored (scikit-learn API
        compatibility). Returns self.
        """
        self.gensim_model = models.AuthorTopicModel(
            corpus=X, num_topics=self.num_topics, id2word=self.id2word,
            author2doc=self.author2doc, doc2author=self.doc2author, chunksize=self.chunksize, passes=self.passes,
            iterations=self.iterations, decay=self.decay, offset=self.offset, alpha=self.alpha, eta=self.eta,
            update_every=self.update_every, eval_every=self.eval_every, gamma_threshold=self.gamma_threshold,
            serialized=self.serialized, serialization_path=self.serialization_path,
            minimum_probability=self.minimum_probability, random_state=self.random_state
        )
        return self
    def transform(self, author_names):
        """
        Return topic distribution for input authors as a list of
        (topic_id, topic_probabiity) 2-tuples.
        `author_names` may be a single author name or a list of names; returns
        an array of shape (n_authors, num_topics). Raises NotFittedError when
        called before fit()/partial_fit().
        """
        # The input as array of array
        if self.gensim_model is None:
            raise NotFittedError(
                "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
            )
        if not isinstance(author_names, list):
            author_names = [author_names]
        # returning dense representation for compatibility with sklearn
        # but we should go back to sparse representation in the future
        topics = [matutils.sparse2full(self.gensim_model[author_name], self.num_topics) for author_name in author_names]
        return np.reshape(np.array(topics), (len(author_names), self.num_topics))
    def partial_fit(self, X, author2doc=None, doc2author=None):
        """
        Train model over X.
        `author2doc`/`doc2author` describe the author mapping of this batch and
        are forwarded to AuthorTopicModel.update().
        """
        if self.gensim_model is None:
            # Create the model WITHOUT a corpus: passing corpus=X here would run
            # a full training pass inside the constructor, and update() below
            # would then train on the same batch a second time (this also
            # mirrors LdaTransformer.partial_fit / HdpTransformer.partial_fit).
            self.gensim_model = models.AuthorTopicModel(
                num_topics=self.num_topics, id2word=self.id2word,
                author2doc=self.author2doc, doc2author=self.doc2author, chunksize=self.chunksize, passes=self.passes,
                iterations=self.iterations, decay=self.decay, offset=self.offset, alpha=self.alpha, eta=self.eta,
                update_every=self.update_every, eval_every=self.eval_every, gamma_threshold=self.gamma_threshold,
                serialized=self.serialized, serialization_path=self.serialization_path,
                minimum_probability=self.minimum_probability, random_state=self.random_state
            )
        self.gensim_model.update(corpus=X, author2doc=author2doc, doc2author=doc2author)
        return self
| 4,485 | 42.980392 | 120 | py |
poincare_glove | poincare_glove-master/gensim/sklearn_api/text2bow.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Scikit learn interface for gensim for easy use of gensim with scikit-learn
Follows scikit-learn API conventions
"""
from six import string_types
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim.corpora import Dictionary
from gensim.utils import tokenize
class Text2BowTransformer(TransformerMixin, BaseEstimator):
    """Scikit-learn style wrapper turning raw text into bag-of-words via a gensim Dictionary."""
    def __init__(self, prune_at=2000000, tokenizer=tokenize):
        """Store the options; `tokenizer` maps a string to an iterable of tokens."""
        self.gensim_model = None  # the Dictionary; set by fit()/partial_fit()
        self.prune_at = prune_at
        self.tokenizer = tokenizer
    def fit(self, X, y=None):
        """Build the token->id Dictionary from the iterable of raw documents `X`.

        `y` is ignored (scikit-learn API compatibility). Returns self.
        """
        self.gensim_model = Dictionary(
            documents=[list(self.tokenizer(text)) for text in X], prune_at=self.prune_at
        )
        return self
    def transform(self, docs):
        """Convert raw document(s) to BOW format using the fitted Dictionary.

        Accepts a single string or a list of strings; returns a list of sparse
        BOW vectors. Raises NotFittedError when called before fit().
        """
        if self.gensim_model is None:
            raise NotFittedError(
                "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
            )
        # A lone string is promoted to a one-element batch.
        if isinstance(docs, string_types):
            docs = [docs]
        return [self.gensim_model.doc2bow(list(self.tokenizer(text))) for text in docs]
    def partial_fit(self, X):
        """Extend the Dictionary with the additional raw documents in `X`."""
        if self.gensim_model is None:
            self.gensim_model = Dictionary(prune_at=self.prune_at)
        self.gensim_model.add_documents([list(self.tokenizer(text)) for text in X])
        return self
| 1,966 | 30.222222 | 117 | py |
poincare_glove | poincare_glove-master/gensim/sklearn_api/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Chinmaya Pancholi <chinmayapancholi13@gmail.com>
# Copyright (C) 2017 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Scikit learn wrapper for gensim.
Contains various gensim based implementations which match with scikit-learn standards.
See [1] for complete set of conventions.
[1] http://scikit-learn.org/stable/developers/
"""
from .ldamodel import LdaTransformer # noqa: F401
from .lsimodel import LsiTransformer # noqa: F401
from .rpmodel import RpTransformer # noqa: F401
from .ldaseqmodel import LdaSeqTransformer # noqa: F401
from .w2vmodel import W2VTransformer # noqa: F401
from .atmodel import AuthorTopicTransformer # noqa: F401
from .d2vmodel import D2VTransformer # noqa: F401
from .text2bow import Text2BowTransformer # noqa: F401
from .tfidf import TfIdfTransformer # noqa: F401
from .hdp import HdpTransformer # noqa: F401
from .phrases import PhrasesTransformer # noqa: F401
| 1,033 | 40.36 | 86 | py |
poincare_glove | poincare_glove-master/gensim/sklearn_api/w2vmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Chinmaya Pancholi <chinmayapancholi13@gmail.com>
# Copyright (C) 2017 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Scikit learn interface for gensim for easy use of gensim with scikit-learn
Follows scikit-learn API conventions
"""
import numpy as np
import six
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
class W2VTransformer(TransformerMixin, BaseEstimator):
    """
    Base Word2Vec module.
    Scikit-learn style wrapper around :class:`gensim.models.Word2Vec`:
    `fit` trains word embeddings, `transform` looks up vectors for words.
    """
    def __init__(self, size=100, alpha=0.025, window=5, min_count=5, max_vocab_size=None, sample=1e-3, seed=1,
                 workers=3, min_alpha=0.0001, sg=0, hs=0, negative=5, cbow_mean=1, hashfxn=hash, iter=5, null_word=0,
                 trim_rule=None, sorted_vocab=1, batch_words=10000):
        """
        Sklearn wrapper for Word2Vec model. See gensim.models.Word2Vec for parameter details.
        """
        self.gensim_model = None  # trained Word2Vec; set by fit()
        self.size = size
        self.alpha = alpha
        self.window = window
        self.min_count = min_count
        self.max_vocab_size = max_vocab_size
        self.sample = sample
        self.seed = seed
        self.workers = workers
        self.min_alpha = min_alpha
        self.sg = sg
        self.hs = hs
        self.negative = negative
        self.cbow_mean = int(cbow_mean)
        self.hashfxn = hashfxn
        self.iter = iter
        self.null_word = null_word
        self.trim_rule = trim_rule
        self.sorted_vocab = sorted_vocab
        self.batch_words = batch_words
    def fit(self, X, y=None):
        """
        Fit the model according to the given training data.
        Calls gensim.models.Word2Vec
        `X` is an iterable of tokenized sentences; `y` is ignored
        (scikit-learn API compatibility). Returns self.
        """
        self.gensim_model = models.Word2Vec(
            sentences=X, size=self.size, alpha=self.alpha,
            window=self.window, min_count=self.min_count, max_vocab_size=self.max_vocab_size,
            sample=self.sample, seed=self.seed, workers=self.workers, min_alpha=self.min_alpha,
            sg=self.sg, hs=self.hs, negative=self.negative, cbow_mean=self.cbow_mean,
            hashfxn=self.hashfxn, iter=self.iter, null_word=self.null_word, trim_rule=self.trim_rule,
            sorted_vocab=self.sorted_vocab, batch_words=self.batch_words
        )
        return self
    def transform(self, words):
        """
        Return the word-vectors for the input list of words.
        `words` may be a single word or a list of words; returns an array of
        shape (n_words, size). Raises NotFittedError when called before fit().
        """
        if self.gensim_model is None:
            raise NotFittedError(
                "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
            )
        # The input as array of array: a lone word is promoted to a batch of one.
        if isinstance(words, six.string_types):
            words = [words]
        vectors = [self.gensim_model[word] for word in words]
        return np.reshape(np.array(vectors), (len(words), self.size))
    def partial_fit(self, X):
        # Online updates are intentionally unsupported through the sklearn API;
        # use the gensim Word2Vec API directly for vocabulary-fixed updates.
        raise NotImplementedError(
            "'partial_fit' has not been implemented for W2VTransformer. "
            "However, the model can be updated with a fixed vocabulary using Gensim API call."
        )
| 3,225 | 35.659091 | 117 | py |
poincare_glove | poincare_glove-master/gensim/sklearn_api/ldaseqmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Chinmaya Pancholi <chinmayapancholi13@gmail.com>
# Copyright (C) 2017 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Scikit learn interface for gensim for easy use of gensim with scikit-learn
Follows scikit-learn API conventions
"""
import numpy as np
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
class LdaSeqTransformer(TransformerMixin, BaseEstimator):
    """Scikit-learn style wrapper around :class:`gensim.models.LdaSeqModel` (dynamic topic model).

    Hyperparameters are merely stored at construction time; the underlying
    gensim model is built lazily, when :meth:`fit` is called.
    """
    def __init__(self, time_slice=None, id2word=None, alphas=0.01, num_topics=10, initialize='gensim', sstats=None,
                 lda_model=None, obs_variance=0.5, chain_variance=0.005, passes=10, random_state=None,
                 lda_inference_max_iter=25, em_min_iter=6, em_max_iter=20, chunksize=100):
        """Record hyperparameters; see gensim.models.LdaSeqModel for their meaning."""
        # no gensim model yet -- created lazily by fit()
        self.gensim_model = None
        self.time_slice = time_slice
        self.id2word = id2word
        self.alphas = alphas
        self.num_topics = num_topics
        self.initialize = initialize
        self.sstats = sstats
        self.lda_model = lda_model
        self.obs_variance = obs_variance
        self.chain_variance = chain_variance
        self.passes = passes
        self.random_state = random_state
        self.lda_inference_max_iter = lda_inference_max_iter
        self.em_min_iter = em_min_iter
        self.em_max_iter = em_max_iter
        self.chunksize = chunksize
    def fit(self, X, y=None):
        """Train an LdaSeqModel on corpus `X`; `y` is ignored (sklearn API). Returns self."""
        model_kwargs = dict(
            corpus=X, time_slice=self.time_slice, id2word=self.id2word, alphas=self.alphas,
            num_topics=self.num_topics, initialize=self.initialize, sstats=self.sstats,
            lda_model=self.lda_model, obs_variance=self.obs_variance, chain_variance=self.chain_variance,
            passes=self.passes, random_state=self.random_state,
            lda_inference_max_iter=self.lda_inference_max_iter,
            em_min_iter=self.em_min_iter, em_max_iter=self.em_max_iter, chunksize=self.chunksize,
        )
        self.gensim_model = models.LdaSeqModel(**model_kwargs)
        return self
    def transform(self, docs):
        """Return the topic proportions for `docs`.

        `docs` is either one document in BoW format (a list of `(word_id, count)`
        tuples) or a list of such documents; the result is an array of shape
        ``(len(docs), num_topics)``.
        """
        if self.gensim_model is None:
            raise NotFittedError(
                "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
            )
        # a lone document is a list of 2-tuples -- wrap it into a corpus of one
        if isinstance(docs[0], tuple):
            docs = [docs]
        proportions = []
        for doc in docs:
            proportions.append(self.gensim_model[doc])
        return np.reshape(np.array(proportions), (len(docs), self.num_topics))
| 3,151 | 38.4 | 117 | py |
poincare_glove | poincare_glove-master/gensim/sklearn_api/phrases.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Scikit learn interface for gensim for easy use of gensim with scikit-learn
Follows scikit-learn API conventions
"""
from six import string_types
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
class PhrasesTransformer(TransformerMixin, BaseEstimator):
    """
    Base Phrases module: scikit-learn style wrapper around
    :class:`gensim.models.Phrases` (collocation / phrase detection).
    """
    def __init__(self, min_count=5, threshold=10.0, max_vocab_size=40000000,
                 delimiter=b'_', progress_per=10000, scoring='default'):
        """
        Sklearn wrapper for Phrases model.

        Parameters mirror gensim.models.Phrases; they are only stored here and
        passed through when the model is (partially) fitted.
        """
        self.gensim_model = None
        self.min_count = min_count
        self.threshold = threshold
        self.max_vocab_size = max_vocab_size
        self.delimiter = delimiter
        self.progress_per = progress_per
        self.scoring = scoring
    def fit(self, X, y=None):
        """
        Fit the model according to the given training data (an iterable of
        tokenized sentences). `y` is ignored (sklearn API). Returns self.
        """
        self.gensim_model = models.Phrases(
            sentences=X, min_count=self.min_count, threshold=self.threshold,
            max_vocab_size=self.max_vocab_size, delimiter=self.delimiter,
            progress_per=self.progress_per, scoring=self.scoring
        )
        return self
    def transform(self, docs):
        """
        Return the input documents with detected collocations joined into
        phrase tokens. Accepts a single tokenized document or a list of them.
        """
        if self.gensim_model is None:
            raise NotFittedError(
                "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
            )
        # input as python lists
        if isinstance(docs[0], string_types):
            docs = [docs]
        return [self.gensim_model[doc] for doc in docs]
    def partial_fit(self, X):
        """
        Update the model's collocation statistics with the sentences in `X`,
        creating an empty model first if `fit` was never called.
        """
        if self.gensim_model is None:
            # Create an *empty* Phrases model here. Previously `X` was also
            # passed to the constructor (which trains on it), so the first batch
            # was counted twice -- once by the constructor and once by
            # add_vocab() below -- skewing min_count/threshold statistics.
            self.gensim_model = models.Phrases(
                sentences=None, min_count=self.min_count, threshold=self.threshold,
                max_vocab_size=self.max_vocab_size, delimiter=self.delimiter,
                progress_per=self.progress_per, scoring=self.scoring
            )
        self.gensim_model.add_vocab(X)
        return self
| 2,343 | 31.555556 | 117 | py |
poincare_glove | poincare_glove-master/gensim/similarities/docsim.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Computing similarities across a collection of documents in the Vector Space Model.
The main class is :class:`~gensim.similarity.docsim.Similarity`, which builds an index for a given set of documents.
Once the index is built, you can perform efficient queries like "Tell me how similar is this query document to each
document in the index?". The result is a vector of numbers as large as the size of the initial set of documents,
that is, one float for each index document. Alternatively, you can also request only the top-N most
similar index documents to the query.
How It Works
------------
The :class:`~gensim.similarity.docsim.Similarity` class splits the index into several smaller sub-indexes ("shards"),
which are disk-based. If your entire index fits in memory (~hundreds of thousands documents for 1GB of RAM),
you can also use the :class:`~gensim.similarity.docsim.MatrixSimilarity`
or :class:`~gensim.similarity.docsim.SparseMatrixSimilarity` classes directly.
These are more simple but do not scale as well (they keep the entire index in RAM, no sharding).
Once the index has been initialized, you can query for document similarity simply by:
>>> from gensim.test.utils import common_corpus, common_dictionary, get_tmpfile
>>>
>>> index_tmpfile = get_tmpfile("index")
>>> query = [(1, 2), (6, 1), (7, 2)]
>>>
>>> index = Similarity(index_tmpfile, common_corpus, num_features=len(common_dictionary)) # build the index
>>> similarities = index[query] # get similarities between the query and all index documents
If you have more query documents, you can submit them all at once, in a batch:
>>> from gensim.test.utils import common_corpus, common_dictionary, get_tmpfile
>>>
>>> index_tmpfile = get_tmpfile("index")
>>> batch_of_documents = common_corpus[:] # only as example
>>> index = Similarity(index_tmpfile, common_corpus, num_features=len(common_dictionary)) # build the index
>>>
>>> for similarities in index[batch_of_documents]: # the batch is simply an iterable of documents, aka gensim corpus.
... pass
The benefit of this batch (aka "chunked") querying is much better performance.
To see the speed-up on your machine, run ``python -m gensim.test.simspeed``
(compare to my results `here <http://groups.google.com/group/gensim/msg/4f6f171a869e4fca?>`_).
There is also a special syntax for when you need similarity of documents in the index
to the index itself (i.e. queries=indexed documents themselves). This special syntax
uses the faster, batch queries internally and **is ideal for all-vs-all pairwise similarities**:
>>> from gensim.test.utils import common_corpus, common_dictionary, get_tmpfile
>>>
>>> index_tmpfile = get_tmpfile("index")
>>> index = Similarity(index_tmpfile, common_corpus, num_features=len(common_dictionary)) # build the index
>>>
>>> for similarities in index: # yield similarities of the 1st indexed document, then 2nd...
... pass
"""
import logging
import itertools
import os
import heapq
import numpy
import scipy.sparse
from gensim import interfaces, utils, matutils
from six.moves import map as imap, xrange, zip as izip
logger = logging.getLogger(__name__)
PARALLEL_SHARDS = False
try:
import multiprocessing
# by default, don't parallelize queries. uncomment the following line if you want that.
# PARALLEL_SHARDS = multiprocessing.cpu_count() # use #parallel processes = #CPus
except ImportError:
pass
class Shard(utils.SaveLoad):
    """One on-disk piece of a :class:`~gensim.similarities.docsim.Similarity` index.

    Wraps a concrete similarity index (:class:`~gensim.similarities.docsim.MatrixSimilarity`,
    :class:`~gensim.similarities.docsim.SparseMatrixSimilarity`, ...) persisted to disk
    and mmap'ed back lazily when queried.
    """
    def __init__(self, fname, index):
        """Persist `index` under `fname` and remember how to reload it.

        Parameters
        ----------
        fname : str
            Path (directory + basename) under which the shard is stored.
        index : :class:`~gensim.interfaces.SimilarityABC`
            The index object to persist.
        """
        self.dirname, self.fname = os.path.split(fname)
        self.length = len(index)
        self.cls = index.__class__
        logger.info("saving index shard to %s", self.fullname())
        index.save(self.fullname())
        # reload immediately so subsequent queries mmap from disk
        self.index = self.get_index()
    def fullname(self):
        """Return the full filesystem path of this shard."""
        return os.path.join(self.dirname, self.fname)
    def __len__(self):
        """Number of documents stored in this shard."""
        return self.length
    def __getstate__(self):
        """Pickle support: drop the mmap'ed `index` attribute.

        (S)MS objects must be re-loaded via load() because of mmap
        (a simple pickle.load won't do).
        """
        state = self.__dict__.copy()
        state.pop('index', None)
        return state
    def __str__(self):
        return "%s Shard(%i documents in %s)" % (self.cls.__name__, len(self), self.fullname())
    def get_index(self):
        """Return the wrapped index, mmap'ing it from disk on first access."""
        if not hasattr(self, 'index'):
            logger.debug("mmaping index from %s", self.fullname())
            self.index = self.cls.load(self.fullname(), mmap='r')
        return self.index
    def get_document_id(self, pos):
        """Return the indexed vector stored at position `pos` within this shard.

        The returned vector has the same type as the underlying index: a dense
        row for :class:`~gensim.similarities.docsim.MatrixSimilarity`, a
        scipy.sparse row for :class:`~gensim.similarities.docsim.SparseMatrixSimilarity`.
        """
        assert 0 <= pos < len(self), "requested position out of range"
        return self.get_index().index[pos]
    def __getitem__(self, query):
        """Return similarities of `query` (document or corpus) against this shard.

        `num_best` and `normalize` must have been set on the shard beforehand;
        they are forwarded to the wrapped index.
        """
        index = self.get_index()
        try:
            index.num_best = self.num_best
            index.normalize = self.normalize
        except Exception:
            raise ValueError("num_best and normalize have to be set before querying a proxy Shard object")
        return index[query]
def query_shard(args):
    """Worker helper: unpack `(query, shard)` and return ``shard[query]``.

    Lives at module level so it can be dispatched to a multiprocessing pool.

    Returns
    -------
    :class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
        Similarities of the query against this shard; the exact type depends
        on the shard's underlying index.
    """
    query, shard = args  # simulate starmap (not part of multiprocessing in older Pythons)
    logger.debug("querying shard %s num_best=%s in process %s", shard, shard.num_best, os.getpid())
    similarities = shard[query]
    logger.debug("finished querying shard %s in process %s", shard, os.getpid())
    return similarities
class Similarity(interfaces.SimilarityABC):
"""Compute cosine similarity of a dynamic query against a static corpus of documents ('the index').
Notes
-----
Scalability is achieved by sharding the index into smaller pieces, each of which fits into core memory
The shards themselves are simply stored as files to disk and mmap'ed back as needed.
Examples
--------
>>> from gensim.corpora.textcorpus import TextCorpus
>>> from gensim.test.utils import datapath, get_tmpfile
>>> from gensim.similarities import Similarity
>>>
>>> corpus = TextCorpus(datapath('testcorpus.mm'))
>>> index_temp = get_tmpfile("index")
>>> index = Similarity(index_temp, corpus, num_features=400) # create index
>>>
>>> query = next(iter(corpus))
>>> result = index[query] # search similar to `query` in index
>>>
>>> for sims in index[corpus]: # if you have more query documents, you can submit them all at once, in a batch
... pass
>>>
>>> # There is also a special syntax for when you need similarity of documents in the index
>>> # to the index itself (i.e. queries=indexed documents themselves). This special syntax
>>> # uses the faster, batch queries internally and **is ideal for all-vs-all pairwise similarities**:
>>> for similarities in index: # yield similarities of the 1st indexed document, then 2nd...
... pass
See Also
--------
:class:`~gensim.similarities.docsim.MatrixSimilarity`
Index similarity (dense with cosine distance).
:class:`~gensim.similarities.docsim.SparseMatrixSimilarity`
Index similarity (sparse with cosine distance).
:class:`~gensim.similarities.docsim.SoftCosineSimilarity`
Index similarity (with soft-cosine distance).
:class:`~gensim.similarities.docsim.WmdSimilarity`
Index similarity (with word-mover distance).
"""
    def __init__(self, output_prefix, corpus, num_features, num_best=None, chunksize=256, shardsize=32768, norm='l2'):
        """
        Parameters
        ----------
        output_prefix : str
            Prefix for shard filenames. If None, a random filename in temp will be used.
        corpus : iterable of list of (int, number)
            Corpus in BoW format; may be None to create an empty index and add documents later.
        num_features : int
            Size of the dictionary (number of features).
        num_best : int, optional
            If set, return only the `num_best` most similar documents, always leaving out documents with similarity = 0.
            Otherwise, return a full vector with one float for every document in the index.
        chunksize : int, optional
            Number of documents queried per internal batch.
        shardsize : int, optional
            Maximum number of documents per shard; chosen so that a `shardsize x chunksize` matrix of floats
            fits comfortably into memory.
        norm : {'l1', 'l2'}, optional
            Normalization to use.

        Notes
        -----
        Documents are split (internally, transparently) into shards of `shardsize` documents each,
        converted to a matrix for faster BLAS calls. Each shard is stored to disk under
        `output_prefix.shard_number`.
        If your entire index fits in memory (~hundreds of thousands of documents for 1GB of RAM),
        you can also use the :class:`~gensim.similarities.docsim.MatrixSimilarity`
        or :class:`~gensim.similarities.docsim.SparseMatrixSimilarity` classes directly.
        These are simpler but do not scale as well (they keep the entire index in RAM, no sharding).
        """
        if output_prefix is None:
            # undocumented feature: set output_prefix=None to create the server in temp
            self.output_prefix = utils.randfname(prefix='simserver')
        else:
            self.output_prefix = output_prefix
        logger.info("starting similarity index under %s", self.output_prefix)
        self.num_features = num_features
        self.num_best = num_best
        self.norm = norm
        self.chunksize = int(chunksize)
        self.shardsize = shardsize
        self.shards = []
        # fresh_docs buffers documents not yet spilled to a shard;
        # fresh_nnz counts their nonzero entries (used for sparsity heuristics)
        self.fresh_docs, self.fresh_nnz = [], 0
        if corpus is not None:
            self.add_documents(corpus)
def __len__(self):
"""Get length of index."""
return len(self.fresh_docs) + sum([len(shard) for shard in self.shards])
def __str__(self):
return "Similarity index with %i documents in %i shards (stored under %s)" % (
len(self), len(self.shards), self.output_prefix
)
def add_documents(self, corpus):
"""Extend the index with new documents.
Parameters
----------
corpus : iterable of list of (int, number)
Corpus in BoW format.
Notes
-----
Internally, documents are buffered and then spilled to disk when there's `self.shardsize` of them
(or when a query is issued).
Examples
--------
>>> from gensim.corpora.textcorpus import TextCorpus
>>> from gensim.test.utils import datapath, get_tmpfile
>>> from gensim.similarities import Similarity
>>>
>>> corpus = TextCorpus(datapath('testcorpus.mm'))
>>> index_temp = get_tmpfile("index")
>>> index = Similarity(index_temp, corpus, num_features=400) # create index
>>>
>>> one_more_corpus = TextCorpus(datapath('testcorpus.txt'))
>>> index.add_documents(one_more_corpus) # add more documents in corpus
"""
min_ratio = 1.0 # 0.5 to only reopen shards that are <50% complete
if self.shards and len(self.shards[-1]) < min_ratio * self.shardsize:
# The last shard was incomplete (<; load it back and add the documents there, don't start a new shard
self.reopen_shard()
for doc in corpus:
if isinstance(doc, numpy.ndarray):
doclen = len(doc)
elif scipy.sparse.issparse(doc):
doclen = doc.nnz
else:
doclen = len(doc)
if doclen < 0.3 * self.num_features:
doc = matutils.unitvec(matutils.corpus2csc([doc], self.num_features).T, self.norm)
else:
doc = matutils.unitvec(matutils.sparse2full(doc, self.num_features), self.norm)
self.fresh_docs.append(doc)
self.fresh_nnz += doclen
if len(self.fresh_docs) >= self.shardsize:
self.close_shard()
if len(self.fresh_docs) % 10000 == 0:
logger.info("PROGRESS: fresh_shard size=%i", len(self.fresh_docs))
def shardid2filename(self, shardid):
"""Get shard file by `shardid`.
Parameters
----------
shardid : int
Shard index.
Return
------
str
Path to shard file.
"""
if self.output_prefix.endswith('.'):
return "%s%s" % (self.output_prefix, shardid)
else:
return "%s.%s" % (self.output_prefix, shardid)
def close_shard(self):
"""Force the latest shard to close (be converted to a matrix and stored to disk).
Do nothing if no new documents added since last call.
Notes
-----
The shard is closed even if it is not full yet (its size is smaller than `self.shardsize`).
If documents are added later via :meth:`~gensim.similarities.docsim.MatrixSimilarity.add_documents`
this incomplete shard will be loaded again and completed.
"""
if not self.fresh_docs:
return
shardid = len(self.shards)
# consider the shard sparse if its density is < 30%
issparse = 0.3 > 1.0 * self.fresh_nnz / (len(self.fresh_docs) * self.num_features)
if issparse:
index = SparseMatrixSimilarity(
self.fresh_docs, num_terms=self.num_features, num_docs=len(self.fresh_docs), num_nnz=self.fresh_nnz
)
else:
index = MatrixSimilarity(self.fresh_docs, num_features=self.num_features)
logger.info("creating %s shard #%s", 'sparse' if issparse else 'dense', shardid)
shard = Shard(self.shardid2filename(shardid), index)
shard.num_best = self.num_best
shard.num_nnz = self.fresh_nnz
self.shards.append(shard)
self.fresh_docs, self.fresh_nnz = [], 0
def reopen_shard(self):
"""Reopen incomplete shard."""
assert self.shards
if self.fresh_docs:
raise ValueError("cannot reopen a shard with fresh documents in index")
last_shard = self.shards[-1]
last_index = last_shard.get_index()
logger.info("reopening an incomplete shard of %i documents", len(last_shard))
self.fresh_docs = list(last_index.index)
self.fresh_nnz = last_shard.num_nnz
del self.shards[-1] # remove the shard from index, *but its file on disk is not deleted*
logger.debug("reopen complete")
def query_shards(self, query):
"""Applying shard[query] for each shard in `self.shards`, as a sequence.
Parameters
----------
query : {iterable of list of (int, number) , list of (int, number))}
Document in BoW format or corpus of documents.
Returns
-------
(None, list of ...)
Result of search.
"""
args = zip([query] * len(self.shards), self.shards)
if PARALLEL_SHARDS and PARALLEL_SHARDS > 1:
logger.debug("spawning %i query processes", PARALLEL_SHARDS)
pool = multiprocessing.Pool(PARALLEL_SHARDS)
result = pool.imap(query_shard, args, chunksize=1 + len(args) / PARALLEL_SHARDS)
else:
# serial processing, one shard after another
pool = None
result = imap(query_shard, args)
return pool, result
    def __getitem__(self, query):
        """Get similarities of document (or corpus) `query` to all documents in the corpus.

        Parameters
        ----------
        query : {iterable of list of (int, number), list of (int, number)}
            Corpus or single document in BoW format.

        Returns
        -------
        :class:`numpy.ndarray` or list
            Full similarity array(s) when `num_best` is None; otherwise a list of the
            `num_best` highest `(global_doc_index, similarity)` pairs (one such list
            per query document when `query` is a corpus).

        Notes
        -----
        If `query` is a corpus (iterable of documents), return a matrix of similarities of
        all query documents vs. all corpus documents. This batch query is more efficient than
        computing the similarities one document after another.
        """
        self.close_shard()  # no-op if no documents added to index since last query
        # reset num_best and normalize parameters, in case they were changed dynamically
        for shard in self.shards:
            shard.num_best = self.num_best
            shard.normalize = self.norm
        # there are 4 distinct code paths, depending on whether input `query` is
        # a corpus (or numpy/scipy matrix) or a single document, and whether the
        # similarity result should be a full array or only num_best most similar
        # documents.
        pool, shard_results = self.query_shards(query)
        if self.num_best is None:
            # user asked for all documents => just stack the sub-results into a single matrix
            # (works for both corpus / single doc query)
            result = numpy.hstack(shard_results)
        else:
            # the following uses a lot of lazy evaluation and (optionally) parallel
            # processing, to improve query latency and minimize memory footprint.
            offsets = numpy.cumsum([0] + [len(shard) for shard in self.shards])
            # translate a within-shard document index into a global index
            def convert(shard_no, doc):
                return [(doc_index + offsets[shard_no], sim) for doc_index, sim in doc]
            is_corpus, query = utils.is_corpus(query)
            # a dense/sparse matrix with more than one row also counts as a corpus
            is_corpus = is_corpus or hasattr(query, 'ndim') and query.ndim > 1 and query.shape[0] > 1
            if not is_corpus:
                # user asked for num_best most similar and query is a single doc
                results = (convert(shard_no, result) for shard_no, result in enumerate(shard_results))
                result = heapq.nlargest(self.num_best, itertools.chain(*results), key=lambda item: item[1])
            else:
                # the trickiest combination: returning num_best results when query was a corpus
                results = []
                for shard_no, result in enumerate(shard_results):
                    shard_result = [convert(shard_no, doc) for doc in result]
                    results.append(shard_result)
                result = []
                # merge the per-shard top-n lists, one query document at a time
                for parts in izip(*results):
                    merged = heapq.nlargest(self.num_best, itertools.chain(*parts), key=lambda item: item[1])
                    result.append(merged)
        if pool:
            # gc doesn't seem to collect the Pools, eventually leading to
            # "IOError 24: too many open files". so let's terminate it manually.
            pool.terminate()
        return result
    def vector_by_id(self, docpos):
        """Get the indexed vector corresponding to the document at position `docpos`.

        Parameters
        ----------
        docpos : int
            Document position, must satisfy ``0 <= docpos < len(self)``.

        Returns
        -------
        {:class:`numpy.ndarray`, :class:`scipy.sparse.csr_matrix`}
            Indexed vector; the exact type depends on the underlying shard index.

        Raises
        ------
        ValueError
            If `docpos` is out of range or the index is empty.

        Examples
        --------
        >>> from gensim.corpora.textcorpus import TextCorpus
        >>> from gensim.test.utils import datapath
        >>> from gensim.similarities import Similarity
        >>>
        >>> # Create index:
        >>> corpus = TextCorpus(datapath('testcorpus.txt'))
        >>> index = Similarity('temp', corpus, num_features=400)
        >>> vector = index.vector_by_id(1)
        """
        self.close_shard()  # no-op if no documents added to index since last query
        pos = 0
        for shard in self.shards:
            pos += len(shard)
            if docpos < pos:
                break
        # after the loop, `shard` is the shard holding `docpos` and `pos` is the
        # cumulative document count up to and including that shard
        if not self.shards or docpos < 0 or docpos >= pos:
            raise ValueError("invalid document position: %s (must be 0 <= x < %s)" % (docpos, len(self)))
        # translate the global position into an offset local to the chosen shard
        result = shard.get_document_id(docpos - pos + len(shard))
        return result
def similarity_by_id(self, docpos):
"""Get similarity of the given document only by `docpos`.
Parameters
----------
docpos : int
Document position in index
Return
------
:class:`numpy.ndarray`
Similarities of document/corpus if index is :class:`~gensim.similarities.docsim.MatrixSimilarity` **or**
:class:`scipy.sparse.csr_matrix`
for case if index is :class:`~gensim.similarities.docsim.SparseMatrixSimilarity`.
Examples
--------
>>> from gensim.corpora.textcorpus import TextCorpus
>>> from gensim.test.utils import datapath
>>> from gensim.similarities import Similarity
>>>
>>> corpus = TextCorpus(datapath('testcorpus.txt'))
>>> index = Similarity('temp', corpus, num_features=400)
>>> similarities = index.similarity_by_id(1)
"""
query = self.vector_by_id(docpos)
norm, self.norm = self.norm, False
result = self[query]
self.norm = norm
return result
    def __iter__(self):
        """For each indexed document, yield its cosine similarities against all other documents in the index.

        Internally batches documents via :meth:`~gensim.similarities.docsim.Similarity.iter_chunks`
        for speed.

        Yields
        ------
        :class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
            One similarity vector per indexed document; the exact type depends on the
            underlying shard index.
        """
        # turn off query normalization (vectors in the index are already normalized, save some CPU)
        norm, self.norm = self.norm, False
        for chunk in self.iter_chunks():
            if chunk.shape[0] > 1:
                # batch query: one similarity vector per document in the chunk
                for sim in self[chunk]:
                    yield sim
            else:
                yield self[chunk]
        # NOTE(review): if iteration is abandoned before exhaustion, or self[chunk]
        # raises, the restore below never runs and self.norm stays False --
        # consider wrapping in try/finally
        self.norm = norm  # restore normalization
def iter_chunks(self, chunksize=None):
"""Iteratively yield the index as chunks of documents, each of size <= chunksize.
Parameters
----------
chunksize : int, optional
Size of chunk,, if None - `self.chunksize` will be used.
Notes
-----
The chunk is returned in its raw form.
The size of the chunk may be smaller than requested; it is up to the caller to check the result for real length.
Yields
------
:class:`numpy.ndarray`
Similarities of document if index is :class:`~gensim.similarities.docsim.MatrixSimilarity` **or**
:class:`scipy.sparse.csr_matrix`
for case if index is :class:`~gensim.similarities.docsim.SparseMatrixSimilarity`.
"""
self.close_shard()
if chunksize is None:
# if not explicitly specified, use the chunksize from the constructor
chunksize = self.chunksize
for shard in self.shards:
query = shard.get_index().index
for chunk_start in xrange(0, query.shape[0], chunksize):
# scipy.sparse doesn't allow slicing beyond real size of the matrix
# (unlike numpy). so, clip the end of the chunk explicitly to make
# scipy.sparse happy
chunk_end = min(query.shape[0], chunk_start + chunksize)
chunk = query[chunk_start: chunk_end] # create a view
yield chunk
def check_moved(self):
"""Update shard locations (for case if the server directory has moved on filesystem)."""
dirname = os.path.dirname(self.output_prefix)
for shard in self.shards:
shard.dirname = dirname
    def save(self, fname=None, *args, **kwargs):
        """Save the object via pickling (also see load) under the filename specified in the constructor.

        Parameters
        ----------
        fname : str, optional
            Path to save the index to; if not provided, `self.output_prefix` is used.
        *args : object
            Arguments, see :meth:`gensim.interfaces.SimilarityABC.save`.
        **kwargs : object
            Keyword arguments, see :meth:`gensim.interfaces.SimilarityABC.save`.

        Notes
        -----
        Calls :meth:`~gensim.similarities.Similarity.close_shard` internally to spill
        any unfinished shards to disk first.

        Examples
        --------
        >>> from gensim.corpora.textcorpus import TextCorpus
        >>> from gensim.test.utils import datapath, get_tmpfile
        >>> from gensim.similarities import Similarity
        >>>
        >>> temp_fname = get_tmpfile("index")
        >>> output_fname = get_tmpfile("saved_index")
        >>>
        >>> corpus = TextCorpus(datapath('testcorpus.txt'))
        >>> index = Similarity(temp_fname, corpus, num_features=400)
        >>>
        >>> index.save(output_fname)
        >>> loaded_index = index.load(output_fname)
        """
        # flush buffered documents so the pickled object references complete shards
        self.close_shard()
        if fname is None:
            fname = self.output_prefix
        super(Similarity, self).save(fname, *args, **kwargs)
def destroy(self):
"""Delete all files under self.output_prefix, object is not usable after calling this method anymore."""
import glob
for fname in glob.glob(self.output_prefix + '*'):
logger.info("deleting %s", fname)
os.remove(fname)
class MatrixSimilarity(interfaces.SimilarityABC):
"""Compute cosine similarity against a corpus of documents by storing the index matrix in memory.
Unless the entire matrix fits into main memory, use :class:`~gensim.similarities.docsim.Similarity` instead.
Examples
--------
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.similarities import MatrixSimilarity
>>>
>>> query = [(1, 2), (5, 4)]
>>> index = MatrixSimilarity(common_corpus, num_features=len(common_dictionary))
>>> sims = index[query]
"""
    def __init__(self, corpus, num_best=None, dtype=numpy.float32, num_features=None, chunksize=256, corpus_len=None):
        """
        Parameters
        ----------
        corpus : iterable of list of (int, number)
            Corpus in BoW format.
        num_best : int, optional
            If set, return only the `num_best` most similar documents, always leaving out documents with similarity = 0.
            Otherwise, return a full vector with one float for every document in the index.
        dtype : numpy.dtype
            Datatype of the internal index matrix.
        num_features : int, optional
            Size of the dictionary; if None, the corpus is scanned to determine it.
        chunksize : int, optional
            Number of documents queried per internal batch.
        corpus_len : int, optional
            Size of `corpus`; if not specified, `len(corpus)` is used.
        """
        if num_features is None:
            logger.warning(
                "scanning corpus to determine the number of features (consider setting `num_features` explicitly)"
            )
            num_features = 1 + utils.get_max_id(corpus)
        self.num_features = num_features
        self.num_best = num_best
        self.normalize = True
        self.chunksize = chunksize
        # NOTE(review): if both `corpus` and `corpus_len` are None, this calls
        # len(None) and raises TypeError before the `corpus is not None` guard
        # below -- confirm whether corpus=None is a supported call pattern
        if corpus_len is None:
            corpus_len = len(corpus)
        if corpus is not None:
            if self.num_features <= 0:
                raise ValueError(
                    "cannot index a corpus with zero features (you must specify either `num_features` "
                    "or a non-empty corpus in the constructor)"
                )
            logger.info("creating matrix with %i documents and %i features", corpus_len, num_features)
            self.index = numpy.empty(shape=(corpus_len, num_features), dtype=dtype)
            # iterate over corpus, populating the numpy index matrix with (normalized)
            # document vectors
            for docno, vector in enumerate(corpus):
                if docno % 1000 == 0:
                    logger.debug("PROGRESS: at document #%i/%i", docno, corpus_len)
                # individual documents in fact may be in numpy/scipy.sparse format as well.
                # it's not documented because it's not fully supported throughout.
                # the user better know what he's doing (no normalization, must
                # explicitly supply num_features etc).
                if isinstance(vector, numpy.ndarray):
                    pass
                elif scipy.sparse.issparse(vector):
                    vector = vector.toarray().flatten()
                else:
                    vector = matutils.unitvec(matutils.sparse2full(vector, num_features))
                self.index[docno] = vector
    def __len__(self):
        """Number of documents in the index (rows of the index matrix)."""
        return self.index.shape[0]
    def get_similarities(self, query):
        """Get similarity between `query` and this index.

        Warnings
        --------
        Do not use this function directly; use the
        :class:`~gensim.similarities.docsim.MatrixSimilarity.__getitem__` instead.

        Parameters
        ----------
        query : {list of (int, number), iterable of list of (int, number), :class:`scipy.sparse.csr_matrix`}
            Document or collection of documents.

        Returns
        -------
        :class:`numpy.ndarray`
            Similarity matrix of shape (#queries, #documents in index).
        """
        is_corpus, query = utils.is_corpus(query)
        if is_corpus:
            # densify each query document and cast to the index dtype in one pass
            query = numpy.asarray(
                [matutils.sparse2full(vec, self.num_features) for vec in query],
                dtype=self.index.dtype
            )
        else:
            if scipy.sparse.issparse(query):
                query = query.toarray()  # convert sparse to dense
            elif isinstance(query, numpy.ndarray):
                pass
            else:
                # default case: query is a single vector in sparse gensim format
                query = matutils.sparse2full(query, self.num_features)
            query = numpy.asarray(query, dtype=self.index.dtype)
        # do a little transposition dance to stop numpy from making a copy of
        # self.index internally in numpy.dot (very slow).
        result = numpy.dot(self.index, query.T).T  # return #queries x #index
        return result  # XXX: removed casting the result from array to list; does anyone care?
def __str__(self):
return "%s<%i docs, %i features>" % (self.__class__.__name__, len(self), self.index.shape[1])
class SoftCosineSimilarity(interfaces.SimilarityABC):
    """Compute soft cosine similarity against a corpus of documents by storing the index matrix in memory.

    Examples
    --------
    >>> from gensim.test.utils import common_texts
    >>> from gensim.corpora import Dictionary
    >>> from gensim.models import Word2Vec
    >>> from gensim.similarities import SoftCosineSimilarity
    >>>
    >>> model = Word2Vec(common_texts, size=20, min_count=1)  # train word-vectors
    >>> dictionary = Dictionary(common_texts)
    >>> bow_corpus = [dictionary.doc2bow(document) for document in common_texts]
    >>>
    >>> similarity_matrix = model.wv.similarity_matrix(dictionary)  # construct similarity matrix
    >>> index = SoftCosineSimilarity(bow_corpus, similarity_matrix, num_best=10)
    >>>
    >>> # Make a query.
    >>> query = 'graph trees computer'.split()
    >>> # calculate similarity between query and each doc from bow_corpus
    >>> sims = index[dictionary.doc2bow(query)]

    Check out `Tutorial Notebook
    <https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/soft_cosine_tutorial.ipynb>`_
    for more examples.

    """
    def __init__(self, corpus, similarity_matrix, num_best=None, chunksize=256):
        """
        Parameters
        ----------
        corpus: iterable of list of (int, float)
            A list of documents in the BoW format.
        similarity_matrix : :class:`scipy.sparse.csc_matrix`
            A term similarity matrix, typically produced by
            :meth:`~gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.similarity_matrix`.
        num_best : int, optional
            The number of results to retrieve for a query, if None - return similarities with all elements from corpus.
        chunksize: int, optional
            Size of one corpus chunk.

        See Also
        --------
        :meth:`gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.similarity_matrix`
            A term similarity matrix produced from term embeddings.
        :func:`gensim.matutils.softcossim`
            The Soft Cosine Measure.

        """
        self.corpus = corpus
        self.similarity_matrix = similarity_matrix
        self.num_best = num_best
        self.chunksize = chunksize

        # Normalization of features is undesirable, since soft cosine similarity requires special
        # normalization using the similarity matrix. Therefore, we would just be normalizing twice,
        # increasing the numerical error.
        self.normalize = False

        # index is simply an array from 0 to size of corpus.
        self.index = numpy.arange(len(corpus))

    def __len__(self):
        """Return the number of documents in the indexed corpus."""
        return len(self.corpus)

    def get_similarities(self, query):
        """Get similarity between `query` and this index.

        Warnings
        --------
        Do not use this function directly; use the self[query] syntax instead.

        Parameters
        ----------
        query : {list of (int, number), iterable of list of (int, number), :class:`scipy.sparse.csr_matrix`}
            Document or collection of documents.

        Returns
        -------
        :class:`numpy.ndarray`
            Similarity matrix (1d array when `query` is a single document).

        """
        if isinstance(query, numpy.ndarray):
            # Convert document indexes to actual documents.
            query = [self.corpus[i] for i in query]

        # A single BoW document is a list of tuples (not a list of lists); wrap it
        # so the loop below can treat both cases uniformly.
        if not query or not isinstance(query[0], list):
            query = [query]

        n_queries = len(query)
        result = []
        for qidx in range(n_queries):
            # Compute similarity for each query.
            qresult = [matutils.softcossim(document, query[qidx], self.similarity_matrix)
                       for document in self.corpus]
            qresult = numpy.array(qresult)

            # Append single query result to list of all results.
            result.append(qresult)

        if len(result) == 1:
            # Only one query.
            result = result[0]
        else:
            result = numpy.array(result)
        return result

    def __str__(self):
        return "%s<%i docs, %i features>" % (self.__class__.__name__, len(self), self.similarity_matrix.shape[0])
class WmdSimilarity(interfaces.SimilarityABC):
    """Compute WMD-based similarity (``1 / (1 + WMD)``) against a corpus of documents
    by storing the index matrix in memory.

    See :class:`~gensim.models.keyedvectors.WordEmbeddingsKeyedVectors` for more information.
    Also, tutorial `notebook
    <https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/WMD_tutorial.ipynb>`_ for more examples.

    When using this code, please consider citing the following papers:
    `Ofir Pele and Michael Werman, "A linear time histogram metric for improved SIFT matching"
    <http://www.cs.huji.ac.il/~werman/Papers/ECCV2008.pdf>`_, `Ofir Pele and Michael Werman, "Fast and robust earth
    mover's distances" <http://www.cs.huji.ac.il/~werman/Papers/ICCV2009.pdf>`_, `"Matt Kusner et al. "From Word
    Embeddings To Document Distances" <http://proceedings.mlr.press/v37/kusnerb15.pdf>`_.

    Example
    -------
    >>> from gensim.test.utils import common_texts
    >>> from gensim.corpora import Dictionary
    >>> from gensim.models import Word2Vec
    >>> from gensim.similarities import WmdSimilarity
    >>>
    >>> model = Word2Vec(common_texts, size=20, min_count=1)  # train word-vectors
    >>> dictionary = Dictionary(common_texts)
    >>> bow_corpus = [dictionary.doc2bow(document) for document in common_texts]
    >>>
    >>> index = WmdSimilarity(bow_corpus, model)
    >>> # Make query.
    >>> query = 'trees'
    >>> sims = index[query]

    """
    def __init__(self, corpus, w2v_model, num_best=None, normalize_w2v_and_replace=True, chunksize=256):
        """
        Parameters
        ----------
        corpus: iterable of list of (int, float)
            A list of documents in the BoW format.
        w2v_model: :class:`~gensim.models.word2vec.Word2VecTrainables`
            A trained word2vec model.
        num_best: int, optional
            Number of results to retrieve.
        normalize_w2v_and_replace: bool, optional
            Whether or not to normalize the word2vec vectors to length 1.
        chunksize : int, optional
            Size of chunk.

        """
        self.corpus = corpus
        self.w2v_model = w2v_model
        self.num_best = num_best
        self.chunksize = chunksize

        # Normalization of features is not possible, as corpus is a list (of lists) of strings.
        self.normalize = False

        # index is simply an array from 0 to size of corpus.
        self.index = numpy.arange(len(corpus))

        if normalize_w2v_and_replace:
            # Normalize vectors in word2vec class to length 1.
            # NOTE: replace=True overwrites the model's original vectors in place.
            w2v_model.init_sims(replace=True)

    def __len__(self):
        """Get size of corpus."""
        return len(self.corpus)

    def get_similarities(self, query):
        """Get similarity between `query` and this index.

        Warnings
        --------
        Do not use this function directly; use the self[query] syntax instead.

        Parameters
        ----------
        query : {list of (int, number), iterable of list of (int, number), :class:`scipy.sparse.csr_matrix`}
            Document or collection of documents.

        Returns
        -------
        :class:`numpy.ndarray`
            Similarity matrix (1d array when `query` is a single document).

        """
        if isinstance(query, numpy.ndarray):
            # Convert document indexes to actual documents.
            query = [self.corpus[i] for i in query]

        # A single document is a flat list of tokens; wrap it so the loop below
        # can treat single and multiple queries uniformly.
        if not query or not isinstance(query[0], list):
            query = [query]

        n_queries = len(query)
        result = []
        for qidx in range(n_queries):
            # Compute similarity for each query.
            qresult = [self.w2v_model.wmdistance(document, query[qidx]) for document in self.corpus]
            qresult = numpy.array(qresult)
            # Map WMD to a similarity in (0, 1]: larger distance -> smaller similarity.
            qresult = 1. / (1. + qresult)
            # Append single query result to list of all results.
            result.append(qresult)

        if len(result) == 1:
            # Only one query.
            result = result[0]
        else:
            result = numpy.array(result)
        return result

    def __str__(self):
        return "%s<%i docs, %i features>" % (self.__class__.__name__, len(self), self.w2v_model.wv.syn0.shape[1])
class SparseMatrixSimilarity(interfaces.SimilarityABC):
    """Compute cosine similarity against a corpus of documents by storing the index matrix in memory.

    Notes
    -----
    Use this if your input corpus contains sparse vectors (such as documents in bag-of-words format) and fits into RAM.

    The matrix is internally stored as a :class:`scipy.sparse.csr_matrix` matrix. Unless the entire
    matrix fits into main memory, use :class:`~gensim.similarities.docsim.Similarity` instead.

    Takes an optional `maintain_sparsity` argument, setting this to True
    causes `get_similarities` to return a sparse matrix instead of a
    dense representation if possible.

    See also
    --------
    :class:`~gensim.similarities.docsim.Similarity`
        Index similarity (wrapper for other inheritors of :class:`~gensim.interfaces.SimilarityABC`).
    :class:`~gensim.similarities.docsim.MatrixSimilarity`
        Index similarity (dense with cosine distance).

    """
    def __init__(self, corpus, num_features=None, num_terms=None, num_docs=None, num_nnz=None,
                 num_best=None, chunksize=500, dtype=numpy.float32, maintain_sparsity=False):
        """
        Parameters
        ----------
        corpus: iterable of list of (int, float)
            A list of documents in the BoW format.
        num_features : int, optional
            Size of the dictionary.
        num_terms : int, optional
            Number of terms, **must be specified** (unless `num_features` is given
            or `corpus` knows its own shape).
        num_docs : int, optional
            Number of documents in `corpus`.
        num_nnz : int, optional
            Number of non-zero terms.
        num_best : int, optional
            If set, return only the `num_best` most similar documents, always leaving out documents with similarity = 0.
            Otherwise, return a full vector with one float for every document in the index.
        chunksize : int, optional
            Size of chunk.
        dtype : numpy.dtype, optional
            Data type of internal matrix.
        maintain_sparsity : bool, optional
            if True - will return sparse arr from
            :meth:`~gensim.similarities.docsim.SparseMatrixSimilarity.get_similarities`.

        """
        self.num_best = num_best
        self.normalize = True
        self.chunksize = chunksize
        self.maintain_sparsity = maintain_sparsity

        if corpus is not None:
            logger.info("creating sparse index")

            # iterate over input corpus, populating the sparse index matrix
            try:
                # use the more efficient corpus generation version, if the input
                # `corpus` is MmCorpus-like (knows its shape and number of non-zeroes).
                num_terms, num_docs, num_nnz = corpus.num_terms, corpus.num_docs, corpus.num_nnz
                logger.debug("using efficient sparse index creation")
            except AttributeError:
                # no MmCorpus, use the slower version (or maybe user supplied the
                # num_* params in constructor)
                pass
            if num_features is not None:
                # num_terms is just an alias for num_features, for compatibility with MatrixSimilarity
                num_terms = num_features
            if num_terms is None:
                raise ValueError("refusing to guess the number of sparse features: specify num_features explicitly")
            # Normalize each document to a unit vector; scipy/numpy inputs are first
            # converted to the sparse gensim format expected by corpus2csc.
            corpus = (matutils.scipy2sparse(v) if scipy.sparse.issparse(v) else
                      (matutils.full2sparse(v) if isinstance(v, numpy.ndarray) else
                       matutils.unitvec(v)) for v in corpus)
            self.index = matutils.corpus2csc(
                corpus, num_terms=num_terms, num_docs=num_docs, num_nnz=num_nnz,
                dtype=dtype, printprogress=10000
            ).T

            # convert to Compressed Sparse Row for efficient row slicing and multiplications
            self.index = self.index.tocsr()  # currently no-op, CSC.T is already CSR
            logger.info("created %r", self.index)

    def __len__(self):
        """Get size of index."""
        return self.index.shape[0]

    def get_similarities(self, query):
        """Get similarity between `query` and this index.

        Warnings
        --------
        Do not use this function directly; use the self[query] syntax instead.

        Parameters
        ----------
        query : {list of (int, number), iterable of list of (int, number), :class:`scipy.sparse.csr_matrix`}
            Document or collection of documents.

        Returns
        -------
        :class:`numpy.ndarray`
            Similarity matrix (if maintain_sparsity=False) **OR**
        :class:`scipy.sparse.csc`
            otherwise

        """
        is_corpus, query = utils.is_corpus(query)
        if is_corpus:
            query = matutils.corpus2csc(query, self.index.shape[1], dtype=self.index.dtype)
        else:
            if scipy.sparse.issparse(query):
                query = query.T  # convert documents=rows to documents=columns
            elif isinstance(query, numpy.ndarray):
                if query.ndim == 1:
                    query.shape = (1, len(query))
                query = scipy.sparse.csr_matrix(query, dtype=self.index.dtype).T
            else:
                # default case: query is a single vector, in sparse gensim format
                query = matutils.corpus2csc([query], self.index.shape[1], dtype=self.index.dtype)

        # compute cosine similarity against every other document in the collection
        result = self.index * query.tocsc()  # N x T * T x C = N x C
        if result.shape[1] == 1 and not is_corpus:
            # for queries of one document, return a 1d array
            result = result.toarray().flatten()
        elif self.maintain_sparsity:
            # avoid converting to dense array if maintaining sparsity
            result = result.T
        else:
            # otherwise, return a 2d matrix (#queries x #index)
            result = result.toarray().T
        return result
| 48,317 | 38.735197 | 120 | py |
poincare_glove | poincare_glove-master/gensim/similarities/__init__.py | """
This package contains implementations of pairwise similarity queries.
"""
# bring classes directly into package namespace, to save some typing
from .docsim import Similarity, MatrixSimilarity, SparseMatrixSimilarity, SoftCosineSimilarity, WmdSimilarity # noqa:F401
| 271 | 37.857143 | 122 | py |
poincare_glove | poincare_glove-master/gensim/similarities/index.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Intro
-----
This module contains integration Annoy with :class:`~gensim.models.word2vec.Word2Vec`,
:class:`~gensim.models.doc2vec.Doc2Vec`, :class:`~gensim.models.fasttext.FastText` and
:class:`~gensim.models.keyedvectors.KeyedVectors`.
What is Annoy
-------------
Annoy (Approximate Nearest Neighbors Oh Yeah) is a C++ library with Python bindings to search for points in space
that are close to a given query point. It also creates large read-only file-based data structures that are mmapped
into memory so that many processes may share the same data.
How it works
------------
Using `random projections <https://en.wikipedia.org/wiki/Locality-sensitive_hashing#Random_projection>`_
and by building up a tree. At every intermediate node in the tree, a random hyperplane is chosen,
which divides the space into two subspaces. This hyperplane is chosen by sampling two points from the subset
and taking the hyperplane equidistant from them.
More information about Annoy: `github repository <https://github.com/spotify/annoy>`_,
`author in twitter <https://twitter.com/fulhack>`_
and `annoy-user maillist <https://groups.google.com/forum/#!forum/annoy-user>`_.
"""
import os
from smart_open import smart_open
try:
import cPickle as _pickle
except ImportError:
import pickle as _pickle
from gensim.models.doc2vec import Doc2Vec
from gensim.models.word2vec import Word2Vec
from gensim.models.fasttext import FastText
from gensim.models import KeyedVectors
from gensim.models.keyedvectors import WordEmbeddingsKeyedVectors
try:
from annoy import AnnoyIndex
except ImportError:
raise ImportError(
"Annoy has not been installed, if you wish to use the annoy indexer, please run `pip install annoy`"
)
class AnnoyIndexer(object):
    """This class allows to use `Annoy <https://github.com/spotify/annoy>`_ as indexer for `most_similar` method
    from :class:`~gensim.models.word2vec.Word2Vec`, :class:`~gensim.models.doc2vec.Doc2Vec`,
    :class:`~gensim.models.fasttext.FastText` and :class:`~gensim.models.keyedvectors.Word2VecKeyedVectors` classes.

    """

    def __init__(self, model=None, num_trees=None):
        """
        Parameters
        ----------
        model : :class:`~gensim.models.base_any2vec.BaseWordEmbeddingsModel`, optional
            Model, that will be used as source for index.
        num_trees : int, optional
            Number of trees for Annoy indexer.

        Examples
        --------
        >>> from gensim.similarities.index import AnnoyIndexer
        >>> from gensim.models import Word2Vec
        >>>
        >>> sentences = [['cute', 'cat', 'say', 'meow'], ['cute', 'dog', 'say', 'woof']]
        >>> model = Word2Vec(sentences, min_count=1, seed=1)
        >>>
        >>> indexer = AnnoyIndexer(model, 2)
        >>> model.most_similar("cat", topn=2, indexer=indexer)
        [('cat', 1.0), ('dog', 0.32011348009109497)]

        """
        self.index = None
        self.labels = None
        self.model = model
        self.num_trees = num_trees

        # Build the index eagerly only when both a model and num_trees are supplied;
        # otherwise the caller is expected to use load() later.
        if model and num_trees:
            if isinstance(self.model, Doc2Vec):
                self.build_from_doc2vec()
            elif isinstance(self.model, (Word2Vec, FastText)):
                self.build_from_word2vec()
            elif isinstance(self.model, (WordEmbeddingsKeyedVectors, KeyedVectors)):
                self.build_from_keyedvectors()
            else:
                raise ValueError("Only a Word2Vec, Doc2Vec, FastText or KeyedVectors instance can be used")

    def save(self, fname, protocol=2):
        """Save AnnoyIndexer instance.

        Parameters
        ----------
        fname : str
            Path to output file, will produce 2 files: `fname` - parameters and `fname`.d - :class:`~annoy.AnnoyIndex`.
        protocol : int, optional
            Protocol for pickle.

        Notes
        -----
        This method save **only** index (**model isn't preserved**).

        """
        fname_dict = fname + '.d'
        self.index.save(fname)
        # Persist the metadata needed by load(): vector dimensionality, tree count, labels.
        d = {'f': self.model.vector_size, 'num_trees': self.num_trees, 'labels': self.labels}
        with smart_open(fname_dict, 'wb') as fout:
            _pickle.dump(d, fout, protocol=protocol)

    def load(self, fname):
        """Load AnnoyIndexer instance.

        Parameters
        ----------
        fname : str
            Path to dump with AnnoyIndexer.

        Examples
        --------
        >>> from gensim.similarities.index import AnnoyIndexer
        >>> from gensim.models import Word2Vec
        >>> from tempfile import mkstemp
        >>>
        >>> sentences = [['cute', 'cat', 'say', 'meow'], ['cute', 'dog', 'say', 'woof']]
        >>> model = Word2Vec(sentences, min_count=1, seed=1, iter=10)
        >>>
        >>> indexer = AnnoyIndexer(model, 2)
        >>> _, temp_fn = mkstemp()
        >>> indexer.save(temp_fn)
        >>>
        >>> new_indexer = AnnoyIndexer()
        >>> new_indexer.load(temp_fn)
        >>> new_indexer.model = model

        """
        fname_dict = fname + '.d'
        if not (os.path.exists(fname) and os.path.exists(fname_dict)):
            raise IOError(
                "Can't find index files '%s' and '%s' - Unable to restore AnnoyIndexer state." % (fname, fname_dict)
            )
        else:
            with smart_open(fname_dict) as f:
                d = _pickle.loads(f.read())
                self.num_trees = d['num_trees']
                self.index = AnnoyIndex(d['f'])
                self.index.load(fname)
                self.labels = d['labels']

    def build_from_word2vec(self):
        """Build an Annoy index using word vectors from a Word2Vec model."""
        self.model.init_sims()
        return self._build_from_model(self.model.wv.vectors_norm, self.model.wv.index2word, self.model.vector_size)

    def build_from_doc2vec(self):
        """Build an Annoy index using document vectors from a Doc2Vec model."""
        docvecs = self.model.docvecs
        docvecs.init_sims()
        labels = [docvecs.index_to_doctag(i) for i in range(0, docvecs.count)]
        return self._build_from_model(docvecs.vectors_docs_norm, labels, self.model.vector_size)

    def build_from_keyedvectors(self):
        """Build an Annoy index using word vectors from a KeyedVectors model."""
        self.model.init_sims()
        return self._build_from_model(self.model.syn0norm, self.model.index2word, self.model.vector_size)

    def _build_from_model(self, vectors, labels, num_features):
        # Common path for all build_from_* methods: add every vector to a fresh
        # AnnoyIndex and build `num_trees` trees over it.
        index = AnnoyIndex(num_features)

        for vector_num, vector in enumerate(vectors):
            index.add_item(vector_num, vector)

        index.build(self.num_trees)
        self.index = index
        self.labels = labels

    def most_similar(self, vector, num_neighbors):
        """Find the approximate `num_neighbors` most similar items.

        Parameters
        ----------
        vector : numpy.array
            Vector for word/document.
        num_neighbors : int
            Number of most similar items

        Returns
        -------
        list of (str, float)
            List of most similar items in format [(`item`, `cosine_distance`), ... ]

        """
        ids, distances = self.index.get_nns_by_vector(
            vector, num_neighbors, include_distances=True)

        # Convert Annoy's distance to gensim's similarity convention (1 - d/2).
        return [(self.labels[ids[i]], 1 - distances[i] / 2) for i in range(len(ids))]
| 7,557 | 34.990476 | 119 | py |
poincare_glove | poincare_glove-master/glove_code/setup.py | from setuptools import setup, Extension
from Cython.Distutils import build_ext
import numpy as np
# --- Package metadata -------------------------------------------------------
NAME = "glove"
VERSION = "0.1"
DESCR = "Python implementation of GloVe"
URL = "http://www.google.com"  # NOTE(review): placeholder URL, should point to the real project page
REQUIRES = ['numpy', 'cython']

AUTHOR = "Alexandru Tifrea"
EMAIL = "tifreaa@ethz.ch"

LICENSE = "Apache 2.0"

SRC_DIR = "src"
PACKAGES = [SRC_DIR]

# Cython extension holding the performance-critical GloVe training loop.
# The OpenMP flags enable the parallel code paths in glove_inner.pyx.
ext_1 = Extension(SRC_DIR + "/glove_inner",
                  [SRC_DIR + "/glove_inner.pyx"],
                  extra_compile_args=['-fopenmp'],
                  extra_link_args=['-fopenmp'],
                  libraries=[],
                  include_dirs=[np.get_include()])

EXTENSIONS = [ext_1]

if __name__ == "__main__":
    setup(install_requires=REQUIRES,
          packages=PACKAGES,
          zip_safe=False,
          name=NAME,
          version=VERSION,
          description=DESCR,
          author=AUTHOR,
          author_email=EMAIL,
          url=URL,
          license=LICENSE,
          # Cython's build_ext compiles the .pyx extension during installation.
          cmdclass={"build_ext": build_ext},
          ext_modules=EXTENSIONS,
          package_data={'': ['*.pyx', '*.pxd', '*.h']},
          include_package_data=True
          )
| 1,130 | 24.133333 | 55 | py |
poincare_glove | poincare_glove-master/glove_code/src/glove.py | from gensim.models.word2vec import WordEmbeddingCheckpoints
from gensim.utils import SaveLoad
from gensim.models.keyedvectors import VanillaWordEmbeddingsKeyedVectors, PoincareWordEmbeddingsKeyedVectors, Vocab, \
MixPoincareWordEmbeddingsKeyedVectors
from glove_code.src.glove_inner import read_all
from glove_code.src.utils import is_number
import logging
from math import log
from numpy import array, newaxis, random, empty, zeros, ones, float32 as REAL, dot, sqrt, std, mean, uint32, copy
from numpy.linalg import norm
import threading
from timeit import default_timer
logger = logging.getLogger(__name__)

# GloVe weighting-function constants (Pennington et al. 2014): pairs with
# co-occurrence count above X_MAX get weight 1.0, below it (count/X_MAX)**ALPHA.
X_MAX = 100.0
ALPHA = 0.75
MAX_ABS_VALUE_EMB_INIT = 0.001

# Guards self.wv.vocab against concurrent access from the checkpoint thread.
vocab_lock = threading.Lock()

try:
    from glove_code.src.glove_inner import train_glove_epoch
except ImportError:
    logger.warning("COULD NOT IMPORT CYTHON FUNCTION")

    # TODO: update according to the code in glove_inner.pyx, if you intend to use this.
    # XXX: this is not needed unless the cython version cannot be imported. If the python function is used instead of
    # the cython one, then training would be extremely(!) slow.
    # NOTE(review): this fallback's signature (model, batch_data, compute_loss) and return
    # order (pair_tally, batch_loss) do not match the call site in Glove._train_epoch,
    # which calls train_glove_epoch(self) and unpacks (loss, pair_count) — confirm
    # against glove_inner.pyx before relying on this fallback.
    def train_glove_epoch(model, batch_data, compute_loss):
        batch_loss = 0.0
        pair_tally = 0
        for data in batch_data:
            word1_index, word2_index, occ_count = data

            # Compute difference between log co-occ count and model.
            if model.with_bias:
                diff = dot(model.wv.syn0[word1_index], model.syn1[word2_index]) + model.b0[word1_index] + model.b1[word2_index] - log(occ_count)
            else:
                diff = dot(model.wv.syn0[word1_index], model.syn1[word2_index]) - log(occ_count)
            # Apply the GloVe weighting function f(x) to the raw difference.
            fdiff = (1.0 if occ_count > X_MAX else pow(occ_count / X_MAX, ALPHA)) * diff
            if not is_number(diff) or not is_number(fdiff):
                logger.warning("NaN or inf encountered in {}, {}".format(diff, fdiff))
                continue

            if compute_loss:
                # Accumulate loss.
                batch_loss += 0.5 * fdiff * diff

            # AdaGrad updates.
            fdiff *= model.lr  # for ease in calculating gradient
            grad0 = fdiff * model.syn1[word2_index]
            grad1 = fdiff * model.wv.syn0[word1_index]

            # Update embeddings: per-parameter step size is lr / sqrt(accumulated sq. grads).
            model.wv.syn0[word1_index] -= 1.0 / sqrt(model.gradsq_syn0[word1_index]) * grad0
            model.syn1[word2_index] -= 1.0 / sqrt(model.gradsq_syn1[word2_index]) * grad1
            model.gradsq_syn0[word1_index] += norm(grad0)**2
            model.gradsq_syn1[word2_index] += norm(grad1)**2

            if model.with_bias:
                # Update biases.
                model.b0[word1_index] -= 1.0 / sqrt(model.gradsq_b0[word1_index]) * fdiff
                model.b1[word2_index] -= 1.0 / sqrt(model.gradsq_b1[word2_index]) * fdiff
                model.gradsq_b0[word1_index] += fdiff * fdiff
                model.gradsq_b1[word2_index] += fdiff * fdiff

            pair_tally += 1

        return pair_tally, batch_loss
class Glove(SaveLoad):
    """GloVe model trained from a precomputed co-occurrence matrix.

    The constructor loads the vocabulary, selects the embedding geometry
    (vanilla / Euclidean / Poincare / mixed-Poincare keyed vectors, driven by the
    `poincare`, `euclid` and `num_embs` flags), initializes the trainable
    parameters and immediately runs `epochs` passes of training.
    """

    def __init__(self, use_glove_format, coocc_file, vocab_file, restrict_vocab, num_workers=5, chunksize=100, epochs=5,
                 optimizer=None, lr=0.05, vector_size=100, vector_dtype=REAL, poincare=0, euclid=1, dist_func=None,
                 cosh_dist_pow=0, num_embs=0, nn_config=None, coocc_func="log", use_scaling=False, seed=1,
                 compute_loss=True, with_bias=False, use_log_probs=False, ckpt_word_list=None, init_near_border=False,
                 init_pretrained_config=None, callbacks=None):
        """Build the model and train it.

        Parameters of note: `coocc_file`/`vocab_file` are the co-occurrence matrix and
        vocabulary produced by the GloVe preprocessing tools (`use_glove_format` selects
        their on-disk layout); `restrict_vocab` caps the vocabulary size; `callbacks`
        is an optional list of objects with on_train_begin/on_train_end hooks.
        """
        self.use_glove_format = use_glove_format
        self.coocc_file = coocc_file
        self.num_workers = num_workers
        self.chunksize = chunksize
        self.epochs = epochs
        self.optimizer = optimizer
        self.lr = lr
        self.vector_size = vector_size
        self.vector_dtype = vector_dtype
        self.compute_loss = compute_loss
        self.with_bias = with_bias
        self.use_log_probs = use_log_probs
        self.total_train_time = 0
        self.epoch_training_loss = 0.0
        self.trained_pair_count = 0
        self.num_projections = 0
        self.init_near_border = init_near_border
        self.init_pretrained_config = init_pretrained_config
        self.callbacks = callbacks
        self.finished_training = False

        self.poincare = poincare
        self.euclid = euclid
        self.dist_func = dist_func
        self.cosh_dist_pow = cosh_dist_pow
        self.num_embs = num_embs
        self.nn_config = nn_config
        self.coocc_func = coocc_func
        self.use_scaling = use_scaling
        if self.use_scaling:
            # Start the scaling factor at 1.0 with a small random perturbation in [-0.01, 0.01).
            self.scaling_factor = 1.0 + (random.rand() * 2 * 0.01 - 0.01)
            print("Initial scaling factor is {}".format(self.scaling_factor))

        # `poincare` takes precedence over `euclid` when both flags are set.
        self.emb_type = "vanilla"
        if self.poincare == 1:
            self.emb_type = "poincare"
        elif self.euclid == 1:
            self.emb_type = "euclid"

        self.max_word_index = restrict_vocab
        self.vocab_size = 0

        if self.emb_type == "poincare":
            if num_embs > 0:
                # Cartesian product of several small Poincare balls.
                self.wv = MixPoincareWordEmbeddingsKeyedVectors(vector_size, num_embs=num_embs, vector_dtype=vector_dtype,
                                                                init_near_border=init_near_border,
                                                                init_pretrained_config=init_pretrained_config)
            else:
                self.wv = PoincareWordEmbeddingsKeyedVectors(vector_size, vector_dtype=vector_dtype,
                                                             init_near_border=init_near_border,
                                                             init_pretrained_config=init_pretrained_config)
        else:
            self.wv = VanillaWordEmbeddingsKeyedVectors(vector_size, vector_dtype=vector_dtype,
                                                        init_pretrained_config=init_pretrained_config)

        self.load_vocab(vocab_file, restrict_vocab)
        self.num_pairs = 0
        self.inspect_training_corpus()

        self.trainables = GloveTrainables(self.vocab_size, self.vector_size, self.vector_dtype, seed, self.with_bias,
                                          self.nn_config)
        self.wv.trainables = self.trainables

        if ckpt_word_list:
            self.word_checkpoints = WordEmbeddingCheckpoints(ckpt_word_list, self)

        # Initialize embeddings.
        self.trainables.init_embeddings(self.wv)

        # Train embeddings.
        self.train()

    def _log_epoch_end(self, cur_epoch, elapsed):
        """Log similarity benchmarks, norm statistics and (every 5 epochs) analogy accuracy."""
        _, rw_spearman_corr, _ = self.wv.evaluate_word_pairs('../msc_tifreaa/gensim/test/test_data/rare_word.txt')
        _, wordsim_spearman, _ = self.wv.evaluate_word_pairs('../msc_tifreaa/gensim/test/test_data/wordsim353.tsv')
        _, simlex_spearman, _ = self.wv.evaluate_word_pairs('../msc_tifreaa/gensim/test/test_data/simlex999.txt')
        _, mturk_spearman, _ = self.wv.evaluate_word_pairs('../msc_tifreaa/gensim/test/test_data/mturk771.tsv')
        _, simverb_spearman, _ = self.wv.evaluate_word_pairs('../msc_tifreaa/gensim/test/test_data/simverb3500.tsv')

        # Compute embedding norms for top 10 most frequent words and least frequent words (words outside top 1000)
        target_norms = array([norm(self.wv.word_vec(w)) for w in self.wv.index2entity[:self.vocab_size]])
        # NOTE(review): reads trainables.syn1neg, while GloveTrainables.__init__ creates
        # `syn1` — presumably syn1neg is set during init_embeddings; confirm.
        context_norms = array([norm(self.trainables.syn1neg[idx]) for idx in range(self.vocab_size)])
        top10_avg_norm_target, top10_avg_norm_context = mean(target_norms[:10]), mean(context_norms[:10])
        not_top1000_avg_norm_target, not_top1000_avg_norm_context = mean(target_norms[1000:]), mean(context_norms[1000:])

        print_string = "EPOCH - {:d} : training on {:d} pairs took {:.1f}s, {:.0f} pairs/s, epoch loss {:f}\n\t- Epoch {:d} Similarity: rareword {:.4f}, wordsim {:.4f}, simlex {:.4f}, mturk {:.4f}, simverb {:.4f}, top10_avg_norm {:.4f} / {:.4f}, last_avg_norm {:.4f} / {:.4f}, norm_stddev {:.4f}/{:.4f}".format(
            cur_epoch + 1, self.trained_pair_count, elapsed, self.trained_pair_count / elapsed,
            float(self.epoch_training_loss), cur_epoch + 1,
            rw_spearman_corr[0], wordsim_spearman[0], simlex_spearman[0], mturk_spearman[0], simverb_spearman[0],
            top10_avg_norm_target, top10_avg_norm_context,
            not_top1000_avg_norm_target, not_top1000_avg_norm_context,
            std(target_norms), std(context_norms))
        if not self.with_bias:
            # Without per-word biases the model keeps global mean/variance biases instead.
            print_string += ", mean bias {:.4f}, variance bias {:.4f}".format(
                self.trainables.mean_bias, self.trainables.var_bias
            )
        if self.use_scaling:
            print_string += ", scaling_factor {:.4f}".format(self.scaling_factor)
        print(print_string)
        logger.info(print_string)

        if (cur_epoch + 1) % 5 == 0:
            self.prepare_emb_for_eval()
            # XXX: we currently compute analogy using 3COSADD and cosine distance because it is a lot faster than using
            # Moebius parallel transport, and we don't want to slow down training.
            # Note that this only happens for logging the progress during training. For evaluating an already trained
            # model, any of the supported analogy functions can be chosen.
            old_value = getattr(self.wv, "use_poincare_distance", False)
            self.wv.use_poincare_distance = False
            google_analogy_eval = self.wv.accuracy(
                'gensim/test/test_data/questions-words.txt',
                restrict_vocab=400000,
                most_similar=VanillaWordEmbeddingsKeyedVectors.batch_most_similar_analogy,
                verbose=False)
            msr_analogy_eval = self.wv.accuracy(
                '../msc_tifreaa/gensim/test/test_data/msr_word_relationship.processed',
                restrict_vocab=400000,
                most_similar=VanillaWordEmbeddingsKeyedVectors.batch_most_similar_analogy,
                verbose=False)
            # 19544 / 8000 are the total question counts of the Google and MSR analogy sets.
            print_string = "\t- Epoch {:d} Analogy: Google {:.4f}, MSR {:.4f}".format(
                cur_epoch + 1, google_analogy_eval[-1]['correct'][0] / 19544,
                msr_analogy_eval[-1]['correct'][0] / 8000)
            print(print_string)
            logger.info(print_string)
            self.wv.use_poincare_distance = old_value
            # Reset vector_norms so that they are computed again next time when we want to evaluate analogy.
            self.wv.vectors_norm = None

    def _log_train_end(self, total_elapsed):
        """Log overall pair throughput once training has finished."""
        logger.info(
            "training on %i pairs took %.1fs, %.0f pairs/s",
            self.trained_pair_count, total_elapsed, self.trained_pair_count / total_elapsed
        )

    def _train_epoch(self, cur_epoch):
        """Train one epoch."""
        start = default_timer() - 0.00001
        self.epoch_training_loss, self.trained_pair_count = train_glove_epoch(self)
        elapsed = default_timer() - start

        # We need a lock here, because the self.wv.vocab will be changed during the similarity evaluation, which means
        # that other threads that might be using vocab (e.g. ckpt_worker) may run into race conditions.
        # `with` guarantees the lock is released even if the evaluation raises.
        with vocab_lock:
            self._log_epoch_end(cur_epoch, elapsed)

    def train(self):
        """Run `self.epochs` training epochs, driving callbacks and the checkpoint thread."""
        start = default_timer() - 0.00001

        # `callbacks` defaults to None; guard so default construction does not crash.
        for callback in (self.callbacks or []):
            callback.on_train_begin(self)

        # Start worker that will save model checkpoints.
        if hasattr(self, "word_checkpoints"):
            print("Starting model checkpoint thread")
            ckpt_worker = threading.Thread(target=self.run_model_ckpt_job)
            ckpt_worker.daemon = True
            ckpt_worker.start()

        for cur_epoch in range(self.epochs):
            self._train_epoch(cur_epoch)

        # Log overall time
        total_elapsed = default_timer() - start
        self.finished_training = True
        if hasattr(self, "word_checkpoints"):
            ckpt_worker.join()
            print("Model checkpoint thread finished working")
        self._log_train_end(total_elapsed)

        for callback in (self.callbacks or []):
            callback.on_train_end(self)

    def load_vocab(self, vocab_file, restrict_vocab):
        """Populate `self.wv` vocabulary from `vocab_file`, keeping at most `restrict_vocab` words.

        Supports two layouts: the GloVe format ("word count" per line, 0-based implicit
        index) and a tab-separated format ("index\\tword\\tcount", 1-based explicit index).
        """
        # Read vocab.
        with open(vocab_file, "r") as f:
            self.vocab_size = 0
            self.wv.index2freq = []
            all_lines = f.readlines()[:restrict_vocab] if restrict_vocab > 0 else f.readlines()
            for index, line in enumerate(all_lines):
                if self.use_glove_format:
                    word, count = line.strip().split(" ")  # vocab is indexed from 0; for co-occ we use 1-based indexing
                else:
                    index, word, count = line.strip().split("\t")
                    index = int(index) - 1  # indexing starts at 1 in the file; for co-occ we use 0-based indexing
                self.wv.index2word.append(word)
                self.wv.vocab[word] = Vocab(index=index, count=int(count))
                self.wv.index2freq.append(count)
                self.vocab_size += 1

        self.wv.index2freq = array(self.wv.index2freq).astype(uint32)
        self.wv.vector_size = self.vector_size
        self.wv.vector_dtype = self.vector_dtype

        # Unused members from VanillaWordEmbeddingsKeyedVectors.
        self.wv.vectors_norm = None

        print("Loaded vocabulary with {} words".format(self.vocab_size))

    def inspect_training_corpus(self):
        """Do a first pass over the co-occurrence file to count the training pairs."""
        self.num_pairs = read_all(self.use_glove_format, self.coocc_file)
        print("Finished first traversal of corpus. Detected a total of {} pairs".format(self.num_pairs))

    def run_model_ckpt_job(self):
        """Background job: periodically snapshot embeddings until training finishes."""
        ckpt_delay, next_ckpt = 0.5, 1.0
        start = default_timer() - 0.00001
        while not self.finished_training:
            elapsed = default_timer() - start
            if elapsed >= next_ckpt:
                # Lock so the checkpoint never observes vocab mid-mutation.
                with vocab_lock:
                    self.word_checkpoints.add_checkpoints()
                next_ckpt = elapsed + ckpt_delay

    def prepare_emb_for_eval(self):
        """Combine target and context embeddings into the vectors used for evaluation."""
        if self.emb_type == "poincare":
            pass
            # TODO: need to change keyedvectors so that they use agg_vectors for logging the training of Poincare embeddings
            # Note that this method is only invoked when logging the progress during training, and not when running the
            # evaluation of an already trained model.
            # self.wv.agg_vectors = self.wv.moebius_mul_mat(agg, 0.5)
        else:
            # Standard GloVe practice: evaluate on the (L2-normalized) sum of the
            # target and context embedding matrices.
            self.wv.vectors_norm = self.wv.vectors + self.trainables.syn1
            self.wv.vectors_norm = (self.wv.vectors_norm / sqrt((self.wv.vectors_norm ** 2).sum(-1))[..., newaxis]).astype(self.vector_dtype)

    def cleanup(self):
        """Drop optimizer state to shrink the model before saving."""
        # Remove references to auxiliary variables that are memory heavy.
        self.trainables.gradsq_syn0, self.trainables.gradsq_syn1 = None, None
        self.trainables.gradsq_b0, self.trainables.gradsq_b1 = None, None

    def get_attr(self, attr_name, default=0):
        """Return attribute `attr_name` if present, else `default` (for older pickles)."""
        return getattr(self, attr_name, default)
class GloveTrainables(SaveLoad):
    """Trainable parameters and optimizer state for a GloVe model.

    Owns the context-word matrix (``syn1``), the optional bias terms, and the
    per-parameter accumulators used by the optimizers (AdaGrad squared-gradient
    sums, AMSgrad momentum and beta buffers). Target-word vectors live on the
    keyed-vectors object and are allocated in :meth:`init_embeddings`.
    """
    def __init__(self, vocab_size, vector_size, vector_dtype, seed, with_bias, nn_config):
        """Allocate parameter arrays and optimizer state (values set later).

        vocab_size -- number of rows in every per-word array.
        vector_size -- embedding dimensionality.
        vector_dtype -- numpy dtype for all arrays.
        seed -- seed mixed into the deterministic per-word initialization.
        with_bias -- if True, one input/output bias per word; otherwise two
            global scalar biases are used instead.
        nn_config -- optional NNConfig; when truthy, also allocate the small
            NN's weights and output bias.
        """
        self.vocab_size = vocab_size
        self.vector_size = vector_size
        self.vector_dtype = vector_dtype
        self.with_bias = with_bias
        self.seed = seed
        # Word embeddings and biases. syn1 holds the context-word vectors.
        self.syn1 = empty((vocab_size, vector_size), dtype=vector_dtype)
        if with_bias:
            self.b0 = empty((vocab_size,), dtype=vector_dtype)
            self.b1 = empty((vocab_size,), dtype=vector_dtype)
        else:
            # Even if we don't have one bias per word embedding, use a global mean and variance bias that "normalizes"
            # the model distribution and the log co-occ count distribution and brings them to the same domain.
            self.mean_bias = 0.0
            self.var_bias = 0.0
        # Arrays for accumulating the sum of the squared gradients during training, for AdaGrad.
        # Initialize with 1.0, so that the initial value of eta is equal to initial learning rate.
        self.gradsq_syn0 = ones((vocab_size, vector_size), dtype=vector_dtype)
        self.gradsq_syn1 = ones((vocab_size, vector_size), dtype=vector_dtype)
        if with_bias:
            self.gradsq_b0 = ones((vocab_size,), dtype=vector_dtype)
            self.gradsq_b1 = ones((vocab_size,), dtype=vector_dtype)
        else:
            self.gradsq_mean_bias = 1.0
            self.gradsq_var_bias = 1.0
        # Arrays for accumulating momentum for AMSgrad.
        self.mom_syn0 = zeros((vocab_size, vector_size), dtype=vector_dtype)
        self.mom_syn1 = zeros((vocab_size, vector_size), dtype=vector_dtype)
        if with_bias:
            self.mom_b0 = zeros((vocab_size,), dtype=vector_dtype)
            self.mom_b1 = zeros((vocab_size,), dtype=vector_dtype)
        else:
            self.mom_mean_bias = 0.0
            self.mom_var_bias = 0.0
        # Arrays for keeping track of coefficients for AMSgrad.
        self.betas_syn0 = ones((vocab_size, vector_size), dtype=vector_dtype)
        self.betas_syn1 = ones((vocab_size, vector_size), dtype=vector_dtype)
        if with_bias:
            self.betas_b0 = ones((vocab_size,), dtype=vector_dtype)
            self.betas_b1 = ones((vocab_size,), dtype=vector_dtype)
        else:
            self.betas_mean_bias = 1.0
            self.betas_var_bias = 1.0
        # For compatibility with the evaluation code for SGNS, we also add syn1neg.
        # NOTE: syn1neg is an alias (same array object) of syn1, not a copy.
        self.syn1neg = self.syn1
        self.nn_weights = None
        if nn_config:
            self.nn_weights = empty((3, nn_config.num_nodes), dtype=vector_dtype)
            self.nn_output_bias = empty((1, 1), dtype=vector_dtype)
    def init_embeddings(self, wv):
        """Initialize target vectors (stored on ``wv``) and context vectors/biases.

        If ``wv.init_pretrained_config`` is set, the first rows are copied from
        the pretrained model and only the remaining vocabulary rows are
        randomized; otherwise everything is randomized deterministically per
        word (see seeded_vector).
        """
        wv.vectors = empty((self.vocab_size, self.vector_size), dtype=self.vector_dtype)
        start = 0
        if wv.init_pretrained_config:
            # Initialize from pretrained model.
            # NOTE(review): b0/b1 are copied unconditionally here; presumably
            # pretrained initialization is only used with with_bias=True -- confirm.
            for i in range(min(wv.init_pretrained_config.vocab_size, self.vocab_size)):
                wv.vectors[i] = copy(wv.init_pretrained_config.init_vectors[i])
                self.syn1[i] = copy(wv.init_pretrained_config.init_syn1[i])
                self.b0[i] = copy(wv.init_pretrained_config.init_b0[i])
                self.b1[i] = copy(wv.init_pretrained_config.init_b1[i])
            # Set start to be the vocab size of the pretrained model. This will add new rows to the data structures,
            # for the words in the vocabulary that do not appear in the pretrained model.
            start = wv.init_pretrained_config.vocab_size
        # randomize weights vector by vector, rather than materializing a huge random matrix in RAM at once
        for i in range(start, self.vocab_size):
            if isinstance(wv, PoincareWordEmbeddingsKeyedVectors) and wv.init_near_border:
                # Draw a small random direction, then rescale its norm into
                # [0.99, 0.999) so the point sits near the Poincare ball border.
                wv.vectors[i] = self.seeded_vector(
                    wv.index2word[i] + str(self.seed),
                    wv.vector_size,
                    max_abs_value=0.001).astype(self.vector_dtype)
                vector_norm = random.uniform(low=0.99, high=0.999)
                wv.vectors[i] = wv.vectors[i] / norm(wv.vectors[i]) * vector_norm
                self.syn1[i] = self.seeded_vector(
                    wv.index2word[i] + str(self.seed) * 42,
                    self.vector_size,
                    max_abs_value=0.001).astype(self.vector_dtype)
                vector_norm = random.uniform(low=0.99, high=0.999)
                self.syn1[i] = self.syn1[i] / norm(self.syn1[i]) * vector_norm
            else:
                # Default: small values near the origin; the context vector uses
                # a different seed string (seed repeated 42 times) than the target.
                wv.vectors[i] = self.seeded_vector(
                    wv.index2word[i] + str(self.seed),
                    self.vector_size,
                    max_abs_value=MAX_ABS_VALUE_EMB_INIT).astype(self.vector_dtype)
                self.syn1[i] = self.seeded_vector(
                    wv.index2word[i] + str(self.seed) * 42,
                    self.vector_size,
                    max_abs_value=MAX_ABS_VALUE_EMB_INIT).astype(self.vector_dtype)
            if self.with_bias:
                # Initialize biases.
                self.b0[i] = random.random() * 2 * MAX_ABS_VALUE_EMB_INIT - MAX_ABS_VALUE_EMB_INIT
                self.b1[i] = random.random() * 2 * MAX_ABS_VALUE_EMB_INIT - MAX_ABS_VALUE_EMB_INIT
        if not self.with_bias:
            # Global scalar biases get one random draw each.
            self.mean_bias = random.random() * 2 * MAX_ABS_VALUE_EMB_INIT - MAX_ABS_VALUE_EMB_INIT
            self.var_bias = random.random() * 2 * MAX_ABS_VALUE_EMB_INIT - MAX_ABS_VALUE_EMB_INIT
        if self.nn_weights is not None:
            # Init biases with both negative and positive small values.
            self.nn_weights[0] = self.seeded_vector(
                "nn_biases" + str(self.seed),
                len(self.nn_weights[0]), max_abs_value=0.00001).astype(self.vector_dtype)
            # Init weights only with positive values.
            for i in range(1, 3):
                self.nn_weights[i] = abs(self.seeded_vector(
                    "nn_weights" + str(i) + str(self.seed),
                    len(self.nn_weights[0]), max_abs_value=0.00001).astype(self.vector_dtype))
            # Init output bias.
            self.nn_output_bias = self.seeded_vector("nn_output_bias" + str(self.seed), 1, max_abs_value=0.0001).astype(
                self.vector_dtype)
    @staticmethod
    def seeded_vector(seed_string, vector_size, max_abs_value=None):
        """Create one 'random' vector (but deterministic by seed_string)

        With max_abs_value=None the values lie in (-0.5, 0.5)/vector_size;
        otherwise they are uniform in [-max_abs_value, max_abs_value).
        """
        # Note: built-in hash() may vary by Python version or even (in Py3.x) per launch
        once = random.RandomState(hash(seed_string) & 0xffffffff)
        if max_abs_value is None:
            return (once.rand(vector_size) - 0.5) / vector_size
        else:
            return once.rand(vector_size) * 2 * max_abs_value - max_abs_value
class InitializationConfig:
    """Holds the pretrained vectors and biases used to warm-start a new model's embeddings."""

    def __init__(self, pretrained_model_filename):
        pretrained = Glove.load(pretrained_model_filename)
        self.vocab_size = len(pretrained.wv.vocab)
        self.init_vectors = pretrained.wv.vectors
        self.init_syn1 = pretrained.trainables.syn1
        self.init_b0 = pretrained.trainables.b0
        self.init_b1 = pretrained.trainables.b1
class NNConfig(SaveLoad):
    """Parses a '<num_nodes>-<nonlinearity>' configuration string for the training NN."""

    def __init__(self, config_str):
        parts = config_str.split("-")
        self.num_nodes = int(parts[0])
        self.nonlinearity = parts[1]
| 22,886 | 47.386892 | 311 | py |
poincare_glove | poincare_glove-master/glove_code/src/utils.py | from numpy import inf, nan
def is_number(x):
    """Return True if ``x`` is a finite number (not inf, -inf, or NaN)."""
    import math

    # The original membership test `x not in [inf, -inf, nan]` mishandled NaN:
    # NaN compares unequal to everything (including itself), so any NaN object
    # other than the identical `numpy.nan` singleton was misreported as a number.
    return math.isfinite(x)
| 132 | 13.777778 | 33 | py |
poincare_glove | poincare_glove-master/glove_code/scripts/glove_main.py | #!/usr/local/bin/python3
import argparse
import gensim
from gensim.models.callbacks import WordEmbCheckpointSaver
from glove_code.src.glove import Glove, NNConfig, InitializationConfig
from util_scripts.get_model_eval_and_stats import *
import logging
from nltk.corpus import brown
import numpy as np
from numpy import float32 as REAL
from numpy.linalg import norm
import os
import socket
import time
# Template for saved models:
# models/glove/<family>/glove_ep<epochs>_size<dim>_lr<lr>_vocab<restrict>_<emb_type>
MODEL_FILENAME_PATTERN = "models/glove/{}/glove_ep{}_size{}_lr{}_vocab{}_{}"
# Pretrained models used to warm-start training (--init_pretrained), keyed by setting.
INITIALIZATION_MODEL_FILENAME = {
    "100D": "data/pretrained_models/glove_pretrained_100d_levy_vocab50k_cosh-dist-sq_bias",
    "vanilla_100D": "data/pretrained_models/glove_vanilla_pretrained_100d_levy_vocab50k_bias",
}
# Host-specific overrides for where the pretrained models live.
if socket.gethostname() in ["armin", "grinder", "mark", "youagain", "dalabgpu"]:  # DALAB machines
    INITIALIZATION_MODEL_FILENAME = {
        "100D": "/media/hofmann-scratch/Octavian/alext/data/pretrained_models/glove_pretrained_100d_levy_vocab50k_cosh-dist-sq_bias",
        "vanilla_100D": "/media/hofmann-scratch/Octavian/alext/data/pretrained_models/glove_vanilla_pretrained_100d_levy_vocab50k_bias",
    }
elif "lo-" in socket.gethostname():  # Leonhard nodes
    INITIALIZATION_MODEL_FILENAME = {
        "100D": "/cluster/scratch/tifreaa/data/pretrained_models/glove_pretrained_100d_levy_vocab50k_cosh-dist-sq_bias",
        "50x2D": "/cluster/scratch/tifreaa/data/pretrained_models/glove_pretrained_50x2D_ep50_levy_vocab50k_cosh-dist-sq_bias",
        "vanilla_100D": "/cluster/scratch/tifreaa/data/pretrained_models/glove_vanilla_pretrained_100d_levy_vocab50k_bias",
    }
# Total number of questions in each analogy benchmark (used for coverage-adjusted accuracy).
SEM_GOOGLE_SIZE = 8869
SYN_GOOGLE_SIZE = 10675
GOOGLE_SIZE = 19544
MSR_SIZE = 8000
# IMPORTANT!!!!!!!!!!! First one for each embedding type should be the default.
SUPPORTED_OPTIMIZERS = {
    "vanilla": ["adagrad"],
    "euclid": ["adagrad"],
    "poincare": ["radagrad", "fullrsgd", "wfullrsgd", "ramsgrad"],
    "mix-poincare": ["mixradagrad"],
}
# IMPORTANT!! First one is the default.
# This refers to the distance function used in during training.
SUPPORTED_DIST_FUNCTIONS = {
    "vanilla": ["dist", "nn"],
    "euclid": ["dist-sq", "dist"],
    "poincare": ["dist-sq", "dist", "cosh-dist", "cosh-dist-sq", "cosh-dist-pow-*", "log-dist-sq"],
    "mix-poincare": ["dist", "dist-sq", "cosh-dist-sq"],
}
# Supported transformations applied to raw co-occurrence counts during training.
SUPPORTED_COOCC_FUNCTIONS = ["log"]
logging.basicConfig(level=logging.INFO)
def precision(eval_result):
    """Return the fraction of correct answers in an analogy evaluation section.

    eval_result -- mapping with 'correct' and 'incorrect' sequences.
    Returns 0.0 when both lists are empty (the old code raised
    ZeroDivisionError in that case).
    """
    num_correct = len(eval_result['correct'])
    total = num_correct + len(eval_result['incorrect'])
    return num_correct / total if total else 0.0
def compute_poincare_aggregate(model, config):
    """
    Precompute the average between the target and the context vector, for Poincare embeddings.
    We take as average the mid point between w and c on the geodesic that connects the 2 points
    (see page 89 in Ungar book).

    Mutates ``model.wv.vectors`` in place with the aggregated vectors.
    """
    if config["similarity"] == "poincare":
        print("precomputing aggregated vectors w+c for Poincare embeddings")
        # Squared conformal factors gamma^2 = 1 / (1 - ||x||^2) for every row.
        gamma_w_sq = 1 / (1 - np.sum(model.wv.vectors * model.wv.vectors, axis=1))
        gamma_c_sq = 1 / (1 - np.sum(model.trainables.syn1neg * model.trainables.syn1neg, axis=1))
        denominator = gamma_w_sq + gamma_c_sq - 1
        # Gyro-midpoint: gamma-weighted combination, then Moebius scalar mult by 1/2.
        agg = (model.wv.vectors * (gamma_w_sq / denominator)[:, None] +
               model.trainables.syn1neg * (gamma_c_sq / denominator)[:, None])
        model.wv.vectors = model.wv.moebius_mul_mat(agg, 0.5)
    elif config["similarity"] == "mix-poincare":
        print("precomputing aggregated vectors w+c for MIX-Poincare embeddings")
        # The embedding is a cartesian product of `num_embs` small Poincare balls;
        # apply the same midpoint computation independently on each slice.
        small_emb_size = int(model.vector_size / model.num_embs)
        for i in range(model.num_embs):
            start = i * small_emb_size
            end = (i + 1) * small_emb_size
            indexes = range(start, end)
            gamma_w_sq = 1 / (1 - np.sum(model.wv.vectors[:, indexes] * model.wv.vectors[:, indexes], axis=1))
            gamma_c_sq = 1 / (1 - np.sum(model.trainables.syn1neg[:, indexes] * model.trainables.syn1neg[:, indexes], axis=1))
            denominator = gamma_w_sq + gamma_c_sq - 1
            agg = (model.wv.vectors[:, indexes] * (gamma_w_sq / denominator)[:, None] +
                   model.trainables.syn1neg[:, indexes] * (gamma_c_sq / denominator)[:, None])
            model.wv.vectors[:, indexes] = model.wv.moebius_mul_mat(agg, 0.5)
    else:
        # Euclidean / vanilla embeddings: the aggregate is the plain elementwise sum.
        print("precomputing aggregated vectors w+c for Euclidean embeddings")
        model.wv.vectors = model.wv.vectors + model.trainables.syn1neg
def split_filename(basename):
    """Break a model file basename into its underscore-delimited fields."""
    return basename.split('_')
def parse_model_filename(model_filename):
    """Extract the training configuration encoded in a model filename.

    Returns ``(config_dict, basename)``; ``config_dict`` is None for filenames
    containing "pairs" (these follow a different naming convention).
    """
    basename = os.path.basename(model_filename)
    info = split_filename(basename)
    if "pairs" in basename:
        return None, basename
    # Positional fields: glove_ep<epochs>_size<dim>_lr<lr>_vocab<restrict>_<similarity>_...
    info_dict = {
        "epochs": int(info[1][2:]),
        "emb_size": int(info[2][4:]),
        "lr": float(info[3][2:]),
        "restrict_vocab": int(info[4][5:]),
        "similarity": info[5],
        # Plain membership tests instead of `True if ... else False`.
        "with_bias": "_bias" in basename,
        "init_near_border": "_border-init" in basename,
    }
    # Optional, tagged fields (order of the elif chain matters for tokens that
    # could match several tags).
    for s in info:
        if "OPT" in s:
            info_dict["optimizer"] = s[3:]
        elif "COOCCFUNC" in s:
            info_dict["coocc_func"] = s[9:]
        elif "DISTFUNC" in s:
            info_dict["dist_func"] = s[8:]
        elif "scale" in s:
            info_dict["use_scaling"] = True
        elif "NUMEMBS" in s:
            info_dict["num_embs"] = int(s[7:])
        elif "logprobs" in s:
            info_dict["use_log_probs"] = True
    # Fill in defaults for settings that were not encoded in the filename.
    if "optimizer" not in info_dict:
        info_dict["optimizer"] = SUPPORTED_OPTIMIZERS[info_dict["similarity"]][0]
    if "coocc_func" not in info_dict:
        info_dict["coocc_func"] = SUPPORTED_COOCC_FUNCTIONS[0]
    if "dist_func" not in info_dict:
        info_dict["dist_func"] = SUPPORTED_DIST_FUNCTIONS[info_dict["similarity"]][0]
    if "use_scaling" not in info_dict:
        info_dict["use_scaling"] = False
    if "use_log_probs" not in info_dict:
        info_dict["use_log_probs"] = False
    return info_dict, basename
class Logger:
    """Mirrors evaluation output to the standard logger and, optionally, to a file."""

    def __init__(self, fout=None):
        self.fout = fout

    def log(self, log_str='', end='\n'):
        logging.info(log_str)
        if not self.fout:
            return
        self.fout.write(log_str if end == '' else log_str + end)
# Script entry point: dispatches between training a new GloVe model (--train)
# and evaluating a previously saved one (--eval) on similarity/analogy benchmarks.
if __name__ == "__main__":
    # ------------------------------------------------------------------
    # Command-line interface.
    # ------------------------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--train', dest='train', action='store_true',
                        help='Train a new model.')
    parser.add_argument('--eval', dest='train', action='store_false',
                        help='Eval an existing model.')
    parser.add_argument('--use_our_format', dest='use_glove_format', action='store_false',
                        help='Use our format for reading the vocabulary and the co-occ matrix, instead of the format '
                             'from the original GloVe code.')
    parser.add_argument('--coocc_file', type=str,
                        help='Filename which contains the coocc matrix in text format.')
    parser.add_argument('--vocab_file', type=str,
                        help='Filename which contains the vocabulary.')
    parser.add_argument('--root', type=str,
                        default='~/Documents/Master/Thesis',
                        help='Path to the root folder that contains msc_tifreaa, data etc.')
    parser.add_argument('--euclid', type=int, default=0,
                        help='Whether it uses Euclidean distance to train the embeddings instead of dot product.')
    parser.add_argument('--poincare', type=int, default=0, help='Whether it uses Poincare embeddings or not.')
    parser.add_argument('--dist_func', type=str, default="",
                        help='Distance function used by Poincare model during training.')
    parser.add_argument('--num_embs', type=int, default=0,
                        help='The number of small-dimensional planes that will come into the carthesian product of'
                             'manifolds')
    parser.add_argument('--mix', dest='mix', action='store_true',
                        help='If true, use a carthesian product of small-dimensional embeddings.')
    parser.add_argument('--nn_config', type=str, default="",
                        help='Configuration of the NN used during training.')
    parser.add_argument('--coocc_func', type=str, default="",
                        help='Co-occurence function used during training.')
    parser.add_argument('--use_scaling', dest='use_scaling', action='store_true',
                        help='Use trainable scaling factor for Poincare GloVe')
    parser.add_argument('--epochs', type=int, default=5, help='Number of epochs')
    parser.add_argument('--restrict_vocab', type=int, default=400000,
                        help='Only use the `restrict_vocab` most frequent words')
    parser.add_argument('--size', type=int, default=100, help='Embedding size')
    parser.add_argument('--optimizer', type=str, default='', help='What optimizer to use.')
    parser.add_argument('--lr', type=float, default=0.05, help='Learning rate')
    parser.add_argument('--bias', dest='with_bias', action='store_true', help='Use a model with biases.')
    parser.add_argument('--workers', type=int, default=3, help='Number of concurrent workers.')
    parser.add_argument('--chunksize', type=int, default=1000,
                        help='Number of `prange` iterations that each thread processes at a time')
    parser.add_argument('--model_filename', type=str, default='', help='Path to saved model.')
    parser.add_argument('--train_log_filename', type=str, default='', help='Path to the training log.')
    parser.add_argument('--cosadd', dest='cosadd', action='store_true',
                        help='Use 3COSADD when evaluating word analogy.')
    parser.add_argument('--cosmul', dest='cosmul', action='store_true',
                        help='Use 3COSMUL when evaluating word analogy.')
    parser.add_argument('--distadd', dest='distadd', action='store_true',
                        help='Use 3DISTADD when evaluating word analogy.')
    parser.add_argument('--hypcosadd', dest='hypcosadd', action='store_true',
                        help='Use 3COSADD with gyrocosine when evaluating word analogy.')
    parser.add_argument('--agg_eval', dest='agg_eval', action='store_true',
                        help='Use w+c during evaluation, instead of just w. Only works for Poincare embeddings.')
    parser.add_argument('--ctx_eval', dest='ctx_eval', action='store_true',
                        help='Use c during evaluation, instead of w.')
    parser.add_argument('--cosine_eval', dest='cosine_eval', action='store_true',
                        help='Use cosine distance during evaluation, instead of the Poincare distance.')
    parser.add_argument('--ckpt_emb', dest='ckpt_emb', action='store_true',
                        help='Store checkpoints during training with the value of the embedding for certain words')
    parser.add_argument('--init_near_border', dest='init_near_border', action='store_true',
                        help='If set, initialize embeddings near the Poincare ball border, instead of near the origin.')
    parser.add_argument('--init_pretrained', dest='init_pretrained', action='store_true',
                        help='If set, initialize embeddings from pretrained model.')
    parser.add_argument('--use_log_probs', dest='use_log_probs', action='store_true',
                        help='If set, use log-probabilities instead of log-counts during training GloVe.')
    parser.add_argument('--debug', dest='is_debug', action='store_true',
                        help='Run model in debug mode')
    # NOTE(review): `shift_origin` and `cosine_dist` receive defaults here but no
    # corresponding command-line flags are defined above -- confirm this is intended.
    parser.set_defaults(train=False, use_glove_format=True, mix=False, with_bias=False, use_scaling=False,
                        cosadd=False, cosmul=False, distadd=False, hypcosadd=False, cosine_eval=False,
                        agg_eval=False, ctx_eval=False, shift_origin=False, cosine_dist=False, ckpt_emb=False,
                        init_near_border=False, init_pretrained=False, use_log_probs=False, is_debug=False)
    args = parser.parse_args()
    if args.size > 4 and args.size % 4 != 0:
        raise RuntimeError("Choose an embedding size that is a multiple of 4 (it speeds up computation)")
    model = None
    if args.train:
        # --------------------------------------------------------------
        # Training branch.
        # --------------------------------------------------------------
        logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
        callbacks = []
        # Resolve the embedding geometry from the flags.
        emb_type = None
        if args.poincare == 1:
            emb_type = 'poincare'
        elif args.euclid == 1:
            emb_type = 'euclid'
        else:
            emb_type = 'vanilla'
        if args.mix:
            emb_type = "mix-" + emb_type
        # Validate/resolve the co-occurrence transformation.
        coocc_func = args.coocc_func
        if coocc_func == "":
            coocc_func = SUPPORTED_COOCC_FUNCTIONS[0]  # Set the default
        else:
            if coocc_func not in SUPPORTED_COOCC_FUNCTIONS:
                raise RuntimeError("Unsupported co-occurrence function {}".format(coocc_func))
        # Validate/resolve the training distance function (and the exponent for
        # the "cosh-dist-pow-*" family).
        cosh_dist_pow = 0
        dist_func = args.dist_func
        if emb_type == "poincare" or emb_type == "mix-poincare" or emb_type == "euclid":
            if dist_func == "":
                dist_func = SUPPORTED_DIST_FUNCTIONS[emb_type][0]  # Set the default
            elif "cosh-dist-pow" in dist_func:
                cosh_dist_pow = int(dist_func.rsplit("-", 1)[1])
            else:
                if dist_func not in SUPPORTED_DIST_FUNCTIONS[emb_type]:
                    raise RuntimeError("Unsupported distance function {} for emb type {}".format(dist_func, emb_type))
        # Number of small embeddings for cartesian-product ("mix") models.
        num_embs = 0
        if "mix-" in emb_type:
            if args.num_embs == 0:
                raise RuntimeError("Invalid number of small embeddings.")
            num_embs = args.num_embs
        if args.num_embs != 0 and "mix-" not in emb_type:
            raise RuntimeError("num_embs is not supported for this embedding type: {}".format(emb_type))
        nn_config = None
        if dist_func == "nn":
            if args.nn_config == "":
                raise RuntimeError("No NN configuration provided!")
            nn_config = NNConfig(args.nn_config)
        # Validate/resolve the optimizer for the chosen geometry.
        optimizer = args.optimizer
        if optimizer == "":
            optimizer = SUPPORTED_OPTIMIZERS[emb_type][0]  # Set the default
        else:
            if optimizer not in SUPPORTED_OPTIMIZERS[emb_type]:
                raise RuntimeError("Unsupported optimizer {} for embedding type {}".format(optimizer, emb_type))
        # Build the output filename so it encodes the full training configuration
        # (parse_model_filename later recovers the config from this name).
        filename = MODEL_FILENAME_PATTERN.format(
            "glove_baseline" if emb_type == "vanilla" else "geometric_emb",
            args.epochs, args.size, str(args.lr), args.restrict_vocab, emb_type)
        filename = filename + "_OPT" + optimizer
        filename = filename + "_COOCCFUNC" + coocc_func
        if emb_type != "vanilla":
            filename = filename + "_DISTFUNC" + dist_func
        elif emb_type == "vanilla" and dist_func == "nn":
            filename = filename + "_DISTFUNCnn"
        if dist_func == "nn":
            filename = filename + "_NN" + args.nn_config
        if num_embs:
            filename = filename + "_NUMEMBS" + str(num_embs)
        if args.with_bias:
            filename = filename + "_bias"
        if args.use_scaling:
            if emb_type != "poincare" and emb_type != "mix-poincare":
                raise RuntimeError("Scaling is only supported for Poincare GloVe embeddings.")
            filename = filename + "_scale"
        if args.use_log_probs:
            filename = filename + "_logprobs"
        if args.init_near_border:
            filename = filename + "_border-init"
        # Optionally warm-start from a pretrained model matching the setting.
        initialization_config = None
        if args.init_pretrained:
            if emb_type == "poincare" and args.size == 100:
                pretrained_model_filename = INITIALIZATION_MODEL_FILENAME["100D"]
            elif emb_type == "mix-poincare" and args.size == 100 and num_embs == 50:
                pretrained_model_filename = INITIALIZATION_MODEL_FILENAME["50x2D"]
            elif emb_type == "vanilla" and args.size == 100:
                pretrained_model_filename = INITIALIZATION_MODEL_FILENAME["vanilla_100D"]
            else:
                raise RuntimeError("Undefined pretrained embedding for this setting.")
            print("Initializing embeddings from pretrained model", pretrained_model_filename)
            initialization_config = InitializationConfig(
                pretrained_model_filename=os.path.join(args.root, pretrained_model_filename)
            )
            filename = filename + "_INITpretrained"
        model_filename = os.path.join(args.root, filename)
        # Optional per-word embedding checkpoints taken while training runs.
        ckpt_word_list = None
        if args.ckpt_emb:
            with open(os.path.join(args.root, "msc_tifreaa/data/google_analogy_vocab.txt"), "r") as f:
                ckpt_word_list = [word.strip() for word in f.readlines()]
            ckpt_filename = "word_emb_checkpoints/emb_ckpt_" + os.path.basename(model_filename)
            ckpt_filename = os.path.join(args.root, ckpt_filename)
            callbacks.append(WordEmbCheckpointSaver(ckpt_filename=ckpt_filename))
        print("[Training] Train new model {} using {}".format(model_filename, optimizer.upper()), end="")
        if emb_type == "poincare":
            print(" and distance function {}".format(dist_func.upper()))
        else:
            print("")
        # Constructing Glove runs the whole training loop.
        model = Glove(
            use_glove_format=args.use_glove_format,
            coocc_file=args.coocc_file,
            vocab_file=args.vocab_file,
            restrict_vocab=args.restrict_vocab,
            num_workers=args.workers,
            chunksize=args.chunksize,
            epochs=args.epochs,
            euclid=args.euclid,
            poincare=args.poincare,
            with_bias=args.with_bias,
            use_log_probs=args.use_log_probs,
            dist_func=dist_func,
            cosh_dist_pow=cosh_dist_pow,
            num_embs=num_embs,
            nn_config=nn_config,
            coocc_func=coocc_func,
            use_scaling=args.use_scaling,
            lr=args.lr,
            optimizer=optimizer,
            ckpt_word_list=ckpt_word_list,
            init_near_border=args.init_near_border,
            init_pretrained_config=initialization_config,
            callbacks=callbacks,
            vector_size=args.size,
            vector_dtype=REAL)
        if args.use_scaling:
            print("Final scaling factor is {}".format(model.scaling_factor))
        if optimizer == "wfullrsgd" or optimizer == "fullrsgd":
            logging.info("")
            logging.info("Number of projections back to the Poincare ball: {}".format(model.num_projections))
        # Cleanup model.
        model.cleanup()
        # Save model.
        print("Saving model to {}".format(model_filename))
        with open(model_filename, "wb") as f:
            model.save(f)
    else:
        # --------------------------------------------------------------
        # Evaluation branch.
        # --------------------------------------------------------------
        model = Glove.load(args.model_filename)
        wv = model.wv
        wv.trainables = model.trainables
        # XXX: uncomment to evaluate the model with the scaled and projected pretrained embeddings used for initialization
        # wv.vectors = model.trainables.initialization_config.init_vectors
        directory = os.path.join(args.root, "eval_logs")
        if not os.path.exists(directory):
            os.makedirs(directory)
        # Extract model info from the model filename.
        config, basename = parse_model_filename(args.model_filename)
        # Ugly fix. To ensure backward compatibilty with an earlier version that used a different convention for the filename.
        if config is None:
            config = {}
            config["similarity"] = "poincare" if ("poincare" in basename) else ("euclid" if ("euclid" in basename) else "vanilla")
        # Pick the analogy-answering method: explicit flags win, otherwise the
        # default depends on the embedding geometry.
        analogy_type = None
        if args.cosadd:
            analogy_type = "cosadd"
        elif args.cosmul:
            analogy_type = "cosmul"
        elif args.distadd:
            analogy_type = "distadd"
        elif args.hypcosadd:
            analogy_type = "hypcosadd"
        elif config["similarity"] == "poincare":
            if args.cosine_eval:
                analogy_type = "hyp_pt-eucl-cos-dist"
            else:
                analogy_type = "hyp_pt"
        elif config["similarity"] == "mix-poincare":
            if args.cosine_eval:
                analogy_type = "mix-hyp_pt-eucl-cos-dist"
            else:
                analogy_type = "mix-hyp_pt"
        else:
            analogy_type = "cosadd"  # The default for dot product and Euclidean embeddings is 3COSADD
        if config["similarity"] == "mix-poincare" and "num_embs" not in config:
            raise RuntimeError("Mix Poincare embeddings should have a valid number of small embeddings")
        # Optionally evaluate aggregated (w+c) or context (c) vectors instead of w.
        if args.agg_eval:
            compute_poincare_aggregate(model, config)
        if args.ctx_eval:
            model.wv.vectors = model.trainables.syn1neg
        if args.shift_origin:
            # Re-center the embedding cloud via a Moebius translation by the negative mean.
            left_offset = -np.average(model.wv.vectors, axis=0)
            # right_offset = -np.average(model.wv.vectors, axis=0)
            left_offset_mat = np.tile(left_offset.reshape(1, -1), (model.wv.vectors.shape[0], 1))
            # right_offset_mat = np.tile(right_offset.reshape(1, -1), (model.wv.vectors.shape[0], 1))
            model.wv.vectors = gensim.models.keyedvectors.PoincareWordEmbeddingsKeyedVectors.moebius_add_mat(
                left_offset_mat, model.wv.vectors)
            # model.wv.vectors = gensim.models.keyedvectors.PoincareWordEmbeddingsKeyedVectors.moebius_add_mat(
            #     model.wv.vectors, right_offset_mat)
        if config["similarity"] == "poincare" or config["similarity"] == "mix-poincare":
            if args.cosine_eval:
                model.wv.use_poincare_distance = False
                model.wv.init_sims()
            else:
                model.wv.use_poincare_distance = True
        # Create name for file that will store the logs.
        eval_log_filename = "eval_logs/eval_" + basename.split("_", 1)[1] + "_" + analogy_type + \
                            ("_agg" if args.agg_eval else ("_ctx" if args.ctx_eval else ""))
        # eval_log_filename = eval_log_filename + ("_cosdist" if config["similarity"] == "poincare" and args.cosine_eval else "")
        eval_log_filename = os.path.join(args.root, eval_log_filename)
        feval = None
        if args.restrict_vocab != 0:
            feval = open(eval_log_filename, "w+")
            logger = Logger(feval)
        else:
            # Don't save the output to file if we are not running the word analogy benchmarks.
            logger = Logger()
        if len(config) > 1:
            logger.log('MODEL: (Epochs, {}), (Emb size, {}), (LR, {}), (Optimizer, {}), (With bias, {}), (Similarity, {}), (Dist. func. {}), (Scaling, {}), (Use Log-Probs, {}), (Restrict vocab, {})'.format(
                config["epochs"], config["emb_size"], config["lr"], config["optimizer"].upper(), "yes" if config["with_bias"] else "no",
                config["similarity"], config["dist_func"].upper(), config["use_scaling"], config["use_log_probs"],
                config["restrict_vocab"]
            ))
        if args.restrict_vocab != 0:
            logger.log('EVALUATION: (Analogy type, {}), (Vectors used, {})'.format(
                analogy_type, ("W+C" if args.agg_eval else ("C" if args.ctx_eval else "W"))))
        else:
            logger.log()
        sim_debug_file = None
        if args.is_debug:
            sim_debug_file = os.path.join(args.root, "eval_logs/debug_similarity.csv")
        hyperlex_debug_file = None
        if args.is_debug:
            hyperlex_debug_file = os.path.join(args.root, "eval_logs/debug_hyperlex.csv")
        # logger.log("========= Various statistics =========")
        # norms_distribution(model)
        # wordnet_level_rank_vector_norm_correlation(model, args.root)
        # Word-similarity benchmarks: each call reports (Pearson, Spearman, OOV ratio).
        logger.log("========= Similarity evaluation =========")
        pearson, spearman, ratio = wv.evaluate_word_pairs(
            os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'rare_word.txt'),
            dummy4unknown=False,
            restrict_vocab=args.restrict_vocab
        )
        logger.log("Stanford Rare World: {:.4f} {:.4f} {:.4f}".format(pearson[0], spearman[0], ratio))
        pearson, spearman, ratio = wv.evaluate_word_pairs(
            os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'wordsim353.tsv'),
            dummy4unknown=False,
            debug_file=sim_debug_file,
            restrict_vocab=args.restrict_vocab
        )
        logger.log("WordSim353: {:.4f} {:.4f} {:.4f}".format(pearson[0], spearman[0], ratio))
        pearson, spearman, ratio = wv.evaluate_word_pairs(
            os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'simlex999.txt'),
            dummy4unknown=False,
            restrict_vocab=args.restrict_vocab
        )
        logger.log("SimLex999: {:.4f} {:.4f} {:.4f}".format(pearson[0], spearman[0], ratio))
        pearson, spearman, ratio = wv.evaluate_word_pairs(
            os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'mturk771.tsv'),
            dummy4unknown=False,
            restrict_vocab=args.restrict_vocab
        )
        logger.log("MTurk771: {:.4f} {:.4f} {:.4f}".format(pearson[0], spearman[0], ratio))
        pearson, spearman, ratio = wv.evaluate_word_pairs(
            os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'simverb3500.tsv'),
            dummy4unknown=False,
            restrict_vocab=args.restrict_vocab
        )
        logger.log("SimVerb3500: {:.4f} {:.4f} {:.4f}".format(pearson[0], spearman[0], ratio))
        pearson, spearman, ratio = wv.evaluate_word_pairs(
            os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'men_dataset.tsv'),
            dummy4unknown=False,
            restrict_vocab=args.restrict_vocab
        )
        logger.log("MEN: {:.4f} {:.4f} {:.4f}".format(pearson[0], spearman[0], ratio))
        pearson, spearman, ratio = wv.evaluate_word_pairs(
            os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'MC-30.tsv'),
            dummy4unknown=False,
            restrict_vocab=args.restrict_vocab
        )
        logger.log("MC: {:.4f} {:.4f} {:.4f}".format(pearson[0], spearman[0], ratio))
        pearson, spearman, ratio = wv.evaluate_word_pairs(
            os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'RG-65.tsv'),
            dummy4unknown=False,
            restrict_vocab=args.restrict_vocab
        )
        logger.log("RG: {:.4f} {:.4f} {:.4f}".format(pearson[0], spearman[0], ratio))
        pearson, spearman, ratio = wv.evaluate_word_pairs(
            os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'YP-130.tsv'),
            dummy4unknown=False,
            restrict_vocab=args.restrict_vocab
        )
        logger.log("YP: {:.4f} {:.4f} {:.4f}".format(pearson[0], spearman[0], ratio))
        # Analogy benchmarks (skipped when restrict_vocab == 0).
        if args.restrict_vocab != 0:
            logger.log("=========== Analogy evaluation ==========")
            # Select the batched most_similar implementation for the analogy type.
            most_similar = None
            if analogy_type == "cosadd" or analogy_type == "hypcosadd":
                most_similar = gensim.models.keyedvectors.VanillaWordEmbeddingsKeyedVectors.batch_most_similar_analogy
            elif analogy_type == "cosmul":
                most_similar = gensim.models.keyedvectors.VanillaWordEmbeddingsKeyedVectors.batch_most_similar_cosmul_analogy
            elif analogy_type == "hyp_pt" or analogy_type == "hyp_pt-eucl-cos-dist":
                most_similar = gensim.models.keyedvectors.PoincareWordEmbeddingsKeyedVectors.batch_most_similar_hyperbolic_analogy
            elif analogy_type == "mix-hyp_pt" or analogy_type == "mix-hyp_pt-eucl-cos-dist":
                most_similar = gensim.models.keyedvectors.MixPoincareWordEmbeddingsKeyedVectors.batch_most_similar_mix_hyperbolic_analogy
            elif analogy_type == "distadd":
                most_similar = gensim.models.keyedvectors.PoincareWordEmbeddingsKeyedVectors.batch_most_similar_3distadd_analogy
            else:
                raise RuntimeError("Unknown analogy type.")
            print(config["similarity"], analogy_type)
            # Cosine-based methods need freshly (re)normalized vectors.
            if (config["similarity"] == "mix-poincare" or config["similarity"] == 'poincare') and (analogy_type == "cosadd" or analogy_type == "cosmul"):
                model.wv.vectors_norm = None
                gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.init_sims(model.wv)
            if config["similarity"] == "poincare" and args.cosine_eval:
                model.wv.vectors_norm = None
                gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.init_sims(model.wv)
            start = time.time()
            analogy_eval = wv.accuracy(
                os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'questions-words.txt'),
                restrict_vocab=args.restrict_vocab,
                most_similar=most_similar,
                debug=args.is_debug)
            # Now, instead of adding the correct/incorrect words to a list, I am just counting the number of correct/incorrect answers.
            # Columns: correct, attempted, avg argmax time, accuracy on attempted, coverage-adjusted accuracy.
            logger.log("Semantic Google: {} {} {:.2f} {:.4f} {:.4f}".format(analogy_eval[-3]['correct'][0],
                                                          analogy_eval[-3]['correct'][0] + analogy_eval[-3]['incorrect'][0],
                                                          analogy_eval[-3]['t_argmax'][0],
                                                          analogy_eval[-3]['correct'][0] / (analogy_eval[-3]['correct'][0] + analogy_eval[-3]['incorrect'][0]),
                                                          analogy_eval[-3]['correct'][0] / SEM_GOOGLE_SIZE))
            logger.log("Syntactic Google: {} {} {:.2f} {:.4f} {:.4f}".format(analogy_eval[-2]['correct'][0],
                                                          analogy_eval[-2]['correct'][0] + analogy_eval[-2]['incorrect'][0],
                                                          analogy_eval[-2]['t_argmax'][0],
                                                          analogy_eval[-2]['correct'][0] / (analogy_eval[-2]['correct'][0] + analogy_eval[-2]['incorrect'][0]),
                                                          analogy_eval[-2]['correct'][0] / SYN_GOOGLE_SIZE))
            logger.log("Google: {} {} {:.2f} {:.4f} {:.4f}".format(analogy_eval[-1]['correct'][0],
                                                          analogy_eval[-1]['correct'][0] + analogy_eval[-1]['incorrect'][0],
                                                          analogy_eval[-1]['t_argmax'][0],
                                                          analogy_eval[-1]['correct'][0] / (analogy_eval[-1]['correct'][0] + analogy_eval[-1]['incorrect'][0]),
                                                          analogy_eval[-1]['correct'][0] / GOOGLE_SIZE))
            if not args.is_debug:
                analogy_eval = wv.accuracy(
                    os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data/', 'msr_word_relationship.processed'),
                    restrict_vocab=args.restrict_vocab,
                    most_similar=most_similar)
                # Now, instead of adding the correct/incorrect words to a list, I am just counting the number of correct/incorrect answers.
                logger.log("Microsoft: {} {} {:.2f} {:.4f} {:.4f}".format(analogy_eval[-1]['correct'][0],
                                                          analogy_eval[-1]['correct'][0] + analogy_eval[-1]['incorrect'][0],
                                                          analogy_eval[-1]['t_argmax'][0],
                                                          analogy_eval[-1]['correct'][0] / (analogy_eval[-1]['correct'][0] + analogy_eval[-1]['incorrect'][0]),
                                                          analogy_eval[-1]['correct'][0] / MSR_SIZE))
            logging.info("")
            logging.info("Analogy task took {} seconds to perform.".format(time.time() - start))
        if feval:
            feval.close()
| 32,472 | 51.715909 | 206 | py |
poincare_glove | poincare_glove-master/plot/plot_train_loss.py | #!/usr/local/bin/python3
import matplotlib
import numpy as np
import os
import sys

# Fall back to the non-interactive Agg backend on headless machines (no $DISPLAY).
if matplotlib.get_backend() != "MacOSX" and os.environ.get('DISPLAY') is None:
    matplotlib.use('Agg')
import matplotlib.pyplot as plt

"""
argv[1] = one train log file and it will plot the info about the loss and the word similarity
scores after each epoch
argv[2] = the folder in which the output image will be saved; if argv[2] is missing, then the figure will not be saved
to file
"""

train_log_file = sys.argv[1]
fig_dir = sys.argv[2] if len(sys.argv) > 2 else None
# Derive the output image name from the log file name (train_XXX -> fig_train_loss_XXX).
fig_name = train_log_file.split('/')[-1].replace("train_", "fig_train_loss_")

with open(train_log_file, "r") as f:
    # End-of-epoch metrics harvested from "EPOCH END" log lines.
    epoch_end_scores = {
        "loss": [],
        "rareword": [],
        "wordsim": [],
        "simlex": []
    }
    lines = f.readlines()
    for line in lines:
        if "EPOCH END" in line:
            # Fixed-position parsing; assumes the exact comma/space layout emitted
            # by the training logger — TODO confirm field offsets against the logger.
            split_line = line.split(",")
            epoch_end_scores["loss"].append(float(split_line[0].split(" ")[7]))
            epoch_end_scores["rareword"].append(float(split_line[1].split(" ")[2]))
            epoch_end_scores["wordsim"].append(float(split_line[2].split(" ")[2]))
            epoch_end_scores["simlex"].append(float(split_line[3].split(" ")[2]))

# Convert to numpy arrays.
epoch_end_scores["loss"] = np.array(epoch_end_scores["loss"])
epoch_end_scores["rareword"] = np.array(epoch_end_scores["rareword"])
epoch_end_scores["wordsim"] = np.array(epoch_end_scores["wordsim"])
epoch_end_scores["simlex"] = np.array(epoch_end_scores["simlex"])

# Plotting.
fig = plt.figure(1)
# X axis is 1-based epoch numbers.
x = range(1, len(epoch_end_scores["loss"])+1)

# Plot end-of-epoch losses.
plt.subplot(211)
plt.plot(x, epoch_end_scores["loss"], color="blue", label="Loss")
plt.xticks(x)
plt.xlabel("Epochs")
plt.ylabel("Log Loss")
plt.title("End of epoch loss")

# Plot end-of-epoch similarity scores.
plt.subplot(212)
l1, = plt.plot(x, epoch_end_scores["rareword"], color="orange")
l2, = plt.plot(x, epoch_end_scores["wordsim"], color="green")
l3, = plt.plot(x, epoch_end_scores["simlex"], color="red")
plt.xticks(x)
plt.xlabel("Epochs")
plt.ylabel("Spearman correlation")
plt.title("End of epoch similarity scores")
plt.legend((l1, l2, l3), ("RareWord", "WordSim", "SimLex"), loc="upper right")

plt.tight_layout()
if fig_dir:
    fig.savefig(os.path.join(fig_dir, fig_name))
plt.show()
| 2,485 | 32.146667 | 118 | py |
poincare_glove | poincare_glove-master/plot/plot_word2vec_training.py | #!/usr/local/bin/python3
import matplotlib
import numpy as np
import os
import sys

# Fall back to the non-interactive Agg backend on headless machines (no $DISPLAY).
if matplotlib.get_backend() != "MacOSX" and os.environ.get('DISPLAY') is None:
    matplotlib.use('Agg')
import matplotlib.pyplot as plt

"""
argv[1] = one log file and it will plot the evolution of the word similarity scores
argv[2] = the folder in which the output image will be saved; if argv[2] is missing, then the figure will not be saved
to file
"""

log_file = sys.argv[1]
fig_dir = sys.argv[2] if len(sys.argv) > 2 else None
# Derive the output image name from the log file name (log_XXX -> fig_valid_XXX).
fig_name = log_file.split('/')[-1].replace("log_", "fig_valid_")

with open(log_file, "r") as f:
    # Similarity benchmarks sampled at each "EPOCH " log line.
    sim_scores = {
        "rareword": [],
        "wordsim": [],
        "simlex": []
    }
    # Average vector norms for frequent vs. rare words, target and context matrices.
    norms = {
        "target_top10": [],
        "target_outside_top1000": [],
        "context_top10": [],
        "context_outside_top1000": []
    }
    lines = f.readlines()
    for line in lines:
        if "EPOCH " in line:
            # Drop the first two "-"-separated fields, then parse fixed token
            # positions — TODO confirm offsets against the word2vec logger format.
            split_line = [s.strip(" ,") for s in line.strip().split("-", 2)[2].split(" ")]
            sim_scores["rareword"].append(float(split_line[2]))
            sim_scores["wordsim"].append(float(split_line[4]))
            sim_scores["simlex"].append(float(split_line[6]))
            norms["target_top10"].append(float(split_line[8]))
            norms["target_outside_top1000"].append(float(split_line[12]))
            norms["context_top10"].append(float(split_line[10]))
            norms["context_outside_top1000"].append(float(split_line[14]))

# Convert to numpy arrays.
sim_scores["rareword"] = np.array(sim_scores["rareword"])
sim_scores["wordsim"] = np.array(sim_scores["wordsim"])
sim_scores["simlex"] = np.array(sim_scores["simlex"])
norms["target_top10"] = np.array(norms["target_top10"])
norms["target_outside_top1000"] = np.array(norms["target_outside_top1000"])
norms["context_top10"] = np.array(norms["context_top10"])
norms["context_outside_top1000"] = np.array(norms["context_outside_top1000"])

# Plotting.
fig = plt.figure(1)
x = range(sim_scores["rareword"].shape[0])

# Plot evolution of similarity scores.
plt.subplot(211)
l1, = plt.plot(x, sim_scores["rareword"], color="orange")
l2, = plt.plot(x, sim_scores["wordsim"], color="green")
l3, = plt.plot(x, sim_scores["simlex"], color="red")
plt.ylabel("Spearman correlation")
plt.title("Evolution of similarity scores during training")
plt.legend((l1, l2, l3), ("RareWord", "WordSim", "SimLex"))

# Plot evolution of vector norms.
plt.subplot(212)
l1, = plt.plot(x, norms["target_top10"], color="orange")
l2, = plt.plot(x, norms["target_outside_top1000"], color="red")
l3, = plt.plot(x, norms["context_top10"], color="green")
l4, = plt.plot(x, norms["context_outside_top1000"], color="blue")
plt.ylabel("Vector norms")
plt.title("Evolution of vector norms during training")
plt.legend((l1, l2, l3, l4), ("Target-Top10", "Target-Outside Top1k", "Context-Top10", "Context-Outside Top1k"))

plt.tight_layout()
if fig_dir:
    fig.savefig(os.path.join(fig_dir, fig_name))
plt.show()
| 3,102 | 35.940476 | 118 | py |
poincare_glove | poincare_glove-master/plot/plot_glove_training.py | #!/usr/local/bin/python3
import matplotlib
import numpy as np
import os
import sys

# Fall back to the non-interactive Agg backend on headless machines (no $DISPLAY).
if matplotlib.get_backend() != "MacOSX" and os.environ.get('DISPLAY') is None:
    matplotlib.use('Agg')
import matplotlib.pyplot as plt

"""
argv[1] = one log file and it will plot the evolution of the word similarity scores
argv[2] = the folder in which the output image will be saved; if argv[2] is missing, then the figure will not be saved
to file
"""

log_file = sys.argv[1]
fig_dir = sys.argv[2] if len(sys.argv) > 2 else None
# Derive the output image name from the log file name (log_XXX -> fig_XXX.png).
fig_name = log_file.split('/')[-1].replace("log_", "fig_") + ".png"

with open(log_file, "r") as f:
    # Similarity benchmarks sampled at each "Similarity:" log line.
    sim_scores = {
        "rareword": [],
        "wordsim": [],
        "simlex": []
    }
    # Analogy accuracies are logged less often, so the epoch number is recorded too.
    analogy_scores = {
        "epoch_number": [],
        "google": [],
        "msr": []
    }
    epoch_loss = []
    norms = {
        "target_top10": [],
        "target_outside_top1000": [],
        "context_top10": [],
        "context_outside_top1000": []
    }
    lines = f.readlines()
    for line in lines:
        if "Similarity:" in line:
            # Fixed-position parsing of the similarity/norms line — TODO confirm
            # token offsets against the GloVe training logger format.
            split_line = [s.strip(" ,") for s in line.strip().split(":", 1)[1].split(" ")]
            sim_scores["rareword"].append(float(split_line[2]))
            sim_scores["wordsim"].append(float(split_line[4]))
            sim_scores["simlex"].append(float(split_line[6]))
            norms["target_top10"].append(float(split_line[8]))
            norms["target_outside_top1000"].append(float(split_line[12]))
            norms["context_top10"].append(float(split_line[10]))
            norms["context_outside_top1000"].append(float(split_line[14]))
        elif "Analogy:" in line:
            split_line = [s.strip(" ,") for s in line.strip().split(" ")]
            analogy_scores["epoch_number"].append(int(split_line[2]))
            analogy_scores["google"].append(float(split_line[5]))
            analogy_scores["msr"].append(float(split_line[7]))
        elif "EPOCH - " in line:
            # The epoch loss is the last whitespace-separated token on the line.
            epoch_loss.append(float(line.strip().rsplit(" ", 1)[1]))

# Convert to numpy arrays.
sim_scores["rareword"] = np.array(sim_scores["rareword"])
sim_scores["wordsim"] = np.array(sim_scores["wordsim"])
sim_scores["simlex"] = np.array(sim_scores["simlex"])
norms["target_top10"] = np.array(norms["target_top10"])
norms["target_outside_top1000"] = np.array(norms["target_outside_top1000"])
norms["context_top10"] = np.array(norms["context_top10"])
norms["context_outside_top1000"] = np.array(norms["context_outside_top1000"])
analogy_scores["epoch_number"] = np.array(analogy_scores["epoch_number"])
analogy_scores["google"] = np.array(analogy_scores["google"])
analogy_scores["msr"] = np.array(analogy_scores["msr"])
epoch_loss = np.array(epoch_loss)

# Plotting similarity scores and norms.
fig = plt.figure("Glove training", figsize=(15, 8))
x = range(sim_scores["rareword"].shape[0])

# Plot evolution of similarity scores.
plt.subplot(221)
l1, = plt.plot(x, sim_scores["rareword"], color="orange")
l2, = plt.plot(x, sim_scores["wordsim"], color="green")
l3, = plt.plot(x, sim_scores["simlex"], color="red")
plt.ylabel("Spearman correlation")
plt.title("Evolution of similarity scores during training")
plt.legend((l1, l2, l3), ("RareWord", "WordSim", "SimLex"))

# Plot evolution of analogy scores.
plt.subplot(222)
analogy_x = analogy_scores["epoch_number"]
l1, = plt.plot(analogy_x, analogy_scores["google"], color="orange")
l2, = plt.plot(analogy_x, analogy_scores["msr"], color="green")
plt.ylabel("Analogy accuracy")
plt.title("Evolution of analogy scores during training")
plt.legend((l1, l2), ("Google", "MSR"))

# Plot evolution of vector norms.
plt.subplot(223)
l1, = plt.plot(x, norms["target_top10"], color="orange")
l2, = plt.plot(x, norms["target_outside_top1000"], color="red")
l3, = plt.plot(x, norms["context_top10"], color="green")
l4, = plt.plot(x, norms["context_outside_top1000"], color="blue")
plt.ylabel("Vector norms")
plt.title("Evolution of vector norms during training")
plt.legend((l1, l2, l3, l4), ("Target-Top10", "Target-Outside Top1k", "Context-Top10", "Context-Outside Top1k"))

# Plot the per-epoch training loss.
plt.subplot(224)
l1, = plt.plot(x, epoch_loss, color="red")
plt.ylabel("Epoch loss")
plt.title("Evolution of the epoch loss during training")

plt.tight_layout()
if fig_dir:
    fig.savefig(os.path.join(fig_dir, fig_name))
plt.show()
| 4,485 | 38.008696 | 118 | py |
poincare_glove | poincare_glove-master/docker/check_fast_version.py | import sys
try:
    # The optimized (compiled) word2vec routines export FAST_VERSION; the import
    # succeeds only when the C extension was actually built in this environment.
    from gensim.models.word2vec_inner import FAST_VERSION
    print('FAST_VERSION ok ! Retrieved with value ', FAST_VERSION)
    sys.exit()  # exit code 0: fast path available
except ImportError:
    print('Failed... fall back to plain numpy (20-80x slower training than the above)')
    sys.exit(-1)  # non-zero exit code lets callers (e.g. docker build) fail the check
poincare_glove | poincare_glove-master/docs/src/conf.py | # -*- coding: utf-8 -*-
#
# gensim documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 17 13:42:21 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('.'))

# -- General configuration -----------------------------------------------------
# Custom theme shipped alongside this conf.py (see html_theme_path below).
html_theme = 'gensim_theme'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.napoleon', 'sphinx.ext.imgmath', 'sphinxcontrib.programoutput']
# Include both the class docstring and __init__ docstring in autodoc class pages.
autoclass_content = "both"

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'indextoc'

# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {'index': './_templates/indexcontent.html'}

# General information about the project.
project = u'gensim'
copyright = u'2009-now, Radim Řehůřek <me(at)radimrehurek.com>'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.4'
# The full version, including alpha/beta/rc tags.
release = '3.4.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
# unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# main_colour = "#ffbbbb"

# All options below are intentionally commented out: the gensim_theme defaults apply.
html_theme_options = {
    # "rightsidebar": "false",
    # "stickysidebar": "true",
    # "bodyfont": "'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', 'sans-serif'",
    # "headfont": "'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', 'sans-serif'",
    # "sidebarbgcolor": "#ffffff",
    # "footerbgcolor": "#771111",
    # "relbarbgcolor": "#993333",
    # "sidebartextcolor": "#000000",
    # "sidebarlinkcolor": "#330000",
    # "codebgcolor": "#fffff0",
    # "headtextcolor": "#000080",
    # "headbgcolor": "#f0f0ff",
    # "bgcolor": "#ffffff",
}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['.']

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "gensim"

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = ''

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
html_sidebars = {}  # {'index': ['download.html', 'globaltoc.html', 'searchbox.html', 'indexsidebar.html']}
# html_sidebars = {'index': ['globaltoc.html', 'searchbox.html']}

# If false, no module index is generated.
# html_use_modindex = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
html_split_index = False

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
html_domain_indices = False

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'gensimdoc'
html_show_sphinx = False

# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', 'gensim.tex', u'gensim Documentation', u'Radim Řehůřek', 'manual')]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = False

# Additional stuff for the LaTeX preamble.
# latex_preamble = ''

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_use_modindex = True

suppress_warnings = ['image.nonlocal_uri', 'ref.citation', 'ref.footnote']
| 7,370 | 32.657534 | 114 | py |
poincare_glove | poincare_glove-master/docs/notebooks/test_notebooks.py | import os
import sys
import tempfile
from glob import glob
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
def _notebook_run(path):
    """Execute a notebook via nbconvert and collect output.

    :returns (parsed nb object, execution errors)
    """
    # Run under the kernel matching the current major Python version (python2/python3).
    kernel_name = 'python%d' % sys.version_info[0]
    this_file_directory = os.path.dirname(__file__)
    # NOTE(review): `errors` is returned but never appended to anywhere below, so
    # callers asserting `errors == []` can never fail on collected cell errors —
    # confirm whether errors should be harvested from the executed notebook.
    errors = []
    with tempfile.NamedTemporaryFile(suffix=".ipynb", mode='wt') as fout:
        with open(path) as f:
            nb = nbformat.read(f, as_version=4)
            # Force the notebook to run under the kernel chosen above.
            nb.metadata.get('kernelspec', {})['name'] = kernel_name
            ep = ExecutePreprocessor(kernel_name=kernel_name, timeout=10)
            try:
                # Execute all cells with the notebook's directory as the working path.
                ep.preprocess(nb, {'metadata': {'path': this_file_directory}})
            except CellExecutionError as e:
                # Notebooks can opt out of CI execution by raising with "SKIP"
                # somewhere in the traceback; anything else is a real failure.
                if "SKIP" in e.traceback:
                    print(str(e.traceback).split("\n")[-2])
                else:
                    raise e
            except RuntimeError as e:
                # Presumably kernel startup/timeout problems — report and continue.
                print(e)
            finally:
                # Persist the (possibly partially) executed notebook to the temp file.
                nbformat.write(nb, fout)
    return nb, errors
def test_notebooks():
    """Run every .ipynb in the current directory (names with spaces are skipped)
    and assert that execution produced no collected errors."""
    for nb_path in glob("*.ipynb"):
        if " " in nb_path:
            continue
        print("Testing {}".format(nb_path))
        _, errors = _notebook_run(nb_path)
        assert errors == []
| 1,426 | 29.361702 | 78 | py |
poincare_glove | poincare_glove-master/util_scripts/nickel_transitive_closure.py | #!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from nltk.corpus import wordnet as wn
# make sure each edge is included only once
edges = set()
for synset in wn.all_synsets(pos='n'):
    # write the transitive closure of all hypernyms of a synset to file
    for hyper in synset.closure(lambda s: s.hypernyms()):
        edges.add((synset.name(), hyper.name()))
    # also write transitive closure for all instances of a synset
    for instance in synset.instance_hyponyms():
        for hyper in instance.closure(lambda s: s.instance_hypernyms()):
            edges.add((instance.name(), hyper.name()))
            # ... and the hypernym closure of each instance-hypernym, so that
            # instances are linked to the full noun hierarchy above them.
            for h in hyper.closure(lambda s: s.hypernyms()):
                edges.add((instance.name(), h.name()))

# Dump the (child, ancestor) pairs as a TSV file, one edge per line.
with open('../data/wordnet_noun_closure.tsv', 'w') as fout:
    for i, j in edges:
        fout.write(f'{i}\t{j}\n')
| 1,008 | 35.035714 | 72 | py |
poincare_glove | poincare_glove-master/util_scripts/extract_coocc_pairs_restrict_vocab.py | from glove_code.src.glove_inner import extract_restrict_vocab_pairs
import sys
# CLI: argv[1] = input co-occurrence file, argv[2] = output file for the extracted pairs.
in_file = sys.argv[1]
out_file = sys.argv[2]

# Presumably keeps only pairs whose words fall in the top-50k vocabulary —
# see extract_restrict_vocab_pairs in glove_code.src.glove_inner for the exact semantics.
restrict_vocab = 50000
num_pairs = extract_restrict_vocab_pairs(in_file, out_file, restrict_vocab=restrict_vocab)
print(num_pairs, "for a vocab of", restrict_vocab, "words")

# (kept for reference) sanity check that re-reads the produced binary file:
# basename = filename.rsplit(".", 1)[0]
# output = basename+"_vocab"+str(restrict_vocab)+".bin"
# print(output)
#
# num_pairs = read_all(use_glove_format=True, filename=output)
# print(num_pairs)
poincare_glove | poincare_glove-master/util_scripts/format_eval_logs.py | import glob
import os
import sys
# CLI: argv[1] = glob pattern matching model files, argv[2] = experiment root directory
# (the directory that contains the eval_logs/ subfolder).
model_path_pattern = sys.argv[1]
root = sys.argv[2]
def extract_scores_from_file(file):
    """Parse one evaluation log and pull out the 13 benchmark scores.

    Slots 0-8 hold the similarity benchmarks (Rare World, WordSim, SimLex,
    MTurk, SimVerb, MEN, MC, RG, YP; second token after the colon) and slots
    9-12 hold the analogy benchmarks (Semantic Google, Syntactic Google,
    Google, Microsoft; fifth token after the colon). Missing benchmarks are
    left as -1; scores are returned as the raw strings found in the log.
    """
    # (name fragment to look for, result slot, token index in the line tail).
    # Order matters: "Semantic Google" / "Syntactic Google" must be tried
    # before the plain "Google" fragment.
    patterns = [
        ("Rare World", 0, 1),
        ("WordSim", 1, 1),
        ("SimLex", 2, 1),
        ("MTurk", 3, 1),
        ("SimVerb", 4, 1),
        ("MEN", 5, 1),
        ("MC", 6, 1),
        ("RG", 7, 1),
        ("YP", 8, 1),
        ("Semantic Google", 9, 4),
        ("Syntactic Google", 10, 4),
        ("Google", 11, 4),
        ("Microsoft", 12, 4),
    ]
    scores = [-1] * 13
    with open(file, "r") as fin:
        for raw in fin.readlines():
            if ":" not in raw:
                continue
            parts = raw.strip().split(":")
            tokens = parts[1].strip().split(" ")
            for fragment, slot, tok_idx in patterns:
                if fragment in parts[0]:
                    scores[slot] = tokens[tok_idx]
                    break
    return scores
# For every model matching the pattern, locate its four evaluation logs and print
# the scores in a tab-separated "W / W+C" layout (easy to paste into a spreadsheet).
for model_file in list(glob.glob(model_path_pattern)):
    basename = os.path.basename(model_file)
    print(basename)
    # Map the model filename (prefix_REST) to its eval log name (eval_REST) under root/eval_logs.
    eval_log_file = "eval_" + basename.split("_", 1)[1]
    eval_log_file = os.path.join(root, "eval_logs", eval_log_file)
    # Mixed-curvature models (marked "NUMEMBS" in the name) use a different log suffix.
    if "NUMEMBS" in basename:
        eval_log_file += "_mix-hyp_pt"
    else:
        eval_log_file += "_hyp_pt"
    print(eval_log_file)

    # Hyperbolic distance, W
    hyp_w_scores = extract_scores_from_file(eval_log_file)
    # Hyperbolic distance, W+C
    hyp_wc_scores = extract_scores_from_file(eval_log_file + "_agg")
    # Euclidean cosine distance, W
    cos_w_scores = extract_scores_from_file(eval_log_file + "-eucl-cos-dist")
    # Euclidean cosine distance, W+C
    cos_wc_scores = extract_scores_from_file(eval_log_file + "-eucl-cos-dist_agg")

    print("\t".join([str(w) + " / " + str(wc) for w, wc in zip(hyp_w_scores, hyp_wc_scores)]))
    print("\t".join([str(w) + " / " + str(wc) for w, wc in zip(cos_w_scores, cos_wc_scores)]))
    print()
poincare_glove | poincare_glove-master/util_scripts/text2bin_coocc_data.py | from glove_code.src.glove_inner import write_all, read_all
import sys
# CLI: argv[1] = text co-occurrence file to convert to the binary format.
filename = sys.argv[1]

# write_all converts the co-occurrence data, restricted to the top-200k vocabulary —
# see write_all in glove_code.src.glove_inner for the exact output format.
restrict_vocab = 200000
write_all(filename, restrict_vocab=restrict_vocab)

# (kept for reference) sanity check that re-reads the produced binary file:
# basename = filename.rsplit(".", 1)[0]
# output = basename+"_vocab"+str(restrict_vocab)+".bin"
# print(output)
#
# num_pairs = read_all(use_glove_format=True, filename=output)
# print(num_pairs)
poincare_glove | poincare_glove-master/util_scripts/lexical_entailment_eval.py | import argparse
import gensim
from glove_code.src.glove import Glove
from gensim.models.keyedvectors import PoincareWordEmbeddingsKeyedVectors as pkv
from gensim.models.callbacks import LossLogger, LossSetter
import json
from nltk.corpus import wordnet as wn
import numpy as np
import os
import random
from scipy import stats
from scipy.linalg import block_diag
import sys
# NOTE(review): MODEL appears unassigned/unused in the visible code — confirm.
MODEL = None
# Repository root, relative to the directory this script is run from.
ROOT = ".."
# Evaluation datasets: HyperLex graded entailment pairs and the WBLESS binary pairs.
hyperlex_file = os.path.join(ROOT, "data/hyperlex-data/hyperlex-all.txt")
wbless_file = os.path.join(ROOT, "data/BLESS_datasets/weeds_bless.json")
def char_range(c1, c2):
    """Yield each character from `c1` through `c2`, inclusive."""
    code = ord(c1)
    last = ord(c2)
    while code <= last:
        yield chr(code)
        code += 1
def read_hyperlex_format(filename, model):
    """Load HyperLex pairs, keeping only pairs fully covered by the model vocab.

    The file's first line is a header; each following line is whitespace-split,
    with the two words in columns 0-1 and the gold score in column 5.

    Returns a numpy array of [word1, word2, index1, index2, score] rows (note:
    mixed types are coerced to strings by numpy) and a dict of every retained
    word, mapping word -> [word].
    """
    vocab = model.wv.vocab
    with open(filename, "r") as fin:
        rows = [raw.strip().split() for raw in fin.readlines()[1:]]
    pairs = []
    seen_words = {}
    skipped = 0
    for row in rows:
        w1, w2 = row[0], row[1]
        if w1 not in vocab or w2 not in vocab:
            # Out-of-vocabulary pair: drop it.
            skipped += 1
            continue
        pairs.append([w1, w2, vocab[w1].index, vocab[w2].index, float(row[5])])
        seen_words[w1] = [w1]
        seen_words[w2] = [w2]
    # print("Discarded {} pairs out of {}".format(skipped, len(rows)))
    return np.array(pairs), seen_words
def read_wbless(filename):
    """Load the WBLESS dataset: a JSON list of [word1, word2, label] triples.

    Returns the parsed triples together with a dict of every word occurring in
    some pair (mapping word -> word), used for fast membership checks.
    """
    with open(filename, "r") as fin:
        triples = json.load(fin)
    words = {}
    for first, second, _ in triples:
        words[first] = first
        words[second] = second
    return triples, words
def mix_poincare_moebius_add_mat(A, B, num_embs):
    """Row-wise Moebius addition applied independently inside each of the
    `num_embs` small embeddings that make up every row of A and B."""
    dim = int(A.shape[1] / num_embs)
    out = np.empty_like(A)
    for k in range(num_embs):
        lo, hi = k * dim, (k + 1) * dim
        out[:, lo:hi] = pkv.moebius_add_mat(A[:, lo:hi], B[:, lo:hi])
    return out
def mix_poincare_moebius_mul_mat(A, r, num_embs):
    """Row-wise Moebius scalar multiplication by `r`, applied independently
    inside each of the `num_embs` small embeddings of every row of A."""
    dim = int(A.shape[1] / num_embs)
    out = np.empty_like(A)
    for k in range(num_embs):
        lo, hi = k * dim, (k + 1) * dim
        out[:, lo:hi] = pkv.moebius_mul_mat(A[:, lo:hi], r)
    return out
def fisher_info_distance(v, w, num_embs):
    """Fisher-information distance between two points of a Cartesian product of
    2D hyperbolic (half-plane) spaces.

    `v` and `w` are flattened into vectors of `num_embs` (x, y) pairs; the
    per-plane hyperbolic distances are combined via the Euclidean norm and
    scaled by sqrt(2). Only 2D factor spaces are supported.
    """
    p = v.reshape(-1)
    q = w.reshape(-1)
    dim = int(p.shape[0] / num_embs)
    if dim != 2:
        raise RuntimeError("Only implemented for Cartesian product of 2D spaces; Current small_emb_size is {}".format(
            dim))
    delta = p - q
    per_plane = []
    for k in range(num_embs):
        start = k * dim
        d = delta[start:start + dim]
        # Half-plane distance: arccosh(1 + |p - q|^2 / (2 * y_p * y_q)).
        per_plane.append(np.arccosh(1 + np.dot(d, d) / (2 * p[start + 1] * q[start + 1])))
    return np.sqrt(2) * np.linalg.norm(per_plane)
def rotate_mat(sin_cos_vector, X):
    """Rotate the rows of X, 2D subspace by 2D subspace.

    `sin_cos_vector` holds (sin a, cos a) pairs, one per 2D block of a row of
    X; a block-diagonal rotation matrix is built from them and applied to
    every row.
    """
    pairs = sin_cos_vector.reshape(-1, 2)
    blocks = [[[c, -s], [s, c]] for s, c in pairs]
    rotation = blocks[0]
    for blk in blocks[1:]:
        rotation = block_diag(rotation, blk)
    return np.matmul(rotation, X.T).T
def poincare_ball2half_plane(A, num_embs):
    """Map each 2D block of every row of A from the Poincare ball model to the
    half-plane model via the standard isometry (the ball origin maps to (0, 1))."""
    dim = int(A.shape[1] / num_embs)
    out = np.empty_like(A)
    for k in range(num_embs):
        lo = k * dim
        x = A[:, lo]
        y = A[:, lo + 1]
        denom = x * x + (1 - y) * (1 - y)
        out[:, lo] = 2 * x / denom
        out[:, lo + 1] = (1 - x * x - y * y) / denom
    return out
def get_gaussians(model, hyperlex_vocab, wbless_vocab, unsupervised=False, aggregate="w",
                  scaling_factor=1.0, words_to_use=400):
    """Convert the model's product-of-Poincare-disk embeddings into per-word
    Gaussian parameters (one (mean, sigma) pair per 2D factor space).

    Pipeline: rescale all embeddings (Moebius scalar mul), recenter them on the
    Euclidean mean of the selected top/bottom WordNet-level words (Moebius add),
    rotate each 2D block so the top-level average points in a canonical
    direction, apply the ball -> half-plane isometry, and read off Gaussian
    parameters (half-plane x/sqrt(2) = mean, y = sigma).

    The top/bottom anchor words exclude anything that appears in the HyperLex
    or WBLESS evaluation vocabularies. With `unsupervised=True` the anchors
    are instead the most/least frequent words (index-based, assumes a 50k
    vocabulary sorted by frequency — TODO confirm).

    Returns an array of shape (vocab, num_embs, 2), or None for an unknown
    `aggregate` value ("w" = target vectors, "c" = context vectors).
    """
    wv = model.wv
    wordnet_selected_words_file = os.path.join(ROOT,
                                               "msc_tifreaa/glove_code/data/wordnet_topmost_and_bottommost_words.txt")
    with open(wordnet_selected_words_file, "r") as f:
        # Select words that are on the top-most/bottom-most levels in WordNet, that are included in the
        # vocabulary of the model AND that do not appear in any of the HyperLex pairs.
        # (File layout: line 1 = top-most words, line 2 = bottom-most words, space-separated.)
        top_level_word_idxs = np.array([wv.vocab[word].index for word in filter(
            lambda w: w in wv.vocab and w not in hyperlex_vocab and w not in wbless_vocab,
            f.readline().strip().split(" "))])
        bottom_level_word_idxs = np.array([wv.vocab[word].index for word in filter(
            lambda w: w in wv.vocab and w not in hyperlex_vocab and w not in wbless_vocab,
            f.readline().strip().split(" "))])
    if unsupervised:
        top_level_word_idxs = range(0, words_to_use)
        bottom_level_word_idxs = range(50000 - words_to_use, 50000)
    else:
        top_level_word_idxs = top_level_word_idxs[:words_to_use]
        bottom_level_word_idxs = bottom_level_word_idxs[-words_to_use:]
    print(len(top_level_word_idxs), "words from the TOP-most levels were selected")
    print(len(bottom_level_word_idxs), "words from the BOTTOM-most levels were selected")
    if aggregate == "w":
        vectors = wv.vectors
    elif aggregate == "c":
        vectors = model.trainables.syn1neg
    else:
        return None
    # Rescale ALL embeddings.
    rescaled_vectors = mix_poincare_moebius_mul_mat(vectors, scaling_factor, model.num_embs)
    # Compute EUCLIDEAN average of top/bottom-most levels.
    top_and_bottom_levels_avg = np.mean(
        rescaled_vectors[np.concatenate((top_level_word_idxs, bottom_level_word_idxs)), :],
        axis=0)
    # Recenter ALL embeddings.
    mean_mat = np.repeat(top_and_bottom_levels_avg.reshape(1, -1), rescaled_vectors.shape[0], axis=0)
    recentered_vectors = mix_poincare_moebius_add_mat(-mean_mat, rescaled_vectors, model.num_embs)
    # Compute EUCLIDEAN average of the recentered top-most levels.
    top_levels_avg = np.mean(recentered_vectors[top_level_word_idxs, :], axis=0).reshape(-1, 2)
    # Normalize each 2D average to obtain per-block (sin, cos) rotation parameters.
    top_levels_avg_norm = (top_levels_avg / np.linalg.norm(top_levels_avg, axis=1)[:, None]).reshape(-1)
    # print(np.linalg.norm(top_levels_avg_norm.reshape(-1, 2), axis=1))
    # Rotate ALL embeddings.
    rotated_vectors = rotate_mat(top_levels_avg_norm, recentered_vectors)
    # Isometry to convert from Poincare ball model to half-plane model.
    half_plane_vectors = poincare_ball2half_plane(rotated_vectors, model.num_embs)
    # print("HP shape", half_plane_vectors.shape)
    # Convert half-plane points to gaussian parameters.
    gaussians = half_plane_vectors.reshape(-1, model.num_embs, 2)
    gaussians[:, :, 0] /= np.sqrt(2)
    print("Gaussians shape", gaussians.shape)
    return gaussians
def get_KL_score(w1_idx, w2_idx, gaussians, **kwargs):
    """Negative sum, over the factor spaces, of the 1-D Gaussian KL divergences
    KL(N(m1, s1) || N(m2, s2)) between the two words' Gaussians (higher = closer)."""
    g1 = gaussians[w1_idx].reshape(-1, 2)
    g2 = gaussians[w2_idx].reshape(-1, 2)
    total = 0.0
    for (m1, s1), (m2, s2) in zip(g1, g2):
        total -= 1.0 / 2 * (2 * np.log(s2 / s1) + (s1 / s2) ** 2 + (m1 - m2) ** 2 / s2 ** 2 - 1)
    return total
def get_cos_sim_score(w1_idx, w2_idx, gaussians, **kwargs):
    """Sum over factor spaces of a normalized product of the two means.

    NOTE(review): the denominator is the *sum* of the two norms (plus an
    epsilon), not their product, so this is not a true cosine similarity —
    presumably intentional; confirm with the author before "fixing".
    """
    g1 = gaussians[w1_idx].reshape(-1, 2)
    g2 = gaussians[w2_idx].reshape(-1, 2)
    parts = [np.dot(m1, m2) / (np.linalg.norm(m1) + np.linalg.norm(m2) + 1e-10)
             for (m1, _), (m2, _) in zip(g1, g2)]
    return np.sum(parts)
def get_nickel_score(w1_idx, w2_idx, score_type, gaussians, model, alpha_L=1000, alpha_N=1000, nickel_threshold=2.0,
                     debug_data=None, sigma_factor=1.0, **kwargs):
    """Entailment score for (w1, w2) combining the Fisher distance between the
    two Gaussians with the difference of their log-"cardinalities" (log-volumes
    of the per-dimension 2*sigma_factor*sigma boxes).

    score_type variants:
      'L'/'M': weight by the raw log-cardinal difference, scaled by alpha_L
               ('M' clamps the distance from below at `nickel_threshold`);
      'N'/'O': weight by the difference of inverse log-cardinals, scaled by
               alpha_N ('O' clamps the distance like 'M').
    If `debug_data` is a list, a (log|v1|, log|v2|, fisher_dist) tuple is
    appended to it. Raises RuntimeError on an unknown score_type.
    """
    v1, v2 = gaussians[w1_idx].reshape(-1, 2), gaussians[w2_idx].reshape(-1, 2)
    # log-volume of the axis-aligned box with per-dimension side 2*sigma_factor*sigma.
    log_cardinal_v1 = v1.shape[0] * np.log(2 * sigma_factor) + np.log(v1[:, 1]).sum()
    log_cardinal_v2 = v2.shape[0] * np.log(2 * sigma_factor) + np.log(v2[:, 1]).sum()
    fisher_dist = fisher_info_distance(gaussians[w1_idx], gaussians[w2_idx], model.wv.num_embs)
    if debug_data is not None:  # identity check (was the non-idiomatic `!= None`)
        debug_data.append((round(log_cardinal_v1, 2), round(log_cardinal_v2, 2), round(fisher_dist, 2)))
    if score_type == 'L':
        return -(1 + alpha_L * (log_cardinal_v1 - log_cardinal_v2)) * fisher_dist
    elif score_type == 'M':
        return -(1 + alpha_L * (log_cardinal_v1 - log_cardinal_v2)) * max(fisher_dist, nickel_threshold)
    elif score_type == 'N':
        return -(1 + alpha_N * (1.0 / log_cardinal_v2 - 1.0 / log_cardinal_v1)) * fisher_dist
    elif score_type == 'O':
        return -(1 + alpha_N * (1.0 / log_cardinal_v2 - 1.0 / log_cardinal_v1)) * max(fisher_dist, nickel_threshold)
    else:
        raise RuntimeError("Unknown score_type")
def get_is_a_score(w1_idx, w2_idx, score_type, gaussians, model, kiela_threshold=2.0, sigma_factor=1, **kwargs):
    """Interval-overlap based is-a scores (variants 'C'..'K') for the pair.

    Each word's Gaussian is treated as a product of 1-D intervals
    [m - sigma_factor*s, m + sigma_factor*s]; the variants combine the
    log-volumes of the two boxes and of their intersection ('K' additionally
    zeroes the score when the Fisher distance exceeds `kiela_threshold`).
    Raises RuntimeError on an unknown score_type.
    """
    g1 = gaussians[w1_idx].reshape(-1, 2)
    g2 = gaussians[w2_idx].reshape(-1, 2)
    log_card1 = g1.shape[0] * np.log(2 * sigma_factor) + np.log(g1[:, 1]).sum()
    log_card2 = g2.shape[0] * np.log(2 * sigma_factor) + np.log(g2[:, 1]).sum()
    overlap_volume = 1.0
    for (m1, s1), (m2, s2) in zip(g1, g2):
        lo1, hi1 = m1 - sigma_factor * s1, m1 + sigma_factor * s1
        lo2, hi2 = m2 - sigma_factor * s2, m2 + sigma_factor * s2
        # Length of the intersection of [lo1, hi1] and [lo2, hi2], by case.
        if hi2 > hi1 and lo2 > lo1:
            overlap_volume = overlap_volume * max(0, hi1 - lo2)
        elif hi2 > hi1:
            overlap_volume = overlap_volume * (hi1 - lo1)
        elif lo2 > lo1:
            overlap_volume = overlap_volume * (hi2 - lo2)
        else:
            overlap_volume = overlap_volume * max(0, hi2 - lo1)
    fisher_dist = fisher_info_distance(gaussians[w1_idx], gaussians[w2_idx], model.wv.num_embs)
    log_overlap = np.log(1e-10 + overlap_volume)
    if score_type == 'C':
        return -log_card1
    elif score_type == 'D':
        return log_card2
    elif score_type == 'E':
        return log_overlap
    elif score_type == 'F':
        return log_overlap - log_card1
    elif score_type == 'G':
        return log_overlap + log_card2
    elif score_type == 'H':
        return log_overlap + log_card2 - log_card1
    elif score_type == 'I':
        return log_card2 - log_card1
    elif score_type == 'J':
        return 1 - log_card1 / log_card2
    elif score_type == 'K':
        return 1 - log_card1 / log_card2 if fisher_dist < kiela_threshold else 0.0
    else:
        raise RuntimeError("Unknown score_type")
def get_hyperlex_score(model, gaussians, score_type, sigma_factor=1.0,
                       kiela_threshold=None, nickel_threshold=None, alpha_L=None, alpha_N=None):
    """Spearman correlation between HyperLex gold ratings and the model's scores.

    The `score_type` letter selects the scoring function; all scorers share the
    same keyword-based call signature, so unused thresholds are simply ignored.
    """
    if score_type == 'A':
        score_fn = get_cos_sim_score
    elif score_type == 'B':
        score_fn = get_KL_score
    elif score_type in ('L', 'M', 'N', 'O'):
        score_fn = get_nickel_score
    else:
        score_fn = get_is_a_score
    gold, predicted = [], []
    for _, _, w1_idx, w2_idx, gold_score in hyperlex_data:
        gold.append(float(gold_score))
        predicted.append(score_fn(int(w1_idx), int(w2_idx), score_type=score_type,
                                  gaussians=gaussians, model=model, sigma_factor=sigma_factor,
                                  kiela_threshold=kiela_threshold,
                                  nickel_threshold=nickel_threshold,
                                  alpha_L=alpha_L, alpha_N=alpha_N))
    return stats.spearmanr(gold, predicted)[0]
def wbless_eval(model, gaussians, score_type, sigma_factor=1.0,
                kiela_threshold=None, nickel_threshold=None, alpha_L=None, alpha_N=None):
    """WBLESS accuracy: score every in-vocabulary pair, then classify with the
    mean model score as the decision threshold.

    Returns (accuracy, number of pairs used, total number of pairs in the set).
    """
    if score_type == 'A':
        score_function = get_cos_sim_score
    elif score_type == 'B':
        score_function = get_KL_score
    elif score_type in ('L', 'M', 'N', 'O'):
        score_function = get_nickel_score
    else:
        score_function = get_is_a_score
    instances = []
    for w1, w2, label in wbless_data:
        # Pairs with an out-of-vocabulary word are skipped entirely.
        if w1 not in model.wv.vocab or w2 not in model.wv.vocab:
            continue
        score = score_function(model.wv.vocab[w1].index, model.wv.vocab[w2].index, score_type=score_type,
                               gaussians=gaussians, model=model, sigma_factor=sigma_factor,
                               kiela_threshold=kiela_threshold, nickel_threshold=nickel_threshold,
                               alpha_L=alpha_L, alpha_N=alpha_N)
        instances.append((w1, w2, label, score))
    # The positive/negative boundary is the mean of all model scores.
    threshold = np.mean(np.array([score for _, _, _, score in instances]))
    num_correct = sum(1 for _, _, label, score in instances
                      if (score > threshold and label == 1) or (score <= threshold and label == 0))
    return float(num_correct) / len(instances), len(instances), len(wbless_data)
if __name__ == "__main__":
    # Evaluate lexical entailment (HyperLex + WBLESS) for every score type 'A'..'O'
    # on a saved Glove model.
    parser = argparse.ArgumentParser()
    parser.add_argument('--root', type=str,
                        default='~/Documents/Master/Thesis',
                        help='Path to the root folder that contains msc_tifreaa, data etc.')
    parser.add_argument('--restrict_vocab', type=int, default=200000,
                        help='Size of vocab. Only used for evaluating analogy.')
    parser.add_argument('--model_filename', type=str, default='', help='Path to saved model.')
    parser.add_argument('--unsupervised', dest='unsupervised', action='store_true',
                        help='Evaluate unsupervised; get words by frequency, instead of from WordNet, in order to'
                             'determine the centering/rotation.')
    parser.add_argument('--words_to_use', type=int, default=400,
                        help='Number of generic words and specific words to use in order to determine the '
                             'translation/rotation.')
    parser.add_argument('--agg_eval', dest='agg_eval', action='store_true',
                        help='Use w+c during evaluation, instead of just w. Only works for Poincare embeddings.')
    parser.set_defaults(unsupervised=False, agg_eval=False)
    args = parser.parse_args()
    MODEL = Glove.load(args.model_filename)
    ROOT = args.root
    # NOTE(review): hyperlex_file and wbless_file are not defined in this chunk;
    # presumably they are assigned earlier in the file — verify before running.
    hyperlex_data, hyperlex_vocab = read_hyperlex_format(hyperlex_file, MODEL)
    wbless_data, wbless_vocab = read_wbless(wbless_file)
    gaussians = get_gaussians(MODEL, hyperlex_vocab, wbless_vocab, unsupervised=args.unsupervised,
                              scaling_factor=1.0, words_to_use=args.words_to_use)
    # Hyper-parameters depend on the distance function the model was trained with,
    # which is encoded in the model filename.
    if "DISTFUNCcosh-dist-sq" in args.model_filename:
        ALPHA_L = 1000
        ALPHA_N = 1000
        KIELA_THRESHOLD = 2.0
        NICKEL_THRESHOLD = 2.0
    elif "DISTFUNCdist-sq" in args.model_filename:
        ALPHA_L = 1000
        ALPHA_N = 1000
        KIELA_THRESHOLD = 4.0
        NICKEL_THRESHOLD = 4.0
    else:
        raise RuntimeError("Unsupported model type")
    # Run every score type on both benchmarks and print tab-separated results.
    hyperlex_scores = []
    wbless_scores = []
    for score_type in char_range('A', 'O'):
        hyperlex_scores.append(get_hyperlex_score(MODEL, gaussians, score_type=score_type, sigma_factor=1.0,
                                                  kiela_threshold=KIELA_THRESHOLD, nickel_threshold=NICKEL_THRESHOLD,
                                                  alpha_L=ALPHA_L, alpha_N=ALPHA_N))
        wbless_scores.append(wbless_eval(MODEL, gaussians, score_type=score_type, sigma_factor=1.0,
                                         kiela_threshold=KIELA_THRESHOLD, nickel_threshold=NICKEL_THRESHOLD,
                                         alpha_L=ALPHA_L, alpha_N=ALPHA_N)[0])
    print("HyperLex:")
    print("\t".join([str(round(x, 4)) for x in hyperlex_scores]))
    print("WBLESS:")
    print("\t".join([str(round(x, 4)) for x in wbless_scores]))
| 16,440 | 40.309045 | 118 | py |
poincare_glove | poincare_glove-master/util_scripts/get_model_eval_and_stats.py | import argparse
import gensim
from glove_code.src.glove import Glove
from gensim.models.callbacks import LossLogger, LossSetter
import json
from nltk.corpus import wordnet as wn
import numpy as np
import os
import random
from scipy import stats
from sklearn.linear_model import RidgeCV
import sys
# TODO: maybe tune alpha too (see "things to talk about in next meeting)
# alphas = np.array(range(21)) * 0.1
alphas = [1.0]  # alpha values swept by the HyperLex grid search (currently fixed to 1.0)
trunc_thresholds = np.array(range(21)) * 0.08  # truncation thresholds 0.0 .. 1.6 in steps of 0.08
MODEL = None  # set in __main__ after loading the saved model
# OUTDATED
def hyperlex_unsupervised(model, root, is_debug, debug_file=None):
    """Grid-search (alpha, truncation threshold) on the full HyperLex dataset,
    then report the best setting's Spearman score on the nouns and verbs splits.

    :param model: model exposing `wv.evaluate_lexical_entailment`.
    :param root: project root containing the `data/hyperlex-data` folder.
    :param is_debug: if True, print the score of every grid point.
    :param debug_file: forwarded only to the nouns-split evaluation.
    """
    print("Unsupervised HyperLex scores (full dataset):")
    max_score, max_alpha, max_thresh = 0, 0, 0
    for alpha in alphas:
        # For Glove models the sign of alpha is flipped.
        if isinstance(model, Glove):
            alpha = -alpha
        curr_max_score, curr_max_thresh = 0, 0
        for thresh in trunc_thresholds:
            spearman, _, _, _, _ = model.wv.evaluate_lexical_entailment(
                os.path.join(root, 'data/hyperlex-data', 'hyperlex-all.txt'),
                dummy4unknown=False,
                alpha=alpha,
                trunc_threshold=thresh
            )
            if is_debug:
                print("alpha={};\tthresh={}\t=>\t{}".format(alpha, thresh, spearman[0]))
                sys.stdout.flush()
            # Track the best threshold for the current alpha.
            if spearman[0] > curr_max_score:
                curr_max_score = spearman[0]
                curr_max_thresh = thresh
        # Track the best (alpha, threshold) combination overall.
        if curr_max_score > max_score:
            max_score = curr_max_score
            max_alpha = alpha
            max_thresh = curr_max_thresh
        if is_debug:
            print("For alpha={}, max score is {}, for thresh={}".format(alpha, curr_max_score, curr_max_thresh))
            print()
            sys.stdout.flush()
    # Run best model on nouns and verbs splits.
    spearman_nouns, _, _, _, _ = model.wv.evaluate_lexical_entailment(
        os.path.join(root, 'data/hyperlex-data/nouns-verbs', 'hyperlex-nouns.txt'),
        dummy4unknown=False,
        alpha=max_alpha,
        trunc_threshold=max_thresh,
        debug_file=debug_file
    )
    spearman_verbs, _, _, _, _ = model.wv.evaluate_lexical_entailment(
        os.path.join(root, 'data/hyperlex-data/nouns-verbs', 'hyperlex-verbs.txt'),
        dummy4unknown=False,
        alpha=max_alpha,
        trunc_threshold=max_thresh
    )
    print("\t- MAX SCORE (alpha/threshold/corr_nouns/corr_verbs/corr_all): {:.2f} / {:.2f} / {:.4f} / {:.4f} / {:.4f}".format(
        max_alpha, max_thresh, spearman_nouns[0], spearman_verbs[0], max_score))
def hyperlex_supervised(model, root, split_type):
    """Fit a cross-validated ridge regressor on a HyperLex train split and print
    the Spearman correlation of its predictions on the matching test split.

    :param split_type: name of the split directory, e.g. "random" or "lexical".
    """
    print("Supervised HyperLex scores ({} split):".format(split_type))
    train_file = os.path.join(root, "data/hyperlex-data/splits/"+split_type+"/hyperlex_training_all_"+split_type+".txt")
    test_file = os.path.join(root, "data/hyperlex-data/splits/"+split_type+"/hyperlex_test_all_"+split_type+".txt")
    train_features, train_labels = extract_vector_features(model, read_hyperlex_format(model, train_file))
    test_features, test_labels = extract_vector_features(model, read_hyperlex_format(model, test_file))
    # Ridge regression; the regularization strength is chosen by 3-fold CV.
    regressor = RidgeCV(cv=3,
                        alphas=[100.0, 75.0, 60.0, 50.0, 40.0, 35.0, 25.0, 20.0, 10.0, 5.0, 2.0, 1.0, 0.5, 0.25, 0.1],
                        fit_intercept=True)
    regressor.fit(train_features, train_labels)
    print("\t- Test set: {:.4f}".format(eval_model(regressor, test_features, test_labels)))
# OUTDATED
def wbless_eval(model, root):
    """Print hypernymy-detection accuracy on WBLESS using a norm-ratio heuristic.

    A pair (w1, w2) is predicted as "w1 is-a w2" when their distance is under a
    cutoff and w2's embedding norm (plus a smoothing constant) is smaller than
    w1's.  Pairs with out-of-vocabulary words are skipped.
    """
    print("Lexical entailment accuracy on WBLESS:")
    with open(os.path.join(root, "data/BLESS_datasets/weeds_bless.json"), "r") as f:
        data = json.load(f)
    threshold = 123  # distance cutoff — presumably large enough to rarely filter; verify
    alpha = 0.02  # smoothing added to the hypernym norm
    correct = []
    incorrect = []
    correct_count, total_count = 0.0, 0.0
    for w1, w2, label in data:
        if w1 not in model.wv.vocab or w2 not in model.wv.vocab:
            continue
        total_count += 1
        score = 0
        if model.wv.distance(w1, w2) < threshold:
            # Positive score iff the hypernym's (smoothed) norm is smaller.
            score = 1 - (model.wv.embedding_norm(w2) + alpha) / model.wv.embedding_norm(w1)
        if (score > 0 and label == 1) or (score <= 0 and label == 0):
            correct_count += 1
            correct.append((w1, w2, label, score))
        else:
            incorrect.append((w1, w2, label, score))
    print("\t- Accuracy: {:.4f}, from a total of {}/{} pairs".format(
        float(correct_count) / total_count, int(total_count), len(data)))
def wordnet_rank(model, root, restrict_vocab=10000):
    """Print the average rank of the true hypernym among the `restrict_vocab`
    most frequent words, ranked by distance from each hyponym's vector."""
    print("Average rank in WordNet noun closure:")
    closure_path = os.path.join(root, "data/wordnet_noun_closure.tsv")
    pairs = read_wordnet_format(model, closure_path, restrict_vocab=restrict_vocab)
    hypo_vectors = model.wv.vectors[pairs[:, 2].astype(int)]
    hyper_indexes = pairs[:, 3].astype(int)
    target_ranks = []
    for hypo_vector, hyper_idx in zip(hypo_vectors, hyper_indexes):
        # Rank all candidates by distance to the hyponym; record the hypernym's rank.
        dists = model.wv.distances(hypo_vector, model.wv.vectors[:restrict_vocab])
        target_ranks.append(stats.rankdata(dists)[hyper_idx])
    print("\t- avg rank is {:.4f}".format(np.average(np.array(target_ranks))))
def wordnet_level_rank_vector_norm_correlation(model, root):
    """Print the Spearman correlation between each synset's depth in the WordNet
    hierarchy and the L2 norm of the corresponding target vector, over the noun
    transitive-closure pairs."""
    print("Correlation between WordNet level rank and vector norms (for noun transitive closure):")
    pairs = read_wordnet_format(model, os.path.join(root, "data/wordnet_noun_closure.tsv"))
    # Both columns of the pair list (hyponym and hypernym synsets) are used.
    synset_names = np.concatenate((pairs[:, 0], pairs[:, 1]))
    level_ranks = np.array([wn.synset(name).max_depth() for name in synset_names])
    word_indexes = np.concatenate((pairs[:, 2], pairs[:, 3])).astype(int)
    norms = np.linalg.norm(model.wv.vectors[word_indexes], axis=1)
    print("\t- Spearman correlation {:.4f}".format(stats.spearmanr(level_ranks, norms)[0]))
def norms_distribution(model):
    """Print mean/std/min/max of the L2 norms of the target and context embedding matrices."""
    print("Vector norm statistics:")
    target_norms = np.linalg.norm(model.wv.vectors, axis=1)
    print("\t- Target vectors: avg norm / stddev / min norm / max norm: {:.4f} / {:.4f} / {:.4f} / {:.4f}".format(
        target_norms.mean(), target_norms.std(), target_norms.min(), target_norms.max()))
    context_norms = np.linalg.norm(model.trainables.syn1neg, axis=1)
    print("\t- Context vectors: avg norm / stddev / min norm / max norm: {:.4f} / {:.4f} / {:.4f} / {:.4f}".format(
        context_norms.mean(), context_norms.std(), context_norms.min(), context_norms.max()))
def norm_freq_correlation(model):
    """Print Spearman correlations between embedding norms and inverse word
    frequency, for the first 10000 words, the rest, and the whole vocabulary."""
    print("Correlation between vector norms and 1.0 / freq:")
    counts = np.array([model.wv.vocab[w].count for w in model.wv.index2word])
    inv_freq = 1.0 / counts
    norms = np.linalg.norm(model.wv.vectors, axis=1)
    head = stats.spearmanr(inv_freq[:10000], norms[:10000])[0]
    tail = stats.spearmanr(inv_freq[10000:], norms[10000:])[0]
    overall = stats.spearmanr(inv_freq, norms)[0]
    print("\t- Spearman correlation (word rank<10000 / word rank>10000 / all): {:.4f} / {:.4f} / {:.4f}".format(
        head, tail, overall))
def avg_relative_contrast(model, restrict_vocab=50000, num_samples=100, rank_thresh=100):
    """Print the average relative contrast (mean/min distance ratio) for sampled words.

    Samples `num_samples` words ranked below `rank_thresh` (frequent words) and
    `num_samples` words ranked between `rank_thresh` and `restrict_vocab` (rarer
    words), then reports the average relative contrast of each group and of all
    samples combined.

    :param model: model exposing `wv.vocab` and the vectors consumed by
        `compute_relative_contrast`.
    :param restrict_vocab: only consider the `restrict_vocab` most frequent words.
    :param num_samples: number of words drawn from each of the two rank groups.
    :param rank_thresh: rank boundary between the "frequent" and "rare" groups.
    """
    print("Relative contrast:")
    # Bug fix: use the `model` argument instead of the module-level MODEL global,
    # so the function works on any model that is passed in.
    indexes = random.sample(range(rank_thresh), num_samples) + \
        random.sample(range(rank_thresh, min(restrict_vocab, len(model.wv.vocab))), num_samples)
    rcs = [compute_relative_contrast(model, idx, restrict_vocab) for idx in indexes]
    print("\t- Average Relative Contrast (word rank<{}/ word rank>{} / all): {:.4f} / {:.4f} / {:.4f}".format(
        rank_thresh, rank_thresh,
        np.average(np.array(rcs[:num_samples])),
        np.average(np.array(rcs[num_samples:])),
        np.average(np.array(rcs))))
# =================== HELPERS ===================
def compute_relative_contrast(model, index, restrict_vocab):
    """Ratio of the mean to the minimum distance from word `index` to every other
    word among the `restrict_vocab` most frequent ones."""
    neighbors = np.delete(model.wv.vectors[:restrict_vocab], index, 0)
    dists = model.wv.distances(model.wv.vectors[index], neighbors)
    # The epsilon guards against division by zero when a duplicate vector exists.
    return np.average(dists) / (np.min(dists) + 1e-15)
def extract_vector_features(model, dataset, agg_function=(lambda x, y: x-y)):
    """Build (features, labels) arrays for a word-pair dataset.

    Each dataset row holds [w1, w2, idx1, idx2, score]; the feature for a pair is
    `agg_function` applied to the two target vectors (default: their difference).
    """
    left_vectors = model.wv.vectors[dataset[:, 2].astype(int)]
    right_vectors = model.wv.vectors[dataset[:, 3].astype(int)]
    # Features come from the target (w) vectors only.
    features = np.array([agg_function(a, b) for a, b in zip(left_vectors, right_vectors)])
    return features, dataset[:, 4].astype(float)
def eval_model(regression_model, features, labels):
    """Spearman correlation between the regressor's predictions and the gold labels."""
    predictions = regression_model.predict(features[:, :])
    correlation, _ = stats.spearmanr(predictions, labels)
    return correlation
def read_hyperlex_format(model, filename):
    """Parse a HyperLex split file into [w1, w2, idx1, idx2, score] rows.

    The first line of the file is a header and is skipped; pairs containing an
    out-of-vocabulary word are silently dropped.  Returns a numpy array (note:
    all columns become strings because of the mixed row types).
    """
    with open(filename, "r") as f:
        rows = [raw.strip().split() for raw in f.readlines()[1:]]  # skip header
    kept = []
    for fields in rows:
        w1, w2 = fields[0], fields[1]
        if w1 in model.wv.vocab and w2 in model.wv.vocab:
            kept.append([w1, w2, model.wv.vocab[w1].index, model.wv.vocab[w2].index, float(fields[5])])
    return np.array(kept)
def read_wordnet_format(model, filename, restrict_vocab=500000):
    """Parse a WordNet closure TSV of (hyponym-synset, hypernym-synset) pairs.

    The first line is a header and is skipped.  The vocabulary word for a synset
    name is its lemma part (before the first dot); pairs whose words are missing
    from the vocabulary or ranked beyond `restrict_vocab` are dropped.  Returns a
    numpy array of [hypo_synset, hyper_synset, hypo_index, hyper_index] rows.
    """
    with open(filename, "r") as f:
        parsed = [raw.strip().split() for raw in f.readlines()[1:]]  # skip header
    kept = []
    for fields in parsed:
        hypo, hyper = fields[0], fields[1]
        hypo_word = hypo.split(".")[0]
        hyper_word = hyper.split(".")[0]
        usable = (hypo_word in model.wv.vocab and model.wv.vocab[hypo_word].index <= restrict_vocab
                  and hyper_word in model.wv.vocab and model.wv.vocab[hyper_word].index <= restrict_vocab)
        if usable:
            kept.append([hypo, hyper, model.wv.vocab[hypo_word].index, model.wv.vocab[hyper_word].index])
    print("Instances used: ", len(kept))
    return np.array(kept)
# model_fn = os.path.join(ROOT, "models/geometric_emb/w2v_levy_nll_5_100_A01_a0001_n5_w5_c100_poincare_OPTwfullrsgd_SIMcosh-dist-sq_burnin1")
# model_fn = os.path.join(args.root, "models/word2vec_baseline/w2v_levy_sg_5_100_A025_a0001_n5_w5_c100_cosine_OPTsgd")
if __name__ == "__main__":
    # Load a saved model and run the (manually toggled) evaluation/statistics routines.
    parser = argparse.ArgumentParser()
    parser.add_argument('--root', type=str,
                        default='~/Documents/Master/Thesis',
                        help='Path to the root folder that contains msc_tifreaa, data etc.')
    parser.add_argument('--restrict_vocab', type=int, default=100000,
                        help='Size of vocab. Only used for evaluating analogy.')
    parser.add_argument('--model_filename', type=str, default='', help='Path to saved model.')
    parser.add_argument('--agg_eval', dest='agg_eval', action='store_true',
                        help='Use w+c during evaluation, instead of just w. Only works for Poincare embeddings.')
    parser.add_argument('--cosine_eval', dest='cosine_eval', action='store_true',
                        help='Use cosine distance during evaluation, instead of the Poincare distance.')
    parser.add_argument('--debug', dest='is_debug', action='store_true',
                        help='Run model in debug mode')
    parser.add_argument('--glove', dest='glove', action='store_true',
                        help='Use a Glove model.')
    parser.set_defaults(train=False, agg_eval=False, cosine_dist=False, is_debug=False, glove=False)
    args = parser.parse_args()
    # Load either a GloVe-style model or a word2vec model, depending on --glove.
    if args.glove:
        MODEL = Glove.load(args.model_filename)
    else:
        MODEL = gensim.models.Word2Vec.load(args.model_filename)
    # The individual evaluations are toggled by (un)commenting the calls below.
    # hyperlex_unsupervised(MODEL, args.root, args.is_debug)
    # hyperlex_supervised(MODEL, args.root, "random")
    # hyperlex_supervised(MODEL, args.root, "lexical")
    # norm_freq_correlation(MODEL)
    # avg_relative_contrast(MODEL)
    # norms_distribution(MODEL)
    # wbless_eval(MODEL, args.root)
    # WordNet metrics.
    # wordnet_rank(MODEL, args.root)
    # wordnet_level_rank_vector_norm_correlation(MODEL, args.root)
| 13,038 | 44.432056 | 141 | py |
poincare_glove | poincare_glove-master/util_scripts/format_coocc_data.py | # OUTDATED
import sys
# Reformat a co-occurrence file: replace each word pair with 0-based vocabulary
# indices.  Usage: python format_coocc_data.py <coocc_file.tsv>
filename = sys.argv[1]
basename = filename.rsplit(".", 1)[0]  # strip the extension for the output name
print(basename)
vocab = {}
# NOTE(review): hard-coded absolute path to the word-frequency file;
# parameterize this before reusing the script on another machine.
with open("/Users/alext/Documents/Master/Thesis/data/wiki_coocc_data/w_freq.csv", "r") as f:
    all_lines = f.readlines()
    for line in all_lines:
        index, word, count = line.strip().split("\t")
        vocab[word] = int(index)-1  # indexing starts at 1 in the file
new_filename = basename+"_formatted.tsv"
print(new_filename)
# Rewrite each "<w1>\t<w2>\t<count>" line with word indices instead of words.
with open(new_filename, "w") as fout:
    for line in open(filename, "r"):
        w1, w2, count = line.strip().split("\t")
        w1_index = vocab[w1]
        w2_index = vocab[w2]
        fout.write(str(w1_index) + "\t" + str(w2_index) + "\t" + count + "\n")
| 710 | 29.913043 | 92 | py |
poincare_glove | poincare_glove-master/util_scripts/get_vocabulary_from_sim_dataset.py | import argparse
if __name__ == "__main__":
    # Extract the words appearing in a word-similarity dataset.
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_file', type=str,
                        help='File with the similarity pairs from which to extract the vocabulary')
    parser.add_argument('--vocab_file', type=str,
                        help='Filename in which to save the vocabulary.')
    parser.add_argument('--root', type=str,
                        default='~/Documents/Master/Thesis',
                        help='Path to the root folder that contains msc_tifreaa, data etc.')
    args = parser.parse_args()
    with open(args.input_file, "r") as f:
        vocab = {}
        for line in f:
            # Skip comment lines.
            if "#" in line:
                continue
            words = line.strip().split('\t')[:2]
            print(words)
            # NOTE(review): `vocab` is never populated and --vocab_file is never
            # written to; this script appears unfinished.
| 800 | 33.826087 | 99 | py |
poincare_glove | poincare_glove-master/util_scripts/extract_quads_for_avg_delta_hyperbolicity.py | import argparse
from gensim.models.keyedvectors import VanillaWordEmbeddingsKeyedVectors, Vocab
from glove_code.src.glove_inner import read_all, read_as_neighbor_lists
from numpy import array, uint32, save, median
from random import sample, randint, choice
from timeit import default_timer
PRINT_EVERY = 1  # report progress after every PRINT_EVERY quads found
NUM_QUADS = 10  # total number of quadruples to sample
graph_map = {}  # word index -> set of co-occurring word indexes (filled in __main__)
def load_vocab(wv, vocab_file, use_glove_format, restrict_vocab):
    """Populate the keyed-vectors container `wv` with the vocabulary from `vocab_file`.

    :param wv: keyed-vectors object whose `index2word`, `vocab` and `index2freq`
        members are filled in.
    :param vocab_file: path to the vocabulary file.
    :param use_glove_format: if True, expect GloVe's "<word> <count>" lines
        (implicitly 0-indexed); otherwise expect "<1-based index>\t<word>\t<count>".
    :param restrict_vocab: keep only the first `restrict_vocab` lines when > 0.
    :return: the number of words loaded.
    """
    wv.index2freq = []
    vocab_size = 0
    with open(vocab_file, "r") as f:
        lines = f.readlines()
        if restrict_vocab > 0:
            lines = lines[:restrict_vocab]
        for position, line in enumerate(lines):
            if use_glove_format:
                # GloVe vocab: the word's index is its 0-based position in the file.
                word, count = line.strip().split(" ")
                index = position
            else:
                # Our format carries an explicit 1-based index; convert to 0-based.
                index, word, count = line.strip().split("\t")
                index = int(index) - 1
            wv.index2word.append(word)
            wv.vocab[word] = Vocab(index=index, count=int(count))
            wv.index2freq.append(count)
            vocab_size += 1
    wv.index2freq = array(wv.index2freq).astype(uint32)
    # Unused members from VanillaWordEmbeddingsKeyedVectors.
    wv.vectors_norm = None
    print("Loaded vocabulary with {} words".format(vocab_size))
    return vocab_size
if __name__ == "__main__":
    # Sample NUM_QUADS quadruples of mutually co-occurring words from a
    # co-occurrence matrix (used downstream for average delta-hyperbolicity).
    parser = argparse.ArgumentParser()
    parser.add_argument('--use_our_format', dest='use_glove_format', action='store_false',
                        help='Use our format for reading the vocabulary and the co-occ matrix, instead of the format '
                             'from the original GloVe code.')
    parser.add_argument('--coocc_file', type=str,
                        help='Filename which contains the coocc matrix in text format.')
    parser.add_argument('--vocab_file', type=str,
                        help='Filename which contains the vocabulary.')
    parser.add_argument('--quad_file', type=str,
                        help='Filename in which to save the list of the quads that will be found.')
    parser.add_argument('--root', type=str,
                        default='~/Documents/Master/Thesis',
                        help='Path to the root folder that contains msc_tifreaa, data etc.')
    parser.add_argument('--restrict_vocab', type=int, default=400000,
                        help='Only use the `restrict_vocab` most frequent words')
    parser.add_argument('--vocab_from_file', type=str, default='',
                        help='Filename from which to extract a vocabulary. Only words from this vocab will be used to'
                             'get valid quadruples.')
    parser.set_defaults(use_glove_format=True)
    args = parser.parse_args()
    wv = VanillaWordEmbeddingsKeyedVectors(0)
    vocab_size = load_vocab(
        wv,
        vocab_file=args.vocab_file,
        use_glove_format=args.use_glove_format,
        restrict_vocab=args.restrict_vocab)
    # First pass over the co-occurrence file just counts the pairs.
    num_pairs = read_all(args.use_glove_format, args.coocc_file)
    print("Finished first traversal of corpus. Detected a total of {} pairs".format(num_pairs))
    # Optionally restrict sampling to words occurring in a similarity dataset.
    limited_vocab = {}
    if args.vocab_from_file != '':
        with open(args.vocab_from_file, "r") as f:
            for line in f:
                if "#" in line:
                    continue
                words = line.strip().split('\t')[:2]
                if words[0] in wv.vocab:
                    limited_vocab[wv.vocab[words[0]].index] = words[0]
                if words[1] in wv.vocab:
                    limited_vocab[wv.vocab[words[1]].index] = words[1]
        print(len(limited_vocab.keys()))
        print(min(limited_vocab.keys()), max(limited_vocab.keys()), median(list(limited_vocab.keys())))
    # Load all the co-occ pairs in memory, as a map
    print("Reading the adjacency lists.")
    start = default_timer()
    graph_map = read_as_neighbor_lists(
        use_glove_format=args.use_glove_format, filename=args.coocc_file, num_pairs=num_pairs, vocab_size=vocab_size,
        limited_vocab=limited_vocab)
    print("Finished reading the adjacency lists for {} words in {:.2f}".format(
        vocab_size, default_timer() - start
    ))
    # Filter dictionary to only contain keys that have a non-void adjacency list.
    graph_map = {k: v for k, v in graph_map.items() if len(v) != 0}
    # Rejection sampling: pick w1 at random, w2 among w1's neighbors, w3 among
    # the common neighbors of (w1, w2), and w4 among the common neighbors of
    # (w1, w2, w3); restart whenever an intersection comes up empty.
    valid_quads_found = []
    overall_start, start = default_timer(), default_timer()
    while len(valid_quads_found) < NUM_QUADS:
        w1, w2, w3, w4 = choice(list(graph_map.keys())), None, None, None
        curr_quad = [w1]
        if len(graph_map[w1]) == 0:
            continue
        # Resample until the new word is distinct from those already picked.
        while not w2 or w2 in curr_quad:
            w2 = sample(graph_map[w1], 1)[0]
        curr_quad = [w1, w2]
        intersection12 = graph_map[w1].intersection(graph_map[w2])
        if len(intersection12) == 0:
            continue
        while not w3 or w3 in curr_quad:
            w3 = sample(intersection12, 1)[0]
        curr_quad = [w1, w2, w3]
        intersection123 = intersection12.intersection(graph_map[w3])
        if len(intersection123) == 0:
            continue
        while not w4 or w4 in curr_quad:
            w4 = sample(intersection123, 1)[0]
        curr_quad = [w1, w2, w3, w4]
        valid_quads_found.append(curr_quad)
        if len(valid_quads_found) % PRINT_EVERY == 0:
            print("Found {} quads in {:.2f} sec".format(len(valid_quads_found), default_timer()-start))
            start = default_timer()
    valid_quads_found = array(valid_quads_found)
    print("Found {} valid quads in a total of {:.2f} sec".format(
        len(valid_quads_found), default_timer() - overall_start))
    start = default_timer()
    # Persist the quads as a numpy array.
    save(args.quad_file, valid_quads_found)
    print("Finished writing quads to file in {:.2f} sec".format(default_timer() - start))
| 5,923 | 41.927536 | 118 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.