| code (stringlengths 2-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2-1.05M) |
|---|---|---|---|---|---|
import unittest
from common import TestCase
import pyuv


class UtilTest(TestCase):
    def test_hrtime(self):
        r = pyuv.util.hrtime()
        self.assertTrue(r)

    def test_freemem(self):
        r = pyuv.util.get_free_memory()
        self.assertTrue(r)

    def test_totalmem(self):
        r = pyuv.util.get_total_memory()
        self.assertTrue(r)

    def test_loadavg(self):
        r = pyuv.util.loadavg()
        self.assertTrue(r)

    def test_uptime(self):
        r = pyuv.util.uptime()
        self.assertTrue(r)

    def test_resident_set_memory(self):
        r = pyuv.util.resident_set_memory()
        self.assertTrue(r)

    def test_interface_addresses(self):
        r = pyuv.util.interface_addresses()
        self.assertTrue(r)

    def test_cpu_info(self):
        r = pyuv.util.cpu_info()
        self.assertTrue(r)


if __name__ == '__main__':
    unittest.main(verbosity=2)
| fivejjs/pyuv | tests/test_util.py | Python | mit | 914 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *


def create(kernel):
    result = Tangible()
    result.template = "object/tangible/wearables/pants/shared_pants_s14.iff"
    result.attribute_template_id = 11
    result.stfName("wearables_name","pants_s14")
    #### BEGIN MODIFICATIONS ####
    result.max_condition = 1000
    #### END MODIFICATIONS ####
    return result
| anhstudios/swganh | data/scripts/templates/object/tangible/wearables/pants/shared_pants_s14.py | Python | mit | 478 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *


def create(kernel):
    result = Tangible()
    result.template = "object/tangible/ship/attachment/engine/shared_hutt_medium_engine_s02.iff"
    result.attribute_template_id = 8
    result.stfName("item_n","ship_attachment")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
| obi-two/Rebelion | data/scripts/templates/object/tangible/ship/attachment/engine/shared_hutt_medium_engine_s02.py | Python | mit | 470 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import os
import tempfile
import itertools
import bz2
import sys
import numpy as np
from gensim import utils, matutils
from gensim.utils import check_output
from subprocess import PIPE
from gensim.models import word2vec, keyedvectors
from testfixtures import log_capture
try:
from pyemd import emd
PYEMD_EXT = True
except ImportError:
PYEMD_EXT = False
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
class LeeCorpus(object):
def __iter__(self):
with open(datapath('lee_background.cor')) as f:
for line in f:
yield utils.simple_preprocess(line)
list_corpus = list(LeeCorpus())
sentences = [
['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']
]
new_sentences = [
['computer', 'artificial', 'intelligence'],
['artificial', 'trees'],
['human', 'intelligence'],
['artificial', 'graph'],
['intelligence'],
['artificial', 'intelligence', 'system']
]
def testfile():
# temporary data will be stored to this file
return os.path.join(tempfile.gettempdir(), 'gensim_word2vec.tst')
def _rule(word, count, min_count):
if word == "human":
return utils.RULE_DISCARD # throw out
else:
return utils.RULE_DEFAULT # apply default rule, i.e. min_count
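# A callable with this (word, count, min_count) signature is what the
# `trim_rule` hook expects; the tests below pass `_rule` both to the Word2Vec
# constructor and to build_vocab() to drop the word 'human' from the vocabulary.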
def load_on_instance():
# Save and load a Word2Vec Model on instance for test
model = word2vec.Word2Vec(sentences, min_count=1)
model.save(testfile())
model = word2vec.Word2Vec() # should fail at this point
return model.load(testfile())
class TestWord2VecModel(unittest.TestCase):
def testOnlineLearning(self):
"""Test that the algorithm is able to add new words to the
vocabulary and to a trained model when using a sorted vocabulary"""
model_hs = word2vec.Word2Vec(sentences, size=10, min_count=0, seed=42, hs=1, negative=0)
model_neg = word2vec.Word2Vec(sentences, size=10, min_count=0, seed=42, hs=0, negative=5)
self.assertEqual(len(model_hs.wv.vocab), 12)
self.assertEqual(model_hs.wv.vocab['graph'].count, 3)
model_hs.build_vocab(new_sentences, update=True)
model_neg.build_vocab(new_sentences, update=True)
self.assertEqual(model_hs.wv.vocab['graph'].count, 4)
self.assertEqual(model_hs.wv.vocab['artificial'].count, 4)
self.assertEqual(len(model_hs.wv.vocab), 14)
self.assertEqual(len(model_neg.wv.vocab), 14)
def testOnlineLearningAfterSave(self):
"""Test that the algorithm is able to add new words to the
vocabulary and to a trained model when using a sorted vocabulary"""
model_neg = word2vec.Word2Vec(sentences, size=10, min_count=0, seed=42, hs=0, negative=5)
model_neg.save(testfile())
model_neg = word2vec.Word2Vec.load(testfile())
self.assertEqual(len(model_neg.wv.vocab), 12)
model_neg.build_vocab(new_sentences, update=True)
model_neg.train(new_sentences)
self.assertEqual(len(model_neg.wv.vocab), 14)
def onlineSanity(self, model):
terro, others = [], []
for l in list_corpus:
if 'terrorism' in l:
terro.append(l)
else:
others.append(l)
self.assertTrue(all(['terrorism' not in l for l in others]))
model.build_vocab(others)
model.train(others)
self.assertFalse('terrorism' in model.wv.vocab)
model.build_vocab(terro, update=True)
self.assertTrue('terrorism' in model.wv.vocab)
orig0 = np.copy(model.wv.syn0)
model.train(terro)
self.assertFalse(np.allclose(model.wv.syn0, orig0))
sim = model.n_similarity(['war'], ['terrorism'])
self.assertLess(0., sim)
def test_sg_hs_online(self):
"""Test skipgram w/ hierarchical softmax"""
model = word2vec.Word2Vec(sg=1, window=5, hs=1, negative=0, min_count=3, iter=10, seed=42, workers=2)
self.onlineSanity(model)
def test_sg_neg_online(self):
"""Test skipgram w/ negative sampling"""
model = word2vec.Word2Vec(sg=1, window=4, hs=0, negative=15, min_count=3, iter=10, seed=42, workers=2)
self.onlineSanity(model)
def test_cbow_hs_online(self):
"""Test CBOW w/ hierarchical softmax"""
model = word2vec.Word2Vec(sg=0, cbow_mean=1, alpha=0.05, window=5, hs=1, negative=0,
min_count=3, iter=10, seed=42, workers=2)
self.onlineSanity(model)
def test_cbow_neg_online(self):
"""Test CBOW w/ negative sampling"""
model = word2vec.Word2Vec(sg=0, cbow_mean=1, alpha=0.05, window=5, hs=0, negative=15,
min_count=5, iter=10, seed=42, workers=2, sample=0)
self.onlineSanity(model)
def testPersistence(self):
"""Test storing/loading the entire model."""
model = word2vec.Word2Vec(sentences, min_count=1)
model.save(testfile())
self.models_equal(model, word2vec.Word2Vec.load(testfile()))
# test persistence of the KeyedVectors of a model
wv = model.wv
wv.save(testfile())
loaded_wv = keyedvectors.KeyedVectors.load(testfile())
self.assertTrue(np.allclose(wv.syn0, loaded_wv.syn0))
self.assertEqual(len(wv.vocab), len(loaded_wv.vocab))
def testPersistenceWithConstructorRule(self):
"""Test storing/loading the entire model with a vocab trimming rule passed in the constructor."""
model = word2vec.Word2Vec(sentences, min_count=1, trim_rule=_rule)
model.save(testfile())
self.models_equal(model, word2vec.Word2Vec.load(testfile()))
def testRuleWithMinCount(self):
"""Test that returning RULE_DEFAULT from trim_rule triggers min_count."""
model = word2vec.Word2Vec(sentences + [["occurs_only_once"]], min_count=2, trim_rule=_rule)
self.assertTrue("human" not in model.wv.vocab)
self.assertTrue("occurs_only_once" not in model.wv.vocab)
self.assertTrue("interface" in model.wv.vocab)
def testRule(self):
"""Test applying vocab trim_rule to build_vocab instead of constructor."""
model = word2vec.Word2Vec(min_count=1)
model.build_vocab(sentences, trim_rule=_rule)
self.assertTrue("human" not in model.wv.vocab)
def testLambdaRule(self):
"""Test that lambda trim_rule works."""
rule = lambda word, count, min_count: utils.RULE_DISCARD if word == "human" else utils.RULE_DEFAULT
model = word2vec.Word2Vec(sentences, min_count=1, trim_rule=rule)
self.assertTrue("human" not in model.wv.vocab)
def testSyn0NormNotSaved(self):
"""Test syn0norm isn't saved in model file"""
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
model.save(testfile())
loaded_model = word2vec.Word2Vec.load(testfile())
self.assertTrue(loaded_model.wv.syn0norm is None)
wv = model.wv
wv.save(testfile())
loaded_kv = keyedvectors.KeyedVectors.load(testfile())
self.assertTrue(loaded_kv.syn0norm is None)
def testLoadPreKeyedVectorModel(self):
"""Test loading pre-KeyedVectors word2vec model"""
if sys.version_info[:2] == (3,4):
model_file_suffix = '_py3_4'
elif sys.version_info < (3,):
model_file_suffix = '_py2'
else:
model_file_suffix = '_py3'
# Model stored in one file
model_file = 'word2vec_pre_kv%s' % model_file_suffix
model = word2vec.Word2Vec.load(datapath(model_file))
self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), model.vector_size))
self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), model.vector_size))
# Model stored in multiple files
model_file = 'word2vec_pre_kv_sep%s' % model_file_suffix
model = word2vec.Word2Vec.load(datapath(model_file))
self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), model.vector_size))
self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), model.vector_size))
def testLoadPreKeyedVectorModelCFormat(self):
"""Test loading pre-KeyedVectors word2vec model saved in word2vec format"""
model = keyedvectors.KeyedVectors.load_word2vec_format(datapath('word2vec_pre_kv_c'))
self.assertTrue(model.syn0.shape[0] == len(model.vocab))
def testPersistenceWord2VecFormat(self):
"""Test storing/loading the entire model in word2vec format."""
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
model.wv.save_word2vec_format(testfile(), binary=True)
binary_model_kv = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=True)
binary_model_kv.init_sims(replace=False)
self.assertTrue(np.allclose(model['human'], binary_model_kv['human']))
norm_only_model = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=True)
norm_only_model.init_sims(replace=True)
self.assertFalse(np.allclose(model['human'], norm_only_model['human']))
self.assertTrue(np.allclose(model.wv.syn0norm[model.wv.vocab['human'].index], norm_only_model['human']))
limited_model_kv = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=True, limit=3)
self.assertEqual(len(limited_model_kv.syn0), 3)
half_precision_model_kv = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=True, datatype=np.float16)
self.assertEqual(binary_model_kv.syn0.nbytes, half_precision_model_kv.syn0.nbytes * 2)
def testNoTrainingCFormat(self):
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
model.wv.save_word2vec_format(testfile(), binary=True)
kv = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=True)
binary_model = word2vec.Word2Vec()
binary_model.wv = kv
self.assertRaises(ValueError, binary_model.train, sentences)
def testTooShortBinaryWord2VecFormat(self):
tfile = testfile()
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
model.wv.save_word2vec_format(tfile, binary=True)
f = open(tfile, 'r+b')
f.write(b'13') # write wrong (too-long) vector count
f.close()
self.assertRaises(EOFError, keyedvectors.KeyedVectors.load_word2vec_format, tfile, binary=True)
def testTooShortTextWord2VecFormat(self):
tfile = testfile()
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
model.wv.save_word2vec_format(tfile, binary=False)
f = open(tfile, 'r+b')
f.write(b'13') # write wrong (too-long) vector count
f.close()
self.assertRaises(EOFError, keyedvectors.KeyedVectors.load_word2vec_format, tfile, binary=False)
def testPersistenceWord2VecFormatNonBinary(self):
"""Test storing/loading the entire model in word2vec non-binary format."""
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
model.wv.save_word2vec_format(testfile(), binary=False)
text_model = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=False)
text_model.init_sims(False)
self.assertTrue(np.allclose(model['human'], text_model['human'], atol=1e-6))
norm_only_model = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=False)
norm_only_model.init_sims(True)
self.assertFalse(np.allclose(model['human'], norm_only_model['human'], atol=1e-6))
self.assertTrue(np.allclose(model.wv.syn0norm[model.wv.vocab['human'].index], norm_only_model['human'], atol=1e-4))
def testPersistenceWord2VecFormatWithVocab(self):
"""Test storing/loading the entire model and vocabulary in word2vec format."""
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
testvocab = os.path.join(tempfile.gettempdir(), 'gensim_word2vec.vocab')
model.wv.save_word2vec_format(testfile(), testvocab, binary=True)
binary_model_with_vocab_kv = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), testvocab, binary=True)
self.assertEqual(model.wv.vocab['human'].count, binary_model_with_vocab_kv.vocab['human'].count)
def testPersistenceKeyedVectorsFormatWithVocab(self):
"""Test storing/loading the entire model and vocabulary in word2vec format."""
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
testvocab = os.path.join(tempfile.gettempdir(), 'gensim_word2vec.vocab')
model.wv.save_word2vec_format(testfile(), testvocab, binary=True)
kv_binary_model_with_vocab = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), testvocab, binary=True)
self.assertEqual(model.wv.vocab['human'].count, kv_binary_model_with_vocab.vocab['human'].count)
def testPersistenceWord2VecFormatCombinationWithStandardPersistence(self):
"""Test storing/loading the entire model and vocabulary in word2vec format chained with
saving and loading via the `save` and `load` methods.
It was possible prior to 1.0.0 release, now raises Exception"""
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
testvocab = os.path.join(tempfile.gettempdir(), 'gensim_word2vec.vocab')
model.wv.save_word2vec_format(testfile(), testvocab, binary=True)
binary_model_with_vocab_kv = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), testvocab, binary=True)
binary_model_with_vocab_kv.save(testfile())
self.assertRaises(AttributeError, word2vec.Word2Vec.load, testfile())
def testLargeMmap(self):
"""Test storing/loading the entire model."""
model = word2vec.Word2Vec(sentences, min_count=1)
# test storing the internal arrays into separate files
model.save(testfile(), sep_limit=0)
self.models_equal(model, word2vec.Word2Vec.load(testfile()))
# make sure mmaping the arrays back works, too
self.models_equal(model, word2vec.Word2Vec.load(testfile(), mmap='r'))
def testVocab(self):
"""Test word2vec vocabulary building."""
corpus = LeeCorpus()
total_words = sum(len(sentence) for sentence in corpus)
# try vocab building explicitly, using all words
model = word2vec.Word2Vec(min_count=1, hs=1, negative=0)
model.build_vocab(corpus)
self.assertTrue(len(model.wv.vocab) == 6981)
# with min_count=1, we're not throwing away anything, so make sure the word counts add up to be the entire corpus
self.assertEqual(sum(v.count for v in model.wv.vocab.values()), total_words)
# make sure the binary codes are correct
np.allclose(model.wv.vocab['the'].code, [1, 1, 0, 0])
# test building vocab with default params
model = word2vec.Word2Vec(hs=1, negative=0)
model.build_vocab(corpus)
self.assertTrue(len(model.wv.vocab) == 1750)
np.allclose(model.wv.vocab['the'].code, [1, 1, 1, 0])
# no input => "RuntimeError: you must first build vocabulary before training the model"
self.assertRaises(RuntimeError, word2vec.Word2Vec, [])
# input not empty, but rather completely filtered out
self.assertRaises(RuntimeError, word2vec.Word2Vec, corpus, min_count=total_words+1)
def testTraining(self):
"""Test word2vec training."""
# build vocabulary, don't train yet
model = word2vec.Word2Vec(size=2, min_count=1, hs=1, negative=0)
model.build_vocab(sentences)
self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 2))
self.assertTrue(model.syn1.shape == (len(model.wv.vocab), 2))
model.train(sentences)
sims = model.most_similar('graph', topn=10)
# self.assertTrue(sims[0][0] == 'trees', sims) # most similar
# test querying for "most similar" by vector
graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
sims2 = model.most_similar(positive=[graph_vector], topn=11)
sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself
self.assertEqual(sims, sims2)
# build vocab and train in one step; must be the same as above
model2 = word2vec.Word2Vec(sentences, size=2, min_count=1, hs=1, negative=0)
self.models_equal(model, model2)
def testScoring(self):
"""Test word2vec scoring."""
model = word2vec.Word2Vec(sentences, size=2, min_count=1, hs=1, negative=0)
# just score and make sure they exist
scores = model.score(sentences, len(sentences))
self.assertEqual(len(scores), len(sentences))
def testLocking(self):
"""Test word2vec training doesn't change locked vectors."""
corpus = LeeCorpus()
# build vocabulary, don't train yet
for sg in range(2): # test both cbow and sg
model = word2vec.Word2Vec(size=4, hs=1, negative=5, min_count=1, sg=sg, window=5)
model.build_vocab(corpus)
# remember two vectors
locked0 = np.copy(model.wv.syn0[0])
unlocked1 = np.copy(model.wv.syn0[1])
# lock the vector in slot 0 against change
model.syn0_lockf[0] = 0.0
model.train(corpus)
self.assertFalse((unlocked1 == model.wv.syn0[1]).all()) # unlocked vector should vary
self.assertTrue((locked0 == model.wv.syn0[0]).all()) # locked vector should not vary
def testAccuracy(self):
"""Test Word2Vec accuracy and KeyedVectors accuracy give the same result"""
model = word2vec.Word2Vec(LeeCorpus())
w2v_accuracy = model.accuracy(datapath('questions-words.txt'))
kv_accuracy = model.wv.accuracy(datapath('questions-words.txt'))
self.assertEqual(w2v_accuracy, kv_accuracy)
def testEvaluateWordPairs(self):
"""Test Spearman and Pearson correlation coefficients give sane results on similarity datasets"""
corpus = word2vec.LineSentence(datapath('head500.noblanks.cor.bz2'))
model = word2vec.Word2Vec(corpus, min_count=3, iter=10)
correlation = model.evaluate_word_pairs(datapath('wordsim353.tsv'))
pearson = correlation[0][0]
spearman = correlation[1][0]
oov = correlation[2]
self.assertTrue(0.1 < pearson < 1.0)
self.assertTrue(0.1 < spearman < 1.0)
self.assertTrue(0.0 <= oov < 90.0)
def model_sanity(self, model, train=True):
"""Even tiny models trained on LeeCorpus should pass these sanity checks"""
# run extra before/after training tests if train=True
if train:
model.build_vocab(list_corpus)
orig0 = np.copy(model.wv.syn0[0])
model.train(list_corpus)
self.assertFalse((orig0 == model.wv.syn0[1]).all()) # vector should vary after training
sims = model.most_similar('war', topn=len(model.wv.index2word))
t_rank = [word for word, score in sims].index('terrorism')
# in >200 calibration runs with these calling parameters, 'terrorism' appeared among the 50 most similar words to 'war'
self.assertLess(t_rank, 50)
war_vec = model['war']
sims2 = model.most_similar([war_vec], topn=51)
self.assertTrue('war' in [word for word, score in sims2])
self.assertTrue('terrorism' in [word for word, score in sims2])
def test_sg_hs(self):
"""Test skipgram w/ hierarchical softmax"""
model = word2vec.Word2Vec(sg=1, window=4, hs=1, negative=0, min_count=5, iter=10, workers=2)
self.model_sanity(model)
def test_sg_neg(self):
"""Test skipgram w/ negative sampling"""
model = word2vec.Word2Vec(sg=1, window=4, hs=0, negative=15, min_count=5, iter=10, workers=2)
self.model_sanity(model)
def test_cbow_hs(self):
"""Test CBOW w/ hierarchical softmax"""
model = word2vec.Word2Vec(sg=0, cbow_mean=1, alpha=0.05, window=8, hs=1, negative=0,
min_count=5, iter=10, workers=2, batch_words=1000)
self.model_sanity(model)
def test_cbow_neg(self):
"""Test CBOW w/ negative sampling"""
model = word2vec.Word2Vec(sg=0, cbow_mean=1, alpha=0.05, window=5, hs=0, negative=15,
min_count=5, iter=10, workers=2, sample=0)
self.model_sanity(model)
def test_cosmul(self):
model = word2vec.Word2Vec(sentences, size=2, min_count=1, hs=1, negative=0)
sims = model.most_similar_cosmul('graph', topn=10)
# self.assertTrue(sims[0][0] == 'trees', sims) # most similar
# test querying for "most similar" by vector
graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
sims2 = model.most_similar_cosmul(positive=[graph_vector], topn=11)
sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself
self.assertEqual(sims, sims2)
def testTrainingCbow(self):
"""Test CBOW word2vec training."""
# to test training, make the corpus larger by repeating its sentences over and over
# build vocabulary, don't train yet
model = word2vec.Word2Vec(size=2, min_count=1, sg=0, hs=1, negative=0)
model.build_vocab(sentences)
self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 2))
self.assertTrue(model.syn1.shape == (len(model.wv.vocab), 2))
model.train(sentences)
sims = model.most_similar('graph', topn=10)
# self.assertTrue(sims[0][0] == 'trees', sims) # most similar
# test querying for "most similar" by vector
graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
sims2 = model.most_similar(positive=[graph_vector], topn=11)
sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself
self.assertEqual(sims, sims2)
# build vocab and train in one step; must be the same as above
model2 = word2vec.Word2Vec(sentences, size=2, min_count=1, sg=0, hs=1, negative=0)
self.models_equal(model, model2)
def testTrainingSgNegative(self):
"""Test skip-gram (negative sampling) word2vec training."""
# to test training, make the corpus larger by repeating its sentences over and over
# build vocabulary, don't train yet
model = word2vec.Word2Vec(size=2, min_count=1, hs=0, negative=2)
model.build_vocab(sentences)
self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 2))
self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), 2))
model.train(sentences)
sims = model.most_similar('graph', topn=10)
# self.assertTrue(sims[0][0] == 'trees', sims) # most similar
# test querying for "most similar" by vector
graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
sims2 = model.most_similar(positive=[graph_vector], topn=11)
sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself
self.assertEqual(sims, sims2)
# build vocab and train in one step; must be the same as above
model2 = word2vec.Word2Vec(sentences, size=2, min_count=1, hs=0, negative=2)
self.models_equal(model, model2)
def testTrainingCbowNegative(self):
"""Test CBOW (negative sampling) word2vec training."""
# to test training, make the corpus larger by repeating its sentences over and over
# build vocabulary, don't train yet
model = word2vec.Word2Vec(size=2, min_count=1, sg=0, hs=0, negative=2)
model.build_vocab(sentences)
self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 2))
self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), 2))
model.train(sentences)
sims = model.most_similar('graph', topn=10)
# self.assertTrue(sims[0][0] == 'trees', sims) # most similar
# test querying for "most similar" by vector
graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
sims2 = model.most_similar(positive=[graph_vector], topn=11)
sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself
self.assertEqual(sims, sims2)
# build vocab and train in one step; must be the same as above
model2 = word2vec.Word2Vec(sentences, size=2, min_count=1, sg=0, hs=0, negative=2)
self.models_equal(model, model2)
def testSimilarities(self):
"""Test similarity and n_similarity methods."""
# The model is trained using CBOW
model = word2vec.Word2Vec(size=2, min_count=1, sg=0, hs=0, negative=2)
model.build_vocab(sentences)
model.train(sentences)
self.assertTrue(model.n_similarity(['graph', 'trees'], ['trees', 'graph']))
self.assertTrue(model.n_similarity(['graph'], ['trees']) == model.similarity('graph', 'trees'))
self.assertRaises(ZeroDivisionError, model.n_similarity, ['graph', 'trees'], [])
self.assertRaises(ZeroDivisionError, model.n_similarity, [], ['graph', 'trees'])
self.assertRaises(ZeroDivisionError, model.n_similarity, [], [])
def testSimilarBy(self):
"""Test word2vec similar_by_word and similar_by_vector."""
model = word2vec.Word2Vec(sentences, size=2, min_count=1, hs=1, negative=0)
wordsims = model.similar_by_word('graph', topn=10)
wordsims2 = model.most_similar(positive='graph', topn=10)
vectorsims = model.similar_by_vector(model['graph'], topn=10)
vectorsims2 = model.most_similar([model['graph']], topn=10)
self.assertEqual(wordsims, wordsims2)
self.assertEqual(vectorsims, vectorsims2)
def testParallel(self):
"""Test word2vec parallel training."""
if word2vec.FAST_VERSION < 0: # don't test the plain np version for parallelism (too slow)
return
corpus = utils.RepeatCorpus(LeeCorpus(), 10000)
for workers in [2, 4]:
model = word2vec.Word2Vec(corpus, workers=workers)
sims = model.most_similar('israeli')
# the exact vectors and therefore similarities may differ, due to different thread collisions/randomization
# so let's test only for top3
# TODO: commented out for now; find a more robust way to compare against "gold standard"
# self.assertTrue('palestinian' in [sims[i][0] for i in range(3)])
def testRNG(self):
"""Test word2vec results identical with identical RNG seed."""
model = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1)
model2 = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1)
self.models_equal(model, model2)
def models_equal(self, model, model2):
self.assertEqual(len(model.wv.vocab), len(model2.wv.vocab))
self.assertTrue(np.allclose(model.wv.syn0, model2.wv.syn0))
if model.hs:
self.assertTrue(np.allclose(model.syn1, model2.syn1))
if model.negative:
self.assertTrue(np.allclose(model.syn1neg, model2.syn1neg))
most_common_word = max(model.wv.vocab.items(), key=lambda item: item[1].count)[0]
self.assertTrue(np.allclose(model[most_common_word], model2[most_common_word]))
def testDeleteTemporaryTrainingData(self):
"""Test word2vec model after delete_temporary_training_data"""
for i in [0, 1]:
for j in [0, 1]:
model = word2vec.Word2Vec(sentences, size=10, min_count=0, seed=42, hs=i, negative=j)
if i:
self.assertTrue(hasattr(model, 'syn1'))
if j:
self.assertTrue(hasattr(model, 'syn1neg'))
self.assertTrue(hasattr(model, 'syn0_lockf'))
model.delete_temporary_training_data(replace_word_vectors_with_normalized=True)
self.assertEqual(len(model['human']), 10)
self.assertEqual(len(model.wv.vocab), 12)
self.assertEqual(model.wv.vocab['graph'].count, 3)
self.assertTrue(not hasattr(model, 'syn1'))
self.assertTrue(not hasattr(model, 'syn1neg'))
self.assertTrue(not hasattr(model, 'syn0_lockf'))
def testNormalizeAfterTrainingData(self):
model = word2vec.Word2Vec(sentences, min_count=1)
model.save(testfile())
norm_only_model = word2vec.Word2Vec.load(testfile())
norm_only_model.delete_temporary_training_data(replace_word_vectors_with_normalized=True)
self.assertFalse(np.allclose(model['human'], norm_only_model['human']))
@log_capture()
def testBuildVocabWarning(self, l):
"""Test if warning is raised on non-ideal input to a word2vec model"""
sentences = ['human', 'machine']
model = word2vec.Word2Vec()
model.build_vocab(sentences)
warning = "Each 'sentences' item should be a list of words (usually unicode strings)."
self.assertTrue(warning in str(l))
@log_capture()
def testTrainWarning(self, l):
"""Test if warning is raised if alpha rises during subsequent calls to train()"""
sentences = [['human'],
['graph', 'trees']]
model = word2vec.Word2Vec(min_count=1)
model.build_vocab(sentences)
for epoch in range(10):
model.train(sentences)
model.alpha -= 0.002
model.min_alpha = model.alpha
if epoch == 5:
model.alpha += 0.05
warning = "Effective 'alpha' higher than previous training cycles"
self.assertTrue(warning in str(l))
def test_sentences_should_not_be_a_generator(self):
"""
Passing `sentences` as a generator object should raise a TypeError.
"""
gen = (s for s in sentences)
self.assertRaises(TypeError, word2vec.Word2Vec, (gen,))
def testLoadOnClassError(self):
"""Test if exception is raised when loading word2vec model on instance"""
self.assertRaises(AttributeError, load_on_instance)
#endclass TestWord2VecModel
class TestWMD(unittest.TestCase):
def testNonzero(self):
'''Test basic functionality with a test sentence.'''
if not PYEMD_EXT:
return
model = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1)
sentence1 = ['human', 'interface', 'computer']
sentence2 = ['survey', 'user', 'computer', 'system', 'response', 'time']
distance = model.wmdistance(sentence1, sentence2)
# Check that distance is non-zero.
self.assertFalse(distance == 0.0)
def testSymmetry(self):
'''Check that distance is symmetric.'''
if not PYEMD_EXT:
return
model = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1)
sentence1 = ['human', 'interface', 'computer']
sentence2 = ['survey', 'user', 'computer', 'system', 'response', 'time']
distance1 = model.wmdistance(sentence1, sentence2)
distance2 = model.wmdistance(sentence2, sentence1)
self.assertTrue(np.allclose(distance1, distance2))
def testIdenticalSentences(self):
'''Check that the distance from a sentence to itself is zero.'''
if not PYEMD_EXT:
return
model = word2vec.Word2Vec(sentences, min_count=1)
sentence = ['survey', 'user', 'computer', 'system', 'response', 'time']
distance = model.wmdistance(sentence, sentence)
self.assertEqual(0.0, distance)
class TestWord2VecSentenceIterators(unittest.TestCase):
def testLineSentenceWorksWithFilename(self):
"""Does LineSentence work with a filename argument?"""
with utils.smart_open(datapath('lee_background.cor')) as orig:
sentences = word2vec.LineSentence(datapath('lee_background.cor'))
for words in sentences:
self.assertEqual(words, utils.to_unicode(orig.readline()).split())
def testLineSentenceWorksWithCompressedFile(self):
"""Does LineSentence work with a compressed file object argument?"""
with utils.smart_open(datapath('head500.noblanks.cor')) as orig:
sentences = word2vec.LineSentence(bz2.BZ2File(datapath('head500.noblanks.cor.bz2')))
for words in sentences:
self.assertEqual(words, utils.to_unicode(orig.readline()).split())
def testLineSentenceWorksWithNormalFile(self):
"""Does LineSentence work with a file object argument, rather than filename?"""
with utils.smart_open(datapath('head500.noblanks.cor')) as orig:
with utils.smart_open(datapath('head500.noblanks.cor')) as fin:
sentences = word2vec.LineSentence(fin)
for words in sentences:
self.assertEqual(words, utils.to_unicode(orig.readline()).split())
#endclass TestWord2VecSentenceIterators
# TODO: get correct path to Python binary
# class TestWord2VecScripts(unittest.TestCase):
# def testWord2VecStandAloneScript(self):
# """Does Word2Vec script launch standalone?"""
# cmd = 'python -m gensim.scripts.word2vec_standalone -train ' + datapath('testcorpus.txt') + ' -output vec.txt -size 200 -sample 1e-4 -binary 0 -iter 3 -min_count 1'
# output = check_output(cmd, stderr=PIPE)
# self.assertEqual(output, '0')
# #endclass TestWord2VecScripts
if not hasattr(TestWord2VecModel, 'assertLess'):
# workaround for python 2.6
def assertLess(self, a, b, msg=None):
self.assertTrue(a < b, msg="%s is not less than %s" % (a, b))
setattr(TestWord2VecModel, 'assertLess', assertLess)
if __name__ == '__main__':
logging.basicConfig(
format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s',
level=logging.DEBUG)
logging.info("using optimization %s", word2vec.FAST_VERSION)
unittest.main()
| duyet-website/api.duyet.net | lib/gensim/test/test_word2vec.py | Python | mit | 34,696 |
from protocols.forms import forms
from core.utils import SPEED_UNITS


class CentrifugeForm(forms.VerbForm):
    name = "Centrifuge"
    slug = "centrifuge"
    has_machine = True

    # duration = forms.IntegerField(help_text='this is the minimal time this should take', initial = 'sec')
    # min_speed = forms.IntegerField()
    # max_speed = forms.IntegerField(required = False)
    # speed_units = forms.ChoiceField(required=False, choices = SPEED_UNITS, initial = 'rpm' )
    # speed_comment = forms.CharField(required=False)
    # comment_why = forms.CharField(required = False)
| Bionetbook/bionetbook | bnbapp/protocols/forms/verbs/centrifuge.py | Python | mit | 585 |
# -*- coding:utf-8 -*-
import scrapy
from report_crawler.spiders.__Global_function import get_localtime
from report_crawler.spiders.__Global_variable import now_time, end_time


class HNU001_Spider(scrapy.Spider):
    name = 'HNU001'
    start_urls = ['http://csee.hnu.edu.cn/Front/TZXX_List?LMXX_BH=20130728174138ec48068e-48bf-49a6-ac51-27d04a9b1baa']
    domain = 'http://csee.hnu.edu.cn/'

    def parse(self, response):
        messages = response.xpath("//ul[@class='article-list']/li")
        for i, message in enumerate(messages):
            report_name = message.xpath(".//a/text()").extract()[0]
            report_time = get_localtime(message.xpath("span/text()").extract()[0].strip().strip("[]"))
            # Skip reports dated after end_time; stop once a report older than now_time is reached.
            if report_time > end_time:
                continue
            if report_time < now_time:
                return
            report_url = self.domain + message.xpath(".//a/@href").extract()[0][1:]
            yield scrapy.Request(report_url, callback=self.parse_pages,
                                 meta={'link': report_url, 'number': i + 1, 'publication': report_time, 'title': report_name})

    def parse_pages(self, response):
        messages = response.xpath("//div[@class='content-1']")
        # 'organizer' translates to "Hunan University, College of Information Science and Engineering";
        # 'location' translates to "Central China: Hunan Province - Changsha".
        return {'text': messages, 'number': response.meta['number'], 'organizer': u"湖南大学大学信息科学与工程学院",
                'faculty': self.name, 'link': response.meta['link'], 'publication': response.meta['publication'],
                'location': u"华中:湖南省-长沙市", 'title': response.meta['title']}
| AnselCmy/ARPS | report_crawler/report_crawler/spiders/spiders_001/_H/HNU001.py | Python | mit | 1,423 |
"""
This file contains the unit tests for the :mod:`communication` app.
There are model and view tests:
* :class:`~communication.tests.CommunicationModelTests`
* :class:`~communication.tests.CommunicationViewTests`
"""
from lab_website.tests import BasicTests
from communication.models import LabAddress,LabLocation,Post
from personnel.models import Address, Person
from papers.models import Publication
from projects.models import Project
class CommunicationModelTests(BasicTests):
'''This class tests the models in the :mod:`communication` app.'''
fixtures = ['test_address',]
def test_create_new_lab_address(self):
'''This test creates a :class:`~communication.models.LabAddress` with the required information.'''
test_address = LabAddress(type='Primary', address=Address.objects.get(pk=1)) #repeat for all required fields
test_address.save()
self.assertEqual(test_address.pk, 1) #presumes no models loaded in fixture data
def test_lab_address_unicode(self):
'''This tests the unicode representation of a :class:`~communication.models.LabAddress`.'''
test_address = LabAddress(type='Primary', address=Address.objects.get(pk=1)) #repeat for all required fields
test_address.save()
self.assertEqual(test_address.pk, 1) #presumes no models loaded in fixture data
self.assertEqual(test_address.__unicode__(), Address.objects.get(pk=1).__unicode__())
def test_create_new_lab_location(self):
'''This test creates a :class:`~communication.models.LabLocation` with the required information only.'''
test_location = LabLocation(name = 'Memphis',
type='City',
priority=1) #repeat for all required fields
test_location.save()
self.assertEqual(test_location.pk, 1) #presumes no models loaded in fixture data
def test_create_new_lab_location_all(self):
'''This test creates a :class:`~communication.models.LabLocation` with all fields included.'''
test_location = LabLocation(name = 'Memphis',
type='City',
priority=1,
address=Address.objects.get(pk=1),
url = 'www.cityofmemphis.org',
description = 'some description about the place',
lattitude = 35.149534,
longitude = -90.04898,) #repeat for all required fields
test_location.save()
self.assertEqual(test_location.pk, 1) #presumes no models loaded in fixture data
def test_lab_location_unicode(self):
'''This tests the unicode representation of a :class:`~communication.models.LabLocation`.'''
test_location = LabLocation(name = 'Memphis',
type='City',
priority=1) #repeat for all required fields
test_location.save()
self.assertEqual(test_location.pk, 1)
self.assertEqual(test_location.__unicode__(), 'Memphis')
class CommunicationViewTests(BasicTests):
'''This class tests the views associated with the :mod:`communication` app.'''
def test_feed_details_view(self):
"""This tests the feed-details view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/feeds')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'feed_details.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTrue('google_calendar_id' in test_response.context)
def test_lab_rules_view(self):
'''This tests the lab-rules view.
The tests ensure that the correct template is used.
It also tests whether the correct context is passed (if included).
This view uses a user with superuser permissions so does not test the permission levels for this view.'''
test_response = self.client.get('/lab-rules')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'lab_rules.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTrue('lab_rules' in test_response.context)
self.assertTrue('lab_rules_source' in test_response.context)
def test_data_sharing_view(self):
'''This tests the data-resource-sharing view.
The tests ensure that the correct template is used.
It also tests whether the correct context is passed (if included).
This view uses a user with superuser permissions so does not test the permission levels for this view.'''
test_response = self.client.get('/data-resource-sharing')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'data_sharing_policy.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTrue('data_sharing_policy' in test_response.context)
self.assertTrue('data_sharing_policy_source' in test_response.context)
def test_twitter_view(self):
'''This tests the twitter view.
Currently it just ensures that the template is loading correctly.
'''
test_response = self.client.get('/twitter')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'twitter_timeline.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTrue('timeline' in test_response.context)
def test_calendar_view(self):
'''This tests the google-calendar view.
Currently it just ensures that the template is loading correctly.
'''
test_response = self.client.get('/calendar')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'calendar.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTrue('google_calendar_id' in test_response.context)
#
# def test_wikipedia_view(self):
# '''This tests the google-calendar view.
#
# Currently it just ensures that the template is loading correctly.
# '''
# test_response = self.client.get('/wikipedia')
# self.assertEqual(test_response.status_code, 200)
# self.assertTemplateUsed(test_response, 'wikipedia_edits.html')
# self.assertTemplateUsed(test_response, 'base.html')
# self.assertTemplateUsed(test_response, 'jquery_script.html')
# self.assertTrue('pages' in test_response.context)
def test_news_view(self):
'''This tests the lab-news view.
Currently it just ensures that the template is loading correctly.
'''
test_response = self.client.get('/news')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'lab_news.html')
self.assertTemplateUsed(test_response, 'base.html')
#self.assertTrue('statuses' in test_response.context)
self.assertTrue('links' in test_response.context)
#self.assertTrue('milestones' in test_response.context)
def test_contact_page(self):
'''This tests the contact-page view.
Currently it just ensures that the template is loading correctly.
'''
test_response = self.client.get('/contact/')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'contact.html')
self.assertTemplateUsed(test_response, 'base.html')
def test_location_page(self):
'''This tests the location view.
Currently it ensures that the template is loading, and that the lablocation_list context is passed.
'''
test_response = self.client.get('/location')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'location.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTrue('lablocation_list' in test_response.context)
class PostModelTests(BasicTests):
'''This class tests various aspects of the :class:`~papers.models.Post` model.'''
fixtures = ['test_publication','test_publication_personnel', 'test_project', 'test_personnel']
def test_create_new_post_minimum(self):
'''This test creates a :class:`~papers.models.Post` with the required information only.'''
test_post = Post(post_title="Test Post",
author = Person.objects.get(pk=1),
markdown_url = 'https://raw.githubusercontent.com/BridgesLab/Lab-Website/master/LICENSE.md')
test_post.save()
self.assertEqual(test_post.pk, 1)
def test_create_new_post_all(self):
'''This test creates a :class:`~papers.models.Post` with all fields entered.'''
test_post = Post(post_title="Test Post",
author = Person.objects.get(pk=1),
markdown_url = 'https://raw.githubusercontent.com/BridgesLab/Lab-Website/master/LICENSE.md',
paper = Publication.objects.get(pk=1),
project = Project.objects.get(pk=1))
test_post.save()
self.assertEqual(test_post.pk, 1)
def test_post_unicode(self):
'''This test creates a :class:`~papers.models.Post` and then verifies the unicode representation is correct.'''
test_post = Post(post_title="Test Post",
author = Person.objects.get(pk=1),
markdown_url = 'https://raw.githubusercontent.com/BridgesLab/Lab-Website/master/LICENSE.md')
test_post.save()
self.assertEqual(test_post.__unicode__(), "Test Post")
def test_post_slugify(self):
'''This test creates a :class:`~papers.models.Post` and then verifies the slug is generated correctly.'''
test_post = Post(post_title="Test Post",
author = Person.objects.get(pk=1),
markdown_url = 'https://raw.githubusercontent.com/BridgesLab/Lab-Website/master/LICENSE.md')
test_post.save()
self.assertEqual(test_post.post_slug, "test-post")
class PostViewTests(BasicTests):
'''These test the views associated with post objects.'''
fixtures = ['test_post','test_publication','test_publication_personnel', 'test_project', 'test_personnel']
def test_post_details_view(self):
"""This tests the post-details view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/posts/fixture-post')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'post_detail.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'disqus_snippet.html')
self.assertTemplateUsed(test_response, 'analytics_tracking.html')
self.assertTrue('post' in test_response.context)
test_response = self.client.get('/posts/not-a-fixture-post')
self.assertEqual(test_response.status_code, 404)
def test_post_list(self):
"""This tests the post-list view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/posts/')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'post_list.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'analytics_tracking.html')
self.assertTrue('post_list' in test_response.context)
def test_post_new(self):
"""This tests the post-new view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/posts/new')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'post_form.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'analytics_tracking.html')
def test_post_edit(self):
"""This tests the post-edit view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/posts/fixture-post/edit')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'post_form.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'analytics_tracking.html')
test_response = self.client.get('/posts/not-a-fixture-post/edit')
self.assertEqual(test_response.status_code, 404)
def test_post_delete(self):
"""This tests the post-edit view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/posts/fixture-post/delete')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'confirm_delete.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'analytics_tracking.html')
test_response = self.client.get('/posts/not-a-fixture-post/delete')
self.assertEqual(test_response.status_code, 404)
| davebridges/Lab-Website | communication/tests.py | Python | mit | 14,526 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_only_resource import ProxyOnlyResource
class Deployment(ProxyOnlyResource):
    """User credentials used for publishing activity.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :param deployment_id: Identifier for deployment.
    :type deployment_id: str
    :param status: Deployment status.
    :type status: int
    :param message: Details about deployment status.
    :type message: str
    :param author: Who authored the deployment.
    :type author: str
    :param deployer: Who performed the deployment.
    :type deployer: str
    :param author_email: Author email.
    :type author_email: str
    :param start_time: Start time.
    :type start_time: datetime
    :param end_time: End time.
    :type end_time: datetime
    :param active: True if deployment is currently active, false if completed
     and null if not started.
    :type active: bool
    :param details: Details on deployment.
    :type details: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'deployment_id': {'key': 'properties.id', 'type': 'str'},
        'status': {'key': 'properties.status', 'type': 'int'},
        'message': {'key': 'properties.message', 'type': 'str'},
        'author': {'key': 'properties.author', 'type': 'str'},
        'deployer': {'key': 'properties.deployer', 'type': 'str'},
        'author_email': {'key': 'properties.authorEmail', 'type': 'str'},
        'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
        'active': {'key': 'properties.active', 'type': 'bool'},
        'details': {'key': 'properties.details', 'type': 'str'},
    }

    def __init__(self, kind=None, deployment_id=None, status=None, message=None, author=None, deployer=None, author_email=None, start_time=None, end_time=None, active=None, details=None):
        super(Deployment, self).__init__(kind=kind)
        self.deployment_id = deployment_id
        self.status = status
        self.message = message
        self.author = author
        self.deployer = deployer
        self.author_email = author_email
        self.start_time = start_time
        self.end_time = end_time
        self.active = active
        self.details = details
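# Illustrative usage (a minimal sketch, not part of the generated file; the
# variable name below is hypothetical):
#
#     deployment = Deployment(
#         deployment_id='42',
#         status=4,
#         message='Deployed from CI',
#         author='build-bot',
#         active=True,
#     )
#
# When the generated client serializes this model, `_attribute_map` nests the
# deployment fields under "properties", e.g. `deployment_id` maps to
# `properties.id` and `author_email` maps to `properties.authorEmail`.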
| lmazuel/azure-sdk-for-python | azure-mgmt-web/azure/mgmt/web/models/deployment.py | Python | mit | 3,290 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *


def create(kernel):
    result = Creature()
    result.template = "object/mobile/shared_dressed_commoner_naboo_human_female_07.iff"
    result.attribute_template_id = 9
    result.stfName("npc_name","human_base_female")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
| anhstudios/swganh | data/scripts/templates/object/mobile/shared_dressed_commoner_naboo_human_female_07.py | Python | mit | 465 |
"""
Given an integer, convert it to a roman numeral.
Input is guaranteed to be within the range from 1 to 3999.
"""
class Solution:
    # @return a string
    def intToRoman(self, num):
        digits = [(1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
                  (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
                  (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I')]
        result = ""
        for digit in digits:
            while num >= digit[0]:
                result += digit[1]
                num -= digit[0]
            if num == 0:
                break
        return result
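# Worked example (illustrative): intToRoman(1994) expands greedily as
#   1000 -> 'M'   (remainder 994)
#    900 -> 'CM'  (remainder  94)
#     90 -> 'XC'  (remainder   4)
#      4 -> 'IV'  (remainder   0)
# giving 'MCMXCIV', i.e. Solution().intToRoman(1994) == 'MCMXCIV'.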
| cyandterry/Python-Study | Ninja/Leetcode/12_Integer_to_Roman.py | Python | mit | 614 |
from django.db import models
from django.conf import settings
from django.core.exceptions import ValidationError
from polymorphic import PolymorphicModel
from django.db.models import F
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from celery.exceptions import SoftTimeLimitExceeded
from .jenkins import get_job_status
from .alert import (send_alert, AlertPlugin, AlertPluginUserData, update_alert_plugins)
from .calendar import get_events
from .graphite import parse_metric
from .graphite import get_data
from .tasks import update_service, update_instance
from datetime import datetime, timedelta
from django.utils import timezone
import json
import re
import time
import os
import subprocess
import requests
from celery.utils.log import get_task_logger
RAW_DATA_LIMIT = 5000
logger = get_task_logger(__name__)
CHECK_TYPES = (
('>', 'Greater than'),
('>=', 'Greater than or equal'),
('<', 'Less than'),
('<=', 'Less than or equal'),
('==', 'Equal to'),
)
def serialize_recent_results(recent_results):
if not recent_results:
return ''
def result_to_value(result):
if result.succeeded:
return '1'
else:
return '-1'
vals = [result_to_value(r) for r in recent_results]
vals.reverse()
return ','.join(vals)
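# For example, with results ordered newest-first as [pass, fail, fail], the
# mapped values are ['1', '-1', '-1']; after reversing, the serialized string
# is '-1,-1,1' (oldest result first).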
def calculate_debounced_passing(recent_results, debounce=0):
"""
`debounce` is the number of previous failures we tolerate (not counting the
current result) before marking the check as failing.
Returns:
True if passing given debounce factor
False if failing
"""
if not recent_results:
return True
debounce_window = recent_results[:debounce + 1]
for r in debounce_window:
if r.succeeded:
return True
return False
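# Worked examples (recent_results is ordered newest-first):
#   debounce=0, results [fail, pass, ...]       -> window [fail]       -> False
#   debounce=1, results [fail, pass, ...]       -> window [fail, pass] -> True
#   debounce=1, results [fail, fail, pass, ...] -> window [fail, fail] -> False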
class CheckGroupMixin(models.Model):
class Meta:
abstract = True
PASSING_STATUS = 'PASSING'
WARNING_STATUS = 'WARNING'
ERROR_STATUS = 'ERROR'
CRITICAL_STATUS = 'CRITICAL'
CALCULATED_PASSING_STATUS = 'passing'
CALCULATED_INTERMITTENT_STATUS = 'intermittent'
CALCULATED_FAILING_STATUS = 'failing'
STATUSES = (
(CALCULATED_PASSING_STATUS, CALCULATED_PASSING_STATUS),
(CALCULATED_INTERMITTENT_STATUS, CALCULATED_INTERMITTENT_STATUS),
(CALCULATED_FAILING_STATUS, CALCULATED_FAILING_STATUS),
)
IMPORTANCES = (
(WARNING_STATUS, 'Warning'),
(ERROR_STATUS, 'Error'),
(CRITICAL_STATUS, 'Critical'),
)
name = models.TextField()
users_to_notify = models.ManyToManyField(
User,
blank=True,
help_text='Users who should receive alerts.',
)
alerts_enabled = models.BooleanField(
default=True,
help_text='Alert when this service is not healthy.',
)
status_checks = models.ManyToManyField(
'StatusCheck',
blank=True,
help_text='Checks used to calculate service status.',
)
last_alert_sent = models.DateTimeField(
null=True,
blank=True,
)
alerts = models.ManyToManyField(
'AlertPlugin',
blank=True,
help_text='Alert channels through which you wish to be notified'
)
email_alert = models.BooleanField(default=False)
hipchat_alert = models.BooleanField(default=True)
sms_alert = models.BooleanField(default=False)
telephone_alert = models.BooleanField(
default=False,
help_text='Must be enabled, and check importance set to Critical, to receive telephone alerts.',
)
overall_status = models.TextField(default=PASSING_STATUS)
old_overall_status = models.TextField(default=PASSING_STATUS)
hackpad_id = models.TextField(
null=True,
blank=True,
verbose_name='Recovery instructions',
help_text='Gist, Hackpad or Refheap js embed with recovery instructions e.g. https://you.hackpad.com/some_document.js'
)
def __unicode__(self):
return self.name
def most_severe(self, check_list):
failures = [c.importance for c in check_list]
if self.CRITICAL_STATUS in failures:
return self.CRITICAL_STATUS
if self.ERROR_STATUS in failures:
return self.ERROR_STATUS
if self.WARNING_STATUS in failures:
return self.WARNING_STATUS
return self.PASSING_STATUS
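# For example, if check_list contains one WARNING-importance check and one
# CRITICAL-importance check, most_severe() returns CRITICAL_STATUS.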
@property
def is_critical(self):
"""
Break out separately because it's a bit of a pain to
get wrong.
"""
if self.old_overall_status != self.CRITICAL_STATUS and self.overall_status == self.CRITICAL_STATUS:
return True
return False
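# In other words, is_critical is True only on the transition into CRITICAL:
# the previous overall status was not CRITICAL and the current one is.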
def alert(self):
if not self.alerts_enabled:
return
if self.overall_status != self.PASSING_STATUS:
# Don't alert every time
if self.overall_status == self.WARNING_STATUS:
if self.last_alert_sent and (timezone.now() - timedelta(minutes=settings.NOTIFICATION_INTERVAL)) < self.last_alert_sent:
return
elif self.overall_status in (self.CRITICAL_STATUS, self.ERROR_STATUS):
if self.last_alert_sent and (timezone.now() - timedelta(minutes=settings.ALERT_INTERVAL)) < self.last_alert_sent:
return
self.last_alert_sent = timezone.now()
else:
# We don't count "back to normal" as an alert
self.last_alert_sent = None
self.save()
self.snapshot.did_send_alert = True
self.snapshot.save()
send_alert(self, duty_officers=get_duty_officers())
@property
def recent_snapshots(self):
snapshots = self.snapshots.filter(
time__gt=(timezone.now() - timedelta(minutes=60 * 24)))
snapshots = list(snapshots.values())
for s in snapshots:
s['time'] = time.mktime(s['time'].timetuple())
return snapshots
def graphite_status_checks(self):
return self.status_checks.filter(polymorphic_ctype__model='graphitestatuscheck')
def http_status_checks(self):
return self.status_checks.filter(polymorphic_ctype__model='httpstatuscheck')
def jenkins_status_checks(self):
return self.status_checks.filter(polymorphic_ctype__model='jenkinsstatuscheck')
def active_graphite_status_checks(self):
return self.graphite_status_checks().filter(active=True)
def active_http_status_checks(self):
return self.http_status_checks().filter(active=True)
def active_jenkins_status_checks(self):
return self.jenkins_status_checks().filter(active=True)
def active_status_checks(self):
return self.status_checks.filter(active=True)
def inactive_status_checks(self):
return self.status_checks.filter(active=False)
def all_passing_checks(self):
return self.active_status_checks().filter(calculated_status=self.CALCULATED_PASSING_STATUS)
def all_failing_checks(self):
return self.active_status_checks().exclude(calculated_status=self.CALCULATED_PASSING_STATUS)
class Service(CheckGroupMixin):
def update_status(self):
self.old_overall_status = self.overall_status
# Only active checks feed into our calculation
status_checks_failed_count = self.all_failing_checks().count()
self.overall_status = self.most_severe(self.all_failing_checks())
self.snapshot = ServiceStatusSnapshot(
service=self,
num_checks_active=self.active_status_checks().count(),
num_checks_passing=self.active_status_checks(
).count() - status_checks_failed_count,
num_checks_failing=status_checks_failed_count,
overall_status=self.overall_status,
time=timezone.now(),
)
self.snapshot.save()
self.save()
if not (self.overall_status == Service.PASSING_STATUS and self.old_overall_status == Service.PASSING_STATUS):
self.alert()
instances = models.ManyToManyField(
'Instance',
blank=True,
help_text='Instances this service is running on.',
)
url = models.TextField(
blank=True,
help_text="URL of service."
)
class Meta:
ordering = ['name']
class Instance(CheckGroupMixin):
def duplicate(self):
checks = self.status_checks.all()
new_instance = self
new_instance.pk = None
new_instance.id = None
new_instance.name = u"Copy of %s" % self.name
new_instance.save()
for check in checks:
check.duplicate(inst_set=(new_instance,), serv_set=())
return new_instance.pk
def update_status(self):
self.old_overall_status = self.overall_status
# Only active checks feed into our calculation
status_checks_failed_count = self.all_failing_checks().count()
self.overall_status = self.most_severe(self.all_failing_checks())
self.snapshot = InstanceStatusSnapshot(
instance=self,
num_checks_active=self.active_status_checks().count(),
num_checks_passing=self.active_status_checks(
).count() - status_checks_failed_count,
num_checks_failing=status_checks_failed_count,
overall_status=self.overall_status,
time=timezone.now(),
)
self.snapshot.save()
self.save()
class Meta:
ordering = ['name']
address = models.TextField(
blank=True,
help_text="Address (IP/Hostname) of service."
)
def icmp_status_checks(self):
return self.status_checks.filter(polymorphic_ctype__model='icmpstatuscheck')
def active_icmp_status_checks(self):
return self.icmp_status_checks().filter(active=True)
def delete(self, *args, **kwargs):
self.icmp_status_checks().delete()
return super(Instance, self).delete(*args, **kwargs)
class Snapshot(models.Model):
class Meta:
abstract = True
time = models.DateTimeField(db_index=True)
num_checks_active = models.IntegerField(default=0)
num_checks_passing = models.IntegerField(default=0)
num_checks_failing = models.IntegerField(default=0)
overall_status = models.TextField(default=Service.PASSING_STATUS)
did_send_alert = models.IntegerField(default=False)
class ServiceStatusSnapshot(Snapshot):
service = models.ForeignKey(Service, related_name='snapshots')
def __unicode__(self):
return u"%s: %s" % (self.service.name, self.overall_status)
class InstanceStatusSnapshot(Snapshot):
instance = models.ForeignKey(Instance, related_name='snapshots')
def __unicode__(self):
return u"%s: %s" % (self.instance.name, self.overall_status)
class StatusCheck(PolymorphicModel):
"""
Base class for polymorphic models. We're going to use
proxy models for inheriting because it makes life much simpler,
but this allows us to stick different methods etc on subclasses.
You can work out what (sub)class a model is an instance of by accessing `instance.polymorphic_ctype.model`
We are using django-polymorphic for polymorphism
"""
# Common attributes to all
name = models.TextField()
active = models.BooleanField(
default=True,
help_text='If not active, check will not be used to calculate service status and will not trigger alerts.',
)
importance = models.CharField(
max_length=30,
choices=Service.IMPORTANCES,
default=Service.ERROR_STATUS,
        help_text='Severity level of a failure. Critical alerts are for failures that should wake you up at 2am, errors are things you can sleep through but need to fix in the morning, and warnings are for less important issues.'
)
frequency = models.IntegerField(
default=5,
help_text='Minutes between each check.',
)
debounce = models.IntegerField(
default=0,
null=True,
help_text='Number of successive failures permitted before check will be marked as failed. Default is 0, i.e. fail on first failure.'
)
created_by = models.ForeignKey(User, null=True)
calculated_status = models.CharField(
max_length=50, choices=Service.STATUSES, default=Service.CALCULATED_PASSING_STATUS, blank=True)
last_run = models.DateTimeField(null=True)
cached_health = models.TextField(editable=False, null=True)
# Graphite checks
metric = models.TextField(
null=True,
help_text='fully.qualified.name of the Graphite metric you want to watch. This can be any valid Graphite expression, including wildcards, multiple hosts, etc.',
)
check_type = models.CharField(
choices=CHECK_TYPES,
max_length=100,
null=True,
)
value = models.TextField(
null=True,
help_text='If this expression evaluates to true, the check will fail (possibly triggering an alert).',
)
expected_num_hosts = models.IntegerField(
default=0,
null=True,
help_text='The minimum number of data series (hosts) you expect to see.',
)
allowed_num_failures = models.IntegerField(
default=0,
null=True,
help_text='The maximum number of data series (metrics) you expect to fail. For example, you might be OK with 2 out of 3 webservers having OK load (1 failing), but not 1 out of 3 (2 failing).',
)
# HTTP checks
endpoint = models.TextField(
null=True,
help_text='HTTP(S) endpoint to poll.',
)
username = models.TextField(
blank=True,
null=True,
help_text='Basic auth username.',
)
password = models.TextField(
blank=True,
null=True,
help_text='Basic auth password.',
)
text_match = models.TextField(
blank=True,
null=True,
help_text='Regex to match against source of page.',
)
status_code = models.TextField(
default=200,
null=True,
help_text='Status code expected from endpoint.'
)
timeout = models.IntegerField(
default=30,
null=True,
help_text='Time out after this many seconds.',
)
verify_ssl_certificate = models.BooleanField(
default=True,
        help_text='Set to false to skip SSL certificate verification (default True)',
)
# Jenkins checks
max_queued_build_time = models.IntegerField(
null=True,
blank=True,
help_text='Alert if build queued for more than this many minutes.',
)
class Meta(PolymorphicModel.Meta):
ordering = ['name']
def __unicode__(self):
return self.name
def recent_results(self):
# Not great to use id but we are getting lockups, possibly because of something to do with index
# on time_complete
return StatusCheckResult.objects.filter(check=self).order_by('-id').defer('raw_data')[:10]
def last_result(self):
try:
return StatusCheckResult.objects.filter(check=self).order_by('-id').defer('raw_data')[0]
except:
return None
def run(self):
start = timezone.now()
try:
result = self._run()
except SoftTimeLimitExceeded as e:
result = StatusCheckResult(check=self)
result.error = u'Error in performing check: Celery soft time limit exceeded'
result.succeeded = False
except Exception as e:
result = StatusCheckResult(check=self)
logger.error(u"Error performing check: %s" % (e.message,))
result.error = u'Error in performing check: %s' % (e.message,)
result.succeeded = False
finish = timezone.now()
result.time = start
result.time_complete = finish
result.save()
self.last_run = finish
self.save()
def _run(self):
"""
Implement on subclasses. Should return a `CheckResult` instance.
"""
raise NotImplementedError('Subclasses should implement')
def save(self, *args, **kwargs):
if self.last_run:
recent_results = list(self.recent_results())
if calculate_debounced_passing(recent_results, self.debounce):
self.calculated_status = Service.CALCULATED_PASSING_STATUS
else:
self.calculated_status = Service.CALCULATED_FAILING_STATUS
self.cached_health = serialize_recent_results(recent_results)
try:
updated = StatusCheck.objects.get(pk=self.pk)
except StatusCheck.DoesNotExist as e:
logger.error('Cannot find myself (check %s) in the database, presumably have been deleted' % self.pk)
return
else:
self.cached_health = ''
self.calculated_status = Service.CALCULATED_PASSING_STATUS
ret = super(StatusCheck, self).save(*args, **kwargs)
self.update_related_services()
self.update_related_instances()
return ret
def duplicate(self, inst_set=(), serv_set=()):
new_check = self
new_check.pk = None
new_check.id = None
new_check.last_run = None
new_check.save()
for linked in list(inst_set) + list(serv_set):
linked.status_checks.add(new_check)
return new_check.pk
def update_related_services(self):
services = self.service_set.all()
for service in services:
update_service.delay(service.id)
def update_related_instances(self):
instances = self.instance_set.all()
for instance in instances:
update_instance.delay(instance.id)
class ICMPStatusCheck(StatusCheck):
class Meta(StatusCheck.Meta):
proxy = True
@property
def check_category(self):
return "ICMP/Ping Check"
def _run(self):
result = StatusCheckResult(check=self)
instances = self.instance_set.all()
target = self.instance_set.get().address
# We need to read both STDOUT and STDERR because ping can write to both, depending on the kind of error. Thanks a lot, ping.
ping_process = subprocess.Popen("ping -c 1 " + target, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
response = ping_process.wait()
if response == 0:
result.succeeded = True
else:
output = ping_process.stdout.read()
result.succeeded = False
result.error = output
return result
class GraphiteStatusCheck(StatusCheck):
class Meta(StatusCheck.Meta):
proxy = True
@property
def check_category(self):
return "Metric check"
def format_error_message(self, failure_value, actual_hosts, actual_failures):
"""
A summary of why the check is failing for inclusion in short alert messages
Returns something like:
"5.0 > 4 | 1/2 hosts"
"""
        if failure_value is None:
            # Guard before formatting: interpolating None below would raise.
            return "Failed to get metric from Graphite"
        if isinstance(failure_value, (list, tuple)):
            failure_value = ', '.join([u'%0.1f' % v for v in failure_value])
        else:
            failure_value = u'%0.1f' % failure_value
        hosts_string = u''
        failures_string = u''
        if self.expected_num_hosts > 0:
            hosts_string = u' | %s/%s hosts' % (actual_hosts,
                                                self.expected_num_hosts)
            if self.expected_num_hosts > actual_hosts:
                return u'Hosts missing%s' % hosts_string
        if self.allowed_num_failures and actual_failures:
            failures_string = u' | %s/%s series failing (%s allowed)' % (
                actual_failures,
                actual_hosts,
                self.allowed_num_failures,
            )
return u"%s %s %0.1f%s%s" % (
failure_value,
self.check_type,
float(self.value),
hosts_string,
failures_string,
)
def _run(self):
result = StatusCheckResult(check=self)
failures = []
graphite_output = parse_metric(self.metric, mins_to_check=self.frequency)
if graphite_output['num_series_with_data'] > 0:
result.average_value = graphite_output['average_value']
failed = False
for s in graphite_output['series']:
failure_value = None
if self.check_type == '<':
failed = float(s['min']) < float(self.value)
if failed:
failure_value = s['min']
elif self.check_type == '<=':
failed = float(s['min']) <= float(self.value)
if failed:
failure_value = s['min']
elif self.check_type == '>':
failed = float(s['max']) > float(self.value)
if failed:
failure_value = s['max']
elif self.check_type == '>=':
failed = float(s['max']) >= float(self.value)
if failed:
failure_value = s['max']
elif self.check_type == '==':
failed = float(self.value) in s['values']
if failed:
failure_value = float(self.value)
else:
raise Exception(u'Check type %s not supported' %
self.check_type)
                if failure_value is not None:
failures.append(failure_value)
            allowed_num_failures = self.allowed_num_failures or 0
            # If there are more failures than we allow
            if len(failures) - allowed_num_failures > 0:
result.succeeded = False
else:
if graphite_output['error']:
result.succeeded = False
if graphite_output['num_series_with_data'] < self.expected_num_hosts:
result.succeeded = False
else:
result.succeeded = True
try:
result.raw_data = json.dumps(graphite_output['raw'])
except:
result.raw_data = graphite_output['raw']
if not result.succeeded:
result.error = self.format_error_message(
failures,
graphite_output['num_series_with_data'],
len(failures),
)
return result
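# Example (hypothetical values, shown for illustration only): a Graphite check
# that fails when any webserver's load average rises above 4 could be created
# with the StatusCheck fields defined above, roughly:
#
#   GraphiteStatusCheck.objects.create(
#       name='webserver load',
#       metric='servers.web*.loadavg.1min',
#       check_type='>',
#       value='4',
#       expected_num_hosts=3,
#       allowed_num_failures=1,
#   )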
class HttpStatusCheck(StatusCheck):
class Meta(StatusCheck.Meta):
proxy = True
@property
def check_category(self):
return "HTTP check"
def _run(self):
result = StatusCheckResult(check=self)
auth = None
if self.username or self.password:
auth = (self.username, self.password)
try:
resp = requests.get(
self.endpoint,
timeout=self.timeout,
verify=self.verify_ssl_certificate,
auth=auth,
headers={
"User-Agent": settings.HTTP_USER_AGENT,
},
)
except requests.RequestException as e:
result.error = u'Request error occurred: %s' % (e.message,)
result.succeeded = False
else:
if self.status_code and resp.status_code != int(self.status_code):
result.error = u'Wrong code: got %s (expected %s)' % (
resp.status_code, int(self.status_code))
result.succeeded = False
result.raw_data = resp.content
elif self.text_match:
if not re.search(self.text_match, resp.content):
result.error = u'Failed to find match regex /%s/ in response body' % self.text_match
result.raw_data = resp.content
result.succeeded = False
else:
result.succeeded = True
else:
result.succeeded = True
return result
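# Example (hypothetical values): an HTTP check that polls a health endpoint and
# verifies both the status code and a body pattern might be created roughly as
#
#   HttpStatusCheck.objects.create(
#       name='api health',
#       endpoint='https://api.example.com/healthz',
#       status_code='200',
#       text_match=r'"status":\s*"ok"',
#       timeout=10,
#   )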
class JenkinsStatusCheck(StatusCheck):
class Meta(StatusCheck.Meta):
proxy = True
@property
def check_category(self):
return "Jenkins check"
@property
def failing_short_status(self):
return 'Job failing on Jenkins'
def _run(self):
result = StatusCheckResult(check=self)
try:
status = get_job_status(self.name)
active = status['active']
result.job_number = status['job_number']
if status['status_code'] == 404:
result.error = u'Job %s not found on Jenkins' % self.name
result.succeeded = False
return result
elif status['status_code'] > 400:
# Will fall through to next block
raise Exception(u'returned %s' % status['status_code'])
except Exception as e:
# If something else goes wrong, we will *not* fail - otherwise
# a lot of services seem to fail all at once.
# Ugly to do it here but...
result.error = u'Error fetching from Jenkins - %s' % e.message
result.succeeded = True
return result
if not active:
# We will fail if the job has been disabled
result.error = u'Job "%s" disabled on Jenkins' % self.name
result.succeeded = False
else:
if self.max_queued_build_time and status['blocked_build_time']:
if status['blocked_build_time'] > self.max_queued_build_time * 60:
result.succeeded = False
result.error = u'Job "%s" has blocked build waiting for %ss (> %sm)' % (
self.name,
int(status['blocked_build_time']),
self.max_queued_build_time,
)
else:
result.succeeded = status['succeeded']
else:
result.succeeded = status['succeeded']
if not status['succeeded']:
if result.error:
result.error += u'; Job "%s" failing on Jenkins' % self.name
else:
result.error = u'Job "%s" failing on Jenkins' % self.name
result.raw_data = status
return result
class StatusCheckResult(models.Model):
"""
We use the same StatusCheckResult model for all check types,
because really they are not so very different.
Checks don't have to use all the fields, so most should be
nullable
"""
check = models.ForeignKey(StatusCheck)
time = models.DateTimeField(null=False, db_index=True)
time_complete = models.DateTimeField(null=True, db_index=True)
raw_data = models.TextField(null=True)
succeeded = models.BooleanField(default=False)
error = models.TextField(null=True)
# Jenkins specific
job_number = models.PositiveIntegerField(null=True)
class Meta:
ordering = ['-time_complete']
def __unicode__(self):
return '%s: %s @%s' % (self.status, self.check.name, self.time)
@property
def status(self):
if self.succeeded:
return 'succeeded'
else:
return 'failed'
@property
def took(self):
"""
Time taken by check in ms
"""
try:
diff = self.time_complete - self.time
return (diff.microseconds + (diff.seconds + diff.days * 24 * 3600) * 10**6) / 1000
except:
return None
@property
def short_error(self):
snippet_len = 30
if len(self.error) > snippet_len:
return u"%s..." % self.error[:snippet_len - 3]
else:
return self.error
def save(self, *args, **kwargs):
if isinstance(self.raw_data, basestring):
self.raw_data = self.raw_data[:RAW_DATA_LIMIT]
return super(StatusCheckResult, self).save(*args, **kwargs)
class UserProfile(models.Model):
user = models.OneToOneField(User, related_name='profile')
def user_data(self):
for user_data_subclass in AlertPluginUserData.__subclasses__():
user_data = user_data_subclass.objects.get_or_create(user=self, title=user_data_subclass.name)
return AlertPluginUserData.objects.filter(user=self)
def __unicode__(self):
return 'User profile: %s' % self.user.username
def save(self, *args, **kwargs):
# Enforce uniqueness
if self.fallback_alert_user:
profiles = UserProfile.objects.exclude(id=self.id)
profiles.update(fallback_alert_user=False)
return super(UserProfile, self).save(*args, **kwargs)
@property
def prefixed_mobile_number(self):
return '+%s' % self.mobile_number
mobile_number = models.CharField(max_length=20, blank=True, default='')
hipchat_alias = models.CharField(max_length=50, blank=True, default='')
fallback_alert_user = models.BooleanField(default=False)
class Shift(models.Model):
start = models.DateTimeField()
end = models.DateTimeField()
user = models.ForeignKey(User)
uid = models.TextField()
deleted = models.BooleanField(default=False)
def __unicode__(self):
deleted = ''
if self.deleted:
deleted = ' (deleted)'
return "%s: %s to %s%s" % (self.user.username, self.start, self.end, deleted)
def get_duty_officers(at_time=None):
"""Returns a list of duty officers for a given time or now if none given"""
duty_officers = []
if not at_time:
at_time = timezone.now()
current_shifts = Shift.objects.filter(
deleted=False,
start__lt=at_time,
end__gt=at_time,
)
if current_shifts:
duty_officers = [shift.user for shift in current_shifts]
return duty_officers
else:
try:
u = UserProfile.objects.get(fallback_alert_user=True)
return [u.user]
except UserProfile.DoesNotExist:
return []
def update_shifts():
events = get_events()
users = User.objects.filter(is_active=True)
user_lookup = {}
for u in users:
user_lookup[u.username.lower()] = u
future_shifts = Shift.objects.filter(start__gt=timezone.now())
future_shifts.update(deleted=True)
for event in events:
e = event['summary'].lower().strip()
if e in user_lookup:
user = user_lookup[e]
try:
s = Shift.objects.get(uid=event['uid'])
except Shift.DoesNotExist:
s = Shift(uid=event['uid'])
s.start = event['start']
s.end = event['end']
s.user = user
s.deleted = False
s.save()
|
dever860/cabot
|
cabot/cabotapp/models.py
|
Python
|
mit
| 30,857
|
'''MobileNetV2 in PyTorch.
See the paper "Inverted Residuals and Linear Bottlenecks:
Mobile Networks for Classification, Detection and Segmentation" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
'''expand + depthwise + pointwise'''
def __init__(self, in_planes, out_planes, expansion, stride):
super(Block, self).__init__()
self.stride = stride
planes = expansion * in_planes
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, groups=planes, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes)
self.shortcut = nn.Sequential()
if stride == 1 and in_planes != out_planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_planes),
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out = out + self.shortcut(x) if self.stride==1 else out
return out
class MobileNetV2(nn.Module):
# (expansion, out_planes, num_blocks, stride)
cfg = [(1, 16, 1, 1),
(6, 24, 2, 1), # NOTE: change stride 2 -> 1 for CIFAR10
(6, 32, 3, 2),
(6, 64, 4, 2),
(6, 96, 3, 1),
(6, 160, 3, 2),
(6, 320, 1, 1)]
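    # e.g. the stage (6, 24, 2, 1): each of its 2 blocks expands channels 6x
    # for the depthwise conv (16 -> 96 in the first block), then projects down
    # to 24 output channels, all with stride 1.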
def __init__(self, num_classes=10):
super(MobileNetV2, self).__init__()
# NOTE: change conv1 stride 2 -> 1 for CIFAR10
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.layers = self._make_layers(in_planes=32)
self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(1280)
self.linear = nn.Linear(1280, num_classes)
def _make_layers(self, in_planes):
layers = []
for expansion, out_planes, num_blocks, stride in self.cfg:
strides = [stride] + [1]*(num_blocks-1)
for stride in strides:
layers.append(Block(in_planes, out_planes, expansion, stride))
in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layers(out)
out = F.relu(self.bn2(self.conv2(out)))
# NOTE: change pooling kernel_size 7 -> 4 for CIFAR10
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def test():
net = MobileNetV2()
x = torch.randn(2,3,32,32)
y = net(x)
print(y.size())
# test()
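# A quick, optional sanity check of model size, kept commented out in the same
# spirit as test() above (the exact count depends on num_classes):
#
#   net = MobileNetV2()
#   print(sum(p.numel() for p in net.parameters()))  # roughly a couple of million parameters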
|
kuangliu/pytorch-cifar
|
models/mobilenetv2.py
|
Python
|
mit
| 3,092
|
#Written by: Karim shoair - D4Vinci ( Dr0p1t-Framework )
import sys
from os.path import *
global G, Y, B, R, W , M , C , end
def set_colors():
global G, Y, B, R, W , M , C , end
if sys.platform.startswith('win'):
# Windows deserve coloring too :D
try:
import win_unicode_console , colorama
win_unicode_console.enable()
colorama.init()
#Now the unicode will work ^_^
G = '\033[92m' # green
Y = '\033[93m' # yellow
B = '\033[94m' # blue
R = '\033[91m' # red
W = '\033[0m' # white
M = '\x1b[35m' # magenta
C = '\x1b[36m' # cyan
end = '\33[97m'
except:
#print("[!] Error: Coloring libraries not installed ,no coloring will be used [Check the readme]")
            G = Y = B = R = W = M = C = end = ''
else:
G = '\033[92m' # green
Y = '\033[93m' # yellow
B = '\033[94m' # blue
R = '\033[91m' # red
W = '\033[0m' # white
M = '\x1b[35m' # magenta
C = '\x1b[36m' # cyan
end = '\33[97m'
set_colors()
# Console Colors
def colored_print(text,color):
global G, Y, B, R, W , M , C , end
def cprint(text,color,end=end ):
print(color+text+end)
if color.lower()=="g":color=G
elif color.lower()=="y":color=Y
elif color.lower()=="b":color=B
elif color.lower()=="r":color=R
elif color.lower()=="w":color=W
elif color.lower()=="m":color=M
elif color.lower()=="c":color=C
cprint( text, color, end )
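# Example usage (illustrative): colored_print("Payload built", "g") prints the
# message in green; colored_print("Build failed", "r") prints it in red.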
def print_banner(banner,info,c1,c2):
global G, Y, B, R, W , M , C , end
end = '\33[97m'
def cprint(text,info,c1,c2):
print(c1+text+end)
print(c2+info+end)
cprint( banner,info,c1,c2 )
def warn():
global G, Y, B, R, W , M , C , end
    return '''{} # Disclaimer Alert #{}
 The Dr0p1t Framework is not responsible
 for misuse or illegal purposes. {}
 Use it only for {}work{} or {}educational purposes{} !!!'''.format(R,B,Y,R,Y,R,W)
def print_status(args):
global G, Y, B, R, W , M , C , end
set_colors() # because of some non logical error on some users devices :3
if args.s:
c1,a = G," Loaded "
else:
c1,a = R,"Unloaded"
if args.t:
c2,b = G," Loaded "
else:
c2,b = R,"Unloaded"
if args.k:
c3,c = G," Loaded "
else:
c3,c = R,"Unloaded"
if args.b:
c4,d = G," Loaded "
cx1,bat = M,args.b
else:
c4,d = R,"Unloaded"
cx1,bat = Y,"None"
if args.p:
c5,e = G," Loaded "
cx2,ps1 = M,args.p
else:
c5,e = R,"Unloaded"
cx2,ps1 = Y,"None"
if args.v:
c6,f = G," Loaded "
cx3,vbs = M,args.v
else:
c6,f = R,"Unloaded"
cx3,vbs = Y,"None"
if args.upx:
c7,g = G," Loaded "
else:
c7,g = R,"Unloaded"
if args.nouac:
c8,h = G," Loaded "
else:
c8,h = R,"Unloaded"
if args.a:
c9,i = G," Loaded "
else:
c9,i = R,"Unloaded"
if args.runas:
c10,j = G," Loaded "
else:
c10,j = R,"Unloaded"
if args.spoof:
c11,k = G," Loaded "
cx4,ext=M,args.spoof
else:
c11,k = R,"Unloaded"
cx4,ext=Y,"None"
if args.i:
c12,l = G," Loaded "
cx5,ico=M,args.i
else:
c12,l = R,"Unloaded"
        cx5,ico=Y,"None"
print("\n"+Y+"[+] "+W+"Malware url : "+B+"%s"%args.url+W+
"\n"+Y+"\n[+] "+W+"Modules :"+
"\n\tStartup persistence\t: "+c1+"[%s]"%a+W+
"\n\tTask persistence\t: "+c2+"[%s]"%b+W+
"\n\tPowershell persistence\t: "+c9+"[%s]"%i+W+ #jklmn
"\n\tKill antivirus\t\t: "+c3+"[%s]"%c+W+
"\n\tDisable UAC\t\t: "+c8+"[%s]"%h+W+
"\n\tRun as admin\t\t: "+c10+"[%s]"%j+W+
"\n\tCompress with UPX\t: "+c7+"[%s]"%g+W+
"\n"+Y+"\n[+] "+W+"Scripts :"+
"\n\tBAT file : "+cx1+"%s"%bat+W+
"\n\tPS1 file : "+cx2+"%s"%ps1+W+
"\n\tVBS file : "+cx3+"%s"%vbs+W+"\n"+
"\n"+Y+"\n[+] "+W+"Spoofing :"+
"\n\tIcon spoof \t: "+cx5+"%s"%ico+W+
"\n\tExtension spoof : "+cx4+"%s"%ext+W+"\n"
)
|
D4Vinci/Dr0p1t-Framework
|
core/color.py
|
Python
|
mit
| 3,614
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/poi/shared_tatooine_swoop_large4.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
obi-two/Rebelion
|
data/scripts/templates/object/building/poi/shared_tatooine_swoop_large4.py
|
Python
|
mit
| 452
|
"""
Django settings for lwc project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1&pbr@s*=_81p1qsdo&o)c_q-^a&lgaojj!6l^-_1^ne$ffql8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'south',
'joins',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'lwc.middleware.ReferMiddleware',
)
ROOT_URLCONF = 'lwc.urls'
WSGI_APPLICATION = 'lwc.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
#SHARE_URL = "http://launchwithcode.com/?ref="
SHARE_URL = "http://127.0.0.1:8000/?ref="
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates')
#BASE_DIR + "/templates/",
#'/Users/jmitch/Desktop/lwc/src/templates/',
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
#STATIC_ROOT = '/Users/jmitch/desktop/lwc/src/static/static_root/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static', 'static_root')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static', 'static_dirs'),
#'/Users/jmitch/desktop/lwc/src/static/static_dirs/',
# '/Users/jmitch/desktop/lwc/src/static/static_dirs/',
# '/Users/jmitch/desktop/lwc/src/static/static_dirs/',
# '/Users/jmitch/desktop/lwc/src/static/static_dirs/',
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'static', 'media')
MEDIA_URL = '/media/'
|
codingforentrepreneurs/launch-with-code
|
lwc/settings/base.py
|
Python
|
mit
| 2,800
|
#!/usr/bin/env python
import rospy
from flexbe_core import EventState, Logger
from flexbe_core.proxy import ProxySubscriberCached
from sensor_msgs.msg import PointCloud2
class StorePointcloudState(EventState):
'''
Stores the latest pointcloud of the given topic.
-- topic string The topic on which to listen for the pointcloud.
#> pointcloud PointCloud2 The received pointcloud.
<= done Pointcloud has been received and stored.
'''
def __init__(self, topic):
super(StorePointcloudState, self).__init__(outcomes = ['done'],
output_keys = ['pointcloud'])
self._sub = ProxySubscriberCached({topic: PointCloud2})
self._pcl_topic = topic
def execute(self, userdata):
if self._sub.has_msg(self._pcl_topic):
userdata.pointcloud = self._sub.get_last_msg(self._pcl_topic)
return 'done'
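# Example (a sketch; the topic, state label and userdata key below are purely
# illustrative): in a FlexBE behavior this state would typically be added as
#
#   OperatableStateMachine.add('store_cloud',
#                              StorePointcloudState(topic='/camera/depth/points'),
#                              transitions={'done': 'next_state'},
#                              remapping={'pointcloud': 'cloud'})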
|
pschillinger/lamor15
|
lamor_flexbe_states/src/lamor_flexbe_states/store_pointcloud_state.py
|
Python
|
mit
| 847
|
import sublime
HOST_PLATFORM = sublime.platform()
WINDOWS = 'windows'
LINUX = 'linux'
OSX = 'osx'
|
SublimeText/VintageEx
|
plat/__init__.py
|
Python
|
mit
| 109
|
# file openpyxl/writer/straight_worksheet.py
# Copyright (c) 2010-2011 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
"""Write worksheets to xml representations in an optimized way"""
import datetime
import os
from tempfile import NamedTemporaryFile
from openpyxl.shared.compat import OrderedDict
from openpyxl.cell import get_column_letter, Cell
from openpyxl.worksheet import Worksheet
from openpyxl.shared.xmltools import (XMLGenerator, start_tag, end_tag, tag)
from openpyxl.shared.date_time import SharedDate
from openpyxl.shared.ooxml import MAX_COLUMN, MAX_ROW
from openpyxl.shared import NUMERIC_TYPES
from openpyxl.shared.exc import WorkbookAlreadySaved
from openpyxl.writer.excel import ExcelWriter
from openpyxl.writer.strings import write_string_table
from openpyxl.writer.styles import StyleWriter
from openpyxl.style import Style, NumberFormat
from openpyxl.shared.ooxml import (ARC_SHARED_STRINGS, PACKAGE_WORKSHEETS)
STYLES = {'datetime' : {'type':Cell.TYPE_NUMERIC,
'style':'1'},
'string':{'type':Cell.TYPE_STRING,
'style':'0'},
'numeric':{'type':Cell.TYPE_NUMERIC,
'style':'0'},
'formula':{'type':Cell.TYPE_FORMULA,
'style':'0'},
'boolean':{'type':Cell.TYPE_BOOL,
'style':'0'},
}
DESCRIPTORS_CACHE_SIZE = 50
DATETIME_STYLE = Style()
DATETIME_STYLE.number_format.format_code = NumberFormat.FORMAT_DATE_YYYYMMDD2
BOUNDING_BOX_PLACEHOLDER = 'A1:%s%d' % (get_column_letter(MAX_COLUMN), MAX_ROW)
def create_temporary_file(suffix=''):
fobj = NamedTemporaryFile(mode='w+', suffix=suffix, prefix='openpyxl.', delete=False)
filename = fobj.name
return filename
class DumpWorksheet(Worksheet):
"""
.. warning::
You shouldn't initialize this yourself, use :class:`openpyxl.workbook.Workbook` constructor instead,
with `optimized_write = True`.
"""
def __init__(self, parent_workbook, title):
Worksheet.__init__(self, parent_workbook, title)
self._max_col = 0
self._max_row = 0
self._parent = parent_workbook
self._fileobj_header_name = create_temporary_file(suffix='.header')
self._fileobj_content_name = create_temporary_file(suffix='.content')
self._fileobj_name = create_temporary_file()
self._shared_date = SharedDate()
self._string_builder = self._parent.strings_table_builder
def get_temporary_file(self, filename):
if filename in self._descriptors_cache:
fobj = self._descriptors_cache[filename]
# re-insert the value so it does not get evicted
# from cache soon
del self._descriptors_cache[filename]
self._descriptors_cache[filename] = fobj
return fobj
else:
if filename is None:
raise WorkbookAlreadySaved('this workbook has already been saved '
'and cannot be modified or saved anymore.')
fobj = open(filename, 'r+')
self._descriptors_cache[filename] = fobj
if len(self._descriptors_cache) > DESCRIPTORS_CACHE_SIZE:
filename, fileobj = self._descriptors_cache.popitem(last=False)
fileobj.close()
return fobj
@property
def _descriptors_cache(self):
try:
return self._parent._local_data.cache
except AttributeError:
self._parent._local_data.cache = OrderedDict()
return self._parent._local_data.cache
@property
def filename(self):
return self._fileobj_name
@property
def _temp_files(self):
return (self._fileobj_content_name,
self._fileobj_header_name,
self._fileobj_name)
def _unset_temp_files(self):
self._fileobj_header_name = None
self._fileobj_content_name = None
self._fileobj_name = None
def write_header(self):
fobj = self.get_temporary_file(filename=self._fileobj_header_name)
doc = XMLGenerator(fobj, 'utf-8')
start_tag(doc, 'worksheet',
{'xml:space': 'preserve',
'xmlns': 'http://schemas.openxmlformats.org/spreadsheetml/2006/main',
'xmlns:r': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships'})
start_tag(doc, 'sheetPr')
tag(doc, 'outlinePr',
{'summaryBelow': '1',
'summaryRight': '1'})
end_tag(doc, 'sheetPr')
tag(doc, 'dimension', {'ref': 'A1:%s' % (self.get_dimensions())})
start_tag(doc, 'sheetViews')
start_tag(doc, 'sheetView', {'workbookViewId': '0'})
tag(doc, 'selection', {'activeCell': 'A1',
'sqref': 'A1'})
end_tag(doc, 'sheetView')
end_tag(doc, 'sheetViews')
tag(doc, 'sheetFormatPr', {'defaultRowHeight': '15'})
start_tag(doc, 'sheetData')
def close(self):
self._close_content()
self._fileobj = self.get_temporary_file(filename=self._fileobj_name)
self._write_fileobj(self._fileobj_header_name)
self._write_fileobj(self._fileobj_content_name)
self._fileobj.close()
def _write_fileobj(self, fobj_name):
fobj = self.get_temporary_file(filename=fobj_name)
fobj.flush()
fobj.seek(0)
while True:
chunk = fobj.read(4096)
if not chunk:
break
self._fileobj.write(chunk)
fobj.close()
self._fileobj.flush()
def _close_content(self):
doc = self._get_content_generator()
end_tag(doc, 'sheetData')
end_tag(doc, 'worksheet')
def get_dimensions(self):
if not self._max_col or not self._max_row:
return 'A1'
else:
return '%s%d' % (get_column_letter(self._max_col), (self._max_row))
def _get_content_generator(self):
""" XXX: this is ugly, but it allows to resume writing the file
even after the handle is closed"""
# when I'll recreate the XMLGenerator, it will start writing at the
# begining of the file, erasing previously entered rows, so we have
# to move to the end of the file before adding new tags
handle = self.get_temporary_file(filename=self._fileobj_content_name)
handle.seek(0, 2)
doc = XMLGenerator(out=handle)
return doc
def append(self, row):
"""
:param row: iterable containing values to append
:type row: iterable
"""
doc = self._get_content_generator()
self._max_row += 1
span = len(row)
self._max_col = max(self._max_col, span)
row_idx = self._max_row
attrs = {'r': '%d' % row_idx,
'spans': '1:%d' % span}
start_tag(doc, 'row', attrs)
for col_idx, cell in enumerate(row):
if cell is None:
continue
coordinate = '%s%d' % (get_column_letter(col_idx + 1), row_idx)
attributes = {'r': coordinate}
if isinstance(cell, bool):
dtype = 'boolean'
elif isinstance(cell, NUMERIC_TYPES):
dtype = 'numeric'
elif isinstance(cell, (datetime.datetime, datetime.date)):
dtype = 'datetime'
cell = self._shared_date.datetime_to_julian(cell)
attributes['s'] = STYLES[dtype]['style']
elif cell and cell[0] == '=':
dtype = 'formula'
else:
dtype = 'string'
cell = self._string_builder.add(cell)
if dtype != 'formula':
attributes['t'] = STYLES[dtype]['type']
start_tag(doc, 'c', attributes)
if dtype == 'formula':
tag(doc, 'f', body='%s' % cell[1:])
tag(doc, 'v')
elif dtype == 'boolean':
tag(doc, 'v', body='%d' % cell)
else:
tag(doc, 'v', body='%s' % cell)
end_tag(doc, 'c')
end_tag(doc, 'row')
def save_dump(workbook, filename):
writer = ExcelDumpWriter(workbook)
writer.save(filename)
return True
class ExcelDumpWriter(ExcelWriter):
def __init__(self, workbook):
self.workbook = workbook
self.style_writer = StyleDumpWriter(workbook)
self.style_writer._style_list.append(DATETIME_STYLE)
def _write_string_table(self, archive):
shared_string_table = self.workbook.strings_table_builder.get_table()
archive.writestr(ARC_SHARED_STRINGS,
write_string_table(shared_string_table))
return shared_string_table
def _write_worksheets(self, archive, shared_string_table, style_writer):
for i, sheet in enumerate(self.workbook.worksheets):
sheet.write_header()
sheet.close()
archive.write(sheet.filename, PACKAGE_WORKSHEETS + '/sheet%d.xml' % (i + 1))
for filename in sheet._temp_files:
del sheet._descriptors_cache[filename]
os.remove(filename)
sheet._unset_temp_files()
class StyleDumpWriter(StyleWriter):
def _get_style_list(self, workbook):
return []
|
Jian-Zhan/customarrayformatter
|
openpyxl/writer/dump_worksheet.py
|
Python
|
mit
| 10,418
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import frappe, unittest
from frappe.defaults import *
class TestDefaults(unittest.TestCase):
def test_global(self):
clear_user_default("key1")
set_global_default("key1", "value1")
self.assertEqual(get_global_default("key1"), "value1")
set_global_default("key1", "value2")
self.assertEqual(get_global_default("key1"), "value2")
add_global_default("key1", "value3")
self.assertEqual(get_global_default("key1"), "value2")
self.assertEqual(get_defaults()["key1"], ["value2", "value3"])
self.assertEqual(get_user_default_as_list("key1"), ["value2", "value3"])
def test_user(self):
set_user_default("key1", "2value1")
self.assertEqual(get_user_default_as_list("key1"), ["2value1"])
set_user_default("key1", "2value2")
self.assertEqual(get_user_default("key1"), "2value2")
add_user_default("key1", "3value3")
self.assertEqual(get_user_default("key1"), "2value2")
self.assertEqual(get_user_default_as_list("key1"), ["2value2", "3value3"])
def test_global_if_not_user(self):
set_global_default("key4", "value4")
self.assertEqual(get_user_default("key4"), "value4")
def test_clear(self):
set_user_default("key5", "value5")
self.assertEqual(get_user_default("key5"), "value5")
clear_user_default("key5")
self.assertEqual(get_user_default("key5"), None)
def test_clear_global(self):
set_global_default("key6", "value6")
self.assertEqual(get_user_default("key6"), "value6")
clear_default("key6", value="value6")
self.assertEqual(get_user_default("key6"), None)
def test_user_permission_on_defaults(self):
self.assertEqual(get_global_default("language"), "en")
self.assertEqual(get_user_default("language"), "en")
self.assertEqual(get_user_default_as_list("language"), ["en"])
old_user = frappe.session.user
user = 'test@example.com'
frappe.set_user(user)
perm_doc = frappe.get_doc(dict(
doctype='User Permission',
user=frappe.session.user,
allow="Language",
for_value="en-GB",
)).insert(ignore_permissions = True)
self.assertEqual(get_global_default("language"), None)
self.assertEqual(get_user_default("language"), None)
self.assertEqual(get_user_default_as_list("language"), [])
frappe.delete_doc('User Permission', perm_doc.name)
frappe.set_user(old_user)
|
frappe/frappe
|
frappe/tests/test_defaults.py
|
Python
|
mit
| 2,355
|
# -*- coding: utf-8 -*-
from gluon import current
from s3 import *
from s3layouts import *
try:
from .layouts import *
except ImportError:
pass
import s3menus as default
red_cross_filter = {"organisation.organisation_type_id$name" : "Red Cross / Red Crescent"}
# =============================================================================
class S3MainMenu(default.S3MainMenu):
""" Custom Application Main Menu """
# -------------------------------------------------------------------------
@classmethod
def menu(cls):
""" Compose Menu """
# Modules menus
main_menu = MM()(
cls.menu_modules(),
)
# Additional menus
current.menu.personal = cls.menu_personal()
current.menu.dashboard = cls.menu_dashboard()
return main_menu
# -------------------------------------------------------------------------
@classmethod
def menu_modules(cls):
""" Custom Modules Menu """
T = current.T
return [
homepage("gis")(
),
homepage("hrm", "org", name=T("Staff"),
vars=dict(group="staff"))(
MM("Staff", c="hrm", f="staff"),
MM("Teams", c="hrm", f="group"),
MM("National Societies", c="org", f="organisation",
vars = red_cross_filter),
MM("Offices", c="org", f="office"),
MM("Job Titles", c="hrm", f="job_title"),
#MM("Skill List", c="hrm", f="skill"),
MM("Training Events", c="hrm", f="training_event"),
MM("Training Courses", c="hrm", f="course"),
MM("Certificate List", c="hrm", f="certificate"),
),
homepage("vol", name=T("Volunteers"))(
MM("Volunteers", c="vol", f="volunteer"),
MM("Teams", c="vol", f="group"),
MM("Volunteer Roles", c="vol", f="job_title"),
MM("Programs", c="vol", f="programme"),
#MM("Skill List", c="vol", f="skill"),
MM("Training Events", c="vol", f="training_event"),
MM("Training Courses", c="vol", f="course"),
MM("Certificate List", c="vol", f="certificate"),
),
homepage("member")(
MM("Members", c="member", f="membership"),
),
homepage("inv", "supply", "req")(
MM("Warehouses", c="inv", f="warehouse"),
MM("Received Shipments", c="inv", f="recv"),
MM("Sent Shipments", c="inv", f="send"),
MM("Items", c="supply", f="item"),
MM("Item Catalogs", c="supply", f="catalog"),
MM("Item Categories", c="supply", f="item_category"),
M("Requests", c="req", f="req")(),
#M("Commitments", f="commit")(),
),
homepage("asset")(
MM("Assets", c="asset", f="asset"),
MM("Items", c="asset", f="item"),
),
homepage("survey")(
MM("Assessment Templates", c="survey", f="template"),
MM("Disaster Assessments", c="survey", f="series"),
),
homepage("project")(
MM("Projects", c="project", f="project"),
MM("Communities", c="project", f="location"),
),
homepage("vulnerability")(
MM("Map", c="vulnerability", f="index"),
),
homepage("event", "irs")(
MM("Events", c="event", f="event"),
MM("Incident Reports", c="irs", f="ireport"),
),
homepage("deploy", name="RDRT")(
MM("Missions", c="deploy", f="mission", m="summary"),
MM("Members", c="deploy", f="human_resource", m="summary"),
),
]
# -------------------------------------------------------------------------
@classmethod
def menu_dashboard(cls):
""" Dashboard Menu (at bottom of page) """
DB = S3DashBoardMenuLayout
request = current.request
if request.controller == "vol":
dashboard = DB()(
DB("VOLUNTEERS",
c="vol",
image = "graphic_staff_wide.png",
title = "Volunteers")(
DB("Manage Volunteer Data", f="volunteer"),
DB("Manage Teams Data", f="group"),
),
DB("CATALOGS",
c="hrm",
image="graphic_catalogue.png",
title="Catalogs")(
DB("Certificates", f="certificate"),
DB("Training Courses", f="course"),
#DB("Skills", f="skill"),
DB("Job Titles", f="job_title")
))
elif request.controller in ("hrm", "org"):
dashboard = DB()(
DB("STAFF",
c="hrm",
image = "graphic_staff_wide.png",
title = "Staff")(
DB("Manage Staff Data", f="staff"),
DB("Manage Teams Data", f="group"),
),
DB("OFFICES",
c="org",
image = "graphic_office.png",
title = "Offices")(
DB("Manage Offices Data", f="office"),
DB("Manage National Society Data", f="organisation",
vars=red_cross_filter
),
),
DB("CATALOGS",
c="hrm",
image="graphic_catalogue.png",
title="Catalogs")(
DB("Certificates", f="certificate"),
DB("Training Courses", f="course"),
#DB("Skills", f="skill"),
DB("Job Titles", f="job_title")
))
elif request.controller == "default" and request.function == "index":
dashboard = DB(_id="dashboard")(
DB("Staff", c="hrm", f="staff", m="search",
image = "graphic_staff.png",
title = "Staff",
text = "Add new and manage existing staff."),
DB("Volunteers", c="vol", f="volunteer", m="search",
image = "graphic_volunteers.png",
title = "Volunteers",
text = "Add new and manage existing volunteers."),
DB("Members", c="member", f="index",
image = "graphic_members.png",
title = "Members",
text = "Add new and manage existing members."),
DB("Warehouses", c="inv", f="index",
image = "graphic_warehouse.png",
title = "Warehouses",
text = "Stocks and relief items."),
DB("Assets", c="asset", f="index",
image = "graphic_assets.png",
title = "Assests",
text = "Manage office inventories and assets."),
DB("Assessments", c="survey", f="index",
image = "graphic_assessments.png",
title = "Assessments",
text = "Design, deploy & analyze surveys."),
DB("Projects", c="project", f="index",
image = "graphic_tools.png",
title = "Projects",
text = "Tracking and analysis of Projects and Activities.")
)
else:
dashboard = None
return dashboard
# -------------------------------------------------------------------------
@classmethod
def menu_personal(cls):
""" Custom Personal Menu """
auth = current.auth
s3 = current.response.s3
settings = current.deployment_settings
# Language selector
menu_lang = ML("Language", right=True)
for language in s3.l10n_languages.items():
code, name = language
menu_lang(
ML(name, translate=False, lang_code=code, lang_name=name)
)
if not auth.is_logged_in():
request = current.request
login_next = URL(args=request.args, vars=request.vars)
if request.controller == "default" and \
request.function == "user" and \
"_next" in request.get_vars:
login_next = request.get_vars["_next"]
self_registration = settings.get_security_self_registration()
menu_personal = MP()(
MP("Register", c="default", f="user",
m="register", check=self_registration),
MP("Login", c="default", f="user",
m="login", vars=dict(_next=login_next)),
MP("Lost Password", c="default", f="user",
m="retrieve_password"),
menu_lang
)
else:
s3_has_role = auth.s3_has_role
is_org_admin = lambda i: s3_has_role("ORG_ADMIN") and \
not s3_has_role("ADMIN")
menu_personal = MP()(
MP("Administration", c="admin", f="index",
check=s3_has_role("ADMIN")),
MP("Administration", c="admin", f="user",
check=is_org_admin),
MP("Profile", c="default", f="person"),
MP("Change Password", c="default", f="user",
m="change_password"),
MP("Logout", c="default", f="user",
m="logout"),
menu_lang,
)
return menu_personal
# =============================================================================
class S3OptionsMenu(default.S3OptionsMenu):
""" Custom Controller Menus """
# -------------------------------------------------------------------------
def hrm(self):
""" HRM Human Resource Management """
session = current.session
s3 = current.session.s3
ADMIN = s3.system_roles.ADMIN
if "hrm" not in s3:
current.s3db.hrm_vars()
hrm_vars = s3.hrm
SECTORS = "Clusters" if current.deployment_settings.get_ui_label_cluster() \
else "Sectors"
manager_mode = lambda i: hrm_vars.mode is None
personal_mode = lambda i: hrm_vars.mode is not None
is_org_admin = lambda i: hrm_vars.orgs and True or \
ADMIN in s3.roles
is_super_editor = lambda i: current.auth.s3_has_role("staff_super") or \
current.auth.s3_has_role("vol_super")
staff = {"group": "staff"}
return M()(
M("Staff", c="hrm", f=("staff", "person"),
check=manager_mode)(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
M("Import", f="person", m="import",
vars=staff, p="create"),
),
M("Staff & Volunteers (Combined)",
c="hrm", f="human_resource", m="summary",
check=[manager_mode, is_super_editor]),
M("Teams", c="hrm", f="group",
check=manager_mode)(
M("New", m="create"),
M("List All"),
M("Search Members", f="group_membership"),
M("Import", f="group_membership", m="import"),
),
M("National Societies", c="org",
f="organisation",
vars=red_cross_filter,
check=manager_mode)(
M("New", m="create",
vars=red_cross_filter
),
M("List All",
vars=red_cross_filter
),
M("Search", m="search",
vars=red_cross_filter
),
M("Import", m="import", p="create", check=is_org_admin)
),
M("Offices", c="org", f="office",
check=manager_mode)(
M("New", m="create"),
M("List All"),
#M("Search", m="search"),
M("Import", m="import", p="create"),
),
M("Department Catalog", c="hrm", f="department",
check=manager_mode)(
M("New", m="create"),
M("List All"),
),
M("Job Title Catalog", c="hrm", f="job_title",
check=manager_mode)(
M("New", m="create"),
M("List All"),
M("Import", m="import", p="create", check=is_org_admin),
),
#M("Skill Catalog", f="skill",
#check=manager_mode)(
#M("New", m="create"),
#M("List All"),
##M("Skill Provisions", f="skill_provision"),
#),
M("Training Events", c="hrm", f="training_event",
check=manager_mode)(
M("New", m="create"),
M("List All"),
M("Search Training Participants", f="training"),
M("Import Participant List", f="training", m="import"),
),
M("Reports", c="hrm", f="staff", m="report",
check=manager_mode)(
M("Staff Report", m="report"),
M("Expiring Staff Contracts Report",
vars=dict(expiring="1")),
M("Training Report", f="training", m="report2"),
),
M("Training Course Catalog", c="hrm", f="course",
check=manager_mode)(
M("New", m="create"),
M("List All"),
M("Import", m="import", p="create", check=is_org_admin),
M("Course Certificates", f="course_certificate"),
),
M("Certificate Catalog", c="hrm", f="certificate",
check=manager_mode)(
M("New", m="create"),
M("List All"),
M("Import", m="import", p="create", check=is_org_admin),
#M("Skill Equivalence", f="certificate_skill"),
),
M("Organization Types", c="org", f="organisation_type",
restrict=[ADMIN],
check=manager_mode)(
M("New", m="create"),
M("List All"),
),
M("Office Types", c="org", f="office_type",
restrict=[ADMIN],
check=manager_mode)(
M("New", m="create"),
M("List All"),
),
#M("Facility Types", c="org", f="facility_type",
# restrict=[ADMIN],
# check=manager_mode)(
# M("New", m="create"),
# M("List All"),
#),
M(SECTORS, f="sector", c="org", restrict=[ADMIN],
check=manager_mode)(
M("New", m="create"),
M("List All"),
),
#M("My Profile", c="hrm", f="person",
# check=personal_mode, vars=dict(mode="personal")),
# This provides the link to switch to the manager mode:
M("Human Resources", c="hrm", f="index",
check=[personal_mode, is_org_admin]),
# This provides the link to switch to the personal mode:
#M("Personal Profile", c="hrm", f="person",
# check=manager_mode, vars=dict(mode="personal"))
)
# -------------------------------------------------------------------------
def vol(self):
""" Volunteer Management """
s3 = current.session.s3
ADMIN = s3.system_roles.ADMIN
# Custom conditions for the check-hook, as lambdas in order
# to have them checked only immediately before rendering:
manager_mode = lambda i: s3.hrm.mode is None
personal_mode = lambda i: s3.hrm.mode is not None
is_org_admin = lambda i: s3.hrm.orgs and True or \
ADMIN in s3.roles
is_super_editor = lambda i: current.auth.s3_has_role("vol_super") or \
current.auth.s3_has_role("staff_super")
settings = current.deployment_settings
show_programmes = lambda i: settings.get_hrm_vol_experience() == "programme"
show_tasks = lambda i: settings.has_module("project") and \
settings.get_project_mode_task()
teams = settings.get_hrm_teams()
use_teams = lambda i: teams
check_org_dependent_field = lambda tablename, fieldname: \
settings.set_org_dependent_field(tablename, fieldname,
enable_field = False)
return M(c="vol")(
M("Volunteers", f="volunteer",
check=[manager_mode])(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
M("Import", f="person", m="import",
vars={"group":"volunteer"}, p="create"),
),
M("Staff & Volunteers (Combined)",
c="vol", f="human_resource", m="summary",
check=[manager_mode, is_super_editor]),
M(teams, f="group",
check=[manager_mode, use_teams])(
M("New", m="create"),
M("List All"),
M("Search Members", f="group_membership"),
M("Import", f="group_membership", m="import"),
),
#M("Department Catalog", f="department",
# check=manager_mode)(
# M("New", m="create"),
# M("List All"),
#),
M("Volunteer Role Catalog", f="job_title",
check=manager_mode)(
M("New", m="create"),
M("List All"),
M("Import", m="import", p="create", check=is_org_admin),
),
#M("Skill Catalog", f="skill",
# check=manager_mode)(
# M("New", m="create"),
# M("List All"),
# #M("Skill Provisions", f="skill_provision"),
#),
M("Training Events", f="training_event",
check=manager_mode)(
M("New", m="create"),
M("List All"),
M("Search Training Participants", f="training"),
M("Import Participant List", f="training", m="import"),
),
M("Training Course Catalog", f="course",
check=manager_mode)(
M("New", m="create"),
M("List All"),
#M("Course Certificates", f="course_certificate"),
),
M("Certificate Catalog", f="certificate",
check=manager_mode)(
M("New", m="create"),
M("List All"),
#M("Skill Equivalence", f="certificate_skill"),
),
M("Programs", f="programme",
check=[manager_mode, show_programmes])(
M("New", m="create"),
M("List All"),
M("Import Hours", f="programme_hours", m="import"),
),
M("Awards", f="award",
check=[manager_mode, is_org_admin])(
M("New", m="create"),
M("List All"),
),
M("Volunteer Cluster Type", f="cluster_type",
check = check_org_dependent_field("vol_volunteer_cluster",
"vol_cluster_type_id"))(
M("New", m="create"),
M("List All"),
),
M("Volunteer Cluster", f="cluster",
check = check_org_dependent_field("vol_volunteer_cluster",
"vol_cluster_id"))(
M("New", m="create"),
M("List All"),
),
M("Volunteer Cluster Position", f="cluster_position",
check = check_org_dependent_field("vol_volunteer_cluster",
"vol_cluster_position_id"))(
M("New", m="create"),
M("List All"),
),
M("Reports", f="volunteer", m="report",
check=manager_mode)(
M("Volunteer Report", m="report"),
M("Hours by Role Report", f="programme_hours", m="report2",
vars=Storage(rows="job_title_id",
cols="month",
fact="sum(hours)"),
check=show_programmes),
M("Hours by Program Report", f="programme_hours", m="report2",
vars=Storage(rows="programme_id",
cols="month",
fact="sum(hours)"),
check=show_programmes),
M("Training Report", f="training", m="report2"),
),
#M("My Profile", f="person",
# check=personal_mode, vars=dict(mode="personal")),
M("My Tasks", f="task",
check=[personal_mode, show_tasks],
vars=dict(mode="personal",
mine=1)),
# This provides the link to switch to the manager mode:
M("Volunteer Management", f="index",
check=[personal_mode, is_org_admin]),
# This provides the link to switch to the personal mode:
#M("Personal Profile", f="person",
# check=manager_mode, vars=dict(mode="personal"))
)
# -------------------------------------------------------------------------
def inv(self):
""" INV / Inventory """
ADMIN = current.session.s3.system_roles.ADMIN
current.s3db.inv_recv_crud_strings()
crud_strings = current.response.s3.crud_strings
inv_recv_list = crud_strings.inv_recv.title_list
inv_recv_search = crud_strings.inv_recv.title_search
use_commit = lambda i: current.deployment_settings.get_req_use_commit()
return M()(
#M("Home", f="index"),
M("Warehouses", c="inv", f="warehouse")(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
M("Import", m="import", p="create"),
),
M("Warehouse Stock", c="inv", f="inv_item")(
M("Search", f="inv_item", m="search"),
M("Search Shipped Items", f="track_item", m="search"),
M("Adjust Stock Levels", f="adj"),
#M("Kitting", f="kit"),
M("Import", f="inv_item", m="import", p="create"),
),
M("Reports", c="inv", f="inv_item")(
M("Warehouse Stock", f="inv_item",m="report"),
#M("Expiration Report", c="inv", f="track_item",
# m="search", vars=dict(report="exp")),
#M("Monetization Report", c="inv", f="inv_item",
# m="search", vars=dict(report="mon")),
#M("Utilization Report", c="inv", f="track_item",
# m="search", vars=dict(report="util")),
#M("Summary of Incoming Supplies", c="inv", f="track_item",
# m="search", vars=dict(report="inc")),
# M("Summary of Releases", c="inv", f="track_item",
# m="search", vars=dict(report="rel")),
),
M(inv_recv_list, c="inv", f="recv")(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
),
M("Sent Shipments", c="inv", f="send")(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
M("Search Shipped Items", f="track_item", m="search"),
),
M("Items", c="supply", f="item")(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
M("Report", m="report"),
M("Import", f="catalog_item", m="import", p="create"),
),
# Catalog Items moved to be next to the Item Categories
#M("Catalog Items", c="supply", f="catalog_item")(
#M("New", m="create"),
#M("List All"),
#M("Search", m="search"),
#),
#M("Brands", c="supply", f="brand",
# restrict=[ADMIN])(
# M("New", m="create"),
# M("List All"),
#),
M("Catalogs", c="supply", f="catalog")(
M("New", m="create"),
M("List All"),
#M("Search", m="search"),
),
M("Item Categories", c="supply", f="item_category",
restrict=[ADMIN])(
M("New", m="create"),
M("List All"),
),
M("Suppliers", c="inv", f="supplier")(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
M("Import", m="import", p="create"),
),
M("Facilities", c="inv", f="facility")(
M("New", m="create", t="org_facility"),
M("List All"),
#M("Search", m="search"),
),
M("Facility Types", c="inv", f="facility_type",
restrict=[ADMIN])(
M("New", m="create"),
M("List All"),
#M("Search", m="search"),
),
M("Requests", c="req", f="req")(
M("New", m="create"),
M("List All"),
M("Requested Items", f="req_item"),
#M("Search Requested Items", f="req_item", m="search"),
),
M("Commitments", c="req", f="commit", check=use_commit)(
M("List All")
),
)
# -------------------------------------------------------------------------
def irs(self):
""" IRS Incident Reporting """
return M()(
M("Events", c="event", f="event")(
M("New", m="create"),
M("List All"),
),
M("Incident Reports", c="irs", f="ireport")(
M("New", m="create"),
M("List All"),
M("Open Incidents", vars={"open": 1}),
M("Map", m="map"),
M("Timeline", args="timeline"),
M("Report", m="report2")
),
M("Incident Categories", c="irs", f="icategory",
check=current.auth.s3_has_role(current.session.s3.system_roles.ADMIN))(
M("New", m="create"),
M("List All"),
),
M("Reports", c="irs", f="ireport", m="report")(
M("Incidents", m="report"),
),
)
# -------------------------------------------------------------------------
def org(self):
""" Organisation Management """
# Same as HRM
return self.hrm()
# -------------------------------------------------------------------------
def req(self):
""" Organisation Management """
# Same as Inventory
return self.inv()
# -------------------------------------------------------------------------
def event(self):
""" Event Management """
# Same as IRS
return self.irs()
# -------------------------------------------------------------------------
def deploy(self):
""" RDRT Alerting and Deployments """
return M()(
M("Missions",
c="deploy", f="mission", m="summary")(
M("New", m="create"),
),
M("Alerts",
c="deploy", f="alert")(
M("New", m="create"),
M("InBox",
c="deploy", f="email_inbox",
),
M("Settings",
c="deploy", f="email_channel",
),
),
M("RDRT Members",
c="deploy", f="human_resource", m="summary")(
M("Add Member", c="deploy", f="application", m="select"),
M("Import Members", c="deploy", f="person", m="import"),
),
)
# END =========================================================================
|
flavour/tldrmp
|
private/templates/IFRC/menus.py
|
Python
|
mit
| 31,780
|
# -*- coding: utf-8 -*-
# A detailed explanation can be found in 03 - Seq2Seq.py (and related files) in the parent folder.
import tensorflow as tf
# Basic Seq2Seq class
class Seq2Seq:
logits = None
outputs = None
cost = None
train_op = None
def __init__(self, vocab_size, n_hidden=128, n_layers=3):
        self.learning_rate = 0.001
self.vocab_size = vocab_size
self.n_hidden = n_hidden
self.n_layers = n_layers
self.enc_input = tf.placeholder(tf.float32, [None, None, self.vocab_size])
self.dec_input = tf.placeholder(tf.float32, [None, None, self.vocab_size])
self.targets = tf.placeholder(tf.int64, [None, None])
self.weights = tf.Variable(tf.ones([self.n_hidden, self.vocab_size]), name="weights")
self.bias = tf.Variable(tf.zeros([self.vocab_size]), name="bias")
self.global_step = tf.Variable(0, trainable=False, name="global_step")
self.build_model()
self.saver = tf.train.Saver(tf.global_variables())
def build_model(self):
self.enc_input = tf.transpose(self.enc_input, [1, 0, 2])
self.dec_input = tf.transpose(self.dec_input, [1, 0, 2])
enc_cell, dec_cell = self.build_cells()
with tf.variable_scope('encode'):
outputs, enc_states = tf.nn.dynamic_rnn(enc_cell, self.enc_input, dtype=tf.float32)
with tf.variable_scope('decode'):
outputs, dec_states = tf.nn.dynamic_rnn(dec_cell, self.dec_input, dtype=tf.float32,
initial_state=enc_states)
self.logits, self.cost, self.train_op = self.build_ops(outputs, self.targets)
self.outputs = tf.argmax(self.logits, 2)
def build_cells(self, output_keep_prob=0.5):
enc_cell = tf.nn.rnn_cell.BasicRNNCell(self.n_hidden)
enc_cell = tf.nn.rnn_cell.DropoutWrapper(enc_cell, output_keep_prob=output_keep_prob)
enc_cell = tf.nn.rnn_cell.MultiRNNCell([enc_cell] * self.n_layers)
dec_cell = tf.nn.rnn_cell.BasicRNNCell(self.n_hidden)
dec_cell = tf.nn.rnn_cell.DropoutWrapper(dec_cell, output_keep_prob=output_keep_prob)
dec_cell = tf.nn.rnn_cell.MultiRNNCell([dec_cell] * self.n_layers)
return enc_cell, dec_cell
def build_ops(self, outputs, targets):
time_steps = tf.shape(outputs)[1]
outputs = tf.reshape(outputs, [-1, self.n_hidden])
logits = tf.matmul(outputs, self.weights) + self.bias
logits = tf.reshape(logits, [-1, time_steps, self.vocab_size])
        cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets, logits=logits))
        train_op = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(cost, global_step=self.global_step)
tf.summary.scalar('cost', cost)
return logits, cost, train_op
def train(self, session, enc_input, dec_input, targets):
return session.run([self.train_op, self.cost],
feed_dict={self.enc_input: enc_input,
self.dec_input: dec_input,
self.targets: targets})
def test(self, session, enc_input, dec_input, targets):
prediction_check = tf.equal(self.outputs, self.targets)
accuracy = tf.reduce_mean(tf.cast(prediction_check, tf.float32))
return session.run([self.targets, self.outputs, accuracy],
feed_dict={self.enc_input: enc_input,
self.dec_input: dec_input,
self.targets: targets})
def predict(self, session, enc_input, dec_input):
return session.run(self.outputs,
feed_dict={self.enc_input: enc_input,
self.dec_input: dec_input})
def write_logs(self, session, writer, enc_input, dec_input, targets):
merged = tf.summary.merge_all()
summary = session.run(merged, feed_dict={self.enc_input: enc_input,
self.dec_input: dec_input,
self.targets: targets})
writer.add_summary(summary, self.global_step.eval())
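# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original file).
# It assumes the TensorFlow 1.x-era APIs used above and feeds random one-hot
# batches, only to show the expected shapes: [batch, time, vocab_size] for the
# encoder/decoder inputs and [batch, time] integer ids for the targets.
if __name__ == "__main__":
    import numpy as np
    vocab_size = 20
    batch_size, enc_len, dec_len = 4, 7, 5
    def random_one_hot(batch, time, depth):
        # Random token ids and their one-hot encoding, shape [batch, time, depth].
        ids = np.random.randint(0, depth, size=(batch, time))
        return np.eye(depth)[ids], ids
    enc_batch, _ = random_one_hot(batch_size, enc_len, vocab_size)
    dec_batch, _ = random_one_hot(batch_size, dec_len, vocab_size)
    _, target_batch = random_one_hot(batch_size, dec_len, vocab_size)
    model = Seq2Seq(vocab_size)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        _, loss = model.train(sess, enc_batch, dec_batch, target_batch)
        print('loss after one training step:', loss)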
|
junsooo/Fake_love
|
chatbot/model.py
|
Python
|
mit
| 4,248
|
from __future__ import unicode_literals
from django.apps import AppConfig
class ScannerEngineConfig(AppConfig):
name = 'scanner_engine'
|
Josowsky/Simple-Site-Monitor
|
scanner_engine/apps.py
|
Python
|
mit
| 143
|
place_time = {
"1" : "United Kingdom, London, MI6 / fri 28 august, 3:50 a.m.",
"2" : "London, agent Harry's House / fri 28 august, 3:55 a.m.",
"3" : "Ireland, Dublin, agent Patrick's House / fri 28 august, 9:45 a.m.",
"4" : "Ireland, Dublin, agent Patrick's House / fri 28 august, 11:30 a.m.",
"5" : "Ireland, Dublin, agent Patrick's House / fri 28 august, 11:40 a.m.",
"6" : "Ireland, Dublin, agent Patrick's House / fri 28 august, 11:45 a.m.",
"7" : "Ireland, Dublin, Murphy's Pub / fri 28 august, 1:00 p.m.",
"8" : "Ireland, Dublin, Murphy's Pub / fri 28 august, 1:10 p.m.",
"9" : "Ireland, Dublin, Murphy's Pub / fri 28 august, 1:20 p.m.",
"10" : "Ireland, Dublin, Murphy's Pub / fri 28 august, 3:30 p.m.",
"11" : "Ireland, Dublin, Murphy's Pub / fri 28 august, 4:15 p.m.",
"12" : "Ireland, Kildare, Kilkea Castle / fri 28 august, 6:30 p.m.",
"13" : "Ireland, Kildare, Kilkea Castle / fri 28 august, 8:30 p.m.",
"14" : "Ireland, Kildare, Kilkea Castle / fri 28 august, 8:35 p.m.",
"15" : "Ireland, Kildare, Kilkea Castle / fri 28 august, 11:55 p.m.",
"16" : "United Kingdom, London, MI6 / wed 3 september, 2:00 p.m."
}
text = {
"1" : "Агент Гарри получает задание немедленно отправляться на поиски своего коллеги.",
"2" : "Прибыв на место проишествия агент Гарри, увидел перевёрнутое вверх дном жилище своего коллеги... Проведя немало времени в поисках хоть какой-то улики, он начал отчаиваться, как вдруг заметил небольшой листок под диваном...",
"3" : "Прошло ещё немало времени, прежде чем агент Гарри наконец-то понял, что за шифр был использован в послание на найденном листке...",
"3_2" : "Исходный текст: ADCDEFGHIJKLMNOPQRSTUVWXYZ",
"3_3" : "Зашифрованный : ZYXWVUTSRQPONMLKJIHGFEDCBA",
"4" : "'Ask Murphy' что бы это значило... - подумал Гарри. Времени совсем мало, надо опросить соседей, может кто что видел. Времени хватит, что бы опросить только одного... Местные полицейские уже всех опросили, и дали следующий список:",
"5" : "Джинни, рассказала, что не могла спать прошлой ночью, так как старый Карл постоянно выл на луну. Она смотрела в окно, и видела около дома Патрика несколько сов, когда они пролетели на фоне полной луны... Больше она ничего рассказать не смогла, кроме воя она ничего не слышала. На вопрос знает ли она Murphy, сказала что это имя владельца паба неподалёку.",
"6" : "Агент Гарри заказал Guinness и спросил у владельца знает ли он Патрика. Владелец посмотрел на Гарри и спросил кодовое слово.",
"7" : "Владелец сказал, что недавно Патрик оставил ему конверт. Гарри взял конверт, в нём был лишь небольшой диск. Гарри открыл диск на своём ноутбуке, к сожалению, он был зашифрован. Пришлось ограничится письмом в MI6, и ждать ответа...",
"8" : "После нескольких пинт Guinness и часов ожидания, пришло сообщения от MI6. Оказалось, что часть данных, найденная на диске, связана со старым делом TOPS7897_89, в котором участвовало 6 агентов сов:",
"8_2" : "В ожидании дополнительной информации агент Гарри решил сузить круг подозреваемых.",
"9" : "Спустя ещё одну пинту Guinness и час ожидания, пришло новое сообщение, c местоположением всех агентов:",
"10" : "Теперь осталось трое. Надо выбрать из них главного подозреваемого.",
"11" : "Всё указывает на то, что за похищением Патрика стоит агент Пирс... Надо спешить, жизнь Патрика в опасности! Пирс пойдёт на всё, что бы узнать, где диск!",
"12" : "Агент Гарри не успел насладится красотой замка, в котором по информации MI6 находится агент Пирс, так как сразу был схвачен охраной. После выпитого сопротивлятся сил не было...",
"13" : "Очнулся он уже в темнице, через пару часов... Он был не один, рядом с ним сидел агент Патрик. Который был рад, что коллега накоец проснулся. Он рассказал, что был похищен ночью из-за того, что Пирс не смог найти у него дома диск... Узнав, что диск сейчас в безопасности, он предложил выбираться из плена. В темнице он нашёл потойной ход, скрываемый за стеной с замком-головоломкой, которую ему пока не удалось решить...",
"14" : "Загадку со стены, я перересовал на листок. - Сказал Патрик. - Необходимо придти от Start к Finish, собрав по пути все зелёные кружки, минуя серые клетки. Для этого в семи клетках надо правильно расположить черту:",
"15" : "Проследовав по тунелю агенты покинули замок, выйдя в лесу неподалёку. Над ними пролетели вертолёты с агентами MI6 и их Ирландскими коллегами. Задание выполнено!",
"16" : "Агент Пирс и его собщники были арестованы. Агенты Патрик и Гарри были награждены за храборость и отвагу в защите Соедиённого Королевства. Впереди их ждали новые дела и новые испытания на службе Её Величеству...",
}
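# Illustrative note (added; not part of the original quest data): entries "3_2"
# and "3_3" above describe an Atbash-style cipher, i.e. the alphabet is simply
# reversed. Assuming Python 3, a hypothetical decoder would be:
#     import string
#     ATBASH = str.maketrans(string.ascii_uppercase, string.ascii_uppercase[::-1])
#     "ZHP NFIKSB".translate(ATBASH)   # -> "ASK MURPHY" (this ciphertext is made up)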
witness = [
{
"value":"1",
"text":"Джон, люитель Guinness'a и телевизора, обычно спит прямо напротив него;"
},
{ "value":"2",
"text":"Джинни, часто страдает бессоницей и любит смотреть в окно;"
},
{ "value":"3",
"text":"Карл, плохо слышит и видит, но в полнолуние (было этой ночью), не спит и воет на луну;"
},
{ "value":"4",
"text":"Нэнси, любит поспать, любопынта, лишь когда речь заходит о местных распродажах;"
}
]
agents = [
{
"value":"1",
"text":"Диана"
},
{
"value":"2",
"text":"Чарльз"
},
{
"value":"3",
"text":"Пирс"
},
{
"value":"4",
"text":"Шарлотта"
},
{
"value":"5",
"text":"Майкл"
},
{
"value":"6",
"text":"Колин"
}
]
agents2 = [
{
"value":"1",
"text":"Диана"
},
{
"value":"2",
"text":"Пирс"
},
{
"value":"3",
"text":"Майкл"
}
]
mi6_msg = {
"1" : "СРОЧНО: Сегодня ночью из собственного дома в Дублине был похищен агент Патрик.",
"1_2": "был похищен агент Патрик.",
"2" : "ДОПОЛНИТЕЛЬНО: В настоящий момент агенты Чарльз и Колин находятся на задании в Северной Америки, агент Шарлотта погибла при исполнение долга год назад. Остальные агенты в данный момент не задействованы в операциях MI6. Ожидайте в ближайший час дополнительную информацию о местоположении перечисленных агентов."
}
mi6_msg2 = [
"ДОПОЛНИТЕЛЬНО: В настоящий момент агенты Чарльз и Колин",
"находятся на задании в Северной Америки, агент Шарлотта",
"погибла при исполнение долга год назад. Остальные агенты в",
"данный момент не задействованы в операциях MI6. Ожидайте в",
"ближайший час дополнительную информацию о местоположении",
"перечисленных агентов."
]
err_text = "надо подумать ещё..."
err_text2 = "надо дать какой-то ответ..."
|
L-yCoyote/web-quest
|
site/text_data.py
|
Python
|
mit
| 9,734
|
import datetime
import time
from random import choice
import discord
import lifesaver
from discord.ext import commands
from lifesaver.bot.storage import AsyncJSONStorage
from lifesaver.utils import (
ListPaginator,
clean_mentions,
human_delta,
pluralize,
truncate,
)
from .converters import Messages, QuoteName
from .utils import stringify_message
__all__ = ["Quoting"]
def embed_quote(quote) -> discord.Embed:
embed = discord.Embed()
embed.description = quote["content"]
embed.add_field(name="Jump", value=quote["jump_url"], inline=False)
creator = quote["created_by"]["tag"]
channel = quote["created_in"]["name"]
ago = human_delta(datetime.datetime.utcfromtimestamp(quote["created"]))
embed.set_footer(text=f"Created by {creator} in #{channel} {ago} ago")
return embed
class Quoting(lifesaver.Cog):
def __init__(self, bot, *args, **kwargs):
super().__init__(bot, *args, **kwargs)
self.storage = AsyncJSONStorage("quotes.json", loop=bot.loop)
def quotes(self, guild: discord.Guild):
return self.storage.get(str(guild.id), {})
@lifesaver.command(aliases=["rq"])
@commands.guild_only()
async def random_quote(self, ctx):
"""Shows a random quote."""
quotes = self.quotes(ctx.guild)
if not quotes:
await ctx.send(
"There are no quotes in this server. Create some with "
f"`{ctx.prefix}quote new`. For more information, see `{ctx.prefix}"
"help quote`."
)
return
(name, quote) = choice(list(quotes.items()))
embed = embed_quote(quote)
name = clean_mentions(ctx.channel, name)
await ctx.send(name, embed=embed)
@lifesaver.group(aliases=["q"], invoke_without_command=True)
@commands.guild_only()
async def quote(self, ctx, *, name: QuoteName(must_exist=True)):
"""Views a quote.
        Quotes essentially take a snapshot of multiple messages and store them
        in my database.
You can specify multiple message IDs to store:
d?quote new "my quote" 467753625024987136 467753572633673773 ...
Alternatively, you can specify a message ID then a number of messages
to store after that, like:
d?quote new "my quote" 467753625024987136+5
That would store message 467753625024987136 and the 5 messages after
that. You can also combine them if you would like to simultaneously
specify individual messages and groups of messages. Alternatively,
you can select the last 5 messages like so:
d?quote new "my quote" :-5
The :n or +n (called the "range") will grab up to 50 messages both ways.
Your quote's content has a length limit of 2048, Discord's embed
description limit. You will be prompted to confirm if your created
quote goes over this limit.
To read a quote, just specify its name, and no message IDs:
d?quote my quote
The number of embeds in any message (if any) and any attachment URLs
are preserved. Additionally, quotes contain a jump URL to jump to the
first message in the quote directly with your client.
If you want to create a quote without having the quote echo in chat,
prefix the quote name with "!":
d?quote !quote 467753625024987136+3
The bot will DM you the quote instead of echoing it in chat, and no
feedback will be provided in the channel. Keep in mind that the name of
the created quote will not have the "!".
Quotes contain the following data:
- All message content, all numbers of embeds, all attachment URLs
- Channel ID and name, first message ID, guild ID
- Creation timestamp
- Quote creator ID and username#discriminator
"""
quotes = self.quotes(ctx.guild)
quote = quotes.get(name)
embed = embed_quote(quote)
await ctx.send(embed=embed)
@quote.command(aliases=["new"])
@commands.guild_only()
async def create(
self, ctx, name: QuoteName(must_not_exist=True), *messages: Messages
):
"""Creates a quote.
See `d?help quote` for more information.
"""
quotes = self.quotes(ctx.guild)
silent = name.startswith("!")
if silent:
# Remove the !
name = name[1:]
# the converter can return multiple messages if a range is specified
quoted = []
for message in messages:
if isinstance(message, list):
quoted += message
else:
quoted.append(message)
strings = map(stringify_message, quoted)
quote_content = "\n".join(strings)
if len(quote_content) > 2048:
over_limit = pluralize(character=len(quote_content) - 2048)
if not await ctx.confirm(
"Quote is quite large...",
(
f"This quote is pretty big. ({over_limit} over limit.) "
"It will be truncated to 2048 characters. Continue?"
),
):
return
quote = quotes[name] = {
"content": truncate(quote_content, 2048),
"jump_url": quoted[0].jump_url,
"created": time.time(),
"created_by": {"id": ctx.author.id, "tag": str(ctx.author)},
"created_in": {"id": ctx.channel.id, "name": ctx.channel.name},
"guild": {"id": ctx.guild.id},
}
await self.storage.put(str(ctx.guild.id), quotes)
embed = embed_quote(quote)
await (ctx.author if silent else ctx).send(
f'Created quote "{name}".', embed=embed
)
@quote.command()
@commands.guild_only()
async def list(self, ctx):
"""Lists quotes on this server."""
quotes = self.quotes(ctx.guild)
if not quotes:
await ctx.send("No quotes exist for this server.")
return
tag_names = [clean_mentions(ctx.channel, name) for name in quotes.keys()]
paginator = ListPaginator(
tag_names,
ctx.author,
ctx.channel,
title="All quotes",
per_page=20,
bot=ctx.bot,
)
await paginator.create()
@quote.command()
@commands.guild_only()
@commands.has_permissions(manage_messages=True)
async def rename(
self,
ctx,
existing: QuoteName(must_exist=True),
new: QuoteName(must_not_exist=True),
):
"""Renames a quote."""
quotes = self.quotes(ctx.guild)
quotes[new] = quotes[existing]
del quotes[existing]
await self.storage.put(str(ctx.guild.id), quotes)
await ctx.send(f'Quote "{existing}" was renamed to "{new}".')
@quote.command()
@commands.guild_only()
@commands.has_permissions(manage_messages=True)
async def delete(self, ctx, *, quote: QuoteName(must_exist=True)):
"""Deletes a quote."""
quotes = self.quotes(ctx.guild)
del quotes[quote]
await self.storage.put(str(ctx.guild.id), quotes)
await ctx.ok()
|
slice/dogbot
|
dog/ext/quoting/cog.py
|
Python
|
mit
| 7,284
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationGatewaySku(Model):
"""SKU of an application gateway.
:param name: Name of an application gateway SKU. Possible values are:
'Standard_Small', 'Standard_Medium', 'Standard_Large', 'WAF_Medium', and
'WAF_Large'. Possible values include: 'Standard_Small', 'Standard_Medium',
'Standard_Large', 'WAF_Medium', 'WAF_Large'
:type name: str or :class:`ApplicationGatewaySkuName
<azure.mgmt.network.v2016_09_01.models.ApplicationGatewaySkuName>`
:param tier: Tier of an application gateway. Possible values are:
'Standard' and 'WAF'. Possible values include: 'Standard', 'WAF'
:type tier: str or :class:`ApplicationGatewayTier
<azure.mgmt.network.v2016_09_01.models.ApplicationGatewayTier>`
:param capacity: Capacity (instance count) of an application gateway.
:type capacity: int
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'capacity': {'key': 'capacity', 'type': 'int'},
}
def __init__(self, name=None, tier=None, capacity=None):
self.name = name
self.tier = tier
self.capacity = capacity
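# ---------------------------------------------------------------------------
# Illustrative usage (added; not part of the generated SDK code). The values
# shown are examples of the documented possibilities, not recommendations:
#     sku = ApplicationGatewaySku(name='Standard_Small', tier='Standard', capacity=2)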
|
SUSE/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/application_gateway_sku.py
|
Python
|
mit
| 1,696
|
#-*- coding: utf-8 -*-
"""
@author: Rinze de Laat
Copyright © 2013 Rinze de Laat, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms
of the GNU General Public License version 2 as published by the Free Software
Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
"""
from __future__ import division
from functools import partial
import gc
import logging
from odemis import util
from odemis.util import limit_invocation, TimeoutError
from odemis.util import timeout
import time
import unittest
import weakref
logging.getLogger().setLevel(logging.DEBUG)
class TestLimitInvocation(unittest.TestCase):
def test_not_too_often(self):
self.count = 0
now = time.time()
end = now + 1.1 # a bit more than 1 s
while time.time() < end:
self.count_max_1s()
time.sleep(0.01)
self.assertLessEqual(self.count, 2, "method was called more than twice in 1 second: %d" % self.count)
time.sleep(2) # wait for the last potential calls to happen
self.assertLessEqual(self.count, 3, "method was called more than three times in 2 seconds: %d" % self.count)
@limit_invocation(1)
def count_max_1s(self):
# never called more than once per second
self.count += 1
time.sleep(0.2)
def test_gc(self):
u = Useless()
wku = weakref.ref(u)
now = time.time()
end = now + 1.1 # a bit more than 1 s
while time.time() < end:
u.doit(time.time(), b=3)
time.sleep(0.01)
# Check the object u has nothing preventing it from being dereferenced
del u
time.sleep(1) # wait for the last potential calls to happen
self.assertIsNone(wku())
class Useless(object):
"""
Independent class for testing limit_invocation decorator
"""
def __del__(self):
print "Useless %r is gone" % self
@limit_invocation(0.1)
def doit(self, a, b=None):
print "doing it %s, %s" % (a, b)
class TestTimeout(unittest.TestCase):
@timeout(1.2)
def test_notimeout(self):
time.sleep(1)
def test_timeout(self):
self.assertRaises(TimeoutError, self.toolong)
@timeout(0.5)
def toolong(self):
# will always timeout
time.sleep(1)
class SortedAccordingTestCase(unittest.TestCase):
def test_simple(self):
in_exp = ((([1, 2, 3], [3, 2, 1]), [3, 2, 1]),
(([1, 2, 3], [4, 2]), [2, 1, 3]),
(([], [4, 2]), []),
((["b", "a"], []), ["b", "a"]),
)
for i, eo in in_exp:
o = util.sorted_according_to(*i)
self.assertEqual(o, eo, "Failed to get correct output for %s" % (i,))
class AlmostEqualTestCase(unittest.TestCase):
def test_simple(self):
in_exp = {(0., 0): True,
(-5, -5.): True,
(1., 1. - 1e-9): True,
(1., 1. - 1e-3): False,
(1., 1. + 1e-3): False,
(-5e-8, -5e-8 + 1e-19): True,
(5e18, 5e18 + 1): True,
}
for i, eo in in_exp.items():
o = util.almost_equal(*i)
self.assertEqual(o, eo, "Failed to get correct output for %s" % (i,))
# Bounding box clipping test data generation
def tp(trans, ps):
""" Translate points ps using trans """
r = []
i = 0
for p in ps:
r.append(p + trans[i])
i = (i + 1) % len(trans)
return tuple(r)
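# Illustrative example (added): tp() cycles through the translation components,
# e.g. tp((1, 0), (0, 0, 2, 2)) -> (1, 0, 3, 2), i.e. only the x coordinates shift.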
# First we define a few bounding boxes, at different locations
bounding_boxes = [(-2, -2, 0, 0),
(-1, -1, 1, 1),
(0, 0, 2, 2),
(2, 2, 4, 4)]
# From this, we generate boxes that are situated all around these
# bounding boxes, but that do not touch or overlap them.
def relative_boxes(bb):
t_left = [(-3, i) for i in range(-3, 4)]
to_the_left = [tp(t, bb) for t in t_left]
t_top = [(i, -3) for i in range(-3, 4)]
to_the_top = [tp(t, bb) for t in t_top]
t_right = [(3, i) for i in range(-3, 4)]
to_the_right = [tp(t, bb) for t in t_right]
t_bottom = [(i, 3) for i in range(-3, 4)]
to_the_bottom = [tp(t, bb) for t in t_bottom]
outside_boxes = to_the_left + to_the_top + to_the_right + to_the_bottom
# Selection boxes that touch the outside of the bounding box
touch_left = [tp((1, 0), b) for b in to_the_left[1:-1]]
touch_top = [tp((0, 1), b) for b in to_the_top[1:-1]]
touch_right = [tp((-1, 0), b) for b in to_the_right[1:-1]]
touch_bottom = [tp((0, -1), b) for b in to_the_bottom[1:-1]]
touching_boxes = touch_left + touch_top + touch_right + touch_bottom
# Partial overlapping boxes
overlap_left = [tp((1, 0), b) for b in touch_left[1:-1]]
overlap_top = [tp((0, 1), b) for b in touch_top[1:-1]]
overlap_right = [tp((-1, 0), b) for b in touch_right[1:-1]]
overlap_bottom = [tp((0, -1), b) for b in touch_bottom[1:-1]]
overlap_boxes = overlap_left + overlap_top + overlap_right + overlap_bottom
return outside_boxes, touching_boxes, overlap_boxes
class CanvasTestCase(unittest.TestCase):
def test_clipping(self):
tmp = "{}: {} - {} -> {}"
for bb in bounding_boxes:
outside, touching, overlap = relative_boxes(bb)
for b in outside:
r = util.rect_intersect(b, bb)
msg = tmp.format("outside", b, bb, r)
self.assertIsNone(r, msg)
for b in touching:
r = util.rect_intersect(b, bb)
msg = tmp.format("touching", b, bb, r)
self.assertIsNone(r, msg)
for b in overlap:
r = util.rect_intersect(b, bb)
msg = tmp.format("overlap", b, bb, r)
self.assertIsNotNone(r, msg)
# 'Manual' checks
if bb == (-1, -1, 1, 1):
if b[:2] == (-2, -2):
self.assertEqual(r, (-1, -1, 0, 0), msg)
elif b[:2] == (0, -1):
self.assertEqual(r, (0, -1, 1, 1), msg)
elif b[:2] == (0, 0):
self.assertEqual(r, (0, 0, 1, 1), msg)
# full and exact overlap
b = bb
r = util.rect_intersect(b, bb)
self.assertEqual(r, bb)
# inner overlap
b = (bb[0] + 1, bb[1] + 1, bb[2], bb[3])
r = util.rect_intersect(b, bb)
self.assertEqual(r, b)
# overflowing overlap
            b = (bb[0] - 1, bb[1] - 1, bb[2] + 1, bb[3] + 1)
r = util.rect_intersect(b, bb)
self.assertEqual(r, bb)
def test_line_clipping(self):
bounding_box = (0, 4, 4, 0)
clip = partial(util.clip_line, *bounding_box)
# Test lines within bounding box, i.e. no clipping should occur
internal = [
(0, 0, 0, 0),
(2, 2, 2, 2),
(0, 0, 4, 4),
(4, 4, 0, 0),
(0, 2, 2, 0),
(2, 0, 0, 2),
]
for line in internal:
self.assertEqual(line, clip(*line))
# Test clipping for lines originating in the center of the bounding box and ending outside
# of it.
inner_to_outer = [
((2, 2, 2, 6), (2, 2, 2, 4)),
((2, 2, 6, 2), (2, 2, 4, 2)),
((2, 2, 2, -2), (2, 2, 2, 0)),
((2, 2, -2, 2), (2, 2, 0, 2)),
((2, 2, 6, -2), (2, 2, 4, 0)),
((2, 2, -2, -2), (2, 2, 0, 0)),
((2, 2, -2, -2), (2, 2, 0, 0)),
]
for orig, clipped in inner_to_outer:
self.assertEqual(clipped, clip(*orig))
outer_to_inner = [
((2, 6, 2, 2), (2, 4, 2, 2)),
((6, 2, 2, 2), (4, 2, 2, 2)),
((2, -2, 2, 2), (2, 0, 2, 2)),
((-2, 2, 2, 2), (0, 2, 2, 2)),
((6, -2, 2, 2), (4, 0, 2, 2)),
((-2, -2, 2, 2), (0, 0, 2, 2)),
((-2, -2, 2, 2), (0, 0, 2, 2)),
]
for orig, clipped in outer_to_inner:
self.assertEqual(clipped, clip(*orig))
if __name__ == "__main__":
unittest.main()
|
gstiebler/odemis
|
src/odemis/util/test/util_test.py
|
Python
|
gpl-2.0
| 8,569
|
from django.test import TestCase
from django.test.utils import override_settings
from rest_framework import status
from rest_framework.test import APIClient
from axes.signals import user_locked_out
import json
import time
from family_tree.models.family import Family
from family_tree.models.person import Person
from custom_user.models import User
@override_settings(SECURE_SSL_REDIRECT=False, AXES_BEHIND_REVERSE_PROXY=False)
class JWTAuthTest(TestCase):
'''
Tests JWT auth
'''
def setUp(self):
self.family = Family()
self.family.save()
self.user = User.objects.create_user(email='gracehopper@example.com',
password='compiler',
name='Grace Hopper',
family_id = self.family.id)
self.person = Person(name='Grace Hopper',
gender='F',
email='gracehopper@example.com',
family_id=self.family.id,
language='en',
user_id=self.user.id)
self.person.save()
def test_jwt_auth_and_refresh_token_created_on_correct_auth_details(self):
client = APIClient(HTTP_X_REAL_IP='127.0.0.1')
auth_details = {
'email': 'gracehopper@example.com',
'password': 'compiler'
}
response = client.post('/api/auth/obtain_token/', auth_details, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
access_token = json.loads(response.content)["access"]
refresh_token = json.loads(response.content)["refresh"]
auth_token = {
'refresh': refresh_token
}
# Sleep to ensure new token is different
time.sleep(1)
refresh_response = client.post('/api/auth/refresh_token/', auth_token, format='json')
refresh_token = json.loads(refresh_response.content)["access"]
self.assertEqual(refresh_response.status_code, status.HTTP_200_OK)
self.assertNotEqual(refresh_token, access_token)
# Check verify token
        new_auth_token = {
'token': refresh_token
}
verify_new_token_response = client.post('/api/auth/verify_token/', new_auth_token, format='json')
self.assertEqual(verify_new_token_response.status_code, status.HTTP_200_OK)
# Check ip not locked
locked_response = client.get('/api/auth/is_locked/', format='json')
self.assertEqual(b'false', locked_response.content)
self.assertEqual(locked_response.status_code, status.HTTP_200_OK)
def test_jwt_fails_on_auth_incorrect_password(self):
client = APIClient(HTTP_X_REAL_IP='127.0.0.1')
payload = {
'email': 'gracehopper@example.com',
'password': 'COBOL'
}
response = client.post('/api/auth/obtain_token/', payload, format='json')
self.assertNotEqual(response.status_code, status.HTTP_200_OK)
def test_verify_fails_on_invalid_token(self):
client = APIClient(HTTP_X_REAL_IP='127.0.0.1')
        invalid_auth_token = {
'token': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWUsImp0aSI6IjM1ODU0ODc3LWQyZjQtNDIxZS04ZDI5LWY3YTgxNTk3NzdhYyIsImlhdCI6MTU1NDM4NzU4NCwiZXhwIjoxNTU0MzkxMTg0fQ.yIr0TMbalatx7alU1TMGIxxaelqquMJfz3m4H7AA9v4'
}
verify_old_token_response = client.post('/api/auth/verify_token/', invalid_auth_token, format='json')
self.assertNotEqual(verify_old_token_response.status_code, status.HTTP_200_OK)
def test_account_locks_out_on_multiple_invalid_login_attempts(self):
user = User.objects.create_user(email='adelegoldberg@example.com',
password='smalltalk',
name='Adele Goldberg',
family_id = self.family.id)
person = Person(name='Adele Goldberg',
gender='F',
email='adelegoldberg@example.com',
family_id=self.family.id,
language='en',
user_id=user.id)
person.save()
# 127.0.0.1 is whitelisted
client = APIClient(HTTP_X_REAL_IP='127.0.0.2')
wrong_auth_details = {
'email': 'adelegoldberg@example.com',
'password': 'compiler'
}
for x in range(0, 6):
response = client.post('/api/auth/obtain_token/', wrong_auth_details, format='json')
correct_auth_details = {
'email': 'adelegoldberg@example.com',
'password': 'smalltalk'
}
final_response = client.post('/api/auth/obtain_token/', correct_auth_details, format='json')
self.assertNotEqual(final_response.status_code, status.HTTP_200_OK)
# Check ip locked
locked_response = client.get('/api/auth/is_locked/', format='json')
self.assertNotEqual(b'false', locked_response.content)
def test_api_docs_loads(self):
client = APIClient(HTTP_X_REAL_IP='127.0.0.1')
client.force_authenticate(user=self.user)
response = client.get('/api/docs/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_api_schema_loads(self):
client = APIClient(HTTP_X_REAL_IP='127.0.0.1')
client.force_authenticate(user=self.user)
response = client.get('/api/schema/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
JustinWingChungHui/MyFamilyRoot
|
auth_api/tests.py
|
Python
|
gpl-2.0
| 5,613
|
# -*- coding: iso-8859-1 -*-
# Author: Andrew Jewett (jewett.aij at g mail)
# http://www.chem.ucsb.edu/~sheagroup
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2012, Regents of the University of California
# All rights reserved.
"""A lexical analyzer class for simple shell-like syntaxes.
This version has been modified slightly to work better with unicode.
It was forked from the version of shlex that ships with python 3.2.2.
A few minor features and functions have been added. """
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
# push_source() and pop_source() made explicit by ESR, January 2001.
# Posix compliance, split(), string arguments, and
# iterator interface by Gustavo Niemeyer, April 2003.
# ("wordterminators" (unicode support) hack by Andrew Jewett September 2011)
import os.path
import sys
from collections import deque
import re, fnmatch
import string
#import gc
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
__all__ = ["TtreeShlex",
"split",
"LineLex",
"SplitQuotedString",
"EscCharStrToChar",
"SafelyEncodeString",
"RemoveOuterQuotes",
"MaxLenStr",
"HasWildCard",
#"IsRegex",
"InputError",
"ErrorLeader",
"SrcLoc",
"OSrcLoc",
"TextBlock",
"VarRef",
"VarNPtr",
"VarBinding",
"SplitTemplate",
"SplitTemplateMulti",
"TableFromTemplate",
"ExtractCatName",
#"_TableFromTemplate",
#"_DeleteLineFromTemplate",
"DeleteLinesWithBadVars",
"TemplateLexer"]
class TtreeShlex(object):
""" A lexical analyzer class for simple shell-like syntaxes.
TtreeShlex is a backwards-compatible version of python's standard shlex
module. It has the additional member: "self.wordterminators", which
overrides the "self.wordchars" member. This enables better handling of
unicode characters by allowing a much larger variety of characters to
appear in words or tokens parsed by TtreeShlex.
"""
custom_path = None
def __init__(self,
instream=None,
infile=None,
custom_include_path=None,
posix=False):
if isinstance(instream, str):
instream = StringIO(instream)
if instream is not None:
self.instream = instream
self.infile = infile
else:
self.instream = sys.stdin
self.infile = None
self.posix = posix
if posix:
self.eof = None
else:
self.eof = ''
self.commenters = '#'
self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
if self.posix:
self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
self.wordterminators = set([]) #WORDTERMINATORS
self.prev_space_terminator = '' #WORDTERMINATORS
self.whitespace = ' \t\r\f\n'
self.whitespace_split = False
self.quotes = '\'"'
self.escape = '\\'
self.escapedquotes = '"'
self.state = ' '
self.pushback = deque()
self.lineno = 1
self.debug = 0
self.token = ''
self.filestack = deque()
# self.source_triggers
# are tokens which allow the seamless insertion of other
# files into the file being read.
self.source_triggers=set(['source'])
self.source_triggers_x=set([])
#Note: self.source_triggers_x
# This is a subset of self.source_triggers.
# In this case file inclusion is exclusive.
# In other words, if one of these tokens
# is encountered, the file is only included
# if it has not been included already.
self.source_files_restricted = set([])
self.include_path = []
if TtreeShlex.custom_path:
include_path_list = TtreeShlex.custom_path.split(':')
self.include_path += [d for d in include_path_list if len(d)>0]
if 'TTREE_PATH' in os.environ:
include_path_list = os.environ['TTREE_PATH'].split(':')
self.include_path += [d for d in include_path_list if len(d)>0]
if self.debug:
print('TtreeShlex: reading from %s, line %d' \
% (self.instream, self.lineno))
self.end_encountered = False
@staticmethod #WORDTERMINATORS
def _belongs_to(char, include_chars, exclude_chars): #WORDTERMINATORS
if ((not exclude_chars) or (len(exclude_chars)==0)): #WORDTERMINATORS
return char in include_chars #WORDTERMINATORS
else: #WORDTERMINATORS
return char not in exclude_chars #WORDTERMINATORS
def push_raw_text(self, text):
"""Push a block of text onto the stack popped by the ReadLine() method.
(If multiple lines are present in the text, (which is determined by
self.line_terminators), then the text is split into multiple lines
and each one of them is pushed onto this stack individually.
The "self.lineno" counter is also adjusted, depending on the number
of newline characters in "line".
Do not strip off the newline, or other line terminators
at the end of the text block before using push_raw_text()!
"""
if self.debug >= 1:
print("TtreeShlex: pushing token " + repr(text))
for c in reversed(text): #WORDTERMINATORS
self.pushback.appendleft(c) #WORDTERMINATORS
if c == '\n': #WORDTERMINATORS
self.lineno -= 1 #WORDTERMINATORS
if len(text) > 0: #WORDTERMINATORS
self.end_encountered = False #WORDTERMINATORS
def push_token(self, text):
"Push a token onto the stack popped by the get_token method"
self.push_raw_text(text+self.prev_space_terminator)
def push_source(self, newstream, newfile=None):
"Push an input source onto the lexer's input source stack."
if isinstance(newstream, str):
newstream = StringIO(newstream)
self.filestack.appendleft((self.infile, self.instream, self.lineno))
self.infile = newfile
self.instream = newstream
self.lineno = 1
if self.debug:
if newfile is not None:
print('TtreeShlex: pushing to file %s' % (self.infile,))
else:
print('TtreeShlex: pushing to stream %s' % (self.instream,))
def pop_source(self):
"Pop the input source stack."
self.instream.close()
(self.infile, self.instream, self.lineno) = self.filestack.popleft()
if self.debug:
print('TtreeShlex: popping to %s, line %d' \
% (self.instream, self.lineno))
self.state = ' '
def get_token(self):
"Get a token from the input stream (or from stack if it's nonempty)"
#### #CHANGING: self.pushback is now a stack of characters, not tokens #WORDTERMINATORS
#### if self.pushback: #WORDTERMINATORS
#### tok = self.pushback.popleft() #WORDTERMINATORS
#### if self.debug >= 1: #WORDTERMINATORS
#### print("TtreeShlex: popping token " + repr(tok)) #WORDTERMINATORS
#### return tok #WORDTERMINATORS
#### No pushback. Get a token. #WORDTERMINATORS
raw = self.read_token()
# Handle inclusions
if self.source_triggers is not None:
while raw in self.source_triggers:
fname=self.read_token()
spec = self.sourcehook(fname)
if spec:
(newfile, newstream) = spec
if ((raw not in self.source_triggers_x) or
(newfile not in self.source_files_restricted)):
self.push_source(newstream, newfile)
if raw in self.source_triggers_x:
self.source_files_restricted.add(newfile)
else:
if self.debug >= 0:
sys.stderr.write('\ndebug warning: duplicate attempt to import file:\n \"'+newfile+'\"\n')
raw = self.get_token()
# Maybe we got EOF instead?
while raw == self.eof:
if not self.filestack:
return self.eof
else:
self.pop_source()
raw = self.get_token()
# Neither inclusion nor EOF
if self.debug >= 1:
if raw != self.eof:
print("TtreeShlex: token=" + repr(raw))
else:
print("TtreeShlex: token=EOF")
if raw == self.eof: #WORDTERMINATORS
self.end_encountered = True #WORDTERMINATORS
return raw
def read_char(self):
if self.pushback: #WORDTERMINATORS
nextchar = self.pushback.popleft() #WORDTERMINATORS
assert((type(nextchar) is str) and (len(nextchar)==1)) #WORDTERMINATORS
else: #WORDTERMINATORS
nextchar = self.instream.read(1) #WORDTERMINATORS
return nextchar
def read_token(self):
self.prev_space_terminator = '' #WORDTERMINATORS
quoted = False
escapedstate = ' '
while True:
#### self.pushback is now a stack of characters, not tokens #WORDTERMINATORS
nextchar = self.read_char()
if nextchar == '\n':
self.lineno = self.lineno + 1
if self.debug >= 3:
print("TtreeShlex: in state", repr(self.state), \
"I see character:", repr(nextchar))
if self.state is None:
self.token = '' # past end of file
break
elif self.state == ' ':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print("TtreeShlex: I see whitespace in whitespace state")
if self.token or (self.posix and quoted):
# Keep track of which whitespace
# character terminated the token.
self.prev_space_terminator = nextchar #WORDTERMINATORS
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif TtreeShlex._belongs_to(nextchar, #WORDTERMINATORS
self.wordchars, #WORDTERMINATORS
self.wordterminators):#WORDTERMINATORS
self.token = nextchar
self.state = 'a'
elif nextchar in self.quotes:
if not self.posix:
self.token = nextchar
self.state = nextchar
elif self.whitespace_split:
self.token = nextchar
self.state = 'a'
else:
self.token = nextchar
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.state in self.quotes:
quoted = True
if not nextchar: # end of file
if self.debug >= 2:
print("TtreeShlex: I see EOF in quotes state")
# XXX what error should be raised here?
raise ValueError("Error at or before "+self.error_leader()+"\n"
" No closing quotation.")
if nextchar == self.state:
if not self.posix:
self.token = self.token + nextchar
self.state = ' '
break
else:
self.state = 'a'
elif self.posix and nextchar in self.escape and \
self.state in self.escapedquotes:
escapedstate = self.state
self.state = nextchar
else:
self.token = self.token + nextchar
elif self.state in self.escape:
if not nextchar: # end of file
if self.debug >= 2:
print("TtreeShlex: I see EOF in escape state")
# XXX what error should be raised here?
raise ValueError("No escaped character")
# In posix shells, only the quote itself or the escape
# character may be escaped within quotes.
if escapedstate in self.quotes and \
nextchar != self.state and nextchar != escapedstate:
self.token = self.token + self.state
self.token = self.token + nextchar
self.state = escapedstate
elif self.state == 'a':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print("TtreeShlex: I see whitespace in word state")
self.state = ' '
if self.token or (self.posix and quoted):
# Keep track of which whitespace
# character terminated the token.
self.prev_space_terminator = nextchar #WORDTERMINATORS
break # emit current token
else:
continue
elif nextchar in self.commenters:
comment_contents = self.instream.readline()
self.lineno = self.lineno + 1
if self.posix:
self.state = ' '
if self.token or (self.posix and quoted):
# Keep track of which character(s) terminated
# the token (including whitespace and comments).
self.prev_space_terminator = nextchar + comment_contents #WORDTERMINATORS
break # emit current token
else:
continue
elif self.posix and nextchar in self.quotes:
self.state = nextchar
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif (TtreeShlex._belongs_to(nextchar, #WORDTERMINATORS
self.wordchars, #WORDTERMINATORS
self.wordterminators)#WORDTERMINATORS
or (nextchar in self.quotes) #WORDTERMINATORS
or (self.whitespace_split)): #WORDTERMINATORS
self.token = self.token + nextchar
else:
self.pushback.appendleft(nextchar)
if self.debug >= 2:
print("TtreeShlex: I see punctuation in word state")
self.state = ' '
if self.token:
break # emit current token
else:
continue
result = self.token
self.token = ''
if self.posix and not quoted and result == '':
result = None
if self.debug > 1:
if result:
print("TtreeShlex: raw token=" + repr(result))
else:
print("TtreeShlex: raw token=EOF")
return result
def sourcehook(self, newfile):
"Hook called on a filename to be sourced."
newfile = RemoveOuterQuotes(newfile)
# This implements cpp-like semantics for relative-path inclusion.
if isinstance(self.infile, str) and not os.path.isabs(newfile):
newfile_full = os.path.join(os.path.dirname(self.infile), newfile)
try:
f = open(newfile_full, "r")
except IOError:
# If not found,
err = True
# ...then check to see if the file is in one of the
# directories in the self.include_path list.
for d in self.include_path:
newfile_full = os.path.join(d, newfile)
try:
f = open(newfile_full, "r")
err = False
break
except IOError:
err=True
if err:
raise InputError('Error at '+self.error_leader()+'\n'
' unable to open file \"'+newfile+'\"\n'
' for reading.\n')
return (newfile, f)
def error_leader(self, infile=None, lineno=None):
"Emit a C-compiler-like, Emacs-friendly error-message leader."
if infile is None:
infile = self.infile
if lineno is None:
lineno = self.lineno
return "\"%s\", line %d: " % (infile, lineno)
def __iter__(self):
return self
def __next__(self):
token = self.get_token()
if token == self.eof:
raise StopIteration
return token
def __bool__(self):
return not self.end_encountered
# For compatibility with python 2.x, I must also define:
def __nonzero__(self):
return self.__bool__()
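# Illustrative note (added): the main extension over plain shlex is the
# "wordterminators" member; when it is non-empty it overrides "wordchars",
# so any character NOT listed in it may appear inside a token (which is what
# makes unicode identifiers easy to handle). A hypothetical setup:
#     lexer = TtreeShlex('mol = new Molécule(1.0)')
#     lexer.wordterminators = set(' \t\r\f\n()=')
#     tokens = list(lexer)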
# The split() function was originally from shlex
# It is included for backwards compatibility.
def split(s, comments=False, posix=True):
lex = TtreeShlex(s, posix=posix)
lex.whitespace_split = True
if not comments:
lex.commenters = ''
return list(lex)
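# Illustrative example (added): split() mirrors shlex.split(); for instance
#     split('install "foo bar.txt"')  ->  ['install', 'foo bar.txt']
# and with comments=True, any text after a '#' would be ignored.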
##################### NEW ADDITIONS (may be removed later) #################
#"""
# -- linelex.py --
#linelex.py defines the LineLex class, which inherits from, and further
#augments the capabilities of TtreeShlex by making it easier to parse
#individual lines one at a time. (The original shlex's "source" inclusion
#ability still works when reading entire lines, and lines are still counted.)
#
#"""
#import sys
class InputError(Exception):
""" A generic exception object containing a string for error reporting.
(Raising this exception implies that the caller has provided
a faulty input file or argument.)
"""
def __init__(self, err_msg):
self.err_msg = err_msg
def __str__(self):
return self.err_msg
def __repr__(self):
return str(self)
def ErrorLeader(infile, lineno):
return '\"'+infile+'\", line '+str(lineno)
class SrcLoc(object):
""" SrcLoc is essentially nothing more than a 2-tuple containing the name
of a file (str) and a particular line number inside that file (an integer).
"""
def __init__(self, infile='', lineno=-1):
self.infile = infile
self.lineno = lineno
def SplitQuotedString(string,
quotes='\'\"',
delimiters=' \t\r\f\n',
escape='\\',
comment_char='#'):
tokens = []
token = ''
reading_token = True
escaped_state = False
quote_state = None
for c in string:
if (c in comment_char) and (not escaped_state) and (quote_state==None):
tokens.append(token)
return tokens
elif (c in delimiters) and (not escaped_state) and (quote_state==None):
if reading_token:
tokens.append(token)
token = ''
reading_token = False
elif c in escape:
if escaped_state:
token += c
reading_token = True
escaped_state = False
else:
escaped_state = True
# and leave c (the '\' character) out of token
elif (c in quotes) and (not escaped_state):
if (quote_state != None):
if (c == quote_state):
quote_state = None
else:
quote_state = c
token += c
reading_token = True
else:
if (c == 'n') and (escaped_state == True):
c = '\n'
elif (c == 't') and (escaped_state == True):
c = '\t'
elif (c == 'r') and (escaped_state == True):
c = '\r'
elif (c == 'f') and (escaped_state == True):
c = '\f'
token += c
reading_token = True
escaped_state = False
if len(string) > 0:
tokens.append(token)
return tokens
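# Illustrative example (added):
#     SplitQuotedString('mol = new "My Molecule"')  ->  ['mol', '=', 'new', '"My Molecule"']
# (unlike shlex-style splitting, the surrounding quotes are kept in the token).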
def EscCharStrToChar(s_in, escape='\\'):
"""
EscCharStrToChar() replaces any escape sequences
in a string with their 1-character equivalents.
"""
assert(len(escape) > 0)
out_lstr = []
escaped_state = False
for c in s_in:
if escaped_state:
if (c == 'n'):
out_lstr.append('\n')
elif (c == 't'):
out_lstr.append('\t')
elif (c == 'r'):
out_lstr.append('\r')
elif (c == 'f'):
out_lstr.append('\f')
elif (c == '\''):
out_lstr.append('\'')
elif (c == '\"'):
out_lstr.append('\"')
elif c in escape:
out_lstr.append(c)
else:
out_lstr.append(escape+c) # <- keep both characters
escaped_state = False
else:
if c in escape:
escaped_state = True
else:
out_lstr.append(c)
return ''.join(out_lstr)
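# Illustrative example (added): EscCharStrToChar(r'col1\tcol2') returns the
# string 'col1<TAB>col2', i.e. the two-character sequence '\' 't' is collapsed
# into a single tab character.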
def SafelyEncodeString(in_str,
quotes='\'\"',
delimiters=' \t\r\f\n',
escape='\\',
comment_char='#'):
"""
SafelyEncodeString(in_str) scans through the input string (in_str),
    and returns a new string in which problematic characters
(like newlines, tabs, quotes, etc), are replaced by their two-character
backslashed equivalents (like '\n', '\t', '\'', '\"', etc).
The escape character is the backslash by default, but it too can be
overridden to create custom escape sequences
    (but this does not affect the encoding for characters like '\n', '\t').
"""
assert(len(escape) > 0)
out_lstr = []
use_outer_quotes = False
for c in in_str:
if (c == '\n'):
c = '\\n'
elif (c == '\t'):
c = '\\t'
elif (c == '\r'):
c = '\\r'
elif (c == '\f'):
c = '\\f'
elif c in quotes:
c = escape[0]+c
elif c in escape:
c = c+c
elif c in delimiters:
use_outer_quotes = True
# hmm... that's all that comes to mind. Did I leave anything out?
out_lstr.append(c)
if use_outer_quotes:
out_lstr = ['\"'] + out_lstr + ['\"']
return ''.join(out_lstr)
def RemoveOuterQuotes(text, quotes='\"\''):
if ((len(text)>=2) and (text[0] in quotes) and (text[-1]==text[0])):
return text[1:-1]
else:
return text
def MaxLenStr(s1, s2):
if len(s2) > len(s1):
return s2
else:
return s1
#def IsRegex(pat):
# """
# Check to see if string (pat) is bracketed by slashes.
#
# """
# return (len(pat)>=2) and (pat[0]=='/') and (pat[-1] == '/')
def HasWildCard(pat):
"""
Returns true if a string (pat) contains a '*' or '?' character.
"""
return (pat.find('*') != -1) or (pat.find('?') != -1)
#def HasWildCard(pat):
# """
# Returns true if a string (pat) contains a non-backslash-protected
# * or ? character.
#
# """
# N=len(pat)
# i=0
# while i < N:
# i = pat.find('*', i, N)
# if i == -1:
# break
# elif (i==0) or (pat[i-1] != '\\'):
# return True
# i += 1
# i=0
# while i < N:
# i = pat.find('?', i, N)
# if i == -1:
# break
# elif (i==0) or (pat[i-1] != '\\'):
# return True
# i += 1
# return False
def MatchesPattern(s, pattern):
if type(pattern) is str:
#old code:
#if ((len(s) > 1) and (s[0] == '/') and (s[-1] == '/'):
# re_string = p[1:-1] # strip off the slashes '/' and '/'
# if not re.search(re_string, s):
# return False
#new code:
# uses precompiled regular expressions (See "pattern.search" below)
if HasWildCard(pattern):
if not fnmatch.fnmatchcase(s, pattern):
return False
elif s != pattern:
return False
else:
#assert(type(p) is _sre.SRE_Match)
# I assume pattern = re.compile(some_reg_expr)
if not pattern.search(s):
return False
return True
def MatchesAll(multi_string, pattern):
assert(len(multi_string) == len(pattern))
for i in range(0, len(pattern)):
if not MatchesPattern(multi_string[i], pattern[i]):
return False
return True
class LineLex(TtreeShlex):
""" This class extends the TtreeShlex module (a slightly modified
version of the python 3.2.2 version of shlex). LineLex has the
ability to read one line at a time (in addition to one token at a time).
(Many files and scripts must be parsed one line at a time instead of one
token at a time. In these cases, the whitespace position also matters.)
Arguably, this class might not be necessary.
I could get rid of this class completely. That would be nice. To do that
we would need to augment and generalize shlex's get_token() member function
to make it read lines, not just tokens. Of course, you can always
change the wordchars (or wordterminators). Even so, there are two other
difficulties using the current version of shlex.get_token() to read lines:
1) File inclusion happen whenever the beginning of a line/token matches one
of the "source_triggers" (not the whole line as required by get_token()).
2) Lines ending in a special character (by default the backslash character)
continue on to the next line.
This code seems to work on our test files, but I'm sure there are bugs.
Andrew 2012-3-25
"""
def __init__(self,
instream=None,
infile=None,
posix=False):
TtreeShlex.__init__(self, instream, infile, posix)
self.line_terminators = '\n'
self.line_extend_chars = '\\'
self.skip_comments_during_readline = True
def _StripComments(self, line):
if self.skip_comments_during_readline:
for i in range(0, len(line)):
if ((line[i] in self.commenters) and
((i==0) or (line[i-1] not in self.escape))):
return line[:i]
return line
def _ReadLine(self,
recur_level=0):
"""
This function retrieves a block of text, halting at a
terminal character. Escape sequences are respected.
The self.lineno (newline counter) is also maintained.
The main difference between Readline and get_token()
is the way they handle the "self.source_triggers" member.
Both Readline() and get_token() insert text from other files when they
encounter a string in "self.source_triggers" in the text they read.
However ReadLine() ONLY inserts text from other files if the token which
matches with self.source_triggers appears at the beginning of the line.
get_token() inserts text only if lex.source matches the entire token.
comment-to-self:
At some point, once I'm sure this code is working, I should replace
shlex.get_token() with the code from ReadLine() which is more general.
It would be nice to get rid of "class LineLex" entirely. ReadLine()
is the only new feature that LineLex which was lacking in shlex.
To do this I would need to add a couple optional arguments to
"get_token()", allowing it to mimic ReadLine(), such as:
"override_wordterms" argument (which we can pass a '\n'), and
"token_extender" argument (like '\' for extending lines)
"""
first_token=''
line = ''
escaped_state = False
found_space = False
while True:
nextchar = self.read_char()
#sys.stderr.write('nextchar=\"'+nextchar+'\"\n')
while nextchar == '':
if not self.filestack:
return self._StripComments(line), '', first_token, found_space
else:
self.pop_source()
nextchar = self.read_char()
if nextchar == '\n':
self.lineno += 1
if escaped_state:
escaped_state = False
else:
if nextchar in self.escape:
line += nextchar
escaped_state = True
else:
escaped_state = False
if not escaped_state:
if (nextchar in self.whitespace):
found_space = True
while first_token in self.source_triggers:
fname = RemoveOuterQuotes(self.get_token())
if (fname == '') or (fname in self.source_triggers):
raise InputError('Error: near '+self.error_leader()+'\n'
' Nonsensical file inclusion request.\n')
if self.debug >= 0:
sys.stderr.write( (' ' * recur_level) +
'reading file \"'+fname+'\"\n')
spec = self.sourcehook(fname)
if spec:
(fname, subfile) = spec
if ((first_token not in self.source_triggers_x) or
(fname not in self.source_files_restricted)):
self.push_source(subfile, fname)
if first_token in self.source_triggers_x:
self.source_files_restricted.add(fname)
else:
if self.debug >= 0:
sys.stderr.write('\nWarning at '+self.error_leader()+':\n'
' duplicate attempt to import file:\n \"'+fname+'\"\n')
line, nextchar, first_token, found_space = \
self._ReadLine(recur_level+1)
if nextchar in self.line_terminators:
line_nrw = line.rstrip(self.whitespace)
#sys.stderr.write('line_nrw=\"'+line_nrw+'\"\n')
if ((len(line_nrw) > 0) and
(line_nrw[-1] in self.line_extend_chars) and
((len(line_nrw) < 2) or (line_nrw[-2] not in self.escape))):
line = line_nrw[:-1] #delete the line_extend character
# from the end of that line and keep reading...
else:
return self._StripComments(line), nextchar, first_token, found_space
else:
line += nextchar
if not found_space:
first_token += nextchar
def ReadLine(self, recur_level=0):
line, nextchar, first_token, found_space = \
self._ReadLine(recur_level)
if nextchar == self.eof:
self.end_encountered = True
return line + nextchar
@staticmethod
def TextBlock2Lines(text, delimiters, keep_delim=True):
""" This splits a string into a list of sub-strings split by delimiter
characters. This function is different from the standard str.split()
function: The string is split at every character which belongs to the
"delimiters" argument (which can be a string or some other container).
This character is included at the end of every substring. Example:
TextBlock2Lines('\nabc\nde^fg\nhi j\n', '^\n')
returns:
['\n', 'abc\n', 'de^', 'fg\n', 'hi j\n']
"""
ls = []
i = 0
i_prev = 0
while i < len(text):
if text[i] in delimiters:
if keep_delim:
ls.append(text[i_prev:i+1])
else:
ls.append(text[i_prev:i])
i_prev = i+1
i += 1
if (i_prev < len(text)):
ls.append(text[i_prev:i+1])
return ls
def __iter__(self):
return self
def __next__(self):
line = self.ReadLine()
if line == self.eof:
raise StopIteration
return line
class OSrcLoc(object):
""" OSrcLoc is barely more than a 2-tuple containing the name of a file
(a string) and a particular line number inside that file (an integer).
These objects are passed around and stored in the nodes of
every tree, so that if a syntax error or broken link in that node
is discovered, an error message can be provided to the user.
"order"
Later on, during development, the "order" member was added. Why:
        If you want to know whether a block of text comes before or after a
different block of text, unfortunately you can not just compare the
corresponding line numbers of the files they come from because the
files may differ, and multiple short blocks of text may occupy the
same line. Consequently, "OSrcLoc" also maintains an internal
counter which keeps track of how many OSrcLoc() objects have been
created so far. (This can be useful if the user requests that
variables and commands be assigned in a non-standard order.)
The "order" member is assigned to this counter.
Most of the time, the "order" member can be ignored.
"""
count = 0
def __init__(self, infile='', lineno=-1):
self.infile = infile
self.lineno = lineno
OSrcLoc.count += 1
self.order = OSrcLoc.count
def __lt__(self, x):
return self.order < x.order
#def __repr__(self):
# return repr((self.infile, self.lineno, self.order))
class TextBlock(object):
"""TextBlock is just a 3-tuple consisting of a string, and an OSrcLoc
to help locate it in the original file from which it was read."""
def __init__(self, text, srcloc): #srcloc_end):
self.text = text
if srcloc == None:
self.srcloc = OSrcLoc()
else:
self.srcloc = srcloc
#if srcloc_end == None:
# self.srcloc_end = OSrcLoc()
#else:
# self.srcloc_end = srcloc_end
def __repr__(self):
return '\"'+self.text+'\"'
class VarRef(object):
"""VarRef stores variable names, and paths, and other attribute information,
as well as a "OSrcLoc" to keep track of the file it was defined in."""
def __init__(self,
prefix = '', # '$' or '${'
descr_str = '', # <- descriptor string: "cpath/category:lpath"
suffix = '', # '}'
srcloc = None,# location in file where defined
binding = None,# a pointer to a tuple storing the value
nptr = None):# <- see class VarNPtr
self.prefix = prefix #Any text before the descriptor string goes here
self.suffix = suffix #Any text after the descriptor string goes here
self.descr_str = descr_str
if srcloc == None: # <- Location in text file where variable appears
self.srcloc = OSrcLoc()
else:
self.srcloc = srcloc
self.binding = binding
if nptr == None:
self.nptr = VarNPtr()
else:
self.nptr = nptr
def __lt__(self, x):
return self.order < x.order
#def __repr__(self):
# return repr((self.prefix + self.descr_str + self.suffix, srcloc))
class VarNPtr(object):
"""
    Every time a variable appears in a template, it has a "descriptor".
For example, consider the variable
"$atom:CA"
This is a string which encodes 3 pieces of information.
    1) the category name:  This essentially indicates the variable's type.
(ie "atom", in the example above)
2) the category node: Some TYPES have limited scope. Users can
specify the root node of the portion of the tree
in which this variable's type makes sense.
If this node is the root node, then that category
is relevant everywhere, and is not molecule or class
specific. All variables have a category node, which
                      is often not explicitly defined by the user.
It must be inferred/determined.)
(Category node = the root "/", in the example above.)
3) the leaf node: This is a node whose ".name" member matches the name
of a variable. This node is created for this purpose
                  and its position in the tree is a reflection of
that variable's intended scope.
In a molecule this "name" might be the name
of a type of atom, or an atom ID, or a bond type,
which is found in a particular molecule.
(Leaf node would be named "CA" in the example above.)
The VarNPtr class is simply a 3-tuple which
keeps these 3 pieces of data together.
"""
def __init__(self, cat_name='', cat_node=None, leaf_node=None):
self.cat_name = cat_name
self.cat_node = cat_node
self.leaf_node = leaf_node
#def __repr__(self):
# return repr((self.cat_name, self.cat_node.name, self.leaf_node.name))
class VarBinding(object):
""" VarBinding is essentially a tuple consistng of (full_name, binding, refs):
"self.full_name" is canonical name for this variable. This is a string
which specifies full path leading to the category node (beginning with '/'),
the category name (followed by a ':'),
as well as the leaf node (including the path leading up to it from cat_node)
This triplet identifies the variable uniquely.
"self.value" is the data that the variable refers to (usually a string).
"self.refs" stores a list of VarRefs which mention the same variable
from the various places inside various templates in the tree.
"""
def __init__(self,
full_name = '',
nptr = None,
value = None,
refs = None,
order = None,
category = None):
self.full_name = full_name
self.nptr = nptr
self.value = value
self.refs = refs
self.order = order
self.category = category
def __lt__(self, x):
return self.order < x.order
def __repr__(self):
return repr((self.full_name, self.value, self.order))
def ExtractCatName(descr_str):
""" When applied to a VarRef's "descr_str" member,
this function will extract the "catname" of it's corresponding
"nptr" member. This can be useful for error reporting.
(I use it to insure that the user is using the correct counter
variable types at various locations in their input files.)
"""
ib = descr_str.find(':')
if ib == -1:
ib = len(descr_str)
ia = descr_str.rfind('/')
if ia == -1:
ia = 0
return descr_str[ia:ib]
else:
str_before_colon = descr_str[0:ib]
ia = str_before_colon.rfind('/')
if ia == -1:
return str_before_colon
else:
return str_before_colon[ia+1:]
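# Illustrative sketch (not part of the original module, and never invoked):
# a hypothetical helper showing what ExtractCatName() returns for the kinds
# of descriptor strings used elsewhere in this file.
def _example_extract_cat_name():
    # "atom:CA" -> category name "atom" (the text before the ':')
    assert ExtractCatName('atom:CA') == 'atom'
    # "cpath/category:lpath" -> the text between the last '/' and the ':'
    assert ExtractCatName('cpath/category:lpath') == 'category'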
def _DeleteLineFromTemplate(tmpl_list,
i_entry, # index into tmpl_list
newline_delimiter='\n'):
""" Delete a single line from tmpl_list.
tmpl_list is an alternating list of VarRefs and TextBlocks.
To identify the line, the index corresponding to one of the
entries in the tmpl_list is used. (Usually it is a VarRef)
    The text after the preceding newline, and the text up to the next newline
(starting from the beginning of the current entry, if a TextBlock)
is deleted, including any VarRef (variables) located in between.
It returns the index corresponding to the next
entry in the list (after deletion).
"""
i_prev_newline = i_entry
while i_prev_newline >= 0:
entry = tmpl_list[i_prev_newline]
if isinstance(entry, TextBlock):
i_char_newline = entry.text.rfind(newline_delimiter)
if i_char_newline != -1: # then newline found
# Delete the text after this newline
entry.text = entry.text[:i_char_newline+1]
break
i_prev_newline -= 1
first_var = True
#i_next_newline = i_entry
i_next_newline = i_prev_newline+1
while i_next_newline < len(tmpl_list):
entry = tmpl_list[i_next_newline]
if isinstance(entry, TextBlock):
i_char_newline = entry.text.find(newline_delimiter)
if i_char_newline != -1: # then newline found
# Delete the text before this newline (including the newline)
entry.text = entry.text[i_char_newline+1:]
break
        # Invoke DeleteSelf() on the first variable on this line. This will
        # ensure that it is deleted from the ttree_assignments.txt file.
elif isinstance(entry, VarRef):
if first_var:
entry.nptr.leaf_node.DeleteSelf()
first_var = False
i_next_newline += 1
del tmpl_list[i_prev_newline + 1 : i_next_newline]
return i_prev_newline + 1
def DeleteLinesWithBadVars(tmpl_list,
delete_entire_template = False,
newline_delimiter = '\n'):
"""
Loop through the entries in a template,
an alternating list of TextBlocks and VarRefs (tmpl_list).
If a VarRef points to a leaf_node which no longer exists
(ie. no longer in the corresponding category's .bindings list).
Then delete the line it came from from the template (tmpl_list).
"""
out_str_list = []
i = 0
while i < len(tmpl_list):
entry = tmpl_list[i]
if isinstance(entry, VarRef):
var_ref = entry
var_bindings = var_ref.nptr.cat_node.categories[var_ref.nptr.cat_name].bindings
#if var_ref.nptr.leaf_node not in var_bindings:
if var_ref.nptr.leaf_node.IsDeleted():
if delete_entire_template:
del tmpl_list[:]
return 0
else:
i = _DeleteLineFromTemplate(tmpl_list,
i,
newline_delimiter)
else:
i += 1
else:
i += 1
def SplitTemplate(ltmpl, delim, delete_blanks = False):
"""
Split a template "ltmpl" into a list of "tokens" (sub-templates)
using a single delimiter string "delim".
INPUT arguments:
"ltmpl" should be an list of TextBlocks and VarRefs.
"delim" should be a simple string (type str)
"delete_blanks" should be a boolean True/False value.
When true, successive occurrences of the delimiter
should not create blank entries in the output list.
OUTPUT:
A list of tokens.
Each "token" is either a TextBlock, a VarRef,
or a (flat, 1-dimensional) list containing more than one of these objects.
The number of "tokens" returned equals the number of times the delimiter
is encountered in any of the TextBlocks in the "ltmpl" argument, plus one.
(... Unless "delete_blanks" is set to True.
Again, in that case, empty entries in this list are deleted.)
"""
assert(type(delim) is str)
if not hasattr(ltmpl, '__len__'):
ltmpl = [ltmpl]
tokens_lltmpl = []
token_ltmpl = []
i = 0
while i < len(ltmpl):
entry = ltmpl[i]
if isinstance(entry, TextBlock):
#if hasattr(entry, 'text'):
prev_src_loc = entry.srcloc
tokens_str = entry.text.split(delim)
lineno = entry.srcloc.lineno
j = 0
while j < len(tokens_str):
token_str = tokens_str[j]
delim_found = False
if (j < len(tokens_str)-1):
delim_found = True
if token_str == '':
if delete_blanks:
if delim == '\n':
lineno += 1
if len(token_ltmpl) > 0:
if len(token_ltmpl) == 1:
tokens_lltmpl.append(token_ltmpl[0])
else:
tokens_lltmpl.append(token_ltmpl)
del token_ltmpl
token_ltmpl = []
j += 1
continue
new_src_loc = OSrcLoc(prev_src_loc.infile, lineno)
new_src_loc.order = prev_src_loc.order
for c in token_str:
# Reminder to self: c != delim (so c!='\n' if delim='\n')
# (We keep track of '\n' characters in delimiters above.)
if c == '\n':
lineno +=1
new_src_loc.lineno = lineno
text_block = TextBlock(token_str,
new_src_loc)
prev_src_loc = new_src_loc
if len(token_ltmpl) == 0:
if delim_found:
tokens_lltmpl.append(text_block)
del token_ltmpl
token_ltmpl = []
else:
token_ltmpl.append(text_block)
else:
if delim_found:
if len(token_str) > 0:
token_ltmpl.append(text_block)
tokens_lltmpl.append(token_ltmpl)
del token_ltmpl
token_ltmpl = []
else:
assert(not delete_blanks)
if (isinstance(token_ltmpl[-1], VarRef)
and
((j>0)
or
((j == len(tokens_str)-1) and
(i == len(ltmpl)-1))
)):
# In that case, this empty token_str corresponds
# to a delimiter which was located immediately
# after the variable name,
# AND
# -there is more text to follow,
# OR
# -we are at the end of the template.
token_ltmpl.append(text_block)
if len(token_ltmpl) == 1:
tokens_lltmpl.append(token_ltmpl[0])
else:
tokens_lltmpl.append(token_ltmpl)
del token_ltmpl
token_ltmpl = []
else:
token_ltmpl.append(text_block)
if (delim_found and (delim == '\n')):
lineno += 1
j += 1
elif isinstance(entry, VarRef):
#elif hasattr(entry, 'descr_str'):
lineno = entry.srcloc.lineno
if ((len(token_ltmpl) == 1) and
isinstance(token_ltmpl[0], TextBlock) and
(len(token_ltmpl[0].text) == 0)):
# special case: if the previous entry was "", then it means
                # the delimiter appeared at the end of the previous text block
# leading up to this variable. It separates the variable from
# the previous text block. It is not a text block of length 0.
token_ltmpl[0] = entry
else:
token_ltmpl.append(entry)
elif entry == None:
token_ltmpl.append(entry)
else:
assert(False)
i += 1
# Append left over remains of the last token
if len(token_ltmpl) == 1:
tokens_lltmpl.append(token_ltmpl[0])
elif len(token_ltmpl) > 1:
tokens_lltmpl.append(token_ltmpl)
del token_ltmpl
return tokens_lltmpl
def SplitTemplateMulti(ltmpl, delims, delete_blanks=False):
"""
Split a template "ltmpl" into a list of templates using a
single one or more delimiter strings "delim_list".
If multiple delimiter strings are provided, splitting
begins using the first delimiter string in the list.
Then each token in the resulting list of templates
is split using the next delimiter string
and so on until we run out of delimiter strings.
"ltmpl" should be an list of TextBlocks and VarRefs.
"delims" should be a simple string (type str) or a list of strings
"delete_blanks" is either True or False
If True, then any blank entries in the resulting list of
tokens (sub-templates) will be deleted.
"""
if hasattr(delims, '__len__'): # then it hopefully is a list of strings
delim_list = delims
else:
delim_list = [delims] # then it hopefully is a string
tokens = [ltmpl]
for delim in delim_list:
assert(type(delim) is str)
tokens_il = []
for t in tokens:
sub_tokens = SplitTemplate(t, delim, delete_blanks)
for st in sub_tokens:
if hasattr(st, '__len__'):
if (len(st) > 0) or (not delete_blanks):
tokens_il.append(st)
else:
tokens_il.append(st)
tokens = tokens_il
del tokens_il
return tokens
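# Illustrative sketch (not part of the original module, and never invoked):
# a hypothetical helper showing how SplitTemplateMulti() tokenizes a tiny
# template, splitting first on newlines and then on spaces.
def _example_split_template_multi():
    tmpl = [TextBlock('a b\nc', OSrcLoc('demo', 1))]
    tokens = SplitTemplateMulti(tmpl, ['\n', ' '], delete_blanks=True)
    # This should yield three TextBlock tokens whose .text fields are
    # 'a', 'b' and 'c'.
    return tokens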
def _TableFromTemplate(d, ltmpl, delimiters, delete_blanks):
"""
See the docstring for the TableFromTemplate() function for an explanation.
(This _TableFromTemplate() and SplitTemplate() are the workhorse functions
for TableFromTemplate().)
"""
output = SplitTemplateMulti(ltmpl, delimiters[d], delete_blanks[d])
if d > 0:
i = 0
while i < len(output):
output[i] = _TableFromTemplate(d-1,
output[i],
delimiters,
delete_blanks)
# Delete empty LISTS?
if (delete_blanks[d] and
hasattr(output[i], '__len__') and
(len(output[i]) == 0)):
del output[i]
else:
i += 1
return output
def TableFromTemplate(ltmpl, delimiters, delete_blanks=True):
"""
    This function can be used to split a template
    (a list containing TextBlocks and VarRefs)
    into a multidimensional table, with an arbitrary number of dimensions.
Arguments:
ltmpl
An alternating list of TextBlocks and VarRefs containing
the contents of this text template.
delimiters
The user must supply a list or tuple of delimiters: one delimiter for
each dimension in the table, with low-priority delimiters
(such as spaces ' ') appearing first, and higher-priority delimiters
       (such as newlines '\n') appearing later on in the list.
This function will divide the entire "ltmpl" into an n-dimensional
table. Initially the text is split into a list of text using the
highest-priority delimiter. Then each entry in the resulting list is
split into another list according to the next highest-priority delimiter.
This continues until all of the delimiters are used up and an
n-dimensional list-of-lists is remaining.
delete_blanks
The optional "delete_blanks" argument can be used to indicate whether
or not to delete blank entries in the table (which occur as a result
of placing two delimiters next to each other). It should be either
None (default), or it should be an array of booleans matching the
size of the "delimiters" argument. This allows the caller to customize
the merge settings separately for each dimension (for example: to allow
merging of whitespace within a line, without ignoring blank lines).
---- Details: ----
1) Multi-character delimiters ARE allowed (like '\n\n').
2) If a delimiter in the "delimiters" argument is not a string
but is a tuple (or a list) of strings, then the text is split according
to any of the delimiters in that tuple/list (starting from the last entry).
This way, users can use this feature to split text according to multiple
different kinds of whitespace characters (such as ' ' and '\t'), for
       example, by setting delimiters[0] = (' ','\t').  If, additionally,
       delete_blanks[0] == True, then this will cause this function to
       divide text without regard to whitespace on a given line (for example).
Detailed example:
table2D = TableFromTmplList(ltmpl,
delimiters = ((' ','\t'), '\n'),
delete_blanks = (True, False))
This divides text in a similar way that the "awk" program does by default,
ie, by ignoring various kinds of whitespace between text fields, but NOT
ignoring blank lines.
3) Any text contained in variable-names is ignored.
"""
# Make a copy of ltmpl
# (The workhorse function "_TableFromTemplate()" makes in-place changes to
# its "ltmpl" argument. I don't want to modify "ltmpl", so I make a copy
# of it before I invoke "_TableFromTemplate()" on it.)
output = [ltmpl[i] for i in range(0, len(ltmpl))]
d = len(delimiters) - 1
output = _TableFromTemplate(d, output, delimiters, delete_blanks)
return output
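# Illustrative sketch (not part of the original module, and never invoked):
# a hypothetical helper showing the awk-like splitting described in the
# docstring above: fields separated by spaces/tabs, rows separated by
# newlines, with blank lines kept (so a trailing newline produces a final
# empty row).
def _example_table_from_template():
    tmpl = [TextBlock('1 2\n3 4\n', OSrcLoc('demo', 1))]
    table = TableFromTemplate(tmpl,
                              delimiters=((' ', '\t'), '\n'),
                              delete_blanks=(True, False))
    # "table" should be a list of rows, each row a list of TextBlock fields.
    return table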
class TemplateLexer(TtreeShlex):
""" This class extends the standard python lexing module, shlex, adding a
new member function (ReadTemplate()), which can read in a block of raw text,
(halting at an (non-escaped) terminal character), and split the text into
alternating blocks of text and variables. (As far as this lexer is
concerned, "variables" are simply tokens preceeded by $ or @ characters,
and surrounded by optional curly-brackets {}.)
"""
def __init__(self,
instream=None,
infile=None,
posix=False):
TtreeShlex.__init__(self, instream, infile, posix)
self.var_delim = '$@' #characters which can begin a variable name
self.var_open_paren = '{' #optional parenthesis surround a variable
self.var_close_paren = '}' #optional parenthesis surround a variable
self.newline = '\n'
self.comment_skip_var = '#'
# Which characters belong in words?
#
# We want to allow these characters:
# ./$@&%^!*~`-_:;?<>[]()
# to appear inside the tokens that TtreeShlex.get_token()
# retrieves (TtreeShlex.get_token() is used to read class
# names, and instance names, and variable names)
#
# settings.lex.wordchars+='./$@&%^!*~`-_+:;?<>[]' #Allow these chars
#
        # Omissions:
# Note: I left out quotes, whitespace, comment chars ('#'), and escape
# characters ('\\') because they are also dealt with separately.
# Those characters should not overlap with settings.lex.wordchars.
#
# Enabling unicode support requires that we override this choice
# by specifying "lex.wordterminators" instead of "wordchars".
#
# lex.wordterminators should be the (printable) set inverse of lex.wordchars
# I'm not sure which ascii characters are NOT included in the string above
# (We need to figure that out, and put them in settings.lex.wordterminators)
# To figure that out, uncomment the 8 lines below:
#
#self.wordterminators=''
#for i in range(0,256):
# c = chr(i)
# if c not in self.wordchars:
# self.wordterminators += c
#sys.stderr.write('-------- wordterminators = --------\n')
#sys.stderr.write(self.wordterminators+'\n')
#sys.stderr.write('-----------------------------------\n')
#
# Here is the result:
self.wordterminators = '(),={|}' + \
self.whitespace + \
self.quotes + \
self.escape + \
self.commenters
# Note:
# self.whitespace = ' \t\r\f\n'
# self.quotes = '\'"'
# self.escape = '\\'
# self.commenters = '#'
self.source_triggers=set(['include','import'])
self.source_triggers_x=set(['import'])
def GetSrcLoc(self):
return OSrcLoc(self.infile, self.lineno)
def ReadTemplate(self,
simplify_output=False,
terminators='}',
other_esc_chars='{',
keep_terminal_char = True):
"""
ReadTemplate() reads a block of text (between terminators)
and divides it into variables (tokens following a '$' or '@' character)
and raw text. This is similar to pythons string.Template(),
however it reads from streams (files), not strings, and it allows use
of more complicated variable names with multiple variable delimiters
(eg '$' and '@').
        This readline()-like member function terminates when reaching a
        user-specified terminator character (second argument),
        or when a variable (eg: "$var") is encountered.  The result is
a list of variable-separated text-blocks (stored in the first
argument). For example, the string:
"string with $var1 and $var2 variables.}" contains:
"string with ",
$var1,
" and ",
$var2,
" variables.}"
This simplifies the final process of rendering
(substituting text into) the text blocks later on.
Output:
This function returns a list of (alternating) blocks of
text, and variable names. Each entry in the list is either:
1) a text block:
Raw text is copied from the source, verbatim, along with
some additional data (filename and line numbers), to
help retroactively identify where the text came from
(in case a syntax error in the text is discovered later).
In this case, the list entry is stored as a list
The format (TextBlock) is similar to:
[text_string, ((filenameA,lineBegin), (filenameB,lineEnd))],
where the tuples, (filenameA,lineBegin) and (filenameB,lineEnd)
denote the source file(s) from which the text was read, and
line number at the beginning and ending of the text block.
(This information is useful for generating helpful error
messages. Note that the "TtreeShlex" class allows users to
combine multiple files transparently into one stream using
the "source" (or "sourcehook()") member. For this reason, it
is possible, although unlikely, that the text-block
we are reading could span multiple different files.)
2) a variable (for example "$var" or "${var}"):
In this case, the list entry is stored in the "VarRef" format
            which is essentially shown below:
[[var_prefix, var_nptr, var_suffix], (filename,lineno)]
where var_prefix and var_suffix are strings containing brackets
and other text enclosing the variable name (and may be empty).
As an example, we consider a file named "datafile" which
contains the text containing 2 text blocks and 1 variable:
"some\n text\n before ${var}. Text after\n".
ReadTemplate() will read this and return a list with 3 entries:
[ ['some\n text\n before', (('datafile', 1), ('datafile', 3))],
[['${', 'var', '}'], ('datafile', 3, 3)],
['Text after\n', (('datafile', 3), ('datafile', 4))] ]
Note that while parsing the text, self.lineno counter is
incremented whenever a newline character is encountered.
(Also: Unlike shlex.get_token(), this function does not
delete commented text, or insert text from other files.)
Exceptional Cases:
Terminator characters are ignored if they are part of a variable
reference. (For example, the '}' in "${var}", is used to denote a
bracketed variable, and does not cause ReadTemplate() to stop reading)
OR if they are part of a two-character escape sequence
(for example, '}' in "\}" does not cause terminate parsing).
In that case, the text is considered normal text. (However the
'\' character is also stripped out. It is also stripped out if it
            precedes any characters in "other_esc_chars", which is
            another argument.  Otherwise it is left in the text block.)
"""
#print(' ReadTemplate('+terminators+') invoked at '+self.error_leader())
# The main loop of the parser reads only one variable at time.
# The following variables keep track of where we are in the template.
reading_var=False # Are we currently reading in the name of a variable?
prev_char_delim=False #True iff we just read a var_delim character like '$'
escaped_state=False #True iff we just read a (non-escaped) esc character '\'
commented_state=False #True iff we are in a region of text where vars should be ignored
var_paren_depth=0 # This is non-zero iff we are inside a
# bracketed variable's name for example: "${var}"
var_terminators = self.whitespace + self.newline + self.var_delim + '{}'
tmpl_list = [] # List of alternating tuples of text_blocks and
# variable names (see format comment above)
# This list will be returned to the caller.
#sys.stderr.write('report_progress='+str(report_progress))
prev_filename = self.infile
prev_lineno = self.lineno
var_prefix = ''
var_descr_plist = []
var_suffix = ''
text_block_plist = []
done_reading = False
while not done_reading:
terminate_text = False
terminate_var = False
#delete_prior_escape = False
nextchar = self.read_char()
#print(' ReadTemplate() nextchar=\''+nextchar+'\' at '+self.error_leader()+' esc='+str(escaped_state)+', pvar='+str(prev_char_delim)+', paren='+str(var_paren_depth))
# Count newlines:
if nextchar in self.newline:
commented_state = False
self.lineno += 1
elif ((nextchar in self.comment_skip_var) and
(not escaped_state)):
commented_state = True
# Check for end-of-file:
if nextchar == '':
if escaped_state:
raise InputError('Error: in '+self.error_leader()+'\n\n'
'No escaped character.')
if reading_var:
terminate_var = True
else:
terminate_text = True
done_reading = True
# --- Now process the character: ---
# What we do next depends on which "mode" we are in.
# If we are reading a regular text block (reading_var == False),
# then we keep appending characters onto the end of "text_block",
# checking for terminal characters, or variable delimiters.
# If we are reading a variable name (reading_var == True),
# then we append characters to the end of "var_descr_plist[]",
# checking for variable terminator characters, as well as
# parenthesis (some variables are surrounded by parenthesis).
elif reading_var:
if nextchar in terminators:
#sys.stdout.write(' ReadTemplate() readmode found terminator.\n')
if escaped_state:
# In this case, the '\' char was only to prevent terminating
# string prematurely, so delete the '\' character.
#delete_prior_escape = True
if not (nextchar in self.var_close_paren):
del var_descr_plist[-1]
var_descr_plist.append(nextchar)
elif not ((var_paren_depth>0) and (nextchar in self.var_close_paren)):
terminate_var = True
done_reading = True
if nextchar in self.var_open_paren: # eg: nextchar == '{'
#sys.stdout.write(' ReadTemplate() readmode found {.\n')
if escaped_state:
# In this case, the '\' char was only to prevent
# interpreting '{' as a variable prefix
#delete_prior_escape=True # so delete the '\' character
del var_descr_plist[-1]
var_descr_plist.append(nextchar)
else:
# "${var}" is a valid way to refer to a variable
if prev_char_delim:
var_prefix += nextchar
var_paren_depth = 1
# "${{var}}" is also a valid way to refer to a variable,
# (although strange), but "$va{r}" is not.
# Parenthesis (in bracketed variable names) must
# immediately follow the '$' character (as in "${var}")
elif var_paren_depth > 0:
var_paren_depth += 1
elif nextchar in self.var_close_paren:
#sys.stdout.write(' ReadTemplate() readmode found }.\n')
if escaped_state:
# In this case, the '\' char was only to prevent
# interpreting '}' as a variable suffix,
#delete_prior_escape=True #so skip the '\' character
if (nextchar not in terminators):
del var_descr_plist[-1]
var_descr_plist.append(nextchar)
else:
if var_paren_depth > 0:
var_paren_depth -= 1
if var_paren_depth == 0:
var_suffix = nextchar
terminate_var = True
elif nextchar in var_terminators:
#sys.stdout.write(' ReadTemplate() readmode found var_terminator \"'+nextchar+'\"\n')
if (escaped_state or (var_paren_depth>0)):
# In this case, the '\' char was only to prevent
# interpreting nextchar as a variable terminator
#delete_prior_escape = True # so skip the '\' character
del var_descr_plist[-1]
var_descr_plist.append(nextchar)
else:
terminate_var = True
elif nextchar in self.var_delim: # such as '$'
#sys.stdout.write(' ReadTemplate() readmode found var_delim.\n')
if escaped_state:
# In this case, the '\' char was only to prevent
# interpreting '$' as a new variable name
#delete_prior_escape = True # so skip the '\' character
del var_descr_plist[-1]
var_descr_plist.append(nextchar)
else:
prev_var_delim = True
# Then we are processing a new variable name
terminate_var = True
else:
var_descr_plist.append(nextchar)
prev_char_delim = False
else: # begin else clause for "if reading_var:"
# Then we are reading a text_block
if nextchar in terminators:
if escaped_state:
# In this case, the '\' char was only to prevent terminating
# string prematurely, so delete the '\' character.
#delete_prior_escape = True
del text_block_plist[-1]
text_block_plist.append(nextchar)
elif commented_state:
text_block_plist.append(nextchar)
else:
terminate_text = True
done_reading = True
elif nextchar in self.var_delim: # such as '$'
if escaped_state:
# In this case, the '\' char was only to prevent
# interpreting '$' as a variable prefix.
#delete_prior_escape=True #so delete the '\' character
del text_block_plist[-1]
text_block_plist.append(nextchar)
elif commented_state:
text_block_plist.append(nextchar)
else:
prev_char_delim = True
reading_var = True
var_paren_depth = 0
terminate_text = True
else:
text_block_plist.append(nextchar)
#TO DO: use "list_of_chars.join()" instead of '+='
prev_char_delim = False # the previous character was not '$'
# Now deal with "other_esc_chars"
#if escaped_state and (nextchar in other_esc_chars):
if escaped_state and (nextchar in other_esc_chars):
if reading_var:
#sys.stdout.write(' ReadTemplate: var_descr_str=\''+''.join(var_descr_plist)+'\'\n')
assert(var_descr_plist[-2] in self.escape)
del var_descr_plist[-2]
else:
#sys.stdout.write(' ReadTemplate: text_block=\''+''.join(text_block_plist)+'\'\n')
assert(text_block_plist[-2] in self.escape)
del text_block_plist[-2]
if terminate_text:
#sys.stdout.write('ReadTemplate() appending: ')
#sys.stdout.write(text_block)
#tmpl_list.append( [text_block,
# ((prev_filename, prev_lineno),
# (self.infile, self.lineno))] )
if simplify_output:
tmpl_list.append(''.join(text_block_plist))
else:
tmpl_list.append(TextBlock(''.join(text_block_plist),
OSrcLoc(prev_filename, prev_lineno)))
#, OSrcLoc(self.infile, self.lineno)))
if not done_reading:
# The character that ended the text block
# was a variable delimiter (like '$'), in which case
# we should put it (nextchar) in the variable's prefix.
var_prefix = nextchar
else:
var_prefix = ''
var_descr_plist = []
var_suffix = ''
prev_filename = self.infile
prev_lineno = self.lineno
del text_block_plist
text_block_plist = []
#gc.collect()
elif terminate_var:
# Print an error if we terminated in the middle of
# an incomplete variable name:
if prev_char_delim:
raise InputError('Error: near '+self.error_leader()+'\n\n'
'Null variable name.')
if var_paren_depth > 0:
raise InputError('Error: near '+self.error_leader()+'\n\n'
'Incomplete bracketed variable name.')
var_descr_str = ''.join(var_descr_plist)
# Now check for variable format modifiers,
# like python's ".rjust()" and ".ljust()".
# If present, then put these in the variable suffix.
if ((len(var_descr_plist)>0) and (var_descr_plist[-1]==')')):
#i = len(var_descr_plist)-1
#while i >= 0:
# if var_descr_plist[i] == '(':
# break
# i -= 1
i = var_descr_str.rfind('(')
if (((i-6) >= 0) and
((var_descr_str[i-6:i] == '.rjust') or
(var_descr_str[i-6:i] == '.ljust'))):
var_suffix =''.join(var_descr_plist[i-6:])+var_suffix
#var_descr_plist = var_descr_plist[:i-6]
var_descr_str = var_descr_str[:i-6]
# Process any special characters in the variable name
var_descr_str = EscCharStrToChar(var_descr_str)
#tmpl_list.append( [[var_prefix, var_descr_str, var_suffix],
# (self.infile, self.lineno)] )
if simplify_output:
tmpl_list.append(var_prefix + var_descr_str + var_suffix)
else:
tmpl_list.append( VarRef(var_prefix, var_descr_str, var_suffix,
OSrcLoc(self.infile, self.lineno)) )
#if report_progress:
#sys.stderr.write(' parsed variable '+var_prefix+var_descr_str+var_suffix+'\n')
#sys.stdout.write('ReadTemplate() appending: ')
#print(var_prefix + var_descr_str + var_suffix)
del var_descr_plist
del var_descr_str
prev_filename = self.infile
prev_lineno = self.lineno
var_prefix = ''
var_descr_plist = []
var_suffix = ''
# Special case: Variable delimiters like '$'
# terminate the reading of variables,
# but they also signify that a new
# variable is being read.
if nextchar in self.var_delim:
# Then we are processing a new variable name
prev_var_delim = True
reading_var = True
var_paren_depth = 0
var_prefix = nextchar
elif nextchar in self.var_close_paren:
del text_block_plist
text_block_plist = []
#gc.collect()
prev_var_delim = False
reading_var = False
else:
# Generally, we don't want to initialize the next text block
# with the empty string. Consider that whatever character
# caused us to stop reading the previous variable and append
# it to the block of text that comes after.
del text_block_plist
text_block_plist = [nextchar]
#gc.collect()
prev_var_delim = False
reading_var = False
# If we reached the end of the template (and the user requests it),
# then the terminal character can be included in the list
# of text_blocks to be returned to the caller.
if done_reading and keep_terminal_char:
#sys.stdout.write('ReadTemplate() appending: \''+nextchar+'\'\n')
# Here we create a new text block which contains only the
# terminal character (nextchar).
#tmpl_list.append( [nextchar,
# ((self.infile, self.lineno),
# (self.infile, self.lineno))] )
if simplify_output:
tmpl_list.append(nextchar)
else:
tmpl_list.append(TextBlock(nextchar,
OSrcLoc(self.infile, self.lineno)))
#, OSrcLoc(self.infile, self.lineno)))
if escaped_state:
escaped_state = False
else:
if nextchar in self.escape:
escaped_state = True
#print("*** TMPL_LIST0 = ***", tmpl_list)
return tmpl_list # <- return value stored here
def GetParenExpr(self, prepend_str='', left_paren='(', right_paren=')'):
""" GetParenExpr() is useful for reading in strings
with nested parenthesis and spaces.
This function can read in the entire string:
.trans(0, 10.0*sin(30), 10.0*cos(30))
(Because I was too lazy to write this correctly...)
Spaces are currently stripped out of the expression.
(...unless surrounded by quotes) The string above becomes:
".trans(0,10.0*sin(30),10.0*cos(30))"
Sometimes the caller wants to prepend some text to the beginning
of the expression (which may contain parenthesis). For this
reason, an optional first argument ("prepend_str") can be
provided. By default it is empty.
"""
orig_wordterm = self.wordterminators
self.wordterminators = self.wordterminators.replace(left_paren,'').replace(right_paren,'')
token = self.get_token()
if ((token == '') or
(token == self.eof)):
return prepend_str
expr_str = prepend_str + token
#if (expr_str.find(left_paren) == -1):
# raise InputError('Error near or before '+self.error_leader()+'\n'
# 'Expected an open-paren (\"'+prepend_str+left_paren+'\") before this point.\n')
# return expr_str
paren_depth = expr_str.count(left_paren) - expr_str.count(right_paren)
while ((len(expr_str) == 0) or (paren_depth > 0)):
token = self.get_token()
if ((type(token) is not str) or
(token == '')):
raise InputError('Error near or before '+self.error_leader()+'\n'
'Invalid expression: \"'+expr_str+'\"')
expr_str += token
paren_depth = expr_str.count(left_paren) - expr_str.count(right_paren)
if (paren_depth != 0):
raise InputError('Error near or before '+self.error_leader()+'\n'
'Invalid expression: \"'+expr_str+'\"')
self.wordterminators = orig_wordterm
return expr_str
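# Illustrative sketch (not part of the original module, and never invoked):
# a hypothetical helper showing how TemplateLexer.ReadTemplate() splits a
# short template into alternating TextBlock and VarRef entries.  The input
# string and the '<demo>' file label below are made up.
def _example_read_template():
    import io
    lexer = TemplateLexer(io.StringIO('pos = $x and ${y} done}'), '<demo>')
    entries = lexer.ReadTemplate()
    # "entries" should contain TextBlocks for the literal text and VarRefs
    # for '$x' and '${y}'; the terminal '}' is kept as the final TextBlock
    # because keep_terminal_char defaults to True.
    return entries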
if __name__ == '__main__':
if len(sys.argv) == 1:
lexer = TtreeShlex()
else:
file = sys.argv[1]
lexer = TtreeShlex(open(file), file)
while 1:
tt = lexer.get_token()
if tt:
print("Token: " + repr(tt))
else:
break
|
CFDEMproject/LAMMPS
|
tools/moltemplate/src/ttree_lex.py
|
Python
|
gpl-2.0
| 82,841
|
#
# Copyright (c) 2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
|
hustodemon/spacewalk
|
backend/wsgi/__init__.py
|
Python
|
gpl-2.0
| 609
|
# ICE Revision: $Id$
"""Read a STL file and do simple manipulations"""
from os import path
from PyFoam.Error import error
from PyFoam.ThirdParty.six import next as iterNext
class STLFile(object):
"""Store a complete STL-file and do simple manipulations with it"""
noName="<no name given>"
def __init__(self,fName=None):
"""
@param fName: filename of the STL-file. If None then an empty file is created
"""
self._fp=None
if hasattr(fName, 'read'):
# seems to be a filehandle
self._fp=fName
if hasattr(fName,'name'):
self._filename=fName.name
else:
self._filename="<filehandle>"
else:
self._filename=fName
if self._fp==None:
if fName!=None:
self._contents=[l.strip() for l in open(fName).readlines()]
else:
self._contents=[]
else:
self._contents=[l.strip() for l in self._fp.readlines()]
self.resetInfo()
def resetInfo(self):
"""Set cached info to nothing"""
self._patchInfo=None
def filename(self):
"""The filename (without the full patch)"""
if self._filename==None:
return "<no filename given>"
else:
return path.basename(self._filename)
def expectedToken(self,l,token,i):
if l.strip().find(token)!=0:
error("'%s' expected in line %d of %s" % (token,i+1,self.filename()))
def erasePatches(self,patchNames):
"""Erase the patches in the list"""
processed=[]
keep=True
currentName=None
for l in self._contents:
nextState=keep
parts=l.split()
if len(parts)>0:
if parts[0]=="endsolid":
nextState=True
if currentName!=parts[1]:
error("Patch name",parts[1],"Expected",currentName)
currentName=None
elif parts[0]=="solid":
currentName=parts[1]
if currentName in patchNames:
keep=False
nextState=False
if keep:
processed.append(l)
keep=nextState
self._contents=processed
def mergePatches(self,patchNames,targetPatchName):
"""Merge the patches in the list and put them into a new patch"""
processed=[]
saved=[]
keep=True
currentName=None
for l in self._contents:
nextState=keep
parts=l.split()
if len(parts)>0:
if parts[0]=="endsolid":
nextState=True
if currentName!=parts[1]:
error("Patch name",parts[1],"Expected",currentName)
currentName=None
elif parts[0]=="solid":
currentName=parts[1]
if currentName in patchNames:
keep=False
nextState=False
if keep:
processed.append(l)
elif len(parts)>0:
if parts[0] not in ["solid","endsolid"]:
saved.append(l)
keep=nextState
self._contents=processed
self._contents.append("solid "+targetPatchName)
self._contents+=saved
self._contents.append("endsolid "+targetPatchName)
def patchInfo(self):
"""Get info about the patches. A list of dictionaries with the relevant information"""
if self._patchInfo:
return self._patchInfo
self._patchInfo=[]
newPatch=True
e=enumerate(self._contents)
goOn=True
while goOn:
try:
i,l=iterNext(e)
if newPatch:
self.expectedToken(l,"solid",i)
info={}
if len(l.split())<2:
info["name"]=self.noName
else:
info["name"]=l.split()[1]
info["start"]=i+1
info["facets"]=0
info["min"]=[1e100]*3
info["max"]=[-1e100]*3
newPatch=False
elif l.strip().find("endsolid")==0:
info["end"]=i+1
self._patchInfo.append(info)
newPatch=True
else:
self.expectedToken(l,"facet normal",i)
i,l=iterNext(e)
self.expectedToken(l,"outer loop",i)
for v in range(3):
i,l=iterNext(e)
self.expectedToken(l,"vertex",i)
info["min"]=[min(m) for m in zip(info["min"],
[float(v) for v in l.strip().split()[1:4]])]
info["max"]=[max(m) for m in zip(info["max"],
[float(v) for v in l.strip().split()[1:4]])]
i,l=iterNext(e)
self.expectedToken(l,"endloop",i)
i,l=iterNext(e)
self.expectedToken(l,"endfacet",i)
info["facets"]+=1
except StopIteration:
goOn=False
if not newPatch:
error("File",self.filename(),"seems to be incomplete")
return self._patchInfo
def writeTo(self,fName):
"""Write to a file"""
if hasattr(fName, 'write'):
f=fName
else:
f=open(fName,"w")
f.write("\n".join(self._contents))
def __iter__(self):
for l in self._contents:
yield l
def __iadd__(self,other):
self.resetInfo()
fName=path.splitext(other.filename())[0]
moreThanOne=len(other.patchInfo())>1
nr=1
for l in other:
if l.strip().find("solid")==0 or l.strip().find("endsolid")==0:
parts=l.split()
if len(parts)==1:
l=parts[0]+" "+fName
if moreThanOne:
l+="_%04d" % nr
else:
l=parts[0]+" %s:%s" %(fName," ".join(parts[1:]))
if parts[0]=="solid":
nr+=1
self._contents.append(l)
return self
# Should work with Python3 and Python2
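# Illustrative sketch (not part of the original module, and never invoked):
# a hypothetical helper showing typical STLFile usage.  The file and patch
# names below are made up.
def _example_stl_usage():
    stl = STLFile("body.stl")
    for info in stl.patchInfo():
        # per-patch name, facet count and bounding box
        print(info["name"], info["facets"], info["min"], info["max"])
    stl.mergePatches(["patchA", "patchB"], "merged")
    stl.writeTo("body_merged.stl")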
|
mortbauer/openfoam-extend-Breeder-other-scripting-PyFoam
|
PyFoam/Basics/STLFile.py
|
Python
|
gpl-2.0
| 6,542
|
# -*- coding: utf-8 -*-
#
# papyon - a python client library for Msn
#
# Copyright (C) 2005-2006 Ali Sabil <ali.sabil@gmail.com>
# Copyright (C) 2007-2008 Johann Prieur <johann.prieur@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Profile of the User connecting to the service, as well as the profile of
contacts in his/her contact list.
@sort: Profile, Contact, Group, ClientCapabilities
@group Enums: Presence, Membership, Privacy, NetworkID
@sort: Presence, Membership, Privacy, NetworkID"""
from papyon.util.decorator import rw_property
import gobject
import logging
__all__ = ['Profile', 'Contact', 'Group', 'EndPoint',
'Presence', 'Membership', 'ContactType', 'Privacy', 'NetworkID', 'ClientCapabilities']
logger = logging.getLogger('papyon.profile')
class ClientCapabilities(gobject.GObject):
"""Capabilities of the client. This allow adverstising what the User Agent
is capable of, for example being able to receive video stream, and being
able to receive nudges...
@ivar is_bot: is the client a bot
@type is_bot: bool
@ivar is_mobile_device: is the client running on a mobile device
@type is_mobile_device: bool
@ivar is_msn_mobile: is the client an MSN Mobile device
@type is_msn_mobile: bool
@ivar is_msn_direct_device: is the client an MSN Direct device
@type is_msn_direct_device: bool
@ivar is_media_center_user: is the client running on a Media Center
@type is_media_center_user: bool
@ivar is_msn8_user: is the client using WLM 8
@type is_msn8_user: bool
@ivar is_web_client: is the client web based
@type is_web_client: bool
@ivar is_tgw_client: is the client a gateway
@type is_tgw_client: bool
    @ivar has_space: does the user have a space account
    @type has_space: bool
    @ivar has_webcam: does the user have a webcam plugged in
    @type has_webcam: bool
    @ivar has_onecare: does the user have the OneCare service
    @type has_onecare: bool
    @ivar renders_gif: can the client render gif (for ink)
    @type renders_gif: bool
    @ivar renders_isf: can the client render ISF (for ink)
    @type renders_isf: bool
    @ivar supports_chunking: does the client support chunking messages
    @type supports_chunking: bool
    @ivar supports_direct_im: does the client support direct IM
    @type supports_direct_im: bool
    @ivar supports_winks: does the client support Winks
    @type supports_winks: bool
    @ivar supports_shared_search: does the client support Shared Search
    @type supports_shared_search: bool
    @ivar supports_voice_im: does the client support voice clips
    @type supports_voice_im: bool
    @ivar supports_secure_channel: does the client support secure channels
    @type supports_secure_channel: bool
    @ivar supports_sip_invite: does the client support SIP
    @type supports_sip_invite: bool
    @ivar supports_tunneled_sip: does the client support tunneled SIP
    @type supports_tunneled_sip: bool
    @ivar supports_shared_drive: does the client support File sharing
    @type supports_shared_drive: bool
    @ivar p2p_supports_turn: does the client support TURN for p2p transfer
    @type p2p_supports_turn: bool
@ivar p2p_bootstrap_via_uun: is the client able to use and understand UUN commands
@type p2p_bootstrap_via_uun: bool
@undocumented: __getattr__, __setattr__, __str__
"""
__gsignals__ = {
"capability-changed": (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(object, object)),
}
MSNC = [0x0, # MSNC0
0x10000000, # MSNC1
0x20000000, # MSNC2
0x30000000, # MSNC3
0x40000000, # MSNC4
0x50000000, # MSNC5
0x60000000, # MSNC6
0x70000000, # MSNC7
0x80000000, # MSNC8
0x90000000, # MSNC9
0xA0000000] # MSNC10
_CAPABILITIES = {
'is_bot': 0x00020000,
'is_mobile_device': 0x00000001,
'is_msn_mobile': 0x00000040,
'is_msn_direct_device': 0x00000080,
'is_media_center_user': 0x00002000,
'is_msn8_user': 0x00000002,
'is_web_client': 0x00000200,
'is_tgw_client': 0x00000800,
'has_space': 0x00001000,
'has_webcam': 0x00000010,
'has_onecare': 0x01000000,
'renders_gif': 0x00000004,
'renders_isf': 0x00000008,
'supports_chunking': 0x00000020,
'supports_direct_im': 0x00004000,
'supports_winks': 0x00008000,
'supports_shared_search': 0x00010000,
'supports_voice_im': 0x00040000,
'supports_secure_channel': 0x00080000,
'supports_sip_invite': 0x00100000,
'supports_tunneled_sip': 0x00200000,
'supports_shared_drive': 0x00400000,
'p2p_aware': 0xF0000000,
'p2p_supports_turn': 0x02000000,
'p2p_bootstrap_via_uun': 0x04000000
}
_EXTRA = {
'supports_rtc_video': 0x00000010,
'supports_p2pv2': 0x00000030
}
def __init__(self, msnc=0, client_id="0:0"):
"""Initializer
@param msnc: The MSNC version
@type msnc: integer < 11 and >= 0
@param client_id: the full client ID"""
gobject.GObject.__init__(self)
caps = client_id.split(":")
capabilities = int(caps[0])
if len(caps) > 1:
extra = int(caps[1])
else:
extra = 0
gobject.GObject.__setattr__(self, 'capabilities', self.MSNC[msnc] | capabilities)
gobject.GObject.__setattr__(self, 'extra', extra)
def __getattr__(self, name):
if name in self._CAPABILITIES:
mask = self._CAPABILITIES[name]
id = self.capabilities
elif name in self._EXTRA:
mask = self._EXTRA[name]
id = self.extra
else:
raise AttributeError("object 'ClientCapabilities' has no attribute '%s'" % name)
return (id & mask != 0)
def __setattr__(self, name, value):
if name in self._CAPABILITIES:
mask = self._CAPABILITIES[name]
old_value = bool(self.capabilities & mask)
if value:
gobject.GObject.__setattr__(self, 'capabilities', self.capabilities | mask)
else:
gobject.GObject.__setattr__(self, 'capabilities', self.capabilities & ~mask)
if value != old_value:
self.emit('capability-changed', name, value)
elif name in self._EXTRA:
mask = self._EXTRA[name]
old_value = bool(self.extra & mask)
if value:
gobject.GObject.__setattr__(self, 'extra', self.extra | mask)
else:
gobject.GObject.__setattr__(self, 'extra', self.extra & ~mask)
if value != old_value:
self.emit('capability-changed', name, value)
else:
raise AttributeError("object 'ClientCapabilities' has no attribute '%s'" % name)
def __str__(self):
msnc = self.MSNC.index(self.capabilities & 0xF0000000)
if msnc >= 9:
client_id = "%s:%s" % (self.capabilities, self.extra)
else:
client_id = str(self.capabilities)
return client_id
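# Illustrative sketch (not part of the original module, and never invoked):
# a hypothetical helper showing how the capability flags above map onto the
# "capabilities:extra" client id string produced by __str__().
def _example_client_capabilities():
    caps = ClientCapabilities(10)      # an MSNC10 client
    caps.supports_winks = True         # toggle a single capability bit
    assert caps.supports_winks
    return str(caps)                   # serialized "<capabilities>:<extra>" form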
class NetworkID(object):
"""Refers to the contact Network ID"""
MSN = 1
"""Microsoft Network"""
LCS = 2
"""Microsoft Live Communication Server"""
MOBILE = 4
"""Mobile phones"""
EXTERNAL = 32
"""External IM etwork, currently Yahoo!"""
class Presence(object):
"""Presence states.
The members of this class are used to identify the Presence that a user
wants to advertise to the contacts on his/her contact list.
@cvar ONLINE: online
@cvar BUSY: busy
@cvar IDLE: idle
@cvar AWAY: away
@cvar BE_RIGHT_BACK: be right back
@cvar ON_THE_PHONE: on the phone
@cvar OUT_TO_LUNCH: out to lunch
@cvar INVISIBLE: status hidden from contacts
@cvar OFFLINE: offline"""
ONLINE = 'NLN'
BUSY = 'BSY'
IDLE = 'IDL'
AWAY = 'AWY'
BE_RIGHT_BACK = 'BRB'
ON_THE_PHONE = 'PHN'
OUT_TO_LUNCH = 'LUN'
INVISIBLE = 'HDN'
OFFLINE = 'FLN'
class Privacy(object):
"""User privacy, defines the default policy concerning contacts not
belonging to the ALLOW list nor to the BLOCK list.
@cvar ALLOW: allow by default
@cvar BLOCK: block by default"""
ALLOW = 'AL'
BLOCK = 'BL'
class Membership(object):
"""Contact Membership"""
NONE = 0
"""Contact doesn't belong to the contact list, but belongs to the address book"""
FORWARD = 1
"""Contact belongs to our contact list"""
ALLOW = 2
"""Contact is explicitely allowed to see our presence regardless of the
currently set L{Privacy<papyon.profile.Privacy>}"""
BLOCK = 4
"""Contact is explicitely forbidden from seeing our presence regardless of
the currently set L{Privacy<papyon.profile.Privacy>}"""
REVERSE = 8
"""We belong to the FORWARD list of the contact"""
PENDING = 16
"""Contact pending"""
class ContactType(object):
"""Automatic update status flag"""
ME = "Me"
"""Contact is the user so there's no automatic update relationship"""
EXTERNAL = "Messenger2"
"""Contact is part of an external messenger service so there's no automatic
update relationship with the user"""
REGULAR = "Regular"
"""Contact has no automatic update relationship with the user"""
LIVE = "Live"
"""Contact has an automatic update relationship with the user and an
automatic update already occured"""
LIVE_PENDING = "LivePending"
"""Contact was requested automatic update from the user and didn't
give its authorization yet"""
LIVE_REJECTED = "LiveRejected"
"""Contact was requested automatic update from the user and rejected
the request"""
LIVE_DROPPED = "LiveDropped"
"""Contact had an automatic update relationship with the user but
the contact dropped it"""
class ContactFlag(object):
"""Internal contact flag"""
EXTENDED_PRESENCE_KNOWN = 1
"""Set once we receive the extended presence (UBX) for a buddy"""
class BaseContact(gobject.GObject):
__gsignals__ = {
"end-point-added": (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(object,)),
"end-point-removed": (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(object,)),
}
__gproperties__ = {
"client-capabilities": (gobject.TYPE_STRING,
"Client capabilities",
"The client capabilities of the contact 's client",
"",
gobject.PARAM_READABLE),
"current-media": (gobject.TYPE_PYOBJECT,
"Current media",
"The current media that the user wants to display",
gobject.PARAM_READABLE),
"display-name": (gobject.TYPE_STRING,
"Friendly name",
"A nickname that the user chooses to display to others",
"",
gobject.PARAM_READABLE),
"end-points": (gobject.TYPE_PYOBJECT,
"End points",
"List of locations where the user is connected",
gobject.PARAM_READABLE),
"flags": (gobject.TYPE_UINT,
"Flags",
"Contact flags.",
0, 1, 0, gobject.PARAM_READABLE),
"msn-object": (gobject.TYPE_STRING,
"MSN Object",
"MSN Object attached to the user, this generally represent "
"its display picture",
"",
gobject.PARAM_READABLE),
"personal-message": (gobject.TYPE_STRING,
"Personal message",
"The personal message that the user wants to display",
"",
gobject.PARAM_READABLE),
"presence": (gobject.TYPE_STRING,
"Presence",
"The presence to show to others",
Presence.OFFLINE,
gobject.PARAM_READABLE),
"signature-sound": (gobject.TYPE_PYOBJECT,
"Signature sound",
"The sound played by others' client when the user connects",
gobject.PARAM_READABLE),
}
def __init__(self):
gobject.GObject.__init__(self)
self._client_capabilities = ClientCapabilities()
self._current_media = None
self._display_name = ""
self._end_points = {}
self._flags = 0
self._personal_message = ""
self._presence = Presence.OFFLINE
self._msn_object = None
self._signature_sound = None
@property
def account(self):
"""Contact account
@rtype: utf-8 encoded string"""
return self._account
@property
def client_id(self):
"""The user capabilities
@rtype: ClientCapabilities"""
return self._client_capabilities
@property
def client_capabilities(self):
"""The user capabilities
@rtype: ClientCapabilities"""
return self._client_capabilities
@property
def current_media(self):
"""Contact current media
@rtype: (artist: string, track: string)"""
return self._current_media
@property
def display_name(self):
"""Contact display name
@rtype: utf-8 encoded string"""
return self._display_name
@property
def end_points(self):
"""List of contact's locations
@rtype: list of string"""
return self._end_points
@property
def flags(self):
"""Internal contact flags
        @rtype: bitmask of L{ContactFlag<papyon.profile.ContactFlag>}s"""
return self._flags
@property
def id(self):
"""Contact identifier in a GUID form
@rtype: GUID string"""
return self._id
@property
def msn_object(self):
"""Contact MSN Object
@type: L{MSNObject<papyon.p2p.MSNObject>}"""
return self._msn_object
@property
def network_id(self):
"""Contact network ID
@rtype: L{NetworkID<papyon.profile.NetworkID>}"""
return self._network_id
@property
def personal_message(self):
"""Contact personal message
@rtype: utf-8 encoded string"""
return self._personal_message
@property
def presence(self):
"""Contact presence
@rtype: L{Presence<papyon.profile.Presence>}"""
return self._presence
@property
    def signature_sound(self):
"""Contact signature sound
@type: string"""
return self._signature_sound
### flags management
def has_flag(self, flags):
return (self.flags & flags) == flags
def _set_flags(self, flags):
logger.info("Set contact %s flags to %i" % (self._account, flags))
self._flags = flags
self.notify("flags")
def _add_flag(self, flag):
self._set_flags(self._flags | flag)
def _remove_flag(self, flag):
self._set_flags(self._flags & ~flag)
def _server_property_changed(self, name, value):
if name == "client-capabilities":
value = ClientCapabilities(client_id=value)
attr_name = "_" + name.lower().replace("-", "_")
old_value = getattr(self, attr_name)
if value != old_value:
setattr(self, attr_name, value)
self.notify(name)
if name == "end-points":
self._diff_end_points(old_value, value)
def _diff_end_points(self, old_eps, new_eps):
added_eps = set(new_eps.keys()) - set(old_eps.keys())
removed_eps = set(old_eps.keys()) - set(new_eps.keys())
for ep in added_eps:
self.emit("end-point-added", new_eps[ep])
for ep in removed_eps:
self.emit("end-point-removed", old_eps[ep])
def do_get_property(self, pspec):
name = pspec.name.lower().replace("-", "_")
return getattr(self, name)
gobject.type_register(BaseContact)
class Profile(BaseContact):
"""Profile of the User connecting to the service"""
__gproperties__ = {
"profile": (gobject.TYPE_PYOBJECT,
"Profile",
"the text/x-msmsgsprofile sent by the server",
gobject.PARAM_READABLE),
"privacy": (gobject.TYPE_STRING,
"Privacy",
"The privacy policy to use",
Privacy.BLOCK,
gobject.PARAM_READABLE),
}
def __init__(self, account, ns_client):
BaseContact.__init__(self)
self._ns_client = ns_client
self._account = account[0]
self._password = account[1]
self._id = "00000000-0000-0000-0000-000000000000"
self._profile = ""
self._network_id = NetworkID.MSN
self._display_name = self._account.split("@", 1)[0]
self._privacy = Privacy.BLOCK
self._end_point_name = ""
self._client_capabilities = ClientCapabilities(10)
self._client_capabilities.supports_sip_invite = True
self._client_capabilities.supports_tunneled_sip = True
self._client_capabilities.supports_p2pv2 = True
self._client_capabilities.p2p_bootstrap_via_uun = True
self._client_capabilities.connect("capability-changed",
self._client_capability_changed)
self.__pending_set_presence = [self._presence, self._client_capabilities, self._msn_object]
        # third slot is consumed by the signature_sound setter below
        self.__pending_set_personal_message = [self._personal_message, self._current_media, self._signature_sound]
@property
def password(self):
"""The user password
@rtype: utf-8 encoded string"""
return self._password
@property
def profile(self):
"""The user profile retrieved from the MSN servers
@rtype: dict of fields"""
return self._profile
@rw_property
def display_name():
"""The display name shown to you contacts
@type: utf-8 encoded string"""
def fset(self, display_name):
if not display_name:
return
self._ns_client.set_display_name(display_name)
def fget(self):
return self._display_name
return locals()
@rw_property
def presence():
"""The presence displayed to you contacts
@type: L{Presence<papyon.profile.Presence>}"""
def fset(self, presence):
if presence == self._presence:
return
self.__pending_set_presence[0] = presence
self._ns_client.set_presence(*self.__pending_set_presence)
def fget(self):
return self._presence
return locals()
@rw_property
def privacy():
"""The default privacy, can be either Privacy.ALLOW or Privacy.BLOCK
@type: L{Privacy<papyon.profile.Privacy>}"""
def fset(self, privacy):
self._ns_client.set_privacy(privacy)
def fget(self):
return self._privacy
return locals()
@rw_property
def personal_message():
"""The personal message displayed to you contacts
@type: utf-8 encoded string"""
def fset(self, personal_message):
if personal_message == self._personal_message:
return
self.__pending_set_personal_message[0] = personal_message
self._ns_client.set_personal_message(*self.__pending_set_personal_message)
def fget(self):
return self._personal_message
return locals()
@rw_property
def current_media():
"""The current media displayed to you contacts
@type: (artist: string, track: string)"""
def fset(self, current_media):
if current_media == self._current_media:
return
self.__pending_set_personal_message[1] = current_media
self._ns_client.set_personal_message(*self.__pending_set_personal_message)
def fget(self):
return self._current_media
return locals()
@rw_property
def signature_sound():
"""The sound played when you are connecting
@type: string"""
def fset(self, signature_sound):
if signature_sound == self._signature_sound:
return
self.__pending_set_personal_message[2] = signature_sound
self._ns_client.set_personal_message(*self.__pending_set_personal_message)
def fget(self):
return self._signature_sound
return locals()
@rw_property
def end_point_name():
def fset(self, name):
if name == self._end_point_name:
return
self._ns_client.set_end_point_name(name)
def fget(self):
return self._end_point_name
return locals()
@rw_property
def msn_object():
"""The MSNObject attached to your contact, this MSNObject represents the
display picture to be shown to your peers
@type: L{MSNObject<papyon.p2p.MSNObject>}"""
def fset(self, msn_object):
if msn_object == self._msn_object:
return
self.__pending_set_presence[2] = msn_object
self._ns_client.set_presence(*self.__pending_set_presence)
def fget(self):
return self._msn_object
return locals()
@rw_property
def presence_msn_object():
def fset(self, args):
presence, msn_object = args
if presence == self._presence and msn_object == self._msn_object:
return
self.__pending_set_presence[0] = presence
self.__pending_set_presence[2] = msn_object
self._ns_client.set_presence(*self.__pending_set_presence)
def fget(self):
return self._presence, self._msn_object
return locals()
@rw_property
def personal_message_current_media():
def fset(self, args):
personal_message, current_media = args
if personal_message == self._personal_message and \
current_media == self._current_media:
return
self.__pending_set_personal_message[0] = personal_message
self.__pending_set_personal_message[1] = current_media
self._ns_client.set_personal_message(*self.__pending_set_personal_message)
def fget(self):
return self._personal_message, self._current_media
return locals()
def request_profile_url(self, callback):
self._ns_client.send_url_request(('PROFILE', '0x0409'), callback)
def _client_capability_changed(self, client, name, value):
self.__pending_set_presence[1] = self._client_capabilities
self._ns_client.set_presence(*self.__pending_set_presence)
def _server_property_changed(self, name, value):
if name == "msn-object" and value is not None:
self.__pending_set_presence[2] = value
BaseContact._server_property_changed(self, name, value)
gobject.type_register(Profile)
class Contact(BaseContact):
"""Contact related information"""
__gsignals__ = {
"infos-changed": (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(object,)),
}
__gproperties__ = {
"memberships": (gobject.TYPE_UINT,
"Memberships",
"Membership relation with the contact.",
0, 31, 0, gobject.PARAM_READABLE),
"groups": (gobject.TYPE_PYOBJECT,
"Groups",
"The groups the contact belongs to",
gobject.PARAM_READABLE),
"infos": (gobject.TYPE_PYOBJECT,
"Informations",
"The contact informations",
gobject.PARAM_READABLE),
"contact-type": (gobject.TYPE_PYOBJECT,
"Contact type",
"The contact automatic update status flag",
gobject.PARAM_READABLE),
}
def __init__(self, id, network_id, account, display_name, cid=None,
memberships=Membership.NONE, contact_type=ContactType.REGULAR):
"""Initializer"""
BaseContact.__init__(self)
self._id = id or "00000000-0000-0000-0000-000000000000"
self._cid = cid or "00000000-0000-0000-0000-000000000000"
self._network_id = network_id
self._account = account
self._display_name = display_name
self._attributes = {'icon_url' : None}
self._groups = set()
self._infos = {}
self._memberships = memberships
self._contact_type = contact_type
def __repr__(self):
def memberships_str():
m = []
memberships = self._memberships
if memberships & Membership.FORWARD:
m.append('FORWARD')
if memberships & Membership.ALLOW:
m.append('ALLOW')
if memberships & Membership.BLOCK:
m.append('BLOCK')
if memberships & Membership.REVERSE:
m.append('REVERSE')
if memberships & Membership.PENDING:
m.append('PENDING')
return " | ".join(m)
template = "<papyon.Contact id='%s' network='%u' account='%s' memberships='%s'>"
return template % (self._id, self._network_id, self._account, memberships_str())
@property
def attributes(self):
"""Contact attributes
@rtype: {key: string => value: string}"""
return self._attributes.copy()
@property
def cid(self):
"""Contact ID
@rtype: GUID string"""
return self._cid
@property
def groups(self):
"""Contact list of groups
@rtype: set(L{Group<papyon.profile.Group>}...)"""
return self._groups
@property
def infos(self):
"""Contact informations
@rtype: {key: string => value: string}"""
return self._infos
@property
def memberships(self):
"""Contact membership value
@rtype: bitmask of L{Membership<papyon.profile.Membership>}s"""
return self._memberships
@property
def contact_type(self):
"""Contact automatic update status flag
@rtype: L{ContactType<papyon.profile.ContactType>}"""
return self._contact_type
@property
def domain(self):
"""Contact domain, which is basically the part after @ in the account
@rtype: utf-8 encoded string"""
result = self._account.split('@', 1)
if len(result) > 1:
return result[1]
else:
return ""
@property
def profile_url(self):
"""Contact profile url
@rtype: string"""
account = self._account
return "http://members.msn.com/default.msnw?mem=%s&pgmarket=" % account
### membership management
def is_member(self, memberships):
"""Determines if this contact belongs to the specified memberships
@type memberships: bitmask of L{Membership<papyon.profile.Membership>}s"""
return (self.memberships & memberships) == memberships
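        # e.g. (illustrative, not part of the original source):
        # contact.is_member(Membership.FORWARD | Membership.ALLOW) is True only
        # when both the FORWARD and ALLOW bits are set in the membership mask.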
def is_mail_contact(self):
"""Determines if this contact is a mail contact"""
blank_id = "00000000-0000-0000-0000-000000000000"
return (not self.is_member(Membership.FORWARD) and self.id != blank_id)
def _set_memberships(self, memberships):
self._memberships = memberships
self.notify("memberships")
def _add_membership(self, membership):
self._memberships |= membership
self.notify("memberships")
def _remove_membership(self, membership):
self._memberships ^= membership
self.notify("memberships")
def _server_attribute_changed(self, name, value):
self._attributes[name] = value
def _server_infos_changed(self, updated_infos):
self._infos.update(updated_infos)
self.emit("infos-changed", updated_infos)
self.notify("infos")
def _reset(self):
self._id = "00000000-0000-0000-0000-000000000000"
self._cid = "00000000-0000-0000-0000-000000000000"
self._groups = set()
self._flags = 0
self._server_property_changed("presence", Presence.OFFLINE)
self._server_property_changed("display-name", self._account)
self._server_property_changed("personal-message", "")
self._server_property_changed("current-media", None)
self._server_property_changed("msn-object", None)
self._server_property_changed("client-capabilities", "0:0")
self._server_property_changed("end-points", {})
self._server_infos_changed({})
### group management
def _add_group_ownership(self, group):
self._groups.add(group)
def _delete_group_ownership(self, group):
self._groups.discard(group)
gobject.type_register(Contact)
class Group(gobject.GObject):
"""Group
@undocumented: __gsignals__, __gproperties__, do_get_property"""
__gproperties__ = {
"name": (gobject.TYPE_STRING,
"Group name",
"Name that the user chooses for the group",
"",
gobject.PARAM_READABLE)
}
def __init__(self, id, name):
"""Initializer"""
gobject.GObject.__init__(self)
self._id = id
self._name = name
@property
def id(self):
"""Group identifier in a GUID form
@rtype: GUID string"""
return self._id
@property
def name(self):
"""Group name
@rtype: utf-8 encoded string"""
return self._name
def _server_property_changed(self, name, value):
attr_name = "_" + name.lower().replace("-", "_")
old_value = getattr(self, attr_name)
if value != old_value:
setattr(self, attr_name, value)
self.notify(name)
def do_get_property(self, pspec):
name = pspec.name.lower().replace("-", "_")
return getattr(self, name)
gobject.type_register(Group)
class EndPoint(object):
def __init__(self, id, caps):
self.id = id
self.capabilities = ClientCapabilities(client_id=caps)
self.name = ""
self.idle = False
self.state = ""
self.client_type = 0
def __eq__(self, endpoint):
return (self.id == endpoint.id and
self.capabilities == endpoint.capabilities and
self.name == endpoint.name and
self.idle == endpoint.idle and
self.state == endpoint.state and
self.client_type == endpoint.client_type)
| billiob/papyon | papyon/profile.py | Python | gpl-2.0 | 31,976 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Registry definition for fixture datasets."""
from flask.ext.registry import RegistryProxy
from invenio.ext.registry import ModuleAutoDiscoveryRegistry
from invenio.utils.datastructures import LazyDict
fixtures_proxy = RegistryProxy(
'fixtures', ModuleAutoDiscoveryRegistry, 'fixtures')
def fixtures_loader():
"""Load fixtures datasets."""
out = {}
for fixture in fixtures_proxy:
for data in getattr(fixture, '__all__', dir(fixture)):
if data[-4:] != 'Data' or data in out:
continue
out[data] = getattr(fixture, data)
return out
fixtures = LazyDict(fixtures_loader)
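# Illustrative example (hypothetical fixture module, not shipped with Invenio):
# a 'fixtures' module inside a registered package that defines classes whose
# names end in 'Data' is picked up by fixtures_loader(), e.g.
#
#     class UserData(object):
#         class user_1:
#             nickname = u'admin'
#
# and then becomes available lazily as fixtures['UserData'].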
| lnielsen/invenio | invenio/ext/fixtures/registry.py | Python | gpl-2.0 | 1,433 |
# -*- coding: utf-8 -*-
"""
Created on 17 Aug 2012
@author: Éric Piel
Copyright © 2012 Éric Piel, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms
of the GNU General Public License version 2 as published by the Free Software
Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
"""
from __future__ import division
# for listing all the types of file format supported
import importlib
import logging
from odemis.dataio import tiff
import os
# The interface of a "format manager" is as follows:
# * one module
# * FORMAT (string): user friendly name of the format
# * EXTENSIONS (list of strings): possible file-name extensions
# * export (callable): write model.DataArray into a file
# * read_data (callable): read a file into model.DataArray
# * read_thumbnail (callable): read the thumbnail(s) of a file
# if it doesn't support writing, then it has no .export(), and if it doesn't
# support reading, then it has no read_data().
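# A minimal sketch of such a format module (illustrative only; the argument
# names below are assumptions, not the actual odemis exporter signatures):
#
#     FORMAT = "Example"
#     EXTENSIONS = [u".exm"]
#
#     def export(filename, data, thumbnail=None):
#         ...  # write the model.DataArray(s) into 'filename'
#
#     def read_data(filename):
#         ...  # return the model.DataArray(s) stored in 'filename'
#
#     def read_thumbnail(filename):
#         ...  # return the thumbnail(s) of 'filename', possibly an empty list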
__all__ = ["tiff", "stiff", "hdf5", "png", "csv"]
def get_available_formats(mode=os.O_RDWR, allowlossy=False):
"""
Find the available file formats
mode (os.O_RDONLY, os.O_WRONLY, or os.O_RDWR): whether only list
formats which can be read, which can be written, or all of them.
allowlossy (bool): If True, will also return the formats that can lose some
      of the original information (when writing the data to a file)
return (dict string -> list of strings): name of each format -> list of
extensions
"""
formats = {}
# Look dynamically which format is available
for module_name in __all__:
try:
exporter = importlib.import_module("." + module_name, "odemis.dataio")
except Exception:
logging.info("Skipping exporter %s, which failed to load", module_name)
continue # module cannot be loaded
if not allowlossy and hasattr(exporter, "LOSSY") and exporter.LOSSY:
logging.debug("Skipping exporter %s as it is lossy", module_name)
continue
if ((mode == os.O_RDONLY and not hasattr(exporter, "read_data")) or
(mode == os.O_WRONLY and not hasattr(exporter, "export"))):
continue
formats[exporter.FORMAT] = exporter.EXTENSIONS
if not formats:
logging.error("No file converter found!")
return formats
def get_converter(fmt):
""" Return the converter corresponding to a format name
:param fmt: (string) the format name
:returns: (module) the converter
:raises ValueError: in case no exporter can be found
"""
# Look dynamically which format is available
for module_name in __all__:
try:
converter = importlib.import_module("." + module_name, "odemis.dataio")
except (ValueError, TypeError, ImportError):
logging.info("Import of converter %s failed", module_name, exc_info=True)
continue # module cannot be loaded
if fmt == converter.FORMAT:
return converter
raise ValueError("No converter for format %s found" % fmt)
def find_fittest_converter(filename, default=tiff, mode=os.O_WRONLY, allowlossy=False):
"""
Find the most fitting exporter according to a filename (actually, its extension)
filename (string): (path +) filename with extension
    default (dataio module): default exporter to pick if no really fitting
exporter is found
mode: cf get_available_formats()
allowlossy: cf get_available_formats()
    returns (dataio module): the right exporter
"""
# Find the extension of the file
basename = os.path.basename(filename).lower()
if basename == "":
raise ValueError("Filename should have at least one letter: '%s'" % filename)
# make sure we pick the format with the longest fitting extension
best_len = 0
best_fmt = None
for fmt, exts in get_available_formats(mode, allowlossy).items():
for e in exts:
if filename.endswith(e) and len(e) > best_len:
best_len = len(e)
best_fmt = fmt
if best_fmt is not None:
logging.debug("Determined that '%s' corresponds to %s format",
basename, best_fmt)
conv = get_converter(best_fmt)
else:
conv = default
return conv
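# Illustrative usage of this module (assumed call pattern, not from the
# original sources; the export signature varies per format converter):
#
#     from odemis import dataio
#     dataio.get_available_formats(os.O_WRONLY)   # e.g. {"TIFF": [".tiff", ...], ...}
#     conv = dataio.find_fittest_converter("scan-01.tiff")
#     conv.export("scan-01.tiff", data)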
| gstiebler/odemis | src/odemis/dataio/__init__.py | Python | gpl-2.0 | 4,684 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id: tdAddBasic1.py $
"""
VirtualBox Validation Kit - Additions Basics #1.
"""
__copyright__ = \
"""
Copyright (C) 2010-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 100880 $"
# Standard Python imports.
import os;
import sys;
# Only the main script needs to modify the path.
try: __file__
except: __file__ = sys.argv[0];
g_ksValidationKitDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))));
sys.path.append(g_ksValidationKitDir);
# Validation Kit imports.
from testdriver import reporter;
from testdriver import base;
from testdriver import vbox;
from testdriver import vboxcon;
# Sub test driver imports.
sys.path.append(os.path.dirname(os.path.abspath(__file__))); # For sub-test drivers.
from tdAddGuestCtrl import SubTstDrvAddGuestCtrl;
class tdAddBasic1(vbox.TestDriver): # pylint: disable=R0902
"""
Additions Basics #1.
"""
## @todo
    # - More of the settings stuff can be and needs to be generalized!
#
def __init__(self):
vbox.TestDriver.__init__(self);
self.oTestVmSet = self.oTestVmManager.getStandardVmSet('nat');
self.asTestsDef = ['guestprops', 'stdguestprops', 'guestcontrol'];
self.asTests = self.asTestsDef;
self.addSubTestDriver(SubTstDrvAddGuestCtrl(self));
#
# Overridden methods.
#
def showUsage(self):
rc = vbox.TestDriver.showUsage(self);
reporter.log('');
reporter.log('tdAddBasic1 Options:');
reporter.log(' --tests <s1[:s2[:]]>');
reporter.log(' Default: %s (all)' % (':'.join(self.asTestsDef)));
reporter.log(' --quick');
reporter.log(' Same as --virt-modes hwvirt --cpu-counts 1.');
return rc;
def parseOption(self, asArgs, iArg): # pylint: disable=R0912,R0915
if asArgs[iArg] == '--tests':
iArg += 1;
            if iArg >= len(asArgs): raise base.InvalidOption('The "--tests" option takes a colon-separated list of tests');
self.asTests = asArgs[iArg].split(':');
for s in self.asTests:
if s not in self.asTestsDef:
raise base.InvalidOption('The "--tests" value "%s" is not valid; valid values are: %s' \
% (s, ' '.join(self.asTestsDef)));
elif asArgs[iArg] == '--quick':
self.parseOption(['--virt-modes', 'hwvirt'], 0);
self.parseOption(['--cpu-counts', '1'], 0);
else:
return vbox.TestDriver.parseOption(self, asArgs, iArg);
return iArg + 1;
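    # Illustrative invocation (assumed, not from the original sources; the real
    # Validation Kit harness may pass additional options):
    #
    #     ./tdAddBasic1.py --tests guestprops:guestcontrol --quick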
def actionConfig(self):
if not self.importVBoxApi(): # So we can use the constant below.
return False;
eNic0AttachType = vboxcon.NetworkAttachmentType_NAT;
sGaIso = self.getGuestAdditionsIso();
return self.oTestVmSet.actionConfig(self, eNic0AttachType = eNic0AttachType, sDvdImage = sGaIso);
def actionExecute(self):
return self.oTestVmSet.actionExecute(self, self.testOneCfg);
#
# Test execution helpers.
#
def testOneCfg(self, oVM, oTestVm):
"""
Runs the specified VM thru the tests.
Returns a success indicator on the general test execution. This is not
the actual test result.
"""
fRc = False;
self.logVmInfo(oVM);
oSession, oTxsSession = self.startVmAndConnectToTxsViaTcp(oTestVm.sVmName, fCdWait = True, \
sFileCdWait = 'AUTORUN.INF');
if oSession is not None:
self.addTask(oSession);
# Do the testing.
reporter.testStart('Install');
fRc, oTxsSession = self.testInstallAdditions(oSession, oTxsSession, oTestVm);
reporter.testDone();
fSkip = not fRc;
reporter.testStart('Guest Properties');
if not fSkip:
fRc = self.testGuestProperties(oSession, oTxsSession, oTestVm) and fRc;
reporter.testDone(fSkip);
reporter.testStart('Guest Control');
if not fSkip:
(fRc2, oTxsSession) = self.aoSubTstDrvs[0].testIt(oTestVm, oSession, oTxsSession);
fRc = fRc2 and fRc;
reporter.testDone(fSkip);
        ## @todo Save and restore test.
## @todo Reset tests.
## @todo Final test: Uninstallation.
# Cleanup.
self.removeTask(oTxsSession);
#self.terminateVmBySession(oSession)
return fRc;
def testInstallAdditions(self, oSession, oTxsSession, oTestVm):
"""
Tests installing the guest additions
"""
if oTestVm.isWindows():
fRc = self.testWindowsInstallAdditions(oSession, oTxsSession, oTestVm);
else:
reporter.error('Guest Additions installation not implemented for %s yet! (%s)' % \
(oTestVm.sKind, oTestVm.sVmName));
fRc = False;
#
        # Verify installation of Guest Additions using common bits.
#
if fRc is True:
#
# Wait for the GAs to come up.
#
            ## @todo need to sign up for an OnAdditionsStateChanged event and wait for the
            # run level to at least reach Userland.
#
# Check if the additions are operational.
#
try: oGuest = oSession.o.console.guest;
except:
reporter.errorXcpt('Getting IGuest failed.');
return (False, oTxsSession);
# Check the additionsVersion attribute. It must not be empty.
reporter.testStart('IGuest::additionsVersion');
fRc = self.testIGuest_additionsVersion(oGuest);
reporter.testDone();
reporter.testStart('IGuest::additionsRunLevel');
self.testIGuest_additionsRunLevel(oGuest, oTestVm);
reporter.testDone();
## @todo test IAdditionsFacilities.
return (fRc, oTxsSession);
def testWindowsInstallAdditions(self, oSession, oTxsSession, oTestVm):
"""
Installs the Windows guest additions using the test execution service.
Since this involves rebooting the guest, we will have to create a new TXS session.
"""
asLogFile = [];
# Delete relevant log files.
if oTestVm.sKind in ('WindowsNT4',):
sWinDir = 'C:/WinNT/';
else:
sWinDir = 'C:/Windows/';
asLogFile = [sWinDir+'setupapi.log', sWinDir+'setupact.log', sWinDir+'setuperr.log'];
for sFile in asLogFile:
self.txsRmFile(oSession, oTxsSession, sFile);
# Install the public signing key.
if oTestVm.sKind not in ('WindowsNT4', 'Windows2000', 'WindowsXP', 'Windows2003'):
## TODO
pass;
#
# The actual install.
# Enable installing the optional auto-logon modules (VBoxGINA/VBoxCredProv) + (Direct)3D support.
# Also tell the installer to produce the appropriate log files.
#
fRc = self.txsRunTest(oTxsSession, 'VBoxWindowsAdditions.exe', 5 * 60 * 1000, \
'${CDROM}/VBoxWindowsAdditions.exe', ('${CDROM}/VBoxWindowsAdditions.exe', '/S', '/l', '/with_autologon'));
# For testing the installation (D)3D stuff ('/with_d3d') we need to boot up in safe mode.
#
# Reboot the VM and reconnect the TXS session.
#
if fRc is True:
(fRc, oTxsSession) = self.txsRebootAndReconnectViaTcp(oSession, oTxsSession, cMsTimeout = 3 * 60000);
# Add the Windows Guest Additions installer files to the files we want to download
# from the guest.
sGuestAddsDir = 'C:/Program Files/Oracle/VirtualBox Guest Additions/';
asLogFile.append(sGuestAddsDir + 'install.log');
        # Note: There won't be an install_ui.log because of the silent installation.
asLogFile.append(sGuestAddsDir + 'install_drivers.log');
asLogFile.append('C:/Windows/setupapi.log');
asLogFile.append('C:/Windows/setupapi.dev.log');
#
# Download log files.
# Ignore errors as all files above might not be present (or in different locations)
# on different Windows guests.
#
self.txsDownloadFiles(oSession, oTxsSession, asLogFile, fIgnoreErrors = True);
return (fRc, oTxsSession);
def testIGuest_additionsVersion(self, oGuest):
"""
Returns False if no version string could be obtained, otherwise True
even though errors are logged.
"""
try:
sVer = oGuest.additionsVersion;
except:
reporter.errorXcpt('Getting the additions version failed.');
return False;
reporter.log('IGuest::additionsVersion="%s"' % (sVer,));
if sVer.strip() == '':
reporter.error('IGuest::additionsVersion is empty.');
return False;
if sVer != sVer.strip():
            reporter.error('IGuest::additionsVersion contains spaces: "%s".' % (sVer,));
asBits = sVer.split('.');
if len(asBits) < 3:
            reporter.error('IGuest::additionsVersion does not contain at least three dot separated fields: "%s" (%d).'
% (sVer, len(asBits)));
## @todo verify the format.
return True;
def testIGuest_additionsRunLevel(self, oGuest, oTestVm):
"""
Do run level tests.
"""
if oTestVm.isLoggedOntoDesktop():
eExpectedRunLevel = vboxcon.AdditionsRunLevelType_Desktop;
else:
eExpectedRunLevel = vboxcon.AdditionsRunLevelType_Userland;
## @todo Insert wait for the desired run level.
try:
iLevel = oGuest.additionsRunLevel;
except:
reporter.errorXcpt('Getting the additions run level failed.');
return False;
reporter.log('IGuest::additionsRunLevel=%s' % (iLevel,));
if iLevel != eExpectedRunLevel:
reporter.error('Expected runlevel %d, found %d instead' % (eExpectedRunLevel, iLevel));
return True;
def testGuestProperties(self, oSession, oTxsSession, oTestVm):
"""
Test guest properties.
"""
_ = oSession; _ = oTxsSession; _ = oTestVm;
return True;
if __name__ == '__main__':
sys.exit(tdAddBasic1().main(sys.argv));
| sobomax/virtualbox_64bit_edd | src/VBox/ValidationKit/tests/additions/tdAddBasic1.py | Python | gpl-2.0 | 11,512 |
# -*- coding: utf-8 -*-
"""
Script to cache anonymous houseprint data into hp_anonymous.pkl
Created on 05/07/2014 by Roel De Coninck
"""
import os, sys
import inspect
script_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# add the path to opengrid to sys.path
sys.path.append(os.path.join(script_dir, os.pardir, os.pardir))
from opengrid.library.houseprint import Houseprint
##############################################################################
hp = Houseprint()
all_sensordata = hp.get_all_fluksosensors()
print('Sensor data fetched')
hp.save('/usr/local/src/opengrid/scripts/hp_anonymous.pkl')
hp.save('/var/www/private/hp_anonymous.pkl')
| EnergyID/opengrid | scripts/job_cache_anonymous_houseprint.py | Python | gpl-2.0 | 730 |
#!/usr/bin/env python3
import dis
def myFunc():
x = 1
y = 2
z = 'abc' # noqa
return x + y
print(myFunc.__name__)
print(myFunc.__code__.co_varnames)
print(myFunc.__code__.co_consts)
print(myFunc.__code__.co_code)
dis.disassemble(myFunc.__code__)
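# What the introspection above shows (illustrative; exact values depend on the
# CPython 3 version): co_varnames lists the local names ('x', 'y', 'z'),
# co_consts holds the literal constants (None, 1, 2, 'abc'), co_code is the raw
# bytecode string, and dis.disassemble() prints a readable listing of the
# LOAD/ADD/RETURN instructions that compute x + y.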
| jeremyprice/RU_Python_II | examples/example_function_obj.py | Python | gpl-2.0 | 269 |
# -*- coding: utf-8 -*-
"""
sphinx.directives
~~~~~~~~~~~~~~~~~
Handlers for additional ReST directives.
:copyright: 2007-2008 by Georg Brandl.
:license: BSD.
"""
import re
import sys
import string
import posixpath
from os import path
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx import addnodes
from sphinx.roles import caption_ref_re
from sphinx.util.compat import make_admonition
ws_re = re.compile(r'\s+')
# ------ index markup --------------------------------------------------------------
entrytypes = [
'single', 'pair', 'triple', 'module', 'keyword', 'operator',
'object', 'exception', 'statement', 'builtin',
]
def index_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
arguments = arguments[0].split('\n')
env = state.document.settings.env
targetid = 'index-%s' % env.index_num
env.index_num += 1
targetnode = nodes.target('', '', ids=[targetid])
state.document.note_explicit_target(targetnode)
indexnode = addnodes.index()
indexnode['entries'] = ne = []
for entry in arguments:
entry = entry.strip()
for type in entrytypes:
if entry.startswith(type+':'):
value = entry[len(type)+1:].strip()
env.note_index_entry(type, value, targetid, value)
ne.append((type, value, targetid, value))
break
# shorthand notation for single entries
else:
for value in entry.split(','):
env.note_index_entry('single', value.strip(), targetid, value.strip())
ne.append(('single', value.strip(), targetid, value.strip()))
return [indexnode, targetnode]
index_directive.arguments = (1, 0, 1)
directives.register_directive('index', index_directive)
# ------ information units ---------------------------------------------------------
def desc_index_text(desctype, currmodule, name):
if desctype == 'function':
if not currmodule:
return '%s() (built-in function)' % name
return '%s() (in module %s)' % (name, currmodule)
elif desctype == 'data':
if not currmodule:
return '%s (built-in variable)' % name
return '%s (in module %s)' % (name, currmodule)
elif desctype == 'class':
return '%s (class in %s)' % (name, currmodule)
elif desctype == 'exception':
return name
elif desctype == 'method':
try:
clsname, methname = name.rsplit('.', 1)
except:
if currmodule:
return '%s() (in module %s)' % (name, currmodule)
else:
return '%s()' % name
if currmodule:
return '%s() (%s.%s method)' % (methname, currmodule, clsname)
else:
return '%s() (%s method)' % (methname, clsname)
elif desctype == 'attribute':
try:
clsname, attrname = name.rsplit('.', 1)
except:
if currmodule:
return '%s (in module %s)' % (name, currmodule)
else:
return name
if currmodule:
return '%s (%s.%s attribute)' % (attrname, currmodule, clsname)
else:
return '%s (%s attribute)' % (attrname, clsname)
elif desctype == 'opcode':
return '%s (opcode)' % name
elif desctype == 'cfunction':
return '%s (C function)' % name
elif desctype == 'cmember':
return '%s (C member)' % name
elif desctype == 'cmacro':
return '%s (C macro)' % name
elif desctype == 'ctype':
return '%s (C type)' % name
elif desctype == 'cvar':
return '%s (C variable)' % name
else:
raise ValueError("unhandled descenv: %s" % desctype)
# ------ functions to parse a Python or C signature and create desc_* nodes.
py_sig_re = re.compile(r'''^([\w.]*\.)? # class names
(\w+) \s* # thing name
(?: \((.*)\) )? $ # optionally arguments
''', re.VERBOSE)
py_paramlist_re = re.compile(r'([\[\],])') # split at '[', ']' and ','
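# For example (illustrative), py_sig_re splits 'MyClass.method(arg1[, arg2])'
# into classname='MyClass.', name='method' and arglist='arg1[, arg2]';
# py_paramlist_re then tokenizes the arglist at '[', ']' and ','.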
def parse_py_signature(signode, sig, desctype, env):
"""
Transform a python signature into RST nodes.
Return (fully qualified name of the thing, classname if any).
If inside a class, the current class name is handled intelligently:
* it is stripped from the displayed name if present
* it is added to the full name (return value) if not present
"""
m = py_sig_re.match(sig)
if m is None: raise ValueError
classname, name, arglist = m.groups()
add_module = True
if env.currclass:
if classname and classname.startswith(env.currclass):
fullname = classname + name
# class name is given again in the signature
classname = classname[len(env.currclass):].lstrip('.')
add_module = False
elif classname:
# class name is given in the signature, but different
fullname = env.currclass + '.' + classname + name
else:
# class name is not given in the signature
fullname = env.currclass + '.' + name
add_module = False
else:
fullname = classname and classname + name or name
if classname:
signode += addnodes.desc_classname(classname, classname)
# exceptions are a special case, since they are documented in the
# 'exceptions' module.
elif add_module and env.config.add_module_names and \
env.currmodule and env.currmodule != 'exceptions':
nodetext = env.currmodule + '.'
signode += addnodes.desc_classname(nodetext, nodetext)
signode += addnodes.desc_name(name, name)
if not arglist:
if desctype in ('function', 'method'):
# for callables, add an empty parameter list
signode += addnodes.desc_parameterlist()
return fullname, classname
signode += addnodes.desc_parameterlist()
stack = [signode[-1]]
for token in py_paramlist_re.split(arglist):
if token == '[':
opt = addnodes.desc_optional()
stack[-1] += opt
stack.append(opt)
elif token == ']':
try: stack.pop()
except IndexError: raise ValueError
elif not token or token == ',' or token.isspace():
pass
else:
token = token.strip()
stack[-1] += addnodes.desc_parameter(token, token)
if len(stack) != 1: raise ValueError
return fullname, classname
c_sig_re = re.compile(
r'''^([^(]*?) # return type
([\w:]+) \s* # thing name (colon allowed for C++ class names)
(?: \((.*)\) )? $ # optionally arguments
''', re.VERBOSE)
c_funcptr_sig_re = re.compile(
r'''^([^(]+?) # return type
(\( [^()]+ \)) \s* # name in parentheses
\( (.*) \) $ # arguments
''', re.VERBOSE)
c_funcptr_name_re = re.compile(r'^\(\s*\*\s*(.*?)\s*\)$')
# RE to split at word boundaries
wsplit_re = re.compile(r'(\W+)')
# These C types aren't described in the reference, so don't try to create
# a cross-reference to them
stopwords = set(('const', 'void', 'char', 'int', 'long', 'FILE', 'struct'))
def parse_c_type(node, ctype):
# add cross-ref nodes for all words
for part in filter(None, wsplit_re.split(ctype)):
tnode = nodes.Text(part, part)
if part[0] in string.letters+'_' and part not in stopwords:
pnode = addnodes.pending_xref(
'', reftype='ctype', reftarget=part, modname=None, classname=None)
pnode += tnode
node += pnode
else:
node += tnode
def parse_c_signature(signode, sig, desctype):
"""Transform a C (or C++) signature into RST nodes."""
# first try the function pointer signature regex, it's more specific
m = c_funcptr_sig_re.match(sig)
if m is None:
m = c_sig_re.match(sig)
if m is None:
raise ValueError('no match')
rettype, name, arglist = m.groups()
signode += addnodes.desc_type("", "")
parse_c_type(signode[-1], rettype)
signode += addnodes.desc_name(name, name)
# clean up parentheses from canonical name
m = c_funcptr_name_re.match(name)
if m:
name = m.group(1)
if not arglist:
if desctype == 'cfunction':
# for functions, add an empty parameter list
signode += addnodes.desc_parameterlist()
return name
paramlist = addnodes.desc_parameterlist()
arglist = arglist.replace('`', '').replace('\\ ', '') # remove markup
# this messes up function pointer types, but not too badly ;)
args = arglist.split(',')
for arg in args:
arg = arg.strip()
param = addnodes.desc_parameter('', '', noemph=True)
try:
ctype, argname = arg.rsplit(' ', 1)
except ValueError:
# no argument name given, only the type
parse_c_type(param, arg)
else:
parse_c_type(param, ctype)
param += nodes.emphasis(' '+argname, ' '+argname)
paramlist += param
signode += paramlist
return name
opcode_sig_re = re.compile(r'(\w+(?:\+\d)?)\s*\((.*)\)')
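# e.g. (illustrative) 'CALL_FUNCTION (argc)' parses into opname='CALL_FUNCTION'
# and arglist='argc'; the optional '+\d' suffix allows names such as 'SLICE+1'.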
def parse_opcode_signature(signode, sig):
"""Transform an opcode signature into RST nodes."""
m = opcode_sig_re.match(sig)
if m is None: raise ValueError
opname, arglist = m.groups()
signode += addnodes.desc_name(opname, opname)
paramlist = addnodes.desc_parameterlist()
signode += paramlist
paramlist += addnodes.desc_parameter(arglist, arglist)
return opname.strip()
option_desc_re = re.compile(
r'(/|-|--)([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
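# e.g. (illustrative) '-f FILE' is captured as prefix='-', optname='f',
# args=' FILE'; for comma-separated synonyms such as '-f FILE, --file=FILE'
# each alternative is matched in turn and parse_option_desc() below joins them
# into one signature, returning the first option name.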
def parse_option_desc(signode, sig):
"""Transform an option description into RST nodes."""
count = 0
firstname = ''
for m in option_desc_re.finditer(sig):
prefix, optname, args = m.groups()
if count:
signode += addnodes.desc_classname(', ', ', ')
signode += addnodes.desc_name(prefix+optname, prefix+optname)
signode += addnodes.desc_classname(args, args)
if not count:
firstname = optname
count += 1
if not firstname:
raise ValueError
return firstname
def desc_directive(desctype, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
env = state.document.settings.env
node = addnodes.desc()
node['desctype'] = desctype
noindex = ('noindex' in options)
node['noindex'] = noindex
# remove backslashes to support (dummy) escapes; helps Vim's highlighting
signatures = map(lambda s: s.strip().replace('\\', ''), arguments[0].split('\n'))
names = []
clsname = None
for i, sig in enumerate(signatures):
# add a signature node for each signature in the current unit
# and add a reference target for it
sig = sig.strip()
signode = addnodes.desc_signature(sig, '')
signode['first'] = False
node.append(signode)
try:
if desctype in ('function', 'data', 'class', 'exception',
'method', 'attribute'):
name, clsname = parse_py_signature(signode, sig, desctype, env)
elif desctype in ('cfunction', 'cmember', 'cmacro', 'ctype', 'cvar'):
name = parse_c_signature(signode, sig, desctype)
elif desctype == 'opcode':
name = parse_opcode_signature(signode, sig)
elif desctype == 'cmdoption':
optname = parse_option_desc(signode, sig)
if not noindex:
targetname = 'cmdoption-' + optname
signode['ids'].append(targetname)
state.document.note_explicit_target(signode)
env.note_index_entry('pair', 'command line option; %s' % sig,
targetname, targetname)
env.note_reftarget('option', optname, targetname)
continue
elif desctype == 'describe':
signode.clear()
signode += addnodes.desc_name(sig, sig)
continue
else:
# another registered generic x-ref directive
rolename, indextemplate, parse_node = additional_xref_types[desctype]
if parse_node:
fullname = parse_node(env, sig, signode)
else:
signode.clear()
signode += addnodes.desc_name(sig, sig)
# normalize whitespace like xfileref_role does
fullname = ws_re.sub('', sig)
if not noindex:
targetname = '%s-%s' % (rolename, fullname)
signode['ids'].append(targetname)
state.document.note_explicit_target(signode)
if indextemplate:
indexentry = indextemplate % (fullname,)
indextype = 'single'
colon = indexentry.find(':')
if colon != -1:
indextype = indexentry[:colon].strip()
indexentry = indexentry[colon+1:].strip()
env.note_index_entry(indextype, indexentry,
targetname, targetname)
env.note_reftarget(rolename, fullname, targetname)
# don't use object indexing below
continue
except ValueError, err:
# signature parsing failed
signode.clear()
signode += addnodes.desc_name(sig, sig)
continue # we don't want an index entry here
# only add target and index entry if this is the first description of the
# function name in this desc block
if not noindex and name not in names:
fullname = (env.currmodule and env.currmodule + '.' or '') + name
# note target
if fullname not in state.document.ids:
signode['names'].append(fullname)
signode['ids'].append(fullname)
signode['first'] = (not names)
state.document.note_explicit_target(signode)
env.note_descref(fullname, desctype, lineno)
names.append(name)
env.note_index_entry('single',
desc_index_text(desctype, env.currmodule, name),
fullname, fullname)
subnode = addnodes.desc_content()
# needed for automatic qualification of members
clsname_set = False
if desctype in ('class', 'exception') and names:
env.currclass = names[0]
clsname_set = True
elif desctype in ('method', 'attribute') and clsname and not env.currclass:
env.currclass = clsname.strip('.')
clsname_set = True
# needed for association of version{added,changed} directives
if names:
env.currdesc = names[0]
state.nested_parse(content, content_offset, subnode)
if clsname_set:
env.currclass = None
env.currdesc = None
node.append(subnode)
return [node]
desc_directive.content = 1
desc_directive.arguments = (1, 0, 1)
desc_directive.options = {'noindex': directives.flag}
desctypes = [
# the Python ones
'function',
'data',
'class',
'method',
'attribute',
'exception',
# the C ones
'cfunction',
'cmember',
'cmacro',
'ctype',
'cvar',
# the odd one
'opcode',
# for command line options
'cmdoption',
# the generic one
'describe',
'envvar',
]
for _name in desctypes:
directives.register_directive(_name, desc_directive)
# Generic cross-reference types; they can be registered in the application;
# the directives are either desc_directive or target_directive
additional_xref_types = {
# directive name: (role name, index text, function to parse the desc node)
'envvar': ('envvar', 'environment variable; %s', None),
}
# ------ target --------------------------------------------------------------------
def target_directive(targettype, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""Generic target for user-defined cross-reference types."""
env = state.document.settings.env
rolename, indextemplate, _ = additional_xref_types[targettype]
# normalize whitespace in fullname like xfileref_role does
fullname = ws_re.sub('', arguments[0].strip())
targetname = '%s-%s' % (rolename, fullname)
node = nodes.target('', '', ids=[targetname])
state.document.note_explicit_target(node)
if indextemplate:
indexentry = indextemplate % (fullname,)
indextype = 'single'
colon = indexentry.find(':')
if colon != -1:
indextype = indexentry[:colon].strip()
indexentry = indexentry[colon+1:].strip()
env.note_index_entry(indextype, indexentry, targetname, targetname)
env.note_reftarget(rolename, fullname, targetname)
return [node]
target_directive.content = 0
target_directive.arguments = (1, 0, 1)
# ------ versionadded/versionchanged -----------------------------------------------
def version_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
node = addnodes.versionmodified()
node['type'] = name
node['version'] = arguments[0]
if len(arguments) == 2:
inodes, messages = state.inline_text(arguments[1], lineno+1)
node.extend(inodes)
if content:
state.nested_parse(content, content_offset, node)
ret = [node] + messages
else:
ret = [node]
env = state.document.settings.env
env.note_versionchange(node['type'], node['version'], node, lineno)
return ret
version_directive.arguments = (1, 1, 1)
version_directive.content = 1
directives.register_directive('deprecated', version_directive)
directives.register_directive('versionadded', version_directive)
directives.register_directive('versionchanged', version_directive)
# ------ see also ------------------------------------------------------------------
def seealso_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
rv = make_admonition(
addnodes.seealso, name, ['See also'], options, content,
lineno, content_offset, block_text, state, state_machine)
return rv
seealso_directive.content = 1
seealso_directive.arguments = (0, 0, 0)
directives.register_directive('seealso', seealso_directive)
# ------ production list (for the reference) ---------------------------------------
token_re = re.compile('`([a-z_]+)`')
def token_xrefs(text, env):
retnodes = []
pos = 0
for m in token_re.finditer(text):
if m.start() > pos:
txt = text[pos:m.start()]
retnodes.append(nodes.Text(txt, txt))
refnode = addnodes.pending_xref(m.group(1))
refnode['reftype'] = 'token'
refnode['reftarget'] = m.group(1)
refnode['modname'] = env.currmodule
refnode['classname'] = env.currclass
refnode += nodes.literal(m.group(1), m.group(1), classes=['xref'])
retnodes.append(refnode)
pos = m.end()
if pos < len(text):
retnodes.append(nodes.Text(text[pos:], text[pos:]))
return retnodes
def productionlist_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
env = state.document.settings.env
node = addnodes.productionlist()
messages = []
i = 0
for rule in arguments[0].split('\n'):
if i == 0 and ':' not in rule:
# production group
continue
i += 1
try:
name, tokens = rule.split(':', 1)
except ValueError:
break
subnode = addnodes.production()
subnode['tokenname'] = name.strip()
if subnode['tokenname']:
idname = 'grammar-token-%s' % subnode['tokenname']
if idname not in state.document.ids:
subnode['ids'].append(idname)
state.document.note_implicit_target(subnode, subnode)
env.note_reftarget('token', subnode['tokenname'], idname)
subnode.extend(token_xrefs(tokens, env))
node.append(subnode)
return [node] + messages
productionlist_directive.content = 0
productionlist_directive.arguments = (1, 0, 1)
directives.register_directive('productionlist', productionlist_directive)
# ------ section metadata ----------------------------------------------------------
def module_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
env = state.document.settings.env
modname = arguments[0].strip()
env.currmodule = modname
env.note_module(modname, options.get('synopsis', ''),
options.get('platform', ''),
'deprecated' in options)
modulenode = addnodes.module()
modulenode['modname'] = modname
modulenode['synopsis'] = options.get('synopsis', '')
targetnode = nodes.target('', '', ids=['module-' + modname])
state.document.note_explicit_target(targetnode)
ret = [modulenode, targetnode]
if 'platform' in options:
modulenode['platform'] = options['platform']
node = nodes.paragraph()
node += nodes.emphasis('Platforms: ', 'Platforms: ')
node += nodes.Text(options['platform'], options['platform'])
ret.append(node)
# the synopsis isn't printed; in fact, it is only used in the modindex currently
env.note_index_entry('single', '%s (module)' % modname, 'module-' + modname,
modname)
return ret
module_directive.arguments = (1, 0, 0)
module_directive.options = {'platform': lambda x: x,
'synopsis': lambda x: x,
'deprecated': directives.flag}
directives.register_directive('module', module_directive)
def currentmodule_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
# This directive is just to tell people that we're documenting
# stuff in module foo, but links to module foo won't lead here.
env = state.document.settings.env
modname = arguments[0].strip()
env.currmodule = modname
return []
currentmodule_directive.arguments = (1, 0, 0)
directives.register_directive('currentmodule', currentmodule_directive)
def author_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
# Show authors only if the show_authors option is on
env = state.document.settings.env
if not env.config.show_authors:
return []
para = nodes.paragraph()
emph = nodes.emphasis()
para += emph
if name == 'sectionauthor':
text = 'Section author: '
elif name == 'moduleauthor':
text = 'Module author: '
else:
text = 'Author: '
emph += nodes.Text(text, text)
inodes, messages = state.inline_text(arguments[0], lineno)
emph.extend(inodes)
return [para] + messages
author_directive.arguments = (1, 0, 1)
directives.register_directive('sectionauthor', author_directive)
directives.register_directive('moduleauthor', author_directive)
# ------ toctree directive ---------------------------------------------------------
def toctree_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
env = state.document.settings.env
suffix = env.config.source_suffix
dirname = posixpath.dirname(env.docname)
ret = []
subnode = addnodes.toctree()
includefiles = []
includetitles = {}
for docname in content:
if not docname:
continue
# look for explicit titles and documents ("Some Title <document>").
m = caption_ref_re.match(docname)
if m:
docname = m.group(2)
includetitles[docname] = m.group(1)
# absolutize filenames, remove suffixes
if docname.endswith(suffix):
docname = docname[:-len(suffix)]
docname = posixpath.normpath(posixpath.join(dirname, docname))
if docname not in env.found_docs:
ret.append(state.document.reporter.warning(
'toctree references unknown document %r' % docname, line=lineno))
else:
includefiles.append(docname)
subnode['includefiles'] = includefiles
subnode['includetitles'] = includetitles
subnode['maxdepth'] = options.get('maxdepth', -1)
ret.append(subnode)
return ret
toctree_directive.content = 1
toctree_directive.options = {'maxdepth': int}
directives.register_directive('toctree', toctree_directive)
# ------ centered directive ---------------------------------------------------------
def centered_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
if not arguments:
return []
subnode = addnodes.centered()
inodes, messages = state.inline_text(arguments[0], lineno)
subnode.extend(inodes)
return [subnode] + messages
centered_directive.arguments = (1, 0, 1)
directives.register_directive('centered', centered_directive)
# ------ highlight directive --------------------------------------------------------
def highlightlang_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
if 'linenothreshold' in options:
try:
linenothreshold = int(options['linenothreshold'])
except Exception:
linenothreshold = 10
else:
linenothreshold = sys.maxint
return [addnodes.highlightlang(lang=arguments[0].strip(),
linenothreshold=linenothreshold)]
highlightlang_directive.content = 0
highlightlang_directive.arguments = (1, 0, 0)
highlightlang_directive.options = {'linenothreshold': directives.unchanged}
directives.register_directive('highlight', highlightlang_directive)
directives.register_directive('highlightlang', highlightlang_directive) # old name
# ------ code-block directive -------------------------------------------------------
def codeblock_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
code = u'\n'.join(content)
literal = nodes.literal_block(code, code)
literal['language'] = arguments[0]
literal['linenos'] = 'linenos' in options
return [literal]
codeblock_directive.content = 1
codeblock_directive.arguments = (1, 0, 0)
codeblock_directive.options = {'linenos': directives.flag}
directives.register_directive('code-block', codeblock_directive)
directives.register_directive('sourcecode', codeblock_directive)
# ------ literalinclude directive ---------------------------------------------------
def literalinclude_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""Like .. include:: :literal:, but only warns if the include file is not found."""
if not state.document.settings.file_insertion_enabled:
return [state.document.reporter.warning('File insertion disabled', line=lineno)]
env = state.document.settings.env
rel_fn = arguments[0]
source_dir = path.dirname(path.abspath(state_machine.input_lines.source(
lineno - state_machine.input_offset - 1)))
fn = path.normpath(path.join(source_dir, rel_fn))
try:
f = open(fn)
text = f.read()
f.close()
except (IOError, OSError):
retnode = state.document.reporter.warning(
'Include file %r not found or reading it failed' % arguments[0], line=lineno)
else:
retnode = nodes.literal_block(text, text, source=fn)
retnode.line = 1
if options.get('language', ''):
retnode['language'] = options['language']
if 'linenos' in options:
retnode['linenos'] = True
state.document.settings.env.note_dependency(rel_fn)
return [retnode]
literalinclude_directive.options = {'linenos': directives.flag,
'language': directives.unchanged}
literalinclude_directive.content = 0
literalinclude_directive.arguments = (1, 0, 0)
directives.register_directive('literalinclude', literalinclude_directive)
# ------ glossary directive ---------------------------------------------------------
def glossary_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""Glossary with cross-reference targets for :dfn: roles."""
env = state.document.settings.env
node = addnodes.glossary()
state.nested_parse(content, content_offset, node)
# the content should be definition lists
dls = [child for child in node if isinstance(child, nodes.definition_list)]
# now, extract definition terms to enable cross-reference creation
for dl in dls:
dl['classes'].append('glossary')
for li in dl.children:
if not li.children or not isinstance(li[0], nodes.term):
continue
termtext = li.children[0].astext()
new_id = 'term-' + nodes.make_id(termtext)
if new_id in env.gloss_entries:
new_id = 'term-' + str(len(env.gloss_entries))
env.gloss_entries.add(new_id)
li[0]['names'].append(new_id)
li[0]['ids'].append(new_id)
state.document.settings.env.note_reftarget('term', termtext.lower(),
new_id)
return [node]
glossary_directive.content = 1
glossary_directive.arguments = (0, 0, 0)
directives.register_directive('glossary', glossary_directive)
# ------ acks directive -------------------------------------------------------------
def acks_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
node = addnodes.acks()
state.nested_parse(content, content_offset, node)
if len(node.children) != 1 or not isinstance(node.children[0], nodes.bullet_list):
return [state.document.reporter.warning('.. acks content is not a list',
line=lineno)]
return [node]
acks_directive.content = 1
acks_directive.arguments = (0, 0, 0)
directives.register_directive('acks', acks_directive)
| creasyw/IMTAphy | documentation/doctools/tags/0.2/sphinx/directives.py | Python | gpl-2.0 | 31,058 |
#
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Generic DB backend
#
import copy
import string
import sys
from spacewalk.common.usix import raise_with_tb
from spacewalk.common import rhn_rpm
from spacewalk.common.rhnConfig import CFG
from spacewalk.common.rhnException import rhnFault
from spacewalk.server import rhnSQL, rhnChannel, taskomatic
from importLib import Diff, Package, IncompletePackage, Erratum, \
AlreadyUploadedError, InvalidPackageError, TransactionError, \
InvalidSeverityError, SourcePackage
from backendLib import TableCollection, sanitizeValue, TableDelete, \
TableUpdate, TableLookup, addHash, TableInsert
sequences = {
'rhnPackageCapability': 'rhn_pkg_capability_id_seq',
'rhnPackage': 'rhn_package_id_seq',
'rhnSourceRPM': 'rhn_sourcerpm_id_seq',
'rhnPackageGroup': 'rhn_package_group_id_seq',
'rhnErrata': 'rhn_errata_id_seq',
'rhnChannel': 'rhn_channel_id_seq',
'rhnChannelProduct': 'rhn_channelprod_id_seq',
'rhnPackageSource': 'rhn_package_source_id_seq',
'rhnChannelFamily': 'rhn_channel_family_id_seq',
'rhnCVE': 'rhn_cve_id_seq',
'rhnChannelArch': 'rhn_channel_arch_id_seq',
'rhnPackageArch': 'rhn_package_arch_id_seq',
'rhnServerArch': 'rhn_server_arch_id_seq',
'rhnCPUArch': 'rhn_cpu_arch_id_seq',
'rhnErrataFile': 'rhn_erratafile_id_seq',
'rhnKickstartableTree': 'rhn_kstree_id_seq',
'rhnArchType': 'rhn_archtype_id_seq',
'rhnPackageChangeLogRec': 'rhn_pkg_cl_id_seq',
'rhnPackageChangeLogData': 'rhn_pkg_cld_id_seq',
'rhnContentSource': 'rhn_chan_content_src_id_seq',
}
class Backend:
# This object is initialized by the specific subclasses (e.g.
# OracleBackend)
tables = TableCollection()
# TODO: Some reason why we're passing a module in here? Seems to
# always be rhnSQL anyhow...
def __init__(self, dbmodule):
self.dbmodule = dbmodule
self.sequences = {}
# TODO: Why is there a pseudo-constructor here instead of just using
# __init__?
def init(self):
# Initializes the database connection objects
# This function has to be called on a newly defined Backend object
# Initialize sequences
for k, v in sequences.items():
self.sequences[k] = self.dbmodule.Sequence(v)
# TODO: Why do we return a reference to ourselves? If somebody called
# this method they already have a reference...
return self
def setDateFormat(self, format):
sth = self.dbmodule.prepare("alter session set nls_date_format ='%s'"
% format)
sth.execute()
sth = self.dbmodule.prepare("alter session set nls_timestamp_format ='%s'"
% format)
sth.execute()
# Note: postgres-specific implementation overrides this in PostgresBackend
def processCapabilities(self, capabilityHash):
h = self.dbmodule.prepare("select lookup_package_capability(:name, :version) as id from dual")
for name, version in capabilityHash.keys():
ver = version
if version is None or version == '':
ver = None
h.execute(name=name, version=ver)
row = h.fetchone_dict()
capabilityHash[(name, version)] = row['id']
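    # Illustrative effect (not part of the original comments): a hash such as
    # {('rpmlib(PayloadFilesHavePrefix)', '4.0-1'): None} is updated in place so
    # that each (name, version) key maps to the id returned by
    # lookup_package_capability().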
def processChangeLog(self, changelogHash):
sql = "select id from rhnPackageChangeLogData where name = :name and time = :time and text = :text"
h = self.dbmodule.prepare(sql)
toinsert = [[], [], [], []]
for name, time, text in changelogHash.keys():
val = {}
_buildExternalValue(val, {'name': name, 'time': time, 'text': text}, self.tables['rhnPackageChangeLogData'])
h.execute(name=val['name'], time=val['time'], text=val['text'])
row = h.fetchone_dict()
if row:
changelogHash[(name, time, text)] = row['id']
continue
id = self.sequences['rhnPackageChangeLogData'].next()
changelogHash[(name, time, text)] = id
toinsert[0].append(id)
toinsert[1].append(val['name'])
toinsert[2].append(val['time'])
toinsert[3].append(val['text'])
if not toinsert[0]:
# Nothing to do
return
sql = "insert into rhnPackageChangeLogData (id, name, time, text) values (:id, :name, :time, :text)"
h = self.dbmodule.prepare(sql)
h.executemany(id=toinsert[0], name=toinsert[1], time=toinsert[2], text=toinsert[3])
def processCVEs(self, cveHash):
# First figure out which CVE's are already inserted
sql = "select id from rhnCVE where name = :name"
h = self.dbmodule.prepare(sql)
toinsert = [[], []]
for cve_name in cveHash.keys():
h.execute(name=cve_name)
row = h.fetchone_dict()
if row:
cveHash[cve_name] = row['id']
continue
# Generate an id
id = self.sequences['rhnCVE'].next()
cveHash[cve_name] = id
toinsert[0].append(id)
toinsert[1].append(cve_name)
if not toinsert[0]:
# Nothing to do
return
sql = "insert into rhnCVE (id, name) values (:id, :name)"
h = self.dbmodule.prepare(sql)
h.executemany(id=toinsert[0], name=toinsert[1])
def lookupErrataFileTypes(self, hash):
hash.clear()
h = self.dbmodule.prepare("select id, label from rhnErrataFileType")
h.execute()
while 1:
row = h.fetchone_dict()
if not row:
break
hash[row['label']] = row['id']
return hash
def __lookupArches(self, archHash, table):
if not archHash:
return
sql = "select id from %s where label = :name" % table
h = self.dbmodule.prepare(sql)
for k in archHash.keys():
h.execute(name=str(k))
row = h.fetchone_dict()
if row:
archHash[k] = row['id']
# Else, it's an unsupported architecture
def lookupChannelArches(self, archHash):
return self.__lookupArches(archHash, 'rhnChannelArch')
def lookupPackageArches(self, archHash):
return self.__lookupArches(archHash, 'rhnPackageArch')
def lookupServerArches(self, archHash):
return self.__lookupArches(archHash, 'rhnServerArch')
def lookupArchTypes(self, arch_types_hash):
h = self.dbmodule.prepare(
"select id, name from rhnArchType where label = :label")
seq = self.sequences['rhnArchType']
updates = [[], []]
inserts = [[], [], []]
results = {}
for label, name in arch_types_hash.items():
h.execute(label=label)
row = h.fetchone_dict()
if not row:
next_id = seq.next()
inserts[0].append(next_id)
inserts[1].append(label)
inserts[2].append(name)
results[label] = next_id
continue
aid = row['id']
results[label] = aid
if name == row['name']:
# Nothing to do
continue
updates[0].append(aid)
updates[1].append(name)
if inserts[0]:
h = self.dbmodule.prepare("""
insert into rhnArchType (id, label, name)
values (:id, :label, :name)
""")
h.executemany(id=inserts[0], label=inserts[1], name=inserts[2])
if updates[0]:
h = self.dbmodule.prepare("""
update rhnArchType
set name = :name
where id = :id
""")
h.executemany(id=updates[0], name=updates[1])
# Finally, update the hash
arch_types_hash.update(results)
def _lookupOrg(self):
# Returns the org id
sql = "select min(id) as id from web_customer"
h = self.dbmodule.prepare(sql)
h.execute()
rows = h.fetchall_dict()
if not rows:
raise ValueError("No user is created")
return rows[0]['id']
def lookupOrg(self, org_name=None):
if not org_name:
return self._lookupOrg()
# Returns id of the org if found, None otherwise
sql = "select id from web_customer where name = :name"
h = self.dbmodule.prepare(sql)
h.execute(name=org_name)
row = h.fetchone_dict()
if not row:
return None
return row['id']
def lookupMaster(self, master_label):
# Returns the master record (if it exists)
sql = "select * from rhnISSMaster where label = :label"
h = self.dbmodule.prepare(sql)
h.execute(label=master_label)
return h.fetchone_dict()
def createMaster(self, master_label):
# Creates a master record with label master_label
sql = """
insert into rhnISSMaster (id, label)
values (sequence_nextval('rhn_issmaster_seq'), :label)
"""
h = self.dbmodule.prepare(sql)
h.execute(label=master_label)
def createMasterOrgs(self, master, orgs):
# Create master org records
insert = [[], [], []]
for org in orgs:
insert[0].append(master)
insert[1].append(org['id'])
insert[2].append(org['name'])
sql = """
insert into rhnISSMasterOrgs
(id, master_id, master_org_id, master_org_name)
values (sequence_nextval('rhn_issmasterorgs_seq'),
(select id from rhnISSMaster where label = :label),
:id, :name)
"""
h = self.dbmodule.prepare(sql)
h.executemany(label=insert[0], id=insert[1], name=insert[2])
def createOrgs(self, orgs):
# Create local org records
sql = """
insert into web_customer (id, name)
values (sequence_nextval('web_customer_id_seq'), :name)
"""
h = self.dbmodule.prepare(sql)
h.executemany(name=orgs)
sql = "select id, name from web_customer"
h = self.dbmodule.prepare(sql)
h.execute()
rows = h.fetchall_dict()
ret = {}
for row in rows:
ret[row['name']] = row['id']
return ret
def updateMasterOrgs(self, master_orgs):
# Update the master org to local org mapping
insert = [[], []]
for org in master_orgs:
insert[0].append(org['master_id'])
insert[1].append(org['local_id'])
sql = """
update rhnISSMasterOrgs
set local_org_id=:local
where master_org_id=:master
"""
h = self.dbmodule.prepare(sql)
h.executemany(master=insert[0], local=insert[1])
def lookupOrgTrusts(self):
# Return a hash of org trusts
sql = "select org_id, org_trust_id from rhnTrustedOrgs"
h = self.dbmodule.prepare(sql)
h.execute()
rows = h.fetchall_dict()
ret = {}
if rows:
for row in rows:
if row['org_id'] not in list(ret.keys()):
ret[row['org_id']] = []
ret[row['org_id']].append(row['org_trust_id'])
return ret
def clearOrgTrusts(self, org_id):
# Delete all trusts involving this org; trusts are always
# bi-directional
sql = """
delete from rhnTrustedOrgs
where org_id = :org_id
or org_trust_id = :org_id
"""
h = self.dbmodule.prepare(sql)
h.execute(org_id=org_id)
def createOrgTrusts(self, trusts):
# Create org trusts
insert = [[], []]
for trust in trusts:
insert[0].append(trust['org_id'])
insert[1].append(trust['trust'])
sql = """
insert into rhnTrustedOrgs (org_id, org_trust_id)
values (:id, :trust)
"""
h = self.dbmodule.prepare(sql)
h.executemany(id=insert[0], trust=insert[1])
def lookupOrgMap(self, master_label):
sql = """
select imo.master_org_id, imo.master_org_name, imo.local_org_id
from rhnISSMasterOrgs imo,
rhnISSMaster im
where im.id = imo.master_id
and im.label = :master_label
"""
h = self.dbmodule.prepare(sql)
h.execute(master_label=master_label)
rows = h.fetchall_dict()
maps = {'master-name-to-master-id': {},
'master-id-to-local-id': {}}
if not rows:
return maps
mn_to_mi = {} # master org name to master org id map
mi_to_li = {} # master org id to local org id map
for org in rows:
if ('master_org_id' in list(org.keys())
and 'master_org_name' in list(org.keys())
and org['master_org_id']
and org['master_org_name']):
mn_to_mi[org['master_org_name']] = org['master_org_id']
if ('master_org_id' in list(org.keys())
and 'local_org_id' in list(org.keys())
and org['master_org_id']
and org['local_org_id']):
mi_to_li[org['master_org_id']] = org['local_org_id']
maps['master-name-to-master-id'] = mn_to_mi
maps['master-id-to-local-id'] = mi_to_li
return maps
def lookupChannels(self, hash):
if not hash:
return
sql = "select id, channel_arch_id from rhnChannel where label = :label"
h = self.dbmodule.prepare(sql)
for k in hash.keys():
h.execute(label=k)
row = h.fetchone_dict()
if row:
hash[k] = row
# Else, it's an unsupported channel
def lookupChannelPackageArchCompat(self, channelArchHash):
# Return all the arches compatible with each key of archHash
sql = """
select package_arch_id
from rhnChannelPackageArchCompat
where channel_arch_id = :channel_arch_id
"""
h = self.dbmodule.prepare(sql)
for channel_arch_id in channelArchHash.keys():
dict = {}
h.execute(channel_arch_id=channel_arch_id)
while 1:
row = h.fetchone_dict()
if not row:
break
dict[row['package_arch_id']] = None
channelArchHash[channel_arch_id] = dict
def lookupServerGroupTypes(self, entries_hash):
sql = """
select id
from rhnServerGroupType
where label = :label
"""
h = self.dbmodule.prepare(sql)
for sgt in entries_hash.keys():
h.execute(label=sgt)
row = h.fetchone_dict()
if not row:
# server group not found
continue
entries_hash[sgt] = row['id']
def lookupPackageNames(self, nameHash):
if not nameHash:
return
sql = "select LOOKUP_PACKAGE_NAME(:name) id from dual"
h = self.dbmodule.prepare(sql)
for k in nameHash.keys():
h.execute(name=k)
nameHash[k] = h.fetchone_dict()['id']
def lookupErratum(self, erratum):
if not erratum:
return None
sql = """
select advisory
from rhnErrata
where advisory_name = :advisory_name
"""
h = self.dbmodule.prepare(sql)
h.execute(advisory_name=erratum['advisory_name'])
return h.fetchone_dict()
def lookupErrataSeverityId(self, erratum):
"""
        For the given severity type, returns the id
        associated with it in the rhnErrataSeverity table.
"""
if not erratum:
return None
sql = """
select id
from rhnErrataSeverity
where label = :severity
"""
h = self.dbmodule.prepare(sql)
if erratum['security_impact'] == '':
return None
        # concatenate the severity to reflect the db convention
        # bz-204374: the rhnErrataSeverity table has lower-case severity values,
        # so we convert the severity in the errata hash to lower case for the lookup.
severity_label = 'errata.sev.label.' + erratum['security_impact'].lower()
h.execute(severity=severity_label)
row = h.fetchone_dict()
if not row:
raise InvalidSeverityError("Invalid severity: %s" % erratum['security_impact'])
return row['id']
def lookupEVRs(self, evrHash):
sql = "select LOOKUP_EVR(:epoch, :version, :release) id from dual"
h = self.dbmodule.prepare(sql)
for evr in evrHash.keys():
epoch, version, release = evr
if epoch == '' or epoch is None:
epoch = None
else:
epoch = str(epoch)
h.execute(epoch=epoch, version=version, release=release)
row = h.fetchone_dict()
if row:
evrHash[evr] = row['id']
# Note: postgres-specific implementation overrides this in PostgresBackend
def lookupChecksums(self, checksumHash):
if not checksumHash:
return
sql = "select lookup_checksum(:ctype, :csum) id from dual"
h = self.dbmodule.prepare(sql)
for k in checksumHash.keys():
ctype, csum = k
if csum != '':
h.execute(ctype=ctype, csum=csum)
row = h.fetchone_dict()
if row:
checksumHash[k] = row['id']
def lookupChecksumTypes(self, checksumTypeHash):
if not checksumTypeHash:
return
sql = "select id from rhnChecksumType where label = :label"
h = self.dbmodule.prepare(sql)
for l in checksumTypeHash.keys():
h.execute(label=l)
row = h.fetchone_dict()
if row:
checksumTypeHash[l] = row['id']
def lookupPackageNEVRAs(self, nevraHash):
sql = "select LOOKUP_PACKAGE_NEVRA(:name, :evr, :arch) id from dual"
h = self.dbmodule.prepare(sql)
for nevra in nevraHash:
name, evr, arch = nevra
if arch is None:
arch = ''
h.execute(name=name, evr=evr, arch=arch)
row = h.fetchone_dict()
if row:
nevraHash[nevra] = row['id']
def lookupPackagesByNEVRA(self, nevraHash):
sql = """
select id from rhnPackage
where name_id = :name and
evr_id = :evr and
package_arch_id = :arch
"""
h = self.dbmodule.prepare(sql)
for nevra in nevraHash:
name, evr, arch = nevra
h.execute(name=name, evr=evr, arch=arch)
row = h.fetchone_dict()
if row:
nevraHash[nevra] = row['id']
def lookupPackageKeyId(self, header):
lookup_keyid_sql = rhnSQL.prepare("""
select pk.id
from rhnPackagekey pk,
rhnPackageKeyType pkt,
rhnPackageProvider pp
where pk.key_id = :key_id
and pk.key_type_id = pkt.id
and pk.provider_id = pp.id
""")
sigkeys = rhn_rpm.RPM_Header(header).signatures
key_id = None # _key_ids(sigkeys)[0]
for sig in sigkeys:
if sig['signature_type'] == 'gpg':
key_id = sig['key_id']
lookup_keyid_sql.execute(key_id=key_id)
keyid = lookup_keyid_sql.fetchall_dict()
return keyid[0]['id']
def lookupSourceRPMs(self, hash):
self.__processHash('lookup_source_name', hash)
def lookupPackageGroups(self, hash):
self.__processHash('lookup_package_group', hash)
def lookupPackages(self, packages, checksums, ignore_missing=0):
# If nevra is enabled use checksum as primary key
self.validate_pks()
for package in packages:
if not isinstance(package, IncompletePackage):
raise TypeError("Expected an IncompletePackage instance, found %s" %
str(type(package)))
for package in packages:
# here we need to figure out which checksum we have in the database
not_found = None
for type, chksum in package['checksums'].items():
package['checksum_type'] = type
package['checksum'] = chksum
package['checksum_id'] = checksums[(type, chksum)]
try:
self.__lookupObjectCollection([package], 'rhnPackage')
not_found = None
break
except InvalidPackageError:
e = sys.exc_info()[1]
not_found = (e, sys.exc_info()[2])
if not_found and not ignore_missing:
# package is not in database at all
raise_with_tb(not_found[0], not_found[1])
def lookupChannelFamilies(self, hash):
if not hash:
return
sql = "select id from rhnChannelFamily where label = :label"
h = self.dbmodule.prepare(sql)
for k in hash.keys():
h.execute(label=k)
row = h.fetchone_dict()
if row:
hash[k] = row['id']
# Else, it's an unsupported channel
def lookup_kstree_types(self, hash):
return self._lookup_in_table('rhnKSTreeType', 'rhn_kstree_type_seq',
hash)
def lookup_ks_install_types(self, hash):
return self._lookup_in_table('rhnKSInstallType',
'rhn_ksinstalltype_id_seq', hash)
def _lookup_in_table(self, table_name, sequence_name, hash):
t = self.dbmodule.Table(table_name, 'label')
seq = self.dbmodule.Sequence(sequence_name)
to_insert = []
to_update = []
result = {}
for label, name in hash.items():
row = t[label]
if not row:
row_id = seq.next()
result[label] = row_id
to_insert.append((label, name, row_id))
continue
row_id = row['id']
result[label] = row_id
if row['name'] != name:
to_update.append((label, name))
continue
# Entry found in the table - nothing more to do
if to_insert:
# Have to insert rows
row_ids = []
labels = []
names = []
for label, name, row_id in to_insert:
row_ids.append(row_id)
labels.append(label)
names.append(name)
sql = """
insert into %s (id, label, name) values (:id, :label, :name)
"""
h = self.dbmodule.prepare(sql % table_name)
h.executemany(id=row_ids, label=labels, name=names)
if to_update:
labels = []
names = []
for label, name in to_update:
labels.append(label)
names.append(name)
sql = """
update %s set name = :name where label = :label
"""
h = self.dbmodule.prepare(sql % table_name)
h.executemany(label=labels, name=names)
# Update the returning value
hash.clear()
hash.update(result)
return hash
def processChannelArches(self, arches):
self.__processObjectCollection(arches, 'rhnChannelArch',
uploadForce=4, ignoreUploaded=1, severityLimit=4)
def processPackageArches(self, arches):
self.__processObjectCollection(arches, 'rhnPackageArch',
uploadForce=4, ignoreUploaded=1, severityLimit=4)
def processServerArches(self, arches):
self.__processObjectCollection(arches, 'rhnServerArch',
uploadForce=4, ignoreUploaded=1, severityLimit=4)
def processCPUArches(self, arches):
self.__processObjectCollection(arches, 'rhnCPUArch',
uploadForce=4, ignoreUploaded=1, severityLimit=4)
def processMasterOrgs(self, orgs):
self.__processObjectCollection(orgs, 'rhnISSMasterOrgs',
uploadForce=4, ignoreUploaded=1, severityLimit=4)
def processOrgs(self, orgs):
self.__processObjectCollection(orgs, 'web_customer',
uploadForce=4, ignoreUploaded=1, severityLimit=4)
def processServerPackageArchCompatMap(self, entries):
self.__populateTable('rhnServerPackageArchCompat', entries,
delete_extra=1)
def processServerChannelArchCompatMap(self, entries):
self.__populateTable('rhnServerChannelArchCompat', entries,
delete_extra=1)
def processChannelPackageArchCompatMap(self, entries):
self.__populateTable('rhnChannelPackageArchCompat', entries,
delete_extra=1)
def processServerGroupServerArchCompatMap(self, entries):
self.__populateTable('rhnServerServerGroupArchCompat', entries,
delete_extra=1)
def processPackages(self, packages, uploadForce=0, ignoreUploaded=0,
forceVerify=0, transactional=0):
# Insert/update the packages
self.validate_pks()
childTables = {
'rhnPackageProvides': 'package_id',
'rhnPackageRequires': 'package_id',
'rhnPackageConflicts': 'package_id',
'rhnPackageObsoletes': 'package_id',
'rhnPackageRecommends': 'package_id',
'rhnPackageSuggests': 'package_id',
'rhnPackageSupplements': 'package_id',
'rhnPackageEnhances': 'package_id',
'rhnPackageBreaks': 'package_id',
'rhnPackagePredepends': 'package_id',
'rhnPackageFile': 'package_id',
'rhnPackageChangeLogRec': 'package_id',
}
for package in packages:
if not isinstance(package, Package):
raise TypeError("Expected a Package instance")
tableList = copy.deepcopy(childTables)
            # older sat packages won't have these fields;
            # avoid NULL insertions
if package['header_start'] is None:
package['header_start'] = -1
package['header_end'] = -1
self.__processObjectCollection__([package, ], 'rhnPackage', tableList,
uploadForce=uploadForce, forceVerify=forceVerify,
ignoreUploaded=ignoreUploaded, severityLimit=1,
transactional=transactional)
def processErrata(self, errata):
# Insert/update the packages
childTables = [
'rhnChannelErrata',
'rhnErrataBugList',
'rhnErrataFile',
'rhnErrataKeyword',
'rhnErrataPackage',
'rhnErrataCVE',
]
for erratum in errata:
if not isinstance(erratum, Erratum):
raise TypeError("Expected an Erratum instance")
return self.__processObjectCollection(errata, 'rhnErrata', childTables,
'errata_id', uploadForce=4, ignoreUploaded=1, forceVerify=1,
transactional=1)
def update_channels_affected_by_errata(self, dml):
# identify errata that were affected
affected_errata_ids = {}
for op_type in ['insert', 'update', 'delete']:
op_values = getattr(dml, op_type)
for table_name, values_hash in op_values.items():
if table_name == 'rhnErrata':
field = 'id'
elif 'errata_id' in values_hash:
field = 'errata_id'
# Now we know in which field to look for changes
for erratum_id in values_hash[field]:
affected_errata_ids[erratum_id] = None
# Get affected channels
affected_channel_ids = {}
h = self.dbmodule.prepare("""
select channel_id
from rhnChannelErrata
where errata_id = :errata_id
""")
for errata_id in affected_errata_ids.keys():
h.execute(errata_id=errata_id)
channel_ids = h.fetchall_dict() or []
channel_ids = [x['channel_id'] for x in channel_ids]
for channel_id in channel_ids:
affected_channel_ids[channel_id] = errata_id
# Now update the channels
update_channel = self.dbmodule.Procedure('rhn_channel.update_channel')
invalidate_ss = 0
for channel_id in affected_channel_ids.keys():
update_channel(channel_id, invalidate_ss)
h = self.dbmodule.prepare("""
select advisory from rhnErrata where id = :errata_id
""")
h.execute(errata_id=affected_channel_ids[channel_id])
advisory = h.fetchone()[0]
channel = rhnChannel.Channel()
channel.load_by_id(channel_id)
taskomatic.add_to_repodata_queue(channel.get_label(), "errata",
advisory)
def processKickstartTrees(self, ks_trees):
childTables = [
'rhnKSTreeFile',
#'rhnKSTreeType',
#'rhnKSInstallType',
]
self.__processObjectCollection(ks_trees, 'rhnKickstartableTree',
childTables, 'kstree_id', uploadForce=4, forceVerify=1,
ignoreUploaded=1, severityLimit=1, transactional=1)
def queue_errata(self, errata, timeout=0):
        # timeout is the number of seconds we want the execution to be delayed
if not errata:
return
# Figure out the errata ids
errata_channel_ids = []
for erratum in errata:
if erratum.ignored:
# Skip it
continue
if erratum.diff_result is not None:
if erratum.diff_result.level != 0:
# New or modified in some way, queue it
# XXX we may not want to do this for trivial changes,
# but not sure what trivial is
for cid in erratum['channels']:
errata_channel_ids.append(
(erratum.id, cid['channel_id']))
if not errata_channel_ids:
# Nothing to do
return
hdel = self.dbmodule.prepare("""
delete from rhnErrataQueue where errata_id = :errata_id
""")
h = self.dbmodule.prepare("""
insert into rhnErrataQueue (errata_id, channel_id, next_action)
values (:errata_id, :channel_id, current_timestamp + numtodsinterval(:timeout, 'second'))
""")
errata_ids = [x[0] for x in errata_channel_ids]
channel_ids = [x[1] for x in errata_channel_ids]
timeouts = [timeout] * len(errata_ids)
hdel.executemany(errata_id=errata_ids)
return h.executemany(errata_id=errata_ids, channel_id=channel_ids,
timeout=timeouts)
def processChannels(self, channels, base_channels):
childTables = [
'rhnChannelFamilyMembers', 'rhnReleaseChannelMap',
]
if base_channels:
childTables.append('rhnDistChannelMap')
self.__processObjectCollection(channels, 'rhnChannel', childTables,
'channel_id', uploadForce=4, ignoreUploaded=1, forceVerify=1)
def orgTrustExists(self, org_id, trust_id):
sql = """
select *
from rhnTrustedOrgs
where org_id = :org_id
and org_trust_id = :trust_id
"""
h = self.dbmodule.prepare(sql)
h.execute(org_id=org_id, trust_id=trust_id)
row = h.fetchone_dict()
if row:
return True
return False
def clearChannelTrusts(self, label):
sql = """
delete from rhnChannelTrust where channel_id =
(select id from rhnChannel where label = :label)
"""
h = self.dbmodule.prepare(sql)
h.execute(label=label)
def processChannelTrusts(self, channel_trusts):
# Create channel trusts
insert = [[], []]
for trust in channel_trusts:
insert[0].append(trust['channel-label'])
insert[1].append(trust['org-id'])
sql = """
insert into rhnChannelTrust (channel_id, org_trust_id)
values ((select id from rhnChannel where label = :label),
:org_id)
"""
h = self.dbmodule.prepare(sql)
h.executemany(label=insert[0], org_id=insert[1])
def processChannelFamilies(self, channels):
childTables = []
self.__processObjectCollection(channels, 'rhnChannelFamily',
childTables, 'channel_family_id', uploadForce=4, ignoreUploaded=1,
forceVerify=1)
def processChannelFamilyMembers(self, channel_families):
# Channel families now contain channel memberships too
h_lookup_cfid = self.dbmodule.prepare("""
select channel_family_id
from rhnChannelFamilyMembers
where channel_id = :channel_id
""")
cf_ids = []
c_ids = []
for cf in channel_families:
if 'private-channel-family' in cf['label']:
                # It's a private channel family, and its channel family members
                # will differ from the server's since this is most likely an ISS
                # sync. Don't compare and delete custom channel families.
continue
for cid in cf['channel_ids']:
# Look up channel families for this channel
h_lookup_cfid.execute(channel_id=cid)
row = h_lookup_cfid.fetchone_dict()
if row and row['channel_family_id'] == cf.id:
# Nothing to do here, we already have this mapping
continue
# need to delete this entry and add the one for the new
# channel family
cf_ids.append(cf.id)
c_ids.append(cid)
if not c_ids:
# We're done
return
hdel = self.dbmodule.prepare("""
delete from rhnChannelFamilyMembers
where channel_id = :channel_id
""")
hins = self.dbmodule.prepare("""
insert into rhnChannelFamilyMembers (channel_id, channel_family_id)
values (:channel_id, :channel_family_id)
""")
hdel.executemany(channel_id=c_ids)
hins.executemany(channel_family_id=cf_ids, channel_id=c_ids)
def processChannelFamilyPermissions(self, channel_families):
# Since this is not evaluated in rhn_entitlements anymore,
# make channel families without org globally visible
cf_ids = [cf.id for cf in channel_families if 'private-channel-family' not in cf['label']]
h_public_sel = self.dbmodule.prepare("""
select channel_family_id from rhnPublicChannelFamily
""")
h_public_sel.execute()
public_cf_in_db = [x['channel_family_id'] for x in h_public_sel.fetchall_dict() or []]
public_cf_to_insert = [x for x in cf_ids if x not in public_cf_in_db]
h_public_ins = self.dbmodule.prepare("""
insert into rhnPublicChannelFamily (channel_family_id)
values (:channel_family_id)
""")
h_public_ins.executemany(channel_family_id=public_cf_to_insert)
def processDistChannelMap(self, dcms):
dcmTable = self.tables['rhnDistChannelMap']
lookup = TableLookup(dcmTable, self.dbmodule)
dmlobj = DML([dcmTable.name], self.tables)
for dcm in dcms:
if dcm.ignored:
# Skip it
continue
h = lookup.query(dcm)
row = h.fetchone_dict()
if not row:
extObject = {}
_buildExternalValue(extObject, dcm, dcmTable)
addHash(dmlobj.insert[dcmTable.name], extObject)
# Since this table has all the columns in unique constraints, we
# don't care much about updates
self.__doDML(dmlobj)
def processChannelProduct(self, channel):
""" Associate product with channel """
channel['channel_product'] = channel['product_name']
channel['channel_product_version'] = channel['product_version']
channel['channel_product_beta'] = channel['product_beta']
channel['channel_product_id'] = self.lookupChannelProduct(channel)
if not channel['channel_product_id']:
            # If there is no channel product, don't update
return
statement = self.dbmodule.prepare("""
UPDATE rhnChannel
SET channel_product_id = :channel_product_id
WHERE id = :id
AND (channel_product_id is NULL
OR channel_product_id <> :channel_product_id)
""")
statement.execute(id=channel.id,
channel_product_id=channel['channel_product_id'])
def processChannelContentSources(self, channel):
""" Associate content sources with channel """
# Which content sources are assigned to this channel
select_sql = self.dbmodule.prepare("""
select source_id from rhnChannelContentSource
where channel_id = :channel_id
""")
select_sql.execute(channel_id=channel.id)
sources_in_db = [x['source_id'] for x in select_sql.fetchall_dict() or []]
# Which content sources should be assigned to this channel
sources_needed = []
if 'content-sources' in channel and channel['content-sources']:
for source in channel['content-sources']:
sources_needed.append(self.lookupContentSource(source['label']))
# What to delete and insert
sources_to_delete = [x for x in sources_in_db if x not in sources_needed]
sources_to_insert = [x for x in sources_needed if x not in sources_in_db]
delete_sql = self.dbmodule.prepare("""
delete from rhnChannelContentSource
where source_id = :source_id
and channel_id = :channel_id
""")
insert_sql = self.dbmodule.prepare("""
insert into rhnChannelContentSource
(source_id, channel_id)
values (:source_id, :channel_id)
""")
for source_id in sources_to_delete:
delete_sql.execute(source_id=source_id, channel_id=channel.id)
for source_id in sources_to_insert:
insert_sql.execute(source_id=source_id, channel_id=channel.id)
def processProductNames(self, batch):
""" Check if ProductName for channel in batch is already in DB.
If not add it there.
"""
statement = self.dbmodule.prepare("""
insert into rhnProductName
(id, label, name)
values (sequence_nextval('rhn_productname_id_seq'),
:product_label, :product_name)
""")
for channel in batch:
if not self.lookupProductNames(channel['label']):
statement.execute(product_label=channel['label'],
product_name=channel['name'])
def processContentSources(self, batch):
""" Insert content source into DB """
childTables = ['rhnContentSourceSsl']
self.__processObjectCollection(batch, 'rhnContentSource',
childTables, 'content_source_id', uploadForce=4, ignoreUploaded=1,
forceVerify=1)
def lookupContentSource(self, label):
""" Get id for given content source """
sql = self.dbmodule.prepare("""
select id from rhnContentSource where label = :label and org_id is null
""")
sql.execute(label=label)
content_source = sql.fetchone_dict()
if content_source:
return content_source['id']
return
def lookupContentSourceType(self, label):
""" Get id for given content type label """
sql = self.dbmodule.prepare("""
select id from rhnContentSourceType where label = :label
""")
sql.execute(label=label)
source_type = sql.fetchone_dict()
if source_type:
return source_type['id']
return
def lookupProductNames(self, label):
""" For given label of product return its id.
If product do not exist return None
"""
statement = self.dbmodule.prepare("""
SELECT id
FROM rhnProductName
WHERE label = :label
""")
statement.execute(label=label)
product = statement.fetchone_dict()
if product:
return product['id']
return
# bug #528227
def lookupChannelOrg(self, label):
"""For given label of channel return its org_id.
If channel with given label does not exist or is NULL, return None.
"""
statement = self.dbmodule.prepare("""
SELECT org_id
FROM rhnChannel
WHERE label = :label
""")
statement.execute(label=label)
org_id = statement.fetchone_dict()
if org_id:
return org_id
return
def lookupChannelProduct(self, channel):
statement = self.dbmodule.prepare("""
SELECT id
FROM rhnChannelProduct
WHERE product = :product
AND version = :version
AND beta = :beta
""")
statement.execute(product=channel['channel_product'],
version=channel['channel_product_version'],
beta=channel['channel_product_beta'])
product = statement.fetchone_dict()
if product:
return product['id']
return self.createChannelProduct(channel)
def createChannelProduct(self, channel):
id = self.sequences['rhnChannelProduct'].next()
statement = self.dbmodule.prepare("""
INSERT
INTO rhnChannelProduct
(id, product, version, beta)
VALUES (:id, :product, :version, :beta)
""")
statement.execute(id=id,
product=channel['channel_product'],
version=channel['channel_product_version'],
beta=channel['channel_product_beta'])
return id
def subscribeToChannels(self, packages, strict=0):
hash = {
'package_id': [],
'channel_id': [],
}
# Keep a list of packages for a channel too, so we can easily compare
# what's extra, if strict is 1
channel_packages = {}
sql = """
select channel_id
from rhnChannelPackage
where package_id = :package_id"""
affected_channels = {}
statement = self.dbmodule.prepare(sql)
for package in packages:
if package.ignored:
# Skip it
continue
if package.id is None:
raise InvalidPackageError(package, "Invalid package")
# Look it up first
statement.execute(package_id=package.id)
channels = {}
while 1:
row = statement.fetchone_dict()
if not row:
break
channels[row['channel_id']] = None
for channelId in package['channels'].keys():
# Build the channel-package list
if channelId in channel_packages:
cp = channel_packages[channelId]
else:
channel_packages[channelId] = cp = {}
cp[package.id] = None
if channelId in channels:
# Already subscribed
continue
dict = {
'package_id': package.id,
'channel_id': channelId,
}
if channelId not in affected_channels:
modified_packages = ([], [])
affected_channels[channelId] = modified_packages
else:
modified_packages = affected_channels[channelId]
# Package was added to this channel
modified_packages[0].append(package.id)
addHash(hash, dict)
# Packages we'd have to delete
extra_cp = {
'package_id': [],
'channel_id': [],
}
if strict:
# if strict remove the extra packages from the DB
sql = """
select package_id
from rhnChannelPackage
where channel_id = :channel_id
"""
else:
            # or at least we should delete packages from a different org
sql = """
select package_id
from rhnChannelPackage cp
join rhnPackage p
on p.id = cp.package_id
join rhnChannel c
on c.id = cp.channel_id
where cp.channel_id = :channel_id
and c.org_id != p.org_id
"""
statement = self.dbmodule.prepare(sql)
for channel_id, pid_hash in channel_packages.items():
statement.execute(channel_id=channel_id)
while 1:
row = statement.fetchone_dict()
if not row:
break
package_id = row['package_id']
if package_id not in pid_hash:
# Have to remove it
extra_cp['package_id'].append(package_id)
extra_cp['channel_id'].append(channel_id)
# And mark this channel as being affected
if channel_id not in affected_channels:
modified_packages = ([], [])
affected_channels[channel_id] = modified_packages
else:
modified_packages = affected_channels[channel_id]
                    # Package was deleted from this channel
modified_packages[1].append(package_id)
self.__doDeleteTable('rhnChannelPackage', extra_cp)
self.__doInsertTable('rhnChannelPackage', hash)
# This function returns the channels that were affected
return affected_channels
def update_newest_package_cache(self, caller, affected_channels, name_ids=[]):
# affected_channels is a hash keyed on the channel id, and with a
# tuple (added_package_list, deleted_package_list) as values
refresh_newest_package = self.dbmodule.Procedure('rhn_channel.refresh_newest_package')
update_channel = self.dbmodule.Procedure('rhn_channel.update_channel')
for channel_id, (added_packages_list, deleted_packages_list) in affected_channels.items():
try:
if name_ids:
for id in name_ids:
refresh_newest_package(channel_id, caller, id)
else:
refresh_newest_package(channel_id, caller, None)
except rhnSQL.SQLError:
e = sys.exc_info()[1]
raise_with_tb(rhnFault(23, str(e[1]), explain=0), sys.exc_info()[2])
if deleted_packages_list:
invalidate_ss = 1
else:
invalidate_ss = 0
update_channel(channel_id, invalidate_ss)
def processSourcePackages(self, packages, uploadForce=0, ignoreUploaded=0,
forceVerify=0, transactional=0):
# Insert/update the packages
childTables = []
for package in packages:
if not isinstance(package, SourcePackage):
raise TypeError("Expected a Package instance")
# Process the packages
self.__processObjectCollection(packages, 'rhnPackageSource', childTables,
'package_id', uploadForce=uploadForce, forceVerify=forceVerify,
ignoreUploaded=ignoreUploaded, severityLimit=1,
transactional=transactional)
def commit(self):
self.dbmodule.commit()
def rollback(self):
self.dbmodule.rollback()
def __processHash(self, lookup, hash):
if not hash:
# Nothing to do
return
h = rhnSQL.prepare("select " + lookup + "(:name) from dual")
for k in hash.keys():
h.execute(name=k)
# saving id
hash[k] = h.fetchone_dict().popitem()[1]
def __buildQueries(self, childTables):
childTableLookups = {}
queryTempl = "select * from %s where %s = :id"
for childTableName in childTables:
childTableLookups[childTableName] = self.dbmodule.prepare(
queryTempl % (childTableName, childTables[childTableName]))
return childTableLookups
def __processObjectCollection(self, objColl, parentTable, childTables=[],
colname=None, **kwargs):
# Returns the DML object that was processed
# This helps identify what the changes were
        # XXX this is a total hack: it keeps translating the old interface into the
        # new interface so I don't have to change all the places in the
        # code that call this method, as there are 10 of them...
childDict = {}
for tbl in childTables:
childDict[tbl] = colname
return self.__processObjectCollection__(objColl, parentTable, childDict, **kwargs)
def __processObjectCollection__(self, objColl, parentTable, childTables={},
**kwargs):
# Returns the DML object that was processed
# This helps identify what the changes were
# FIXME I need to break this method apart into smaller method calls that
# will allow *different* colname fields for different childTables
# NOTE objColl == packages
# Process the object collection, starting with parentTable, having
# colname as a link column between the parent and child tables
#
# We create a DML object for the operations we're supposed to perform
# on the database
kwparams = {
# The 'upload force'
'uploadForce': 0,
# Raises exceptions when the object is already uploaded
'ignoreUploaded': 0,
# Forces a full object verification - including the child tables
'forceVerify': 0,
# When the severity is below this limit, the object is not
# updated
'severityLimit': 0,
# All-or-nothing
'transactional': 0,
}
for k, v in kwargs.items():
if k not in kwparams:
raise TypeError("Unknown keyword parameter %s" % k)
if v is not None:
# Leave the default values in case of a None
kwparams[k] = v
uploadForce = kwparams['uploadForce']
ignoreUploaded = kwparams['ignoreUploaded']
severityLimit = kwparams['severityLimit']
transactional = kwparams['transactional']
forceVerify = kwparams['forceVerify']
# All the tables affected
tables = [parentTable] + list(childTables.keys())
# Build the hash for the operations on the tables
dml = DML(tables, self.tables)
# Reverse hash: object id to object for already-uploaded objects
uploadedObjects = {}
# Information related to the parent table
parentTableObj = self.tables[parentTable]
ptFields = parentTableObj.getFields()
severityHash = parentTableObj.getSeverityHash()
# A flag that indicates if something has to be modified beyond the
# current severity limit
brokenTransaction = 0
# Lookup object
lookup = TableLookup(parentTableObj, self.dbmodule)
# XXX
childTableLookups = self.__buildQueries(childTables)
# For each valid object in the collection, look it up
# if it doesn't exist, insert all the associated information
# if it already exists:
# save it in the uploadedObjects hash for later processing
# the object's diff member will contain data if that object
# failed to push; the content should be explicit enough about
# what failed
# The object's diff_result should reliably say if the object was
# different in any way, or if it was new. Each field that gets
        # compared will present its own severity field (or the default
# one if not explicitly specified). The "global" severity is the
# max of all severities.
# New objects will have a diff level of -1
for object in objColl:
if object.ignored:
# Skip it
continue
h = lookup.query(object)
row = h.fetchone_dict()
if not row:
# Object does not exist
id = self.sequences[parentTable].next()
object.id = id
extObject = {'id': id}
_buildExternalValue(extObject, object, parentTableObj)
addHash(dml.insert[parentTable], extObject)
# Insert child table information
for tname in childTables:
tbl = self.tables[tname]
# Get the list of objects for this package
entry_list = object[tbl.getAttribute()]
if entry_list is None:
continue
for entry in entry_list:
extObject = {childTables[tname]: id}
seq_col = tbl.sequenceColumn
if seq_col:
# This table has to insert values in a sequenced
# column; since it's a child table and the entry
# in the master table is not created yet, there
# shouldn't be a problem with uniqueness
# constraints
new_id = self.sequences[tbl.name].next()
extObject[seq_col] = new_id
# Make sure we initialize the object's sequenced
# column as well
entry[seq_col] = new_id
_buildExternalValue(extObject, entry, tbl)
addHash(dml.insert[tname], extObject)
object.diff_result = Diff()
# New object
object.diff_result.level = -1
continue
# Already uploaded
if not ignoreUploaded:
raise AlreadyUploadedError(object, "Already uploaded")
# XXX package id set here!!!!!!!!!!
object.id = row['id']
# And save the object and the row for later processing
uploadedObjects[row['id']] = [object, row]
# Deal with already-uploaded objects
for objid, (object, row) in uploadedObjects.items():
# Build the external value
extObject = {'id': row['id']}
_buildExternalValue(extObject, object, parentTableObj)
# Build the DB value
row = _buildDatabaseValue(row, ptFields)
# compare them
object.diff = object.diff_result = Diff()
diffval = computeDiff(extObject, row, severityHash, object.diff)
if not forceVerify:
# If there is enough karma, force the full object check
# maybe they want the object overwritten
if uploadForce < object.diff.level and diffval <= severityLimit:
# Same object, or not different enough
# not enough karma either
continue
localDML = self.__processUploaded(objid, object, childTables,
childTableLookups)
if uploadForce < object.diff.level:
# Not enough karma
if object.diff.level > severityLimit:
# Broken transaction - object is too different
brokenTransaction = 1
continue
# Clean up the object diff since we pushed the package
object.diff = None
if diffval:
# Different parent object
localDML['update'][parentTable] = [extObject]
# And transfer the local DML to the global one
for k, tablehash in localDML.items():
dmlhash = getattr(dml, k)
for tname, vallist in tablehash.items():
for val in vallist:
addHash(dmlhash[tname], val)
if transactional and brokenTransaction:
raise TransactionError("Error uploading package source batch")
return self.__doDML(dml)
def __processUploaded(self, objid, object, childTables, childTableLookups):
# Store the DML operations locally
localDML = {
'insert': {},
'update': {},
'delete': {},
}
# Grab the rest of the information
childTablesInfo = self.__getChildTablesInfo(objid, list(childTables.keys()),
childTableLookups)
# Start computing deltas
for childTableName in childTables:
# Init the local hashes
for k in ['insert', 'update', 'delete']:
localDML[k][childTableName] = []
dbside = childTablesInfo[childTableName]
# The child table object
childTableObj = self.tables[childTableName]
# The name of the attribute in the parent object
parentattr = childTableObj.getAttribute()
# The list of entries associated with the attribute linked to
# this table
entrylist = object[parentattr]
fields = childTableObj.getFields()
pks = childTableObj.getPK()
childSeverityHash = childTableObj.getSeverityHash()
if entrylist is None:
continue
for ent in entrylist:
# Build the primary key
key = []
for f in pks:
if f == childTables[childTableName]:
# Special-case it
key.append(objid)
continue
datatype = fields[f]
# Get the proper attribute name for this column
attr = childTableObj.getObjectAttribute(f)
key.append(sanitizeValue(ent[attr], datatype))
key = tuple(key)
# Build the value
val = {childTables[childTableName]: objid}
if childTableObj.sequenceColumn:
# Initialize the sequenced column with a dummy value
ent[childTableObj.sequenceColumn] = None
_buildExternalValue(val, ent, childTableObj)
# Look this value up
if key not in dbside:
if childTableObj.sequenceColumn:
# Initialize the sequence column too
sc = childTableObj.sequenceColumn
nextid = self.sequences[childTableName].next()
val[sc] = ent[sc] = nextid
# This entry has to be inserted
object.diff.append((parentattr, val, None))
# XXX change to a default value
object.diff.setLevel(4)
localDML['insert'][childTableName].append(val)
continue
# Already exists in the DB
dbval = _buildDatabaseValue(dbside[key], fields)
if childTableObj.sequenceColumn:
                    # Copy the sequenced value - we don't want it updated
sc = childTableObj.sequenceColumn
val[sc] = ent[sc] = dbval[sc]
# check for updates
diffval = computeDiff(val, dbval, childSeverityHash,
object.diff, parentattr)
if not diffval:
# Same value
del dbside[key]
continue
# Different value; have to update the entry
localDML['update'][childTableName].append(val)
del dbside[key]
if childTableName == 'rhnErrataPackage':
continue
# Anything else should be deleted
for key, val in dbside.items():
# Send only the PKs
hash = {}
for k in pks:
hash[k] = val[k]
# XXX change to a default value
object.diff.setLevel(4)
localDML['delete'][childTableName].append(hash)
object.diff.append((parentattr, None, val))
return localDML
def __doDML(self, dml):
self.__doDelete(dml.delete, dml.tables)
self.__doUpdate(dml.update, dml.tables)
self.__doInsert(dml.insert, dml.tables)
return dml
def __doInsert(self, hash, tables):
for tname in tables:
dict = hash[tname]
try:
self.__doInsertTable(tname, dict)
except rhnSQL.SQLError:
e = sys.exc_info()[1]
raise_with_tb(rhnFault(54, str(e[1]), explain=0), sys.exc_info()[2])
def __doInsertTable(self, table, hash):
if not hash:
return
tab = self.tables[table]
k = list(hash.keys())[0]
if not hash[k]:
# Nothing to do
return
insertObj = TableInsert(tab, self.dbmodule)
insertObj.query(hash)
return
def __doDelete(self, hash, tables):
for tname in tables:
dict = hash[tname]
self.__doDeleteTable(tname, dict)
def __doDeleteTable(self, tname, hash):
if not hash:
return
tab = self.tables[tname]
# Need to extract the primary keys and look for items to delete only
# in those columns, the other ones may not be populated
# See bug 154216 for details (misa 2005-04-08)
pks = tab.getPK()
k = pks[0]
if not hash[k]:
# Nothing to do
return
deleteObj = TableDelete(tab, self.dbmodule)
deleteObj.query(hash)
def __doUpdate(self, hash, tables):
for tname in tables:
dict = hash[tname]
self.__doUpdateTable(tname, dict)
def __doUpdateTable(self, tname, hash):
if not hash:
return
tab = self.tables[tname]
# See bug 154216 for details (misa 2005-04-08)
pks = tab.getPK()
k = pks[0]
if not hash[k]:
# Nothing to do
return
updateObj = TableUpdate(tab, self.dbmodule)
updateObj.query(hash)
return
def __lookupObjectCollection(self, objColl, tableName, ignore_missing=0):
# Looks the object up in tableName, and fills in its id
lookup = TableLookup(self.tables[tableName], self.dbmodule)
for object in objColl:
if object.ignored:
# Skip it
continue
h = lookup.query(object)
row = h.fetchone_dict()
if not row:
if ignore_missing:
# Ignore the missing objects
object.ignored = 1
continue
# Invalid
raise InvalidPackageError(object, "Could not find object %s in table %s" % (object, tableName))
object.id = row['id']
def __getChildTablesInfo(self, id, tables, queries):
# Returns a hash with the information about package id from tables
result = {}
for tname in tables:
tableobj = self.tables[tname]
fields = tableobj.getFields()
q = queries[tname]
q.execute(id=id)
hash = {}
while 1:
row = q.fetchone_dict()
if not row:
break
pks = tableobj.getPK()
key = []
for f in pks:
value = row[f]
datatype = fields[f]
value = sanitizeValue(value, datatype)
key.append(value)
val = {}
for f, datatype in fields.items():
value = row[f]
value = sanitizeValue(value, datatype)
val[f] = value
hash[tuple(key)] = val
result[tname] = hash
return result
def __populateTable(self, table_name, data, delete_extra=1):
table = self.tables[table_name]
fields = table.getFields()
# Build a hash with the incoming data
incoming = {}
for entry in data:
t = hash2tuple(entry, fields)
incoming[t] = entry
# Build the query to dump the table's contents
h = self.dbmodule.prepare("select * from %s" % table.name)
h.execute()
deletes = {}
inserts = {}
for f in fields.keys():
inserts[f] = []
deletes[f] = []
while 1:
row = h.fetchone_dict()
if not row:
break
t = hash2tuple(row, fields)
if t in incoming:
# we already have this value uploaded
del incoming[t]
continue
addHash(deletes, row)
for row in incoming.values():
addHash(inserts, row)
if delete_extra:
self.__doDeleteTable(table.name, deletes)
self.__doInsertTable(table.name, inserts)
# This function does a diff on the specified table name for the presented
# data, using pk_fields as unique fields
def _do_diff(self, data, table_name, uq_fields, fields):
first_uq_col = uq_fields[0]
uq_col_values = {}
all_fields = uq_fields + fields
for entry in data:
for f in all_fields:
if f not in entry:
raise Exception("Missing field %s" % f)
val = entry[first_uq_col]
if val not in uq_col_values:
valhash = {}
uq_col_values[val] = valhash
else:
valhash = uq_col_values[val]
key = build_key(entry, uq_fields)
valhash[key] = entry
query = "select %s from %s where %s = :%s" % (
string.join(all_fields, ", "),
table_name,
first_uq_col, first_uq_col,
)
h = self.dbmodule.prepare(query)
updates = []
deletes = []
for val, valhash in uq_col_values.items():
params = {first_uq_col: val}
h.execute(**params)
while 1:
row = h.fetchone_dict()
if not row:
break
key = build_key(row, uq_fields)
if key not in valhash:
# Need to delete this one
deletes.append(row)
continue
entry = valhash[key]
for f in fields:
if entry[f] != row[f]:
# Different, we have to update
break
else:
# Same value, remove it from valhash
del valhash[key]
continue
# Need to update
updates.append(entry)
inserts = []
list(map(inserts.extend, [list(x.values()) for x in list(uq_col_values.values())]))
if deletes:
params = transpose(deletes, uq_fields)
query = "delete from %s where %s" % (
table_name,
string.join(["%s = :%s" % (x, x) for x in uq_fields],
' and '),
)
h = self.dbmodule.prepare(query)
h.executemany(**params)
if inserts:
params = transpose(inserts, all_fields)
query = "insert into %s (%s) values (%s)" % (
table_name,
string.join(all_fields, ', '),
string.join([":" + x for x in all_fields], ', '),
)
h = self.dbmodule.prepare(query)
h.executemany(**params)
if updates:
params = transpose(updates, all_fields)
query = "update % set %s where %s" % (
table_name,
string.join(["%s = :s" + (x, x) for x in fields],
', '),
string.join(["%s = :%s" % (x, x) for x in uq_fields],
' and '),
)
h = self.dbmodule.prepare(query)
h.executemany(**params)
def validate_pks(self):
# If nevra is enabled use checksum as primary key
tbs = self.tables['rhnPackage']
if not CFG.ENABLE_NVREA:
# remove checksum from a primary key if nevra is disabled.
if 'checksum_id' in tbs.pk:
tbs.pk.remove('checksum_id')
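# Illustrative sketch (not part of the original module): the structure that
# subscribeToChannels() returns and update_newest_package_cache() consumes.
# All ids below are hypothetical.
def _example_affected_channels():
    # channel id -> (added_package_ids, deleted_package_ids)
    return {
        101: ([1001, 1002], []),   # two packages newly linked to channel 101
        102: ([], [2001]),         # one package removed from channel 102
    }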
# Returns a tuple for the hash's values
def build_key(hash, fields):
return tuple(map(lambda x, h=hash: h[x], fields))
def transpose(arrhash, fields):
params = {}
for f in fields:
params[f] = []
for h in arrhash:
for f in fields:
params[f].append(h[f])
return params
def hash2tuple(hash, fields):
# Converts the hash into a tuple, with the fields ordered as presented in
# the fields list
result = []
for fname, ftype in fields.items():
result.append(sanitizeValue(hash[fname], ftype))
return tuple(result)
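# Illustrative sketch (not part of the original module): how build_key() and
# transpose() above cooperate in _do_diff(). The rows and field names are
# hypothetical; real callers pass database rows and column-name lists.
def _example_build_key_and_transpose():
    rows = [
        {'server_arch_id': 1, 'package_arch_id': 10},
        {'server_arch_id': 1, 'package_arch_id': 11},
    ]
    fields = ['server_arch_id', 'package_arch_id']
    # build_key gives a hashable identity for each row
    keys = [build_key(row, fields) for row in rows]   # [(1, 10), (1, 11)]
    # transpose turns row dicts into per-column lists, ready for executemany()
    params = transpose(rows, fields)
    # params == {'server_arch_id': [1, 1], 'package_arch_id': [10, 11]}
    return keys, params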
class DML:
def __init__(self, tables, tableHash):
self.update = {}
self.delete = {}
self.insert = {}
self.tables = tables
for k in ('insert', 'update', 'delete'):
dmlhash = {}
setattr(self, k, dmlhash)
for tname in tables:
hash = {}
for f in tableHash[tname].getFields().keys():
hash[f] = []
dmlhash[tname] = hash
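# Illustrative sketch (not part of the original module): the shape a DML
# object takes. _ExampleTable is a hypothetical stand-in for the table
# objects kept in Backend.tables; DML.__init__ only needs getFields().
def _example_dml_shape():
    class _ExampleTable:
        def getFields(self):
            return {'id': 'number', 'label': 'string'}
    dml = DML(['rhnExampleTable'], {'rhnExampleTable': _ExampleTable()})
    # Each operation hash maps table name -> {column: [values, ...]}, so
    # addHash() can append column values for later executemany() calls.
    return dml.insert   # {'rhnExampleTable': {'id': [], 'label': []}}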
def _buildDatabaseValue(row, fieldsHash):
# Returns a dictionary containing the interesting values of the row,
# sanitized
dict = {}
for f, datatype in fieldsHash.items():
dict[f] = sanitizeValue(row[f], datatype)
return dict
def _buildExternalValue(dict, entry, tableObj):
# updates dict with values from entry
# entry is a hash-like object (non-db)
for f, datatype in tableObj.getFields().items():
if f in dict:
# initialized somewhere else
continue
# Get the attribute's name
attr = tableObj.getObjectAttribute(f)
# Sanitize the value according to its datatype
if attr not in entry:
entry[attr] = None
dict[f] = sanitizeValue(entry[attr], datatype)
def computeDiff(hash1, hash2, diffHash, diffobj, prefix=None):
# Compare if the key-values of hash1 are a subset of hash2's
difference = 0
ignore_keys = ['last_modified']
for k, v in hash1.items():
if k in ignore_keys:
            # Don't decide the diff based on last_modified,
            # as this obviously won't match because of our db
            # triggers.
continue
if hash2[k] == v:
# Same values
continue
if k == 'installed_size' and v is not None and hash2[k] is None:
# Skip installed_size which might not have been populated
continue
if k in diffHash:
diffval = diffHash[k]
if diffval == 0:
# Completely ignore this key
continue
else:
diffval = diffobj.level + 1
if prefix:
diffkey = prefix + '::' + k
else:
diffkey = k
diffobj.setLevel(diffval)
diffobj.append((diffkey, v, hash2[k]))
difference = diffobj.level
return difference
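# Illustrative sketch (not part of the original module): how computeDiff()
# drives a Diff-like object. _ExampleDiff is a hypothetical stand-in for the
# real Diff class imported from importLib; only the pieces computeDiff()
# touches (level, setLevel, append) are modeled.
def _example_compute_diff_usage():
    class _ExampleDiff(list):
        level = 0
        def setLevel(self, level):
            if level > self.level:
                self.level = level
    diff = _ExampleDiff()
    uploaded = {'name': 'pkg', 'version': '1.0', 'release': '2'}
    database = {'name': 'pkg', 'version': '1.0', 'release': '1'}
    # 'release' differs; the severity hash assigns that change a weight of 3
    level = computeDiff(uploaded, database, {'release': 3}, diff)
    return level, list(diff)   # (3, [('release', '2', '1')])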
|
lhellebr/spacewalk
|
backend/server/importlib/backend.py
|
Python
|
gpl-2.0
| 74266
|
# Testing the line trace facility.
from test import test_support
import unittest
import sys
import difflib
import gc
# A very basic example. If this fails, we're in deep trouble.
def basic():
return 1
basic.events = [(0, 'call'),
(1, 'line'),
(1, 'return')]
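# For reference, a minimal sketch (not used by the tests in this file) of how
# event lists such as basic.events are produced: sys.settrace() installs a
# callback that receives (frame, event, arg), and line numbers are recorded
# relative to the traced function's first line. The Tracer class defined
# further down is what the test cases actually use.
def _collect_events_sketch(func):
    collected = []
    def tracer(frame, event, arg):
        collected.append((frame.f_lineno, event))
        return tracer
    sys.settrace(tracer)
    try:
        func()
    finally:
        sys.settrace(None)
    first = func.func_code.co_firstlineno
    return [(lineno - first, event) for (lineno, event) in collected]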
# Many of the tests below are tricky because they involve pass statements.
# If there is implicit control flow around a pass statement (in an except
# clause or else clause) under what conditions do you set a line number
# following that clause?
# The entire "while 0:" statement is optimized away. No code
# exists for it, so the line numbers skip directly from "del x"
# to "x = 1".
def arigo_example():
x = 1
del x
while 0:
pass
x = 1
arigo_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(5, 'line'),
(5, 'return')]
# check that lines consisting of just one instruction get traced:
def one_instr_line():
x = 1
del x
x = 1
one_instr_line.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(3, 'return')]
def no_pop_tops(): # 0
x = 1 # 1
for a in range(2): # 2
if a: # 3
x = 1 # 4
else: # 5
x = 1 # 6
no_pop_tops.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(6, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(2, 'line'),
(2, 'return')]
def no_pop_blocks():
y = 1
while not y:
bla
x = 1
no_pop_blocks.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(4, 'line'),
(4, 'return')]
def called(): # line -3
x = 1
def call(): # line 0
called()
call.events = [(0, 'call'),
(1, 'line'),
(-3, 'call'),
(-2, 'line'),
(-2, 'return'),
(1, 'return')]
def raises():
raise Exception
def test_raise():
try:
raises()
except Exception, exc:
x = 1
test_raise.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(-3, 'call'),
(-2, 'line'),
(-2, 'exception'),
(-2, 'return'),
(2, 'exception'),
(3, 'line'),
(4, 'line'),
(4, 'return')]
def _settrace_and_return(tracefunc):
sys.settrace(tracefunc)
sys._getframe().f_back.f_trace = tracefunc
def settrace_and_return(tracefunc):
_settrace_and_return(tracefunc)
settrace_and_return.events = [(1, 'return')]
def _settrace_and_raise(tracefunc):
sys.settrace(tracefunc)
sys._getframe().f_back.f_trace = tracefunc
raise RuntimeError
def settrace_and_raise(tracefunc):
try:
_settrace_and_raise(tracefunc)
except RuntimeError, exc:
pass
settrace_and_raise.events = [(2, 'exception'),
(3, 'line'),
(4, 'line'),
(4, 'return')]
# implicit return example
# This test is interesting because of the else: pass
# part of the code. The code generated for the true
# part of the if contains a jump past the else branch.
# The compiler then generates an implicit "return None"
# Internally, the compiler visits the pass statement
# and stores its line number for use on the next instruction.
# The next instruction is the implicit return None.
def ireturn_example():
a = 5
b = 5
if a == b:
b = a+1
else:
pass
ireturn_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(6, 'line'),
(6, 'return')]
# Tight loop with while(1) example (SF #765624)
def tightloop_example():
items = range(0, 3)
try:
i = 0
while 1:
b = items[i]; i+=1
except IndexError:
pass
tightloop_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'exception'),
(6, 'line'),
(7, 'line'),
(7, 'return')]
def tighterloop_example():
items = range(1, 4)
try:
i = 0
while 1: i = items[i]
except IndexError:
pass
tighterloop_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'exception'),
(5, 'line'),
(6, 'line'),
(6, 'return')]
def generator_function():
try:
yield True
"continued"
finally:
"finally"
def generator_example():
# any() will leave the generator before its end
x = any(generator_function())
# the following lines were not traced
for x in range(10):
y = x
generator_example.events = ([(0, 'call'),
(2, 'line'),
(-6, 'call'),
(-5, 'line'),
(-4, 'line'),
(-4, 'return'),
(-4, 'call'),
(-4, 'exception'),
(-1, 'line'),
(-1, 'return')] +
[(5, 'line'), (6, 'line')] * 10 +
[(5, 'line'), (5, 'return')])
class Tracer:
def __init__(self):
self.events = []
def trace(self, frame, event, arg):
self.events.append((frame.f_lineno, event))
return self.trace
def traceWithGenexp(self, frame, event, arg):
(o for o in [1])
self.events.append((frame.f_lineno, event))
return self.trace
class TraceTestCase(unittest.TestCase):
# Disable gc collection when tracing, otherwise the
# deallocators may be traced as well.
def setUp(self):
self.using_gc = gc.isenabled()
gc.disable()
def tearDown(self):
if self.using_gc:
gc.enable()
def compare_events(self, line_offset, events, expected_events):
events = [(l - line_offset, e) for (l, e) in events]
if events != expected_events:
self.fail(
"events did not match expectation:\n" +
"\n".join(difflib.ndiff([str(x) for x in expected_events],
[str(x) for x in events])))
def run_and_compare(self, func, events):
tracer = Tracer()
sys.settrace(tracer.trace)
func()
sys.settrace(None)
self.compare_events(func.func_code.co_firstlineno,
tracer.events, events)
def run_test(self, func):
self.run_and_compare(func, func.events)
def run_test2(self, func):
tracer = Tracer()
func(tracer.trace)
sys.settrace(None)
self.compare_events(func.func_code.co_firstlineno,
tracer.events, func.events)
def test_set_and_retrieve_none(self):
sys.settrace(None)
assert sys.gettrace() is None
def test_set_and_retrieve_func(self):
def fn(*args):
pass
sys.settrace(fn)
try:
assert sys.gettrace() is fn
finally:
sys.settrace(None)
def test_01_basic(self):
self.run_test(basic)
def test_02_arigo(self):
self.run_test(arigo_example)
def test_03_one_instr(self):
self.run_test(one_instr_line)
def test_04_no_pop_blocks(self):
self.run_test(no_pop_blocks)
def test_05_no_pop_tops(self):
self.run_test(no_pop_tops)
def test_06_call(self):
self.run_test(call)
def test_07_raise(self):
self.run_test(test_raise)
def test_08_settrace_and_return(self):
self.run_test2(settrace_and_return)
def test_09_settrace_and_raise(self):
self.run_test2(settrace_and_raise)
def test_10_ireturn(self):
self.run_test(ireturn_example)
def test_11_tightloop(self):
self.run_test(tightloop_example)
def test_12_tighterloop(self):
self.run_test(tighterloop_example)
def test_13_genexp(self):
self.run_test(generator_example)
# issue1265: if the trace function contains a generator,
# and if the traced function contains another generator
# that is not completely exhausted, the trace stopped.
# Worse: the 'finally' clause was not invoked.
tracer = Tracer()
sys.settrace(tracer.traceWithGenexp)
generator_example()
sys.settrace(None)
self.compare_events(generator_example.__code__.co_firstlineno,
tracer.events, generator_example.events)
def test_14_onliner_if(self):
def onliners():
if True: False
else: True
return 0
self.run_and_compare(
onliners,
[(0, 'call'),
(1, 'line'),
(3, 'line'),
(3, 'return')])
def test_15_loops(self):
# issue1750076: "while" expression is skipped by debugger
def for_example():
for x in range(2):
pass
self.run_and_compare(
for_example,
[(0, 'call'),
(1, 'line'),
(2, 'line'),
(1, 'line'),
(2, 'line'),
(1, 'line'),
(1, 'return')])
def while_example():
# While expression should be traced on every loop
x = 2
while x > 0:
x -= 1
self.run_and_compare(
while_example,
[(0, 'call'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(3, 'line'),
(4, 'line'),
(3, 'line'),
(3, 'return')])
def test_16_blank_lines(self):
exec("def f():\n" + "\n" * 256 + " pass")
self.run_and_compare(
f,
[(0, 'call'),
(257, 'line'),
(257, 'return')])
class RaisingTraceFuncTestCase(unittest.TestCase):
def trace(self, frame, event, arg):
"""A trace function that raises an exception in response to a
specific trace event."""
if event == self.raiseOnEvent:
raise ValueError # just something that isn't RuntimeError
else:
return self.trace
def f(self):
"""The function to trace; raises an exception if that's the case
we're testing, so that the 'exception' trace event fires."""
if self.raiseOnEvent == 'exception':
x = 0
y = 1 // x
else:
return 1
def run_test_for_event(self, event):
"""Tests that an exception raised in response to the given event is
handled OK."""
self.raiseOnEvent = event
try:
for i in xrange(sys.getrecursionlimit() + 1):
sys.settrace(self.trace)
try:
self.f()
except ValueError:
pass
else:
self.fail("exception not raised!")
except RuntimeError:
self.fail("recursion counter not reset")
# Test the handling of exceptions raised by each kind of trace event.
def test_call(self):
self.run_test_for_event('call')
def test_line(self):
self.run_test_for_event('line')
def test_return(self):
self.run_test_for_event('return')
def test_exception(self):
self.run_test_for_event('exception')
def test_trash_stack(self):
def f():
for i in range(5):
print i # line tracing will raise an exception at this line
def g(frame, why, extra):
if (why == 'line' and
frame.f_lineno == f.func_code.co_firstlineno + 2):
raise RuntimeError, "i am crashing"
return g
sys.settrace(g)
try:
f()
except RuntimeError:
# the test is really that this doesn't segfault:
import gc
gc.collect()
else:
self.fail("exception not propagated")
# 'Jump' tests: assigning to frame.f_lineno within a trace function
# moves the execution position - it's how debuggers implement a Jump
# command (aka. "Set next statement").
class JumpTracer:
"""Defines a trace function that jumps from one place to another,
with the source and destination lines of the jump being defined by
the 'jump' property of the function under test."""
def __init__(self, function):
self.function = function
self.jumpFrom = function.jump[0]
self.jumpTo = function.jump[1]
self.done = False
def trace(self, frame, event, arg):
if not self.done and frame.f_code == self.function.func_code:
firstLine = frame.f_code.co_firstlineno
if event == 'line' and frame.f_lineno == firstLine + self.jumpFrom:
# Cope with non-integer self.jumpTo (because of
# no_jump_to_non_integers below).
try:
frame.f_lineno = firstLine + self.jumpTo
except TypeError:
frame.f_lineno = self.jumpTo
self.done = True
return self.trace
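# Minimal sketch (separate from the test machinery below) showing the jump
# mechanism in isolation: assigning frame.f_lineno from a 'line' trace event
# moves execution, which is how a debugger's "Set next statement" works.
# The helper skips the traced function's second append(); it is purely
# illustrative and is not used by the test cases.
def _jump_sketch():
    def target():
        steps = []
        steps.append('first')
        steps.append('second')   # skipped by the jump below
        steps.append('third')
        return steps
    first = target.func_code.co_firstlineno
    def tracer(frame, event, arg):
        if (event == 'line' and frame.f_code is target.func_code
                and frame.f_lineno == first + 3):
            frame.f_lineno = first + 4
        return tracer
    sys.settrace(tracer)
    try:
        return target()          # ['first', 'third']
    finally:
        sys.settrace(None)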
# The first set of 'jump' tests are for things that are allowed:
def jump_simple_forwards(output):
output.append(1)
output.append(2)
output.append(3)
jump_simple_forwards.jump = (1, 3)
jump_simple_forwards.output = [3]
def jump_simple_backwards(output):
output.append(1)
output.append(2)
jump_simple_backwards.jump = (2, 1)
jump_simple_backwards.output = [1, 1, 2]
def jump_out_of_block_forwards(output):
for i in 1, 2:
output.append(2)
for j in [3]: # Also tests jumping over a block
output.append(4)
output.append(5)
jump_out_of_block_forwards.jump = (3, 5)
jump_out_of_block_forwards.output = [2, 5]
def jump_out_of_block_backwards(output):
output.append(1)
for i in [1]:
output.append(3)
for j in [2]: # Also tests jumping over a block
output.append(5)
output.append(6)
output.append(7)
jump_out_of_block_backwards.jump = (6, 1)
jump_out_of_block_backwards.output = [1, 3, 5, 1, 3, 5, 6, 7]
def jump_to_codeless_line(output):
output.append(1)
# Jumping to this line should skip to the next one.
output.append(3)
jump_to_codeless_line.jump = (1, 2)
jump_to_codeless_line.output = [3]
def jump_to_same_line(output):
output.append(1)
output.append(2)
output.append(3)
jump_to_same_line.jump = (2, 2)
jump_to_same_line.output = [1, 2, 3]
# Tests jumping within a finally block, and over one.
def jump_in_nested_finally(output):
try:
output.append(2)
finally:
output.append(4)
try:
output.append(6)
finally:
output.append(8)
output.append(9)
jump_in_nested_finally.jump = (4, 9)
jump_in_nested_finally.output = [2, 9]
# The second set of 'jump' tests are for things that are not allowed:
def no_jump_too_far_forwards(output):
try:
output.append(2)
output.append(3)
except ValueError, e:
output.append('after' in str(e))
no_jump_too_far_forwards.jump = (3, 6)
no_jump_too_far_forwards.output = [2, True]
def no_jump_too_far_backwards(output):
try:
output.append(2)
output.append(3)
except ValueError, e:
output.append('before' in str(e))
no_jump_too_far_backwards.jump = (3, -1)
no_jump_too_far_backwards.output = [2, True]
# Test each kind of 'except' line.
def no_jump_to_except_1(output):
try:
output.append(2)
except:
e = sys.exc_info()[1]
output.append('except' in str(e))
no_jump_to_except_1.jump = (2, 3)
no_jump_to_except_1.output = [True]
def no_jump_to_except_2(output):
try:
output.append(2)
except ValueError:
e = sys.exc_info()[1]
output.append('except' in str(e))
no_jump_to_except_2.jump = (2, 3)
no_jump_to_except_2.output = [True]
def no_jump_to_except_3(output):
try:
output.append(2)
except ValueError, e:
output.append('except' in str(e))
no_jump_to_except_3.jump = (2, 3)
no_jump_to_except_3.output = [True]
def no_jump_to_except_4(output):
try:
output.append(2)
except (ValueError, RuntimeError), e:
output.append('except' in str(e))
no_jump_to_except_4.jump = (2, 3)
no_jump_to_except_4.output = [True]
def no_jump_forwards_into_block(output):
try:
output.append(2)
for i in 1, 2:
output.append(4)
except ValueError, e:
output.append('into' in str(e))
no_jump_forwards_into_block.jump = (2, 4)
no_jump_forwards_into_block.output = [True]
def no_jump_backwards_into_block(output):
try:
for i in 1, 2:
output.append(3)
output.append(4)
except ValueError, e:
output.append('into' in str(e))
no_jump_backwards_into_block.jump = (4, 3)
no_jump_backwards_into_block.output = [3, 3, True]
def no_jump_into_finally_block(output):
try:
try:
output.append(3)
x = 1
finally:
output.append(6)
except ValueError, e:
output.append('finally' in str(e))
no_jump_into_finally_block.jump = (4, 6)
no_jump_into_finally_block.output = [3, 6, True] # The 'finally' still runs
def no_jump_out_of_finally_block(output):
try:
try:
output.append(3)
finally:
output.append(5)
output.append(6)
except ValueError, e:
output.append('finally' in str(e))
no_jump_out_of_finally_block.jump = (5, 1)
no_jump_out_of_finally_block.output = [3, True]
# This verifies the line-numbers-must-be-integers rule.
def no_jump_to_non_integers(output):
try:
output.append(2)
except ValueError, e:
output.append('integer' in str(e))
no_jump_to_non_integers.jump = (2, "Spam")
no_jump_to_non_integers.output = [True]
def jump_across_with(output):
with open(test_support.TESTFN, "wb") as fp:
pass
with open(test_support.TESTFN, "wb") as fp:
pass
jump_across_with.jump = (1, 3)
jump_across_with.output = []
# This verifies that you can't set f_lineno via _getframe or similar
# trickery.
def no_jump_without_trace_function():
try:
previous_frame = sys._getframe().f_back
previous_frame.f_lineno = previous_frame.f_lineno
except ValueError, e:
# This is the exception we wanted; make sure the error message
# talks about trace functions.
if 'trace' not in str(e):
raise
else:
# Something's wrong - the expected exception wasn't raised.
raise RuntimeError, "Trace-function-less jump failed to fail"
class JumpTestCase(unittest.TestCase):
def compare_jump_output(self, expected, received):
if received != expected:
self.fail( "Outputs don't match:\n" +
"Expected: " + repr(expected) + "\n" +
"Received: " + repr(received))
def run_test(self, func):
tracer = JumpTracer(func)
sys.settrace(tracer.trace)
output = []
func(output)
sys.settrace(None)
self.compare_jump_output(func.output, output)
def test_01_jump_simple_forwards(self):
self.run_test(jump_simple_forwards)
def test_02_jump_simple_backwards(self):
self.run_test(jump_simple_backwards)
def test_03_jump_out_of_block_forwards(self):
self.run_test(jump_out_of_block_forwards)
def test_04_jump_out_of_block_backwards(self):
self.run_test(jump_out_of_block_backwards)
def test_05_jump_to_codeless_line(self):
self.run_test(jump_to_codeless_line)
def test_06_jump_to_same_line(self):
self.run_test(jump_to_same_line)
def test_07_jump_in_nested_finally(self):
self.run_test(jump_in_nested_finally)
def test_08_no_jump_too_far_forwards(self):
self.run_test(no_jump_too_far_forwards)
def test_09_no_jump_too_far_backwards(self):
self.run_test(no_jump_too_far_backwards)
def test_10_no_jump_to_except_1(self):
self.run_test(no_jump_to_except_1)
def test_11_no_jump_to_except_2(self):
self.run_test(no_jump_to_except_2)
def test_12_no_jump_to_except_3(self):
self.run_test(no_jump_to_except_3)
def test_13_no_jump_to_except_4(self):
self.run_test(no_jump_to_except_4)
def test_14_no_jump_forwards_into_block(self):
self.run_test(no_jump_forwards_into_block)
def test_15_no_jump_backwards_into_block(self):
self.run_test(no_jump_backwards_into_block)
def test_16_no_jump_into_finally_block(self):
self.run_test(no_jump_into_finally_block)
def test_17_no_jump_out_of_finally_block(self):
self.run_test(no_jump_out_of_finally_block)
def test_18_no_jump_to_non_integers(self):
self.run_test(no_jump_to_non_integers)
def test_19_no_jump_without_trace_function(self):
no_jump_without_trace_function()
def test_jump_across_with(self):
self.addCleanup(test_support.unlink, test_support.TESTFN)
self.run_test(jump_across_with)
def test_20_large_function(self):
d = {}
exec("""def f(output): # line 0
x = 0 # line 1
y = 1 # line 2
''' # line 3
%s # lines 4-1004
''' # line 1005
x += 1 # line 1006
output.append(x) # line 1007
return""" % ('\n' * 1000,), d)
f = d['f']
f.jump = (2, 1007)
f.output = [0]
self.run_test(f)
def test_jump_to_firstlineno(self):
# This tests that PDB can jump back to the first line in a
# file. See issue #1689458. It can only be triggered in a
# function call if the function is defined on a single line.
code = compile("""
# Comments don't count.
output.append(2) # firstlineno is here.
output.append(3)
output.append(4)
""", "<fake module>", "exec")
class fake_function:
func_code = code
jump = (2, 0)
tracer = JumpTracer(fake_function)
sys.settrace(tracer.trace)
namespace = {"output": []}
exec code in namespace
sys.settrace(None)
self.compare_jump_output([2, 3, 2, 3, 4], namespace["output"])
def test_main():
test_support.run_unittest(
TraceTestCase,
RaisingTraceFuncTestCase,
JumpTestCase
)
if __name__ == "__main__":
test_main()
|
j5shi/Thruster
|
pylibs/test/test_sys_settrace.py
|
Python
|
gpl-2.0
| 24,801
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta 4
# Copyright 2015 tvalacarta@gmail.com
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of pelisalacarta 4.
#
# pelisalacarta 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pelisalacarta 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pelisalacarta 4. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------
# MCT - Mini Torrent Client (Mini Cliente Torrent) for pelisalacarta
#------------------------------------------------------------
import os
import re
import shutil
import tempfile
import urllib
import urllib2
try:
from python_libtorrent import get_libtorrent
lt = get_libtorrent()
except Exception, e:
import libtorrent as lt
import xbmc
import xbmcgui
from core import config
from core import scrapertools
from core import filetools
def play(url, xlistitem, is_view=None, subtitle=""):
    # -- Needed for some websites --------------------------------
if not url.endswith(".torrent") and not url.startswith("magnet"):
t_file = scrapertools.get_header_from_response(url, header_to_get="location")
if len(t_file) > 0:
url = t_file
t_file = scrapertools.get_header_from_response(url, header_to_get="location")
if len(t_file) > 0:
url = t_file
    # -- Create two folders under "downloads" for the files ------
save_path_videos = os.path.join( config.get_setting("downloadpath") , "torrent-videos" )
save_path_torrents = os.path.join( config.get_setting("downloadpath") , "torrent-torrents" )
if not os.path.exists( save_path_torrents ): os.mkdir(save_path_torrents)
    # -- Use a torrent file from the web, a magnet link or the HD -
if not os.path.isfile(url) and not url.startswith("magnet"):
        # -- http - create the torrent file ----------------------
data = url_get(url)
        # -- The torrent name will be the one contained in the  --
        # -- downloaded data.                                     -
re_name = urllib.unquote( scrapertools.get_match(data,':name\d+:(.*?)\d+:') )
#torrent_file = os.path.join(save_path_torrents, re_name+'.torrent')
        torrent_file = filetools.join(save_path_torrents, unicode(re_name, "utf-8", errors="replace")+'.torrent')
f = open(torrent_file,'wb')
f.write(data)
f.close()
elif os.path.isfile(url):
        # -- file - to use torrents from the HD ------------------
torrent_file = url
else:
# -- magnet ---------------------------------------------
torrent_file = url
# -----------------------------------------------------------
# -- MCT - MiniClienteTorrent -------------------------------
ses = lt.session()
print "### Init session ########"
print lt.version
print "#########################"
ses.add_dht_router("router.bittorrent.com",6881)
ses.add_dht_router("router.utorrent.com",6881)
ses.add_dht_router("router.bitcomet.com",554)
ses.add_dht_router("dht.transmissionbt.com",6881)
trackers = [
"http://exodus.desync.com:6969/announce",
"udp://tracker.publicbt.com:80/announce",
"udp://tracker.openbittorrent.com:80/announce",
"http://tracker.torrentbay.to:6969/announce",
"http://fr33dom.h33t.com:3310/announce",
"http://tracker.pow7.com/announce",
"udp://tracker.ccc.de:80/announce",
"http://tracker.bittorrent.am:80/announce",
"http://denis.stalker.h3q.com:6969/announce",
"udp://tracker.prq.to:80/announce",
"udp://tracker.istole.it:80/announce",
"udp://open.demonii.com:1337",
"http://9.rarbg.com:2710/announce",
"http://announce.torrentsmd.com:6969/announce",
"http://bt.careland.com.cn:6969/announce",
"http://explodie.org:6969/announce",
"http://mgtracker.org:2710/announce",
"http://tracker.best-torrents.net:6969/announce",
"http://tracker.tfile.me/announce",
"http://tracker.torrenty.org:6969/announce",
"http://tracker1.wasabii.com.tw:6969/announce",
"udp://9.rarbg.com:2710/announce",
"udp://9.rarbg.me:2710/announce",
"udp://coppersurfer.tk:6969/announce",
"udp://tracker.btzoo.eu:80/announce",
"http://www.spanishtracker.com:2710/announce",
"http://www.todotorrents.com:2710/announce",
]
video_file = ""
# -- magnet2torrent -----------------------------------------
if torrent_file.startswith("magnet"):
tempdir = tempfile.mkdtemp()
params = {
'save_path': tempdir,
'trackers':trackers,
'storage_mode': lt.storage_mode_t.storage_mode_allocate,
'paused': False,
'auto_managed': True,
'duplicate_is_error': True
}
h = lt.add_magnet_uri(ses, torrent_file, params)
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
while not h.has_metadata():
message, porcent, msg_file, s, download = getProgress(h, "Creando torrent desde magnet")
dp.update(porcent, message, msg_file)
if s.state == 1: download = 1
if dp.iscanceled():
dp.close()
remove_files( download, torrent_file, video_file, ses, h )
return
dp.close()
info = h.get_torrent_info()
data = lt.bencode( lt.create_torrent(info).generate() )
#torrent_file = os.path.join(save_path_torrents, info.name() + ".torrent")
        torrent_file = os.path.join(save_path_torrents, unicode(info.name(), "utf-8", errors="replace") + ".torrent")
f = open(torrent_file,'wb')
f.write(data)
f.close()
ses.remove_torrent(h)
shutil.rmtree(tempdir)
# -----------------------------------------------------------
    # -- Torrent files -------------------------------------------
e = lt.bdecode(open(torrent_file, 'rb').read())
info = lt.torrent_info(e)
    # -- The biggest file, or one of the biggest, is assumed to -
    # -- be the video, or at least the video used as reference  -
    # -- for the file type                                      -
print "##### Archivos ## %s ##" % len(info.files())
_index_file, _video_file, _size_file = get_video_file(info)
_video_file_ext = os.path.splitext( _video_file )[1]
if _video_file_ext == ".avi" or _video_file_ext == ".mp4":
print "##### storage_mode_t.storage_mode_allocate ("+_video_file_ext+") #####"
h = ses.add_torrent( { 'ti':info, 'save_path': save_path_videos, 'trackers':trackers, 'storage_mode':lt.storage_mode_t.storage_mode_allocate } )
else:
print "##### storage_mode: none ("+_video_file_ext+") #####"
h = ses.add_torrent( { 'ti':info, 'save_path': save_path_videos, 'trackers':trackers, 'storage_mode':lt.storage_mode_t.storage_mode_sparse } )
# -----------------------------------------------------------
    # -- Sequential download - piece 1, piece 2, ... -------------
h.set_sequential_download(True)
h.force_reannounce()
h.force_dht_announce()
    # -- Prioritize/select the file ------------------------------
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1:
_index = _index_file
video_file = _video_file
video_size = _size_file
    # -- Initialize variables for the automatic 'pause' used    -
    # -- when the video gets close to an incomplete piece       -
is_greater_num_pieces = False
is_greater_num_pieces_plus = False
is_greater_num_pieces_pause = False
#porcent4first_pieces = int( video_size / 1073741824 )
porcent4first_pieces = int( video_size * 0.000000005 )
if porcent4first_pieces < 10: porcent4first_pieces = 10
if porcent4first_pieces > 100: porcent4first_pieces = 100
#num_pieces_to_resume = int( video_size / 1610612736 )
num_pieces_to_resume = int( video_size * 0.0000000025 )
if num_pieces_to_resume < 5: num_pieces_to_resume = 5
if num_pieces_to_resume > 25: num_pieces_to_resume = 25
print "##### porcent4first_pieces ## %s ##" % porcent4first_pieces
print "##### num_pieces_to_resume ## %s ##" % num_pieces_to_resume
    # -- Prioritize or select, via 'file_priorities', the       -
    # -- pieces of the file to be played                        -
piece_set = set_priority_pieces(h, _index, video_file, video_size)
    # -- Create the progress dialog for the first loop -----------
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
_pieces_info = {}
    # -- Two nested loops ----------------------------------------
    # -- Download - first loop                                    -
while not h.is_seed():
s = h.status()
xbmc.sleep(100)
        # -- Retrieve the progress data --------------------------
message, porcent, msg_file, s, download = getProgress(h, video_file, _pf=_pieces_info)
        # -- If it is 'checking', a download already exists ------
        # -- 'download' is used to know whether there is         -
        # -- downloaded data for the 'remove_files' dialog       -
if s.state == 1: download = 1
        # -- Player - play ---------------------------------------
        # -- Check whether the pieces needed to start the video  -
        # -- have been completed                                 -
first_pieces = True
_p = ""
_c = 0
for i in range( piece_set[0], piece_set[porcent4first_pieces] ):
_p+= "[%s:%s]" % ( i, h.have_piece(i) )
first_pieces&= h.have_piece(i)
if h.have_piece(i): _c+= 1
_pieces_info = {'current': 0, 'continuous': "%s/%s" % (_c,porcent4first_pieces), 'have': h.status().num_pieces, 'len': len(piece_set)}
_p = "##### first_pieces [%s/%s][%s]: " % ( _c, porcent4first_pieces, len(piece_set) ) + _p
print _p
# -- -------------------------------------------------- -
if is_view != "Ok" and first_pieces:
print "##### porcent [%.2f%%]" % (s.progress * 100)
is_view = "Ok"
dp.close()
            # -- Player - play the video -------------------------
playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO )
playlist.clear()
#ren_video_file = os.path.join( save_path_videos, video_file ).replace('\\','\\\\')
ren_video_file = os.path.join( save_path_videos, video_file )
playlist.add( ren_video_file, xlistitem )
#playlist.add( os.path.join( save_path_videos, video_file ), xlistitem )
#playlist.add( "http://192.168.0.200/mctplay/" + video_file.replace(' ','%20'), xlistitem )
player = play_video( xbmc.PLAYER_CORE_AUTO )
player.play(playlist)
'''
# -- Player - Ver el vídeo --------------------------
player = play_video()
#player.play( os.path.join( save_path_videos, video_file ) )
player.play( "http://192.168.0.200/mctplay/" + video_file.replace(' ','%20') )
'''
#player.play( os.path.join( save_path_videos, video_file ) )
            # -- Cancellation counter for the automatic 'pause'  -
            # -- window                                          -
is_greater_num_pieces_canceled = 0
continuous_pieces = 0
porcent_time = 0.00
current_piece = 0
            # -- Prevent Kodi from 'resuming' a file that was ----
            # -- played before and then deleted, so playback is  -
            # -- not attempted at a piece that has not been      -
            # -- completed yet, which would trigger the          -
            # -- automatic 'pause'                               -
not_resume = True
            # -- Subtitles flag
_sub = False
            # -- Second loop - Player - event handling -----------
while player.isPlaying():
xbmc.sleep(100)
                # -- Add subtitles
if subtitle!="" and not _sub:
_sub = True
player.setSubtitles(subtitle)
                # -- Prevent Kodi from 'resuming' at the start ---
                # -- of the download of a known file             -
if not_resume:
player.seekTime(0)
not_resume = False
#xbmc.sleep(1000)
                # -- Automatic 'pause' control                   -
continuous_pieces = count_completed_continuous_pieces(h, piece_set)
if xbmc.Player().isPlaying():
                    # -- Video progress percentage ---------------
porcent_time = player.getTime() / player.getTotalTime() * 100
                    # -- Piece currently being played ------------
current_piece = int( porcent_time / 100 * len(piece_set) )
                    # -- Control flags ---------------------------
is_greater_num_pieces = (current_piece > continuous_pieces - num_pieces_to_resume)
is_greater_num_pieces_plus = (current_piece + porcent4first_pieces > continuous_pieces)
is_greater_num_pieces_finished = (current_piece + porcent4first_pieces >= len(piece_set))
                    # -- Trigger the automatic 'pause' -----------
if is_greater_num_pieces and not player.paused and not is_greater_num_pieces_finished:
is_greater_num_pieces_pause = True
player.pause()
# -- Log ------------------------------------
_TotalTime = player.getTotalTime()
_Time = player.getTime()
_print_log = "\n##### Player ##################################"
_print_log+= "\nTamaño del vídeo: %s" % video_size
_print_log+= "\nTotal piezas: %s" % len(piece_set)
_print_log+= "\nPiezas contiguas: %s" % continuous_pieces
_print_log+= "\n-----------------------------------------------"
_print_log+= "\nVídeo-Total segundos: %s" % _TotalTime
_print_log+= "\nVídeo-Progreso segundos: %s" % _Time
_print_log+= "\nVídeo-Progreso porcentaje: %.2f%%" % porcent_time
_print_log+= "\n-----------------------------------------------"
_print_log+= "\ncurrent_piece: %s" % current_piece
_print_log+= "\nis_greater_num_pieces: %s" % is_greater_num_pieces
_print_log+= "\nis_greater_num_pieces_plus: %s" % is_greater_num_pieces_plus
_print_log+= "\nis_greater_num_pieces_pause: %s" % is_greater_num_pieces_pause
_print_log+= "\nis_greater_num_pieces_finished: %s" % is_greater_num_pieces_finished
_print_log+= "\nPieza que se está visionando: %.2f" % ( porcent_time / 100 * len(piece_set) )
_print_log+= "\nOffset que se está visionando: %.2f" % ( porcent_time / 100 * video_size )
if is_greater_num_pieces and not player.paused and not is_greater_num_pieces_finished:
_print_log+= "\n+++++++++++++++++++++++++++++++++++++++++++++++"
_print_log+= "\nPausa con:"
_print_log+= "\n current_piece = %s" % current_piece
_print_log+= "\n continuous_pieces = %s" % continuous_pieces
_print_log+= "\n###############################################"
print _print_log
# -------------------------------------------
_pieces_info = {'current': current_piece, 'continuous': continuous_pieces, 'have': h.status().num_pieces, 'len': len(piece_set)}
                # -- Close the progress dialog -------------------
if player.resumed:
dp.close()
                # -- Show the progress dialog --------------------
if player.paused:
                    # -- Create the dialog if it does not exist --
if not player.statusDialogoProgress:
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
player.setDialogoProgress()
                    # -- Status dialogs during playback ----------
if not h.is_seed():
                        # -- Retrieve the progress data ----------
message, porcent, msg_file, s, download = getProgress(h, video_file, _pf=_pieces_info)
dp.update(porcent, message, msg_file)
else:
dp.update(100, "Descarga completa: " + video_file)
                    # -- Progress dialog cancelled during playback
                    # -- Continue                                -
if dp.iscanceled():
dp.close()
player.pause()
                    # -- Progress dialog cancelled during the    -
                    # -- automatic 'pause' window.               -
                    # -- Stop if the counter reaches 3           -
if dp.iscanceled() and is_greater_num_pieces_pause:
is_greater_num_pieces_canceled+= 1
if is_greater_num_pieces_canceled == 3:
player.stop()
                    # -- Disable the automatic 'pause' and -------
                    # -- reset the cancellation counter          -
if not dp.iscanceled() and not is_greater_num_pieces_plus and is_greater_num_pieces_pause:
dp.close()
player.pause()
is_greater_num_pieces_pause = False
is_greater_num_pieces_canceled = 0
                    # -- The user cancelled playback -------------
                    # -- Finish                                  -
if player.ended:
                        # -- Delete-files dialog -----------------
remove_files( download, torrent_file, video_file, ses, h )
return
        # -- Kodi - playback was closed --------------------------
        # -- Continue | Finish                                   -
if is_view == "Ok" and not xbmc.Player().isPlaying():
if info.num_files() == 1:
                # -- Continue-or-finish dialog -------------------
d = xbmcgui.Dialog()
ok = d.yesno('pelisalacarta-MCT', 'XBMC-Kodi Cerró el vídeo.', '¿Continuar con la sesión?')
else: ok = False
            # -- YES ---------------------------------------------
if ok:
                # -- Continue: -----------------------------------
is_view=None
else:
                # -- Finish: -------------------------------------
                # -- Check whether the video belongs to a --------
                # -- list of files                               -
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1 or info.num_files() == 1:
                    # -- Delete-files dialog ---------------------
remove_files( download, torrent_file, video_file, ses, h )
return
else:
                    # -- File list. Options dialog ---------------
piece_set = set_priority_pieces(h, _index, video_file, video_size)
is_view=None
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
        # -- Show progress before playback -----------------------
if is_view != "Ok" :
dp.update(porcent, message, msg_file)
        # -- Progress dialog cancelled before playback -----------
        # -- Finish                                              -
if dp.iscanceled():
dp.close()
            # -- Check whether the video belongs to a list of    -
            # -- files                                           -
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1 or info.num_files() == 1:
                # -- Delete-files dialog -------------------------
remove_files( download, torrent_file, video_file, ses, h )
return
else:
                # -- File list. Options dialog -------------------
piece_set = set_priority_pieces(h, _index, video_file, video_size)
is_view=None
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
    # -- Kodi - Error? - it should never get here ----------------
if is_view == "Ok" and not xbmc.Player().isPlaying():
dp.close()
        # -- Delete-files dialog ---------------------------------
remove_files( download, torrent_file, video_file, ses, h )
return
# -- Download progress -------------------------------------------
def getProgress(h, video_file, _pf={}):
if len(_pf) > 0:
#_pf_msg = "[%s] [%s] [%s] [%s][CR]" % (_pf['current'], _pf['continuous'], _pf['have'], _pf['len'])
_pf_msg = "[%s] [%s] [%s] [%s]" % (_pf['current'], _pf['continuous'], _pf['have'], _pf['len'])
else: _pf_msg = ""
s = h.status()
state_str = ['queued', 'checking', 'downloading metadata', \
'downloading', 'finished', 'seeding', 'allocating', 'checking fastresume']
message = '%.2f%% d:%.1f kb/s u:%.1f kb/s p:%d s:%d %s' % \
(s.progress * 100, s.download_rate / 1000, s.upload_rate / 1000, \
s.num_peers, s.num_seeds, state_str[s.state])
porcent = int( s.progress * 100 )
download = ( s.progress * 100 )
if "/" in video_file: video_file = video_file.split("/")[1]
#msg_file = "..../"+video_file + " - %.2f MB" % (s.total_wanted/1048576.0)
#msg_file = video_file + " - %.2f MB" % (s.total_wanted/1048576.0)
msg_file = video_file
#msg_file = "[%s] "%len(msg_file)+_pf_msg+msg_file
if len(msg_file) > 50:
msg_file = msg_file.replace( video_file, os.path.splitext(video_file)[0][:40] + "... " + os.path.splitext(video_file)[1] )
msg_file = msg_file + "[CR]" + "%.2f MB" % (s.total_wanted/1048576.0) + " - " + _pf_msg
return (message, porcent, msg_file, s, download)
# -- play_video class - event handling ---------------------------
class play_video(xbmc.Player):
def __init__( self, *args, **kwargs ):
self.paused = False
self.resumed = True
self.statusDialogoProgress = False
self.ended = False
def onPlayBackPaused(self):
self.paused = True
self.resumed = False
def onPlayBackResumed(self):
self.paused = False
self.resumed = True
self.statusDialogoProgress = False
def is_paused(self):
return self.paused
def setDialogoProgress(self):
self.statusDialogoProgress = True
def is_started(self):
self.ended = False
def is_ended(self):
self.ended = True
# -- Get the name of a video file from the metadata --------------
# -- The biggest file, or one of the biggest, is assumed to be   -
# -- the video, or at least the video used as reference for the  -
# -- file type                                                   -
def get_video_file( info ):
size_file = 0
for i, f in enumerate(info.files()):
if f.size > size_file:
video_file = f.path.replace("\\","/")
size_file = f.size
index_file = i
return index_file, video_file, size_file
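# A minimal usage sketch (illustrative only; "example.torrent" is a
# placeholder path, not part of this add-on):
#
#     e = lt.bdecode(open("example.torrent", 'rb').read())
#     info = lt.torrent_info(e)
#     index_file, video_file, size_file = get_video_file(info)
#     print "##### biggest file ## %s (%s bytes) ##" % (video_file, size_file)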
# -- Selection list for the video to prioritize ------------------
def get_video_files_sizes( info ):
opciones = []
vfile_name = {}
vfile_size = {}
for i, f in enumerate( info.files() ):
#_title = f.path
#try: _title = f.path.encode('iso-8859-1')
#except: _title = f.path.decode('utf-8')
#_title = f.path.encode('iso-8859-1')
_title = unicode(f.path, "iso-8859-1", errors="replace")
_title = unicode(f.path, "'utf-8'", errors="replace")
_title = re.sub(r'(.*? )- Temporada (\d+) Completa(.*?)',
r'\1T\2\3',
_title)
_title = re.sub(r'\s\([^\)]+\)|\s\-',
'',
_title)
info.rename_file( i, _title )
for i, f in enumerate( info.files() ):
_index = int(i)
_title = f.path.replace("\\","/")
_size = f.size
_offset = f.offset
_file_name = os.path.splitext( _title )[0]
if "/" in _file_name: _file_name = _file_name.split('/')[1]
_file_ext = os.path.splitext( _title )[1]
_caption = str(i) + \
" - " + \
_file_name + _file_ext + \
" - %.2f MB" % (_size / 1048576.0)
vfile_name[i] = _title
vfile_size[i] = _size
opciones.append(_caption)
if len(opciones) > 1:
d = xbmcgui.Dialog()
seleccion = d.select("pelisalacarta-MCT: Lista de vídeos", opciones)
else: seleccion = 0
if seleccion == -1:
vfile_name[seleccion] = ""
vfile_size[seleccion] = 0
return seleccion, vfile_name[seleccion], vfile_size[seleccion]
# -- Ask whether the downloaded data should be deleted -----------
def remove_files( download, torrent_file, video_file, ses, h ):
dialog_view = False
torrent = False
if os.path.isfile( torrent_file ):
dialog_view = True
torrent = True
if download > 0:
dialog_view = True
if "/" in video_file: video_file = video_file.split("/")[0]
if dialog_view:
d = xbmcgui.Dialog()
ok = d.yesno('pelisalacarta-MCT', 'Borrar las descargas del video', video_file)
        # -- YES --------------------------------------------------
if ok:
            # -- Delete the torrent file ------------------------
if torrent:
os.remove( torrent_file )
            # -- Delete folder/files and session - video --------
ses.remove_torrent( h, 1 )
print "### End session #########"
else:
            # -- Delete the session -----------------------------
ses.remove_torrent( h )
print "### End session #########"
else:
        # -- Delete the session ----------------------------------
ses.remove_torrent( h )
print "### End session #########"
return
# -- Download the data from the web to create the torrent --------
# -- To lighten the mct.py script this function can be imported  -
# -- from the torrent.py connector                               -
def url_get(url, params={}, headers={}):
from contextlib import closing
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:20.0) Gecko/20100101 Firefox/20.0"
if params:
import urllib
url = "%s?%s" % (url, urllib.urlencode(params))
req = urllib2.Request(url)
req.add_header("User-Agent", USER_AGENT)
for k, v in headers.items():
req.add_header(k, v)
try:
with closing(urllib2.urlopen(req)) as response:
data = response.read()
if response.headers.get("Content-Encoding", "") == "gzip":
import zlib
return zlib.decompressobj(16 + zlib.MAX_WBITS).decompress(data)
return data
except urllib2.HTTPError:
return None
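# Illustrative sketch of url_get (the URL is a placeholder): it returns the
# response body, transparently decompressing gzip, or None on HTTP errors.
#
#     data = url_get("http://example.com/file.torrent")
#     if data is None:
#         print "##### download failed ##"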
# -- Procedure to log have_piece during testing ------------------
def print_have_piece_set(h, piece_set):
c = 0
_print = "\n"
for i, _set in enumerate(piece_set):
if h.have_piece(_set): _print+= "[%s]" % str(_set).zfill(5)
else: _print+= "[XXXXX]"
c+= 1
if c == 20:
c = 0
_print+= "\n"
print _print
# -- Count the completed contiguous pieces of the video ----------
def count_completed_continuous_pieces(h, piece_set):
not_zero = 0
for i, _set in enumerate(piece_set):
if not h.have_piece(_set): break
else: not_zero = 1
return i + not_zero
# -- Prioritize or select, via 'file_priorities', the pieces of  -
# -- the file to be played, setting the desired file to 1 and    -
# -- the rest of the files to 0, and storing the indices of the  -
# -- file's pieces in a list                                     -
def set_priority_pieces(h, _index, video_file, video_size):
for i, _set in enumerate(h.file_priorities()):
if i != _index: h.file_priority(i,0)
else: h.file_priority(i,1)
piece_set = []
for i, _set in enumerate(h.piece_priorities()):
if _set == 1: piece_set.append(i)
return piece_set
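# Minimal sketch of how the selection/prioritization helpers above are
# combined inside play() (illustrative; 'info' and 'h' would come from an
# active libtorrent session, as created earlier in this module):
#
#     _index, video_file, video_size = get_video_files_sizes(info)
#     piece_set = set_priority_pieces(h, _index, video_file, video_size)
#     print "##### %s pieces selected for %s ##" % (len(piece_set), video_file)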
|
MoRgUiJu/morguiju.repo
|
plugin.video.pelisalacarta/platformcode/mct.py
|
Python
|
gpl-2.0
| 29,761
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebSearch Flask Blueprint."""
import cStringIO
from functools import wraps
from flask import g, render_template, request, flash, redirect, url_for, \
current_app, abort, Blueprint, send_file
from flask_breadcrumbs import register_breadcrumb
from flask_breadcrumbs import default_breadcrumb_root
from flask_login import current_user
from flask_menu import register_menu
from invenio.base.decorators import wash_arguments
from invenio.base.globals import cfg
from invenio.base.i18n import _
from invenio.base.signals import pre_template_render
from invenio.config import CFG_SITE_RECORD
from invenio.ext.template.context_processor import \
register_template_context_processor
from invenio.modules.search.models import Collection
from invenio.modules.search.signals import record_viewed
from invenio.utils import apache
from .api import get_record
from .models import Record as Bibrec
from .utils import references_nb_counts, citations_nb_counts, \
visible_collection_tabs
blueprint = Blueprint('record', __name__, url_prefix="/" + CFG_SITE_RECORD,
static_url_path='/record', template_folder='templates',
static_folder='static')
default_breadcrumb_root(blueprint, 'breadcrumbs.record')
def request_record(f):
"""Perform standard operation to check record availability for user."""
@wraps(f)
def decorated(recid, *args, **kwargs):
from invenio.modules.access.mailcookie import \
mail_cookie_create_authorize_action
from invenio.modules.access.local_config import VIEWRESTRCOLL
from invenio.legacy.search_engine import \
guess_primary_collection_of_a_record, \
check_user_can_view_record
from invenio.b2share.modules.main.utils import check_fresh_record
# ensure recid to be integer
recid = int(recid)
from invenio.legacy.search_engine import record_exists, get_merged_recid
if record_exists(recid) == 0:
# record doesn't exist, abort so it doesn't get incorrectly cached
abort(apache.HTTP_NOT_FOUND) # The record is gone!
if check_fresh_record(current_user, recid):
return render_template('record_waitforit.html', recid=recid)
g.collection = collection = Collection.query.filter(
Collection.name == guess_primary_collection_of_a_record(recid)).\
one()
(auth_code, auth_msg) = check_user_can_view_record(current_user, recid)
# only superadmins can use verbose parameter for obtaining debug
# information
if not current_user.is_super_admin and 'verbose' in kwargs:
kwargs['verbose'] = 0
if auth_code and current_user.is_guest:
cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {
'collection': g.collection.name})
url_args = {'action': cookie, 'ln': g.ln, 'referer': request.url}
flash(_("Authorization failure"), 'error')
return redirect(url_for('webaccount.login', **url_args))
elif auth_code:
flash(auth_msg, 'error')
abort(apache.HTTP_UNAUTHORIZED)
from invenio.legacy.search_engine import record_exists, \
get_merged_recid
        # check if the current record has been deleted
        # and has been merged, in which case the deleted record
        # will be redirected to the new one
record_status = record_exists(recid)
merged_recid = get_merged_recid(recid)
if record_status == -1 and merged_recid:
return redirect(url_for('record.metadata', recid=merged_recid))
elif record_status == -1:
abort(apache.HTTP_GONE) # The record is gone!
g.bibrec = Bibrec.query.get(recid)
record = get_record(recid)
if record is None:
return render_template('404.html')
title = record.get(cfg.get('RECORDS_BREADCRUMB_TITLE_KEY'), '')
tabs = []
if cfg.get('CFG_WEBLINKBACK_TRACKBACK_ENABLED'):
@register_template_context_processor
def trackback_context():
from invenio.legacy.weblinkback.templates import \
get_trackback_auto_discovery_tag
return {'headerLinkbackTrackbackLink':
get_trackback_auto_discovery_tag(recid)}
def _format_record(recid, of='hd', user_info=current_user, *args,
**kwargs):
from invenio.modules.formatter import format_record
return format_record(recid, of, user_info=user_info, *args,
**kwargs)
@register_template_context_processor
def record_context():
from invenio.modules.comments.api import get_mini_reviews
from invenio.legacy.bibdocfile.api import BibRecDocs
all_files = [f for f in BibRecDocs(recid, human_readable=True).list_latest_files(list_hidden=False) \
if not f.is_icon()]
files = [f for f in all_files if f.is_restricted(current_user)[0] == 0]
has_private_files = len(files) < len(all_files)
return dict(recid=recid,
record=record,
tabs=tabs,
title=title,
get_mini_reviews=get_mini_reviews,
collection=collection,
format_record=_format_record,
has_private_files=has_private_files,
files=files
)
pre_template_render.send(
"%s.%s" % (blueprint.name, f.__name__),
recid=recid,
)
return f(recid, *args, **kwargs)
return decorated
@blueprint.route('/<int:recid>/metadata', methods=['GET', 'POST'])
@blueprint.route('/<int:recid>/', methods=['GET', 'POST'])
@blueprint.route('/<int:recid>', methods=['GET', 'POST'])
@blueprint.route('/<int:recid>/export/<of>', methods=['GET', 'POST'])
@register_breadcrumb(blueprint, '.', _('Record'))
@wash_arguments({'of': (unicode, 'hd'), 'ot': (unicode, None)})
@request_record
@register_menu(blueprint, 'record.metadata', _('Information'), order=1,
endpoint_arguments_constructor=lambda:
dict(recid=request.view_args.get('recid')),
visible_when=visible_collection_tabs('metadata'))
def metadata(recid, of='hd', ot=None):
"""Display formated record metadata."""
from invenio.legacy.bibrank.downloads_similarity import \
register_page_view_event
from invenio.modules.formatter import get_output_format_content_type
register_page_view_event(recid, current_user.get_id(),
str(request.remote_addr))
if get_output_format_content_type(of) != 'text/html':
from invenio.modules.search.views.search import \
response_formated_records
return response_formated_records([recid], g.collection, of, qid=None)
# Send the signal 'document viewed'
record_viewed.send(
current_app._get_current_object(),
recid=recid,
id_user=current_user.get_id(),
request=request)
from invenio.b2share.modules.b2deposit.edit import is_record_editable
return render_template('records/metadata.html', of=of, ot=ot,
editable=is_record_editable(recid))
@blueprint.route('/<int:recid>/references', methods=['GET', 'POST'])
@request_record
@register_menu(blueprint, 'record.references', _('References'), order=2,
visible_when=visible_collection_tabs('references'),
endpoint_arguments_constructor=lambda:
dict(recid=request.view_args.get('recid')),
count=references_nb_counts)
def references(recid):
"""Display references."""
return render_template('records/references.html')
@blueprint.route('/<int:recid>/files', methods=['GET', 'POST'])
@request_record
@register_menu(blueprint, 'record.files', _('Files'), order=8,
endpoint_arguments_constructor=lambda:
dict(recid=request.view_args.get('recid')),
visible_when=visible_collection_tabs('files'))
def files(recid):
"""Return overview of attached files."""
def get_files():
from invenio.legacy.bibdocfile.api import BibRecDocs
for bibdoc in BibRecDocs(recid).list_bibdocs():
for file in bibdoc.list_all_files():
yield file.get_url()
return render_template('records/files.html', files=list(get_files()))
@blueprint.route('/<int:recid>/files/<path:filename>', methods=['GET'])
@request_record
def file(recid, filename):
"""Serve attached documents."""
from invenio.modules.documents import api
record = get_record(recid)
duuids = [uuid for (k, uuid) in record.get('_documents', [])
if k == filename]
error = 404
for duuid in duuids:
document = api.Document.get_document(duuid)
if not document.is_authorized(current_user):
current_app.logger.info(
"Unauthorized access to /{recid}/files/{filename} "
"({document}) by {current_user}".format(
recid=recid, filename=filename, document=document,
current_user=current_user))
error = 401
continue
# TODO add logging of downloads
if document.get('linked', False):
if document.get('uri').startswith('http://') or \
document.get('uri').startswith('https://'):
return redirect(document.get('uri'))
# FIXME create better streaming support
file_ = cStringIO.StringIO(document.open('rb').read())
file_.seek(0)
return send_file(file_, mimetype='application/octet-stream',
attachment_filename=filename)
return send_file(document['uri'])
abort(error)
@blueprint.route('/<int:recid>/citations', methods=['GET', 'POST'])
@request_record
@register_menu(blueprint, 'record.citations', _('Citations'), order=3,
visible_when=visible_collection_tabs('citations'),
endpoint_arguments_constructor=lambda:
dict(recid=request.view_args.get('recid')),
count=citations_nb_counts)
def citations(recid):
"""Display citations."""
from invenio.legacy.bibrank.citation_searcher import calculate_cited_by_list,\
get_self_cited_by, calculate_co_cited_with_list
citations = dict(
citinglist=calculate_cited_by_list(recid),
selfcited=get_self_cited_by(recid),
co_cited=calculate_co_cited_with_list(recid)
)
return render_template('records/citations.html',
citations=citations)
@blueprint.route('/<int:recid>/keywords', methods=['GET', 'POST'])
@request_record
@register_menu(blueprint, 'record.keywords', _('Keywords'), order=4,
endpoint_arguments_constructor=lambda:
dict(recid=request.view_args.get('recid')),
visible_when=visible_collection_tabs('keywords'))
def keywords(recid):
"""Return keywords overview."""
from invenio.legacy.bibclassify.webinterface import record_get_keywords
found, keywords, record = record_get_keywords(recid)
return render_template('records/keywords.html',
found=found,
keywords=keywords)
@blueprint.route('/<int:recid>/usage', methods=['GET', 'POST'])
@request_record
@register_menu(blueprint, 'record.usage', _('Usage statistics'), order=7,
endpoint_arguments_constructor=lambda:
dict(recid=request.view_args.get('recid')),
visible_when=visible_collection_tabs('usage'))
def usage(recid):
"""Return usage statistics."""
from invenio.legacy.bibrank.downloads_similarity import \
calculate_reading_similarity_list
from invenio.legacy.bibrank.downloads_grapher import \
create_download_history_graph_and_box
viewsimilarity = calculate_reading_similarity_list(recid, "pageviews")
downloadsimilarity = calculate_reading_similarity_list(recid, "downloads")
downloadgraph = create_download_history_graph_and_box(recid)
return render_template('records/usage.html',
viewsimilarity=viewsimilarity,
downloadsimilarity=downloadsimilarity,
downloadgraph=downloadgraph)
@blueprint.route('/', methods=['GET', 'POST'])
def no_recid():
"""Redirect to homepage."""
return redirect("/")
|
cjhak/b2share
|
invenio/modules/records/views.py
|
Python
|
gpl-2.0
| 13,449
|
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
"""
Edit metadata in RTF files.
"""
import re, cStringIO, codecs
from calibre import force_unicode
from calibre.ebooks.metadata import MetaInformation, string_to_authors
title_pat = re.compile(r'\{\\info.*?\{\\title(.*?)(?<!\\)\}', re.DOTALL)
author_pat = re.compile(r'\{\\info.*?\{\\author(.*?)(?<!\\)\}', re.DOTALL)
comment_pat = re.compile(r'\{\\info.*?\{\\subject(.*?)(?<!\\)\}', re.DOTALL)
tags_pat = re.compile(r'\{\\info.*?\{\\category(.*?)(?<!\\)\}', re.DOTALL)
publisher_pat = re.compile(r'\{\\info.*?\{\\manager(.*?)(?<!\\)\}', re.DOTALL)
def get_document_info(stream):
"""
Extract the \info block from an RTF file.
Return the info block as a string and the position in the file at which it
starts.
@param stream: File like object pointing to the RTF file.
"""
block_size = 4096
stream.seek(0)
found, block = False, ""
while not found:
prefix = block[-6:]
block = prefix + stream.read(block_size)
actual_block_size = len(block) - len(prefix)
if len(block) == len(prefix):
break
idx = block.find(r'{\info')
if idx >= 0:
found = True
pos = stream.tell() - actual_block_size + idx - len(prefix)
stream.seek(pos)
else:
if block.find(r'\sect') > -1:
break
if not found:
return None, 0
data, count, = cStringIO.StringIO(), 0
pos = stream.tell()
while True:
ch = stream.read(1)
if ch == '\\':
data.write(ch + stream.read(1))
continue
if ch == '{':
count += 1
elif ch == '}':
count -= 1
data.write(ch)
if count == 0:
break
return data.getvalue(), pos
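# Illustrative usage sketch (the file name is a placeholder; any RTF file
# opened in binary mode works the same way):
#
#     with open('example.rtf', 'rb') as stream:
#         block, pos = get_document_info(stream)
#         if block is not None:
#             print pos, block[:40]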
def detect_codepage(stream):
pat = re.compile(r'\\ansicpg(\d+)')
match = pat.search(stream.read(512))
if match is not None:
num = match.group(1)
if num == '0':
num = '1252'
codec = 'cp'+num
try:
codecs.lookup(codec)
return codec
except:
pass
def encode(unistr):
if not isinstance(unistr, unicode):
unistr = force_unicode(unistr)
return ''.join([str(c) if ord(c) < 128 else '\\u' + str(ord(c)) + '?' for c in unistr])
def decode(raw, codec):
if codec is not None:
def codepage(match):
return chr(int(match.group(1), 16))
raw = re.sub(r"\\'([a-fA-F0-9]{2})", codepage, raw)
raw = raw.decode(codec)
def uni(match):
return unichr(int(match.group(1)))
raw = re.sub(r'\\u([0-9]{3,4}).', uni, raw)
return raw
def get_metadata(stream):
"""
    Return metadata as a L{MetaInformation} object
"""
stream.seek(0)
if stream.read(5) != r'{\rtf':
return MetaInformation(_('Unknown'))
block = get_document_info(stream)[0]
if not block:
return MetaInformation(_('Unknown'))
stream.seek(0)
cpg = detect_codepage(stream)
stream.seek(0)
title_match = title_pat.search(block)
if title_match is not None:
title = decode(title_match.group(1).strip(), cpg)
else:
title = _('Unknown')
author_match = author_pat.search(block)
if author_match is not None:
author = decode(author_match.group(1).strip(), cpg)
else:
author = None
mi = MetaInformation(title)
if author:
mi.authors = string_to_authors(author)
comment_match = comment_pat.search(block)
if comment_match is not None:
comment = decode(comment_match.group(1).strip(), cpg)
mi.comments = comment
tags_match = tags_pat.search(block)
if tags_match is not None:
tags = decode(tags_match.group(1).strip(), cpg)
mi.tags = list(filter(None, (x.strip() for x in tags.split(','))))
publisher_match = publisher_pat.search(block)
if publisher_match is not None:
publisher = decode(publisher_match.group(1).strip(), cpg)
mi.publisher = publisher
return mi
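# Minimal sketch of reading metadata from an RTF file with this module
# (the path is a placeholder used only for illustration):
#
#     with open('example.rtf', 'rb') as f:
#         mi = get_metadata(f)
#         print mi.title, mi.authors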
def create_metadata(stream, options):
md = [r'{\info']
if options.title:
title = encode(options.title)
md.append(r'{\title %s}'%(title,))
if options.authors:
au = options.authors
if not isinstance(au, basestring):
au = u', '.join(au)
author = encode(au)
md.append(r'{\author %s}'%(author,))
comp = options.comment if hasattr(options, 'comment') else options.comments
if comp:
comment = encode(comp)
md.append(r'{\subject %s}'%(comment,))
if options.publisher:
publisher = encode(options.publisher)
md.append(r'{\manager %s}'%(publisher,))
if options.tags:
tags = u', '.join(options.tags)
tags = encode(tags)
md.append(r'{\category %s}'%(tags,))
if len(md) > 1:
md.append('}')
stream.seek(0)
src = stream.read()
ans = src[:6] + u''.join(md) + src[6:]
stream.seek(0)
stream.write(ans)
def set_metadata(stream, options):
'''
Modify/add RTF metadata in stream
    @param options: Object with metadata attributes title, authors, comments, publisher and tags
'''
def add_metadata_item(src, name, val):
index = src.rindex('}')
return src[:index] + r'{\ '[:-1] + name + ' ' + val + '}}'
src, pos = get_document_info(stream)
if src is None:
create_metadata(stream, options)
else:
olen = len(src)
base_pat = r'\{\\name(.*?)(?<!\\)\}'
title = options.title
if title is not None:
title = encode(title)
pat = re.compile(base_pat.replace('name', 'title'), re.DOTALL)
if pat.search(src):
src = pat.sub(r'{\\title ' + title + r'}', src)
else:
src = add_metadata_item(src, 'title', title)
comment = options.comments
if comment is not None:
comment = encode(comment)
pat = re.compile(base_pat.replace('name', 'subject'), re.DOTALL)
if pat.search(src):
src = pat.sub(r'{\\subject ' + comment + r'}', src)
else:
src = add_metadata_item(src, 'subject', comment)
author = options.authors
if author is not None:
author = ', '.join(author)
author = encode(author)
pat = re.compile(base_pat.replace('name', 'author'), re.DOTALL)
if pat.search(src):
src = pat.sub(r'{\\author ' + author + r'}', src)
else:
src = add_metadata_item(src, 'author', author)
tags = options.tags
if tags is not None:
tags = ', '.join(tags)
tags = encode(tags)
pat = re.compile(base_pat.replace('name', 'category'), re.DOTALL)
if pat.search(src):
src = pat.sub(r'{\\category ' + tags + r'}', src)
else:
src = add_metadata_item(src, 'category', tags)
publisher = options.publisher
if publisher is not None:
publisher = encode(publisher)
pat = re.compile(base_pat.replace('name', 'manager'), re.DOTALL)
if pat.search(src):
src = pat.sub(r'{\\manager ' + publisher + r'}', src)
else:
src = add_metadata_item(src, 'manager', publisher)
stream.seek(pos + olen)
after = stream.read()
stream.seek(pos)
stream.truncate()
stream.write(src)
stream.write(after)
|
jelly/calibre
|
src/calibre/ebooks/metadata/rtf.py
|
Python
|
gpl-3.0
| 7,633
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#--------------------------------------------------------------------------------------------------
# Program Name: holy_orders
# Program Description: Update program for the Abbot Cantus API server.
#
# Filename: holy_orders/current.py
# Purpose: Functions to determine which resources to update.
#
# Copyright (C) 2015, 2016 Christopher Antila
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#--------------------------------------------------------------------------------------------------
'''
Functions to determine which resources to update.
'''
import datetime
import logging
import tornado.log
import iso8601
# settings
LOG_LEVEL = logging.DEBUG
# script-level "globals"
_log = tornado.log.app_log
def _now_wrapper():
'''
    A wrapper for datetime.datetime.now(datetime.timezone.utc) that can be mocked for automated tests.
'''
return datetime.datetime.now(datetime.timezone.utc)
def get_last_updated(updates_db, rtype):
'''
Get a :class:`datetime` of the most recent update for a resource type.
    :param updates_db: A :class:`Connection` to the database that holds the "last updated" times.
:type updates_db: :class:`sqlite3.Connection`
:param str rtype: The resource type to check.
:returns: The time of the most recent update for the resource type.
:rtype: :class:`datetime.datetime`
If the database's most recent update is recorded as ``'never'``, meaning the resource type was
never updated, the :class:`datetime` returned corresponds to Unix time ``0``.
'''
last_update = updates_db.cursor().execute('SELECT updated FROM rtypes WHERE name=?', (rtype,))
last_update = last_update.fetchone()[0]
if last_update == 'never':
return datetime.datetime.fromtimestamp(0.0)
else:
return iso8601.parse_date(last_update)
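# Illustrative sketch only: an in-memory database shaped like the one these
# queries expect (an "rtypes" table with "name" and "updated" columns, where
# "updated" holds either an ISO 8601 timestamp or the literal 'never'):
#
#     import sqlite3
#     db = sqlite3.connect(':memory:')
#     db.execute('CREATE TABLE rtypes (name TEXT, updated TEXT)')
#     db.execute("INSERT INTO rtypes VALUES ('chant', 'never')")
#     db.commit()
#     get_last_updated(db, 'chant')  # -> epoch datetime (local time)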
def should_update(rtype, config, updates_db):
'''
Check whether HolyOrders "should update" resources of a particular type.
:param str rtype: The resource type to check.
:param config: Dictionary of the configuration file that has our data.
:type config: :class:`configparser.ConfigParser`
    :param updates_db: A :class:`Connection` to the database that holds the "last updated" times.
:type updates_db: :class:`sqlite3.Connection`
:returns: Whether the resource type should be updated.
:rtype: bool
'''
last_update = get_last_updated(updates_db, rtype)
if last_update.year < 1990:
_log.info('should_update({0}) -> True (first update)'.format(rtype))
return True
late_update_delta = _now_wrapper() - last_update
update_freq_delta = config['update_frequency'][rtype]
if update_freq_delta.endswith('d'):
update_freq_delta = datetime.timedelta(days=int(update_freq_delta[:-1]))
else:
update_freq_delta = datetime.timedelta(hours=int(update_freq_delta[:-1]))
if late_update_delta >= update_freq_delta:
_log.info('should_update({0}) -> True'.format(rtype))
return True
else:
_log.info('should_update({0}) -> False'.format(rtype))
return False
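# Illustrative sketch only: "config" is indexed as
# config['update_frequency'][rtype], and the value is a string ending in 'd'
# (days) or another unit letter treated as hours. The values below are
# hypothetical, not taken from a real HolyOrders configuration file:
#
#     config = {'update_frequency': {'chant': '12h', 'feast': '1d'}}
#     should_update('chant', config, updates_db)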
def calculate_chant_updates(updates_db):
'''
Determine which dates should be requested for updates of "chant" resources.
    :param updates_db: A :class:`Connection` to the database that holds the "last updated" times.
:type updates_db: :class:`sqlite3.Connection`
    :returns: The dates that require an update. These are formatted as YYYYMMDD, so they may be used
directly in Drupal URLs.
:rtype: list of str
If no updates are required, the function returns an empty list. To ensure no updates are missed,
    this function always includes at least one more day than strictly required. For example, if the most recent
update was earlier today, then this function requests updates for both today and yesterday.
However, also note that "days ago" is determined in 24-hour periods, rather than the "yesterday"
style of thinking that humans use. The actual dates requested aren't especially important---it's
enough to know that this function errs on the side of requesting more days than required.
'''
post = []
last_update = get_last_updated(updates_db, 'chant')
delta = _now_wrapper() - last_update
if delta.total_seconds() >= 0:
days_to_request = delta.days + 2
one_day = datetime.timedelta(days=1)
cursor = _now_wrapper()
for _ in range(days_to_request):
post.append(cursor.strftime('%Y%m%d'))
cursor -= one_day
_log.info('Requesting chant updates for {}'.format(post))
return post
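# Worked example (illustrative, dates are hypothetical): if the most recent
# 'chant' update happened earlier on 2016-03-01, delta.days is 0, so
# days_to_request is 2 and both today and yesterday are returned:
#
#     calculate_chant_updates(updates_db)  # -> ['20160301', '20160229']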
def update_db(updates_db, rtype, time):
'''
Revise the updates database to show a new "last updated" time for a resource type.
    :param updates_db: A :class:`Connection` to the database that holds the "last updated" times.
:type updates_db: :class:`sqlite3.Connection`
:param str rtype: The resource type that was updated.
:param time: The time at which the resource type is current.
:type time: :class:`datetime.datetime`
While it's tempting to think that the ``time`` argument should correspond to the moment this
function is called, that's not true---especially for resource types that take considerable time
to update (chants). Therefore the :class:`datetime` given to this function should correspond
to the moment just before data are requested from Drupal.
'''
time = time.isoformat()
updates_db.cursor().execute('UPDATE rtypes SET updated=? WHERE name=?;', (time, rtype))
updates_db.commit()
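# Illustrative sketch: per the note above, record the moment captured just
# before data were requested from Drupal (names here are placeholders):
#
#     request_time = _now_wrapper()
#     # ... fetch and process the Drupal data for 'chant' here ...
#     update_db(updates_db, 'chant', request_time)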
|
CANTUS-Project/abbot
|
holy_orders/current.py
|
Python
|
gpl-3.0
| 6,147
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# To use this service you will need a D7 Networks account from their website
# at https://d7networks.com/
#
# After you've established your account you can get your api login credentials
# (both user and password) from the API Details section from within your
# account profile area: https://d7networks.com/accounts/profile/
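# A minimal usage sketch (illustrative; the credentials and phone number are
# placeholders). The URL follows the '{schema}://{user}:{password}@{targets}'
# template defined further down in this plugin:
#
#     import apprise
#     apobj = apprise.Apprise()
#     apobj.add('d7sms://user:password@15551234567')
#     apobj.notify(body='test message')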
import re
import six
import requests
import base64
from json import dumps
from json import loads
from .NotifyBase import NotifyBase
from ..URLBase import PrivacyMode
from ..common import NotifyType
from ..utils import parse_list
from ..utils import parse_bool
from ..AppriseLocale import gettext_lazy as _
# Extend HTTP Error Messages
D7NETWORKS_HTTP_ERROR_MAP = {
401: 'Invalid Argument(s) Specified.',
403: 'Unauthorized - Authentication Failure.',
    412: 'A Routing Error Occurred',
    500: 'A Server-side Error Occurred Handling the Request.',
}
# Some Phone Number Detection
IS_PHONE_NO = re.compile(r'^\+?(?P<phone>[0-9\s)(+-]+)\s*$')
# Priorities
class D7SMSPriority(object):
"""
D7 Networks SMS Message Priority
"""
LOW = 0
MODERATE = 1
NORMAL = 2
HIGH = 3
D7NETWORK_SMS_PRIORITIES = (
D7SMSPriority.LOW,
D7SMSPriority.MODERATE,
D7SMSPriority.NORMAL,
D7SMSPriority.HIGH,
)
class NotifyD7Networks(NotifyBase):
"""
A wrapper for D7 Networks Notifications
"""
# The default descriptive name associated with the Notification
service_name = 'D7 Networks'
# The services URL
service_url = 'https://d7networks.com/'
# All notification requests are secure
secure_protocol = 'd7sms'
# Allow 300 requests per minute.
# 60/300 = 0.2
request_rate_per_sec = 0.20
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_d7networks'
# D7 Networks batch notification URL
notify_batch_url = 'http://rest-api.d7networks.com/secure/sendbatch'
# D7 Networks single notification URL
notify_url = 'http://rest-api.d7networks.com/secure/send'
# The maximum length of the body
body_maxlen = 160
# A title can not be used for SMS Messages. Setting this to zero will
# cause any title (if defined) to get placed into the message body.
title_maxlen = 0
# Define object templates
templates = (
'{schema}://{user}:{password}@{targets}',
)
# Define our template tokens
template_tokens = dict(NotifyBase.template_tokens, **{
'user': {
'name': _('Username'),
'type': 'string',
'required': True,
},
'password': {
'name': _('Password'),
'type': 'string',
'private': True,
'required': True,
},
'target_phone': {
'name': _('Target Phone No'),
'type': 'string',
'prefix': '+',
'regex': (r'^[0-9\s)(+-]+$', 'i'),
'map_to': 'targets',
},
'targets': {
'name': _('Targets'),
'type': 'list:string',
},
})
# Define our template arguments
template_args = dict(NotifyBase.template_args, **{
'priority': {
'name': _('Priority'),
'type': 'choice:int',
'min': D7SMSPriority.LOW,
'max': D7SMSPriority.HIGH,
'values': D7NETWORK_SMS_PRIORITIES,
# The website identifies that the default priority is low; so
# this plugin will honor that same default
'default': D7SMSPriority.LOW,
},
'batch': {
'name': _('Batch Mode'),
'type': 'bool',
'default': False,
},
'to': {
'alias_of': 'targets',
},
'source': {
            # Originating address. Where rewriting of the sender's address is
            # supported or permitted by the SMS-C, this number is transmitted
            # as the originating address of the message; it is completely
            # optional.
'name': _('Originating Address'),
'type': 'string',
'map_to': 'source',
},
'from': {
'alias_of': 'source',
},
})
def __init__(self, targets=None, priority=None, source=None, batch=False,
**kwargs):
"""
Initialize D7 Networks Object
"""
super(NotifyD7Networks, self).__init__(**kwargs)
# The Priority of the message
if priority not in D7NETWORK_SMS_PRIORITIES:
self.priority = self.template_args['priority']['default']
else:
self.priority = priority
# Prepare Batch Mode Flag
self.batch = batch
# Setup our source address (if defined)
self.source = None \
if not isinstance(source, six.string_types) else source.strip()
# Parse our targets
self.targets = list()
for target in parse_list(targets):
# Validate targets and drop bad ones:
result = IS_PHONE_NO.match(target)
if result:
                # Further check our phone # for its digit count; if it isn't
                # between 11 and 14 digits we assume it's a poorly specified
                # phone no. and emit a warning
result = ''.join(re.findall(r'\d+', result.group('phone')))
if len(result) < 11 or len(result) > 14:
self.logger.warning(
'Dropped invalid phone # '
'({}) specified.'.format(target),
)
continue
# store valid phone number
self.targets.append(result)
continue
self.logger.warning(
'Dropped invalid phone # ({}) specified.'.format(target))
if len(self.targets) == 0:
msg = 'There are no valid targets identified to notify.'
self.logger.warning(msg)
raise TypeError(msg)
return
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
"""
Depending on whether we are set to batch mode or single mode this
redirects to the appropriate handling
"""
# error tracking (used for function return)
has_error = False
auth = '{user}:{password}'.format(
user=self.user, password=self.password)
if six.PY3:
            # Python 3's version of b64encode() expects a byte array and not
            # a string. To accommodate this, we encode the content here
auth = auth.encode('utf-8')
# Prepare our headers
headers = {
'User-Agent': self.app_id,
'Accept': 'application/json',
            'Authorization': 'Basic {}'.format(
                base64.b64encode(auth).decode('utf-8')
                if six.PY3 else base64.b64encode(auth)),
}
# Our URL varies depending if we're doing a batch mode or not
url = self.notify_batch_url if self.batch else self.notify_url
# use the list directly
targets = list(self.targets)
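        # Note: in batch mode the loop body executes exactly once (all targets
        # are sent in a single request and the list is cleared); otherwise one
        # target is popped and sent per iteration.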
while len(targets):
if self.batch:
# Prepare our payload
payload = {
'globals': {
'priority': self.priority,
'from': self.source if self.source else self.app_id,
},
'messages': [{
'to': self.targets,
'content': body,
}],
}
# Reset our targets so we don't keep going. This is required
# because we're in batch mode; we only need to loop once.
targets = []
else:
# We're not in a batch mode; so get our next target
# Get our target(s) to notify
target = targets.pop(0)
# Prepare our payload
payload = {
'priority': self.priority,
'content': body,
'to': target,
'from': self.source if self.source else self.app_id,
}
# Some Debug Logging
self.logger.debug(
'D7 Networks POST URL: {} (cert_verify={})'.format(
url, self.verify_certificate))
self.logger.debug('D7 Networks Payload: {}' .format(payload))
# Always call throttle before any remote server i/o is made
self.throttle()
try:
r = requests.post(
url,
data=dumps(payload),
headers=headers,
verify=self.verify_certificate,
)
if r.status_code not in (
requests.codes.created, requests.codes.ok):
# We had a problem
status_str = \
NotifyBase.http_response_code_lookup(
r.status_code, D7NETWORKS_HTTP_ERROR_MAP)
try:
# Update our status response if we can
json_response = loads(r.content)
status_str = json_response.get('message', status_str)
except (AttributeError, TypeError, ValueError):
# ValueError = r.content is Unparsable
# TypeError = r.content is None
# AttributeError = r is None
# We could not parse JSON response.
# We will just use the status we already have.
pass
self.logger.warning(
'Failed to send D7 Networks SMS notification to {}: '
'{}{}error={}.'.format(
                            ', '.join(self.targets) if self.batch else target,
status_str,
', ' if status_str else '',
r.status_code))
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
# Mark our failure
has_error = True
continue
else:
if self.batch:
count = len(self.targets)
try:
# Get our message delivery count if we can
json_response = loads(r.content)
count = int(json_response.get(
'data', {}).get('messageCount', -1))
except (AttributeError, TypeError, ValueError):
# ValueError = r.content is Unparsable
# TypeError = r.content is None
# AttributeError = r is None
# We could not parse JSON response. Assume that
# our delivery is okay for now.
pass
if count != len(self.targets):
has_error = True
self.logger.info(
'Sent D7 Networks batch SMS notification to '
'{} of {} target(s).'.format(
count, len(self.targets)))
else:
self.logger.info(
'Sent D7 Networks SMS notification to {}.'.format(
target))
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
except requests.RequestException as e:
self.logger.warning(
                    'A Connection error occurred sending D7 Networks:%s ' % (
', '.join(self.targets)) + 'notification.'
)
self.logger.debug('Socket Exception: %s' % str(e))
# Mark our failure
has_error = True
continue
return not has_error
def url(self, privacy=False, *args, **kwargs):
"""
Returns the URL built dynamically based on specified arguments.
"""
# Define any arguments set
args = {
'format': self.notify_format,
'overflow': self.overflow_mode,
'verify': 'yes' if self.verify_certificate else 'no',
'batch': 'yes' if self.batch else 'no',
}
if self.priority != self.template_args['priority']['default']:
args['priority'] = str(self.priority)
if self.source:
args['from'] = self.source
return '{schema}://{user}:{password}@{targets}/?{args}'.format(
schema=self.secure_protocol,
user=NotifyD7Networks.quote(self.user, safe=''),
password=self.pprint(
self.password, privacy, mode=PrivacyMode.Secret, safe=''),
targets='/'.join(
[NotifyD7Networks.quote(x, safe='') for x in self.targets]),
args=NotifyD7Networks.urlencode(args))
@staticmethod
def parse_url(url):
"""
Parses the URL and returns enough arguments that can allow
us to substantiate this object.
"""
results = NotifyBase.parse_url(url, verify_host=False)
if not results:
# We're done early as we couldn't load the results
return results
# Initialize our targets
results['targets'] = list()
        # Store our first target, which is given in the hostname
results['targets'].append(NotifyD7Networks.unquote(results['host']))
# Get our entries; split_path() looks after unquoting content for us
# by default
results['targets'].extend(
NotifyD7Networks.split_path(results['fullpath']))
# Set our priority
if 'priority' in results['qsd'] and len(results['qsd']['priority']):
_map = {
'l': D7SMSPriority.LOW,
'0': D7SMSPriority.LOW,
'm': D7SMSPriority.MODERATE,
'1': D7SMSPriority.MODERATE,
'n': D7SMSPriority.NORMAL,
'2': D7SMSPriority.NORMAL,
'h': D7SMSPriority.HIGH,
'3': D7SMSPriority.HIGH,
}
try:
results['priority'] = \
_map[results['qsd']['priority'][0].lower()]
except KeyError:
# No priority was set
pass
        # Support the 'from' and 'source' variables so that the source
        # (originating) address can be specified this way too.
        # The 'from' form makes it easier to use yaml configuration
if 'from' in results['qsd'] and len(results['qsd']['from']):
results['source'] = \
NotifyD7Networks.unquote(results['qsd']['from'])
if 'source' in results['qsd'] and len(results['qsd']['source']):
results['source'] = \
NotifyD7Networks.unquote(results['qsd']['source'])
# Get Batch Mode Flag
results['batch'] = \
parse_bool(results['qsd'].get('batch', False))
# Support the 'to' variable so that we can support targets this way too
# The 'to' makes it easier to use yaml configuration
if 'to' in results['qsd'] and len(results['qsd']['to']):
results['targets'] += \
NotifyD7Networks.parse_list(results['qsd']['to'])
return results
|
SickGear/SickGear
|
lib/apprise/plugins/NotifyD7Networks.py
|
Python
|
gpl-3.0
| 16,906
|
# shipSPTOptimalBonusMF
#
# Used by:
# Ship: Chremoas
type = "passive"
def handler(fit, ship, context):
fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Small Projectile Turret"),
"maxRange", ship.getModifiedItemAttr("shipBonusMF"), skill="Minmatar Frigate")
|
Ebag333/Pyfa
|
eos/effects/shipsptoptimalbonusmf.py
|
Python
|
gpl-3.0
| 316
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing M2M table for field followed_by on 'Exercise'
db.delete_table('exercise_followed_by')
def backwards(self, orm):
# Adding M2M table for field followed_by on 'Exercise'
db.create_table(u'exercise_followed_by', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('exercise', models.ForeignKey(orm['askbot.exercise'], null=False)),
('user', models.ForeignKey(orm['auth.user'], null=False))
))
db.create_unique(u'exercise_followed_by', ['exercise_id', 'user_id'])
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Exercise']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousproblem': {
'Meta': {'object_name': 'AnonymousProblem'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_problems'", 'to': "orm['askbot.Exercise']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousexercise': {
'Meta': {'object_name': 'AnonymousExercise'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.problem': {
'Meta': {'object_name': 'Problem', 'db_table': "u'problem'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'problems'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'problems'", 'to': "orm['askbot.Exercise']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'askbot.comment': {
'Meta': {'ordering': "('-added_at',)", 'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'offensive_flag_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoriteexercise': {
'Meta': {'object_name': 'FavoriteExercise', 'db_table': "u'favorite_exercise'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Exercise']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_exercises'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.postrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('problem', 'revision'), ('exercise', 'revision'))", 'object_name': 'PostRevision'},
'problem': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Problem']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'postrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Exercise']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'revision_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.exercise': {
'Meta': {'object_name': 'Exercise', 'db_table': "u'exercise'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exercises'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_exercises'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteExercise']", 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'exercises'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exercises'", 'unique': 'True', 'to': "orm['askbot.Thread']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.exerciseview': {
'Meta': {'object_name': 'ExerciseView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Exercise']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exercise_views'", 'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Exercise']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.thread': {
'Meta': {'object_name': 'Thread'},
'accepted_problem': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Problem']", 'null': 'True', 'blank': 'True'}),
'problem_accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'problem_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_threads'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'unused_last_active_in_threads'", 'to': "orm['auth.User']"}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'exercises_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot']
|
maxwward/SCOPEBak
|
askbot/migrations/0076_transplant_followed_by_2.py
|
Python
|
gpl-3.0
| 27,054
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Adam.Dybbroe
# Author(s):
# Adam.Dybbroe <a000680@c14526.ad.smhi.se>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Unittests reading various PPS data files
"""
import os
import unittest
from pypps_reader import NwcSafPpsData
# The test data cases:
# FIXME!
class Test(unittest.TestCase):
"""Unit testing the pps reading"""
def setUp(self):
"""Set up"""
return
def test_read(self):
"""Test that it is possible to read pps products"""
return
def tearDown(self):
"""Clean up"""
return
|
adybbroe/pypps_reader
|
test/test_ppsread.py
|
Python
|
gpl-3.0
| 1,228
|
import os
import sys
import signal
import logging
import pytest
import latus.logger
import test_latus.tstutil
os.environ["PYTHONPATH"] = '.'
g_keep_running = True
def control_key_handler(signal, frame):
global g_keep_running
print('%s : ctrl-c detected - exiting' % __file__)
g_keep_running = False
def run_pytest_until_error():
global g_keep_running
test_latus.tstutil.set_cloud_config('aws', True)
g_keep_running = True
latus.logger.init(os.path.join('temp', __file__), 'log')
latus.logger.set_console_log_level(logging.INFO)
print('hit ctrl-c to exit')
signal.signal(signal.SIGINT, control_key_handler)
signal.signal(signal.SIGTSTP, control_key_handler)
count = {'fail': 0, 'pass': 0}
# target = os.path.join('test_latus', 'test_delete.py::test_delete')
while g_keep_running:
if len(sys.argv) > 1:
r = pytest.main(sys.argv[1]) # command line '-s' to see output
else:
r = pytest.main()
if r != 0:
count['fail'] += 1
print('pytest got an error - exiting')
g_keep_running = False
else:
count['pass'] += 1
print('test iteration : %s : return=%s' % (str(count), str(r)))
def main():
run_pytest_until_error()
if __name__ == '__main__':
main()
|
latusrepo/latus
|
run_pytest_until_error.py
|
Python
|
gpl-3.0
| 1,334
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import babelfish
import requests
from . import Provider
from .. import __version__
from ..exceptions import InvalidSubtitle, ProviderNotAvailable, ProviderError
from ..subtitle import Subtitle, is_valid_subtitle, detect
logger = logging.getLogger(__name__)
class TheSubDBSubtitle(Subtitle):
provider_name = 'thesubdb'
def __init__(self, language, hash): # @ReservedAssignment
super(TheSubDBSubtitle, self).__init__(language)
self.hash = hash
def compute_matches(self, video):
matches = set()
# hash
if 'thesubdb' in video.hashes and video.hashes['thesubdb'] == self.hash:
matches.add('hash')
return matches
class TheSubDBProvider(Provider):
languages = set([babelfish.Language.fromalpha2(l) for l in ['en', 'es', 'fr', 'it', 'nl', 'pl', 'pt', 'ro', 'sv', 'tr']])
required_hash = 'thesubdb'
def initialize(self):
self.session = requests.Session()
self.session.headers = {'User-Agent': 'SubDB/1.0 (subliminal/%s; https://github.com/Diaoul/subliminal)' %
__version__}
def terminate(self):
self.session.close()
def get(self, params):
"""Make a GET request on the server with the given parameters
:param params: params of the request
:return: the response
:rtype: :class:`requests.Response`
:raise: :class:`~subliminal.exceptions.ProviderNotAvailable`
"""
try:
r = self.session.get('http://api.thesubdb.com', params=params, timeout=10)
except requests.Timeout:
raise ProviderNotAvailable('Timeout after 10 seconds')
return r
def query(self, hash): # @ReservedAssignment
params = {'action': 'search', 'hash': hash}
logger.debug('Searching subtitles %r', params)
r = self.get(params)
if r.status_code == 404:
logger.debug('No subtitle found')
return []
elif r.status_code != 200:
raise ProviderError('Request failed with status code %d' % r.status_code)
return [TheSubDBSubtitle(language, hash) for language in
set([babelfish.Language.fromalpha2(l) for l in r.content.split(',')])]
def list_subtitles(self, video, languages):
return [s for s in self.query(video.hashes['thesubdb']) if s.language in languages]
def download_subtitle(self, subtitle):
params = {'action': 'download', 'hash': subtitle.hash, 'language': subtitle.language.alpha2}
r = self.get(params)
if r.status_code != 200:
raise ProviderError('Request failed with status code %d' % r.status_code)
logger.debug('Download URL: %s {hash=%s, lang=%s}' % (
'http://api.thesubdb.com', subtitle.hash, subtitle.language.alpha2,
))
subtitle_text = r.content.decode(
detect(r.content, subtitle.language.alpha2)['encoding'], 'replace')
if not is_valid_subtitle(subtitle_text):
raise InvalidSubtitle
return subtitle_text
|
caronc/nzb-subliminal
|
Subliminal/subliminal/providers/thesubdb.py
|
Python
|
gpl-3.0
| 3,140
|
del x
|
pyta-uoft/pyta
|
examples/ending_locations/del_name.py
|
Python
|
gpl-3.0
| 6
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, _
class ResCompany(models.Model):
_inherit = "res.company"
@api.model
def create(self, vals):
new_company = super(ResCompany, self).create(vals)
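        # Ensure the new company gets a default pricelist in its own currency:
        # reuse an existing company-agnostic pricelist that matches, otherwise
        # create one, then register it as the default
        # 'property_product_pricelist' for res.partner records of this company.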
ProductPricelist = self.env['product.pricelist']
pricelist = ProductPricelist.search([('currency_id', '=', new_company.currency_id.id), ('company_id', '=', False)], limit=1)
if not pricelist:
pricelist = ProductPricelist.create({
'name': new_company.name,
'currency_id': new_company.currency_id.id,
})
field_id = self.env['ir.model.fields'].search([('model', '=', 'res.partner'), ('name', '=', 'property_product_pricelist')])
self.env['ir.property'].create({
'name': 'property_product_pricelist',
'company_id': new_company.id,
'value_reference': 'product.pricelist,%s' % pricelist.id,
'fields_id': field_id.id
})
return new_company
|
ChawalitK/odoo
|
addons/product/res_company.py
|
Python
|
gpl-3.0
| 1,090
|
from __future__ import absolute_import
from django.contrib.auth.models import User, Group
from navigation.api import register_multi_item_links
from navigation.classes import Link
from project_setup.api import register_setup
from .links import (user_list, user_setup, user_edit, user_add, user_delete,
user_multiple_delete, user_set_password, user_multiple_set_password,
user_groups, group_list, group_setup, group_edit, group_add,
group_delete, group_multiple_delete, group_members)
Link.bind_links([User], [user_edit, user_set_password, user_groups, user_delete])
Link.bind_links([User, 'user_multiple_set_password', 'user_multiple_delete', 'user_list', 'user_add'], [user_list, user_add], menu_name=u'secondary_menu')
register_multi_item_links(['user_list'], [user_multiple_set_password, user_multiple_delete])
Link.bind_links([Group], [group_edit, group_members, group_delete])
Link.bind_links(['group_multiple_delete', 'group_delete', 'group_edit', 'group_list', 'group_add', 'group_members'], [group_list, group_add], menu_name=u'secondary_menu')
register_multi_item_links(['group_list'], [group_multiple_delete])
user_management_views = [
'user_list', 'user_edit', 'user_add', 'user_delete',
'user_multiple_delete', 'user_set_password',
'user_multiple_set_password', 'group_list', 'group_edit', 'group_add',
'group_delete', 'group_multiple_delete', 'group_members'
]
register_setup(user_setup)
register_setup(group_setup)
|
commonwealth-of-puerto-rico/lean
|
paart/apps/user_management/__init__.py
|
Python
|
gpl-3.0
| 1,463
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import keystoneclient.v2_0.client as keystone
from keystoneauth1.identity import v2
from keystoneauth1 import session
import novaclient.client as nova
import cinderclient.client as cinder
from glanceclient.v1 import client as glance
import neutronclient.v2_0.client as neutron
import heatclient.client as heat
import time, paramiko,os,re,errno
from socket import error as socket_error
from os import environ as env
class OpenStackUtils():
def __init__(self):
auth = v2.Password(auth_url=env['OS_AUTH_URL'],
username=env['OS_USERNAME'],
password=env['OS_PASSWORD'],
tenant_id=env['OS_TENANT_ID'])
sess = session.Session(auth=auth)
self.keystone_client = keystone.Client(username=env['OS_USERNAME'],
password=env['OS_PASSWORD'],
tenant_id=env['OS_TENANT_ID'],
auth_url=env['OS_AUTH_URL'],
region_name=env['OS_REGION_NAME'])
heat_url = self.keystone_client \
.service_catalog.url_for(service_type='orchestration',
endpoint_type='publicURL')
self.nova_client = nova.Client('2.1', region_name=env['OS_REGION_NAME'], session=sess)
self.cinder_client = cinder.Client('2', region_name=env['OS_REGION_NAME'], session=sess)
self.glance_client = glance.Client('2', region_name=env['OS_REGION_NAME'], session=sess)
self.neutron_client = neutron.Client(region_name=env['OS_REGION_NAME'], session=sess)
self.heat_client = heat.Client('1', region_name=env['OS_REGION_NAME'], endpoint=heat_url, session=sess)
def boot_vm_with_userdata_and_port(self,userdata_path,keypair,port):
#nics = [{'port-id': env['NOSE_PORT_ID']}]
nics = [{'port-id': port['port']['id'] }]
server = self.nova_client.servers.create(name="test-server-" + self.current_time_ms(), image=env['NOSE_IMAGE_ID'],
flavor=env['NOSE_FLAVOR'],userdata=file(userdata_path),key_name=keypair.name, nics=nics)
print 'Building, please wait...'
# wait for server create to be complete
self.wait_server_is_up(server)
self.wait_for_cloud_init(server)
return server
def boot_vm(self,image_id=env['NOSE_IMAGE_ID'],flavor=env['NOSE_FLAVOR'],keypair='default'):
nics = [{'net-id': env['NOSE_NET_ID']}]
server = self.nova_client.servers.create(name="test-server-" + self.current_time_ms(), image=image_id,security_groups=[env['NOSE_SG_ID']],
flavor=flavor, key_name=keypair.name, nics=nics)
print 'Building, please wait...'
self.wait_server_is_up(server)
self.wait_for_cloud_init(server)
return server
def get_server(self,server_id):
return self.nova_client.servers.get(server_id)
def destroy_server(self,server):
self.nova_client.servers.delete(server)
time.sleep(30)
def current_time_ms(self):
return str(int(round(time.time() * 1000)))
def get_console_log(self,server):
return self.nova_client.servers.get(server.id).get_console_output(length=600)
def get_spice_console(self,server):
return self.nova_client.servers.get(server.id).get_spice_console('spice-html5')
def create_server_snapshot(self,server):
return self.nova_client.servers.create_image(server,server.name+self.current_time_ms())
def get_image(self,image_id):
return self.glance_client.images.get(image_id)
def destroy_image(self,image_id):
self.glance_client.images.delete(image_id)
def initiate_ssh(self,floating_ip,private_key_filename):
ssh_connection = paramiko.SSHClient()
ssh_connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
retries_left = 5
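        # Retry the SSH connection on 'connection refused' (the guest may still
        # be booting): up to 5 attempts, sleeping 10 seconds between tries.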
while True:
try:
ssh_connection.connect(floating_ip.ip,username='cloud',key_filename=private_key_filename,timeout=180)
break
except socket_error as e:
if e.errno != errno.ECONNREFUSED or retries_left <= 1:
raise e
time.sleep(10) # wait 10 seconds and retry
retries_left -= 1
return ssh_connection
def create_floating_ip(self):
return self.nova_client.floating_ips.create('public')
#def associate_floating_ip_to_port(self,floating_ip):
# self.neutron_client.update_floatingip(floating_ip.id,{'floatingip': {'port_id': env['NOSE_PORT_ID'] }})
def associate_floating_ip_to_server(self,floating_ip, server):
self.nova_client.servers.get(server.id).add_floating_ip(floating_ip.ip)
time.sleep(10)
def delete_floating_ip(self,floating_ip):
self.nova_client.floating_ips.delete(floating_ip.id)
def rescue(self,server):
self.wait_server_available(server)
return self.nova_client.servers.get(server.id).rescue()
def unrescue(self,server):
self.wait_server_available(server)
return self.nova_client.servers.get(server.id).unrescue()
def attach_volume_to_server(self,server,volume):
#self.nova_client.volumes.create_server_volume(server_id=server.id,volume_id=env['NOSE_VOLUME_ID'])
self.nova_client.volumes.create_server_volume(server_id=server.id,volume_id=volume.id)
status =volume.status
while status != 'in-use':
status = self.cinder_client.volumes.get(volume.id).status
print status
print "volume is in use Now : "+ status
def detach_volume_from_server(self,server,volume):
#self.nova_client.volumes.delete_server_volume(server.id,env['NOSE_VOLUME_ID'])
self.nova_client.volumes.delete_server_volume(server.id,volume.id)
def get_flavor_disk_size(self,flavor_id):
return self.nova_client.flavors.get(flavor_id).disk
def server_reboot(self,server,type):
serv=self.get_server(server.id)
serv.reboot(reboot_type=type)
def wait_server_is_up(self,server):
status = server.status
while status != 'ACTIVE':
status = self.get_server(server.id).status
print "server is up"
def wait_for_cloud_init(self,server):
while True:
console_log = self.get_console_log(server)
if re.search('^.*Cloud-init .* finished.*$', console_log, flags=re.MULTILINE):
print("Cloudinit finished")
break
else:
time.sleep(10)
def wait_server_available(self,server):
task_state = getattr(server,'OS-EXT-STS:task_state')
while task_state is not None:
task_state = getattr(self.get_server(server.id),'OS-EXT-STS:task_state')
print "the server is available"
def create_keypair(self):
suffix =self.current_time_ms()
keypair= self.nova_client.keypairs.create(name="nose_keypair"+suffix)
private_key_filename = env['HOME']+'/key-'+suffix+'.pem'
fp = os.open(private_key_filename, os.O_WRONLY | os.O_CREAT, 0o600)
with os.fdopen(fp, 'w') as f:
f.write(keypair.private_key)
return keypair , private_key_filename
def delete_keypair(self,keypair,private_key_filename):
self.nova_client.keypairs.delete(keypair.id)
os.remove(private_key_filename)
def create_port_with_sg(self):
body_value = {'port': {
'admin_state_up': True,
'security_groups': [env['NOSE_SG_ID']],
'name': 'port-test'+self.current_time_ms(),
'network_id': env['NOSE_NET_ID'],
}}
port=self.neutron_client.create_port(body=body_value)
time.sleep(20)
return port
def delete_port(self,port):
self.neutron_client.delete_port(port['port']['id'])
def create_volume(self):
volume=self.cinder_client.volumes.create(5, name="test-volume"+self.current_time_ms())
print "the status of volume is:"+ volume.status
status = volume.status
while status != 'available':
status = self.cinder_client.volumes.get(volume.id).status
print "volume is created : "+ status
return volume
def delete_volume(self,volume):
self.cinder_client.volumes.delete(volume.id)
|
juliend88/os_image_factory
|
test-tools/pytesting_os/openstackutils.py
|
Python
|
gpl-3.0
| 8,608
|
# Copyright (c) 2015 SUSE Linux GmbH. All rights reserved.
#
# This file is part of kiwi.
#
# kiwi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kiwi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kiwi. If not, see <http://www.gnu.org/licenses/>
#
import re
import os
from typing import (
NamedTuple, Optional
)
# project
from kiwi.command import Command
from kiwi.exceptions import KiwiKernelLookupError
kernel_type = NamedTuple(
'kernel_type', [
('name', str),
('filename', str),
('version', str)
]
)
xen_hypervisor_type = NamedTuple(
'xen_hypervisor_type', [
('filename', str),
('name', str)
]
)
class Kernel:
"""
    **Implements kernel lookup and extraction from a given root tree**
:param str root_dir: root directory path name
:param list kernel_names: list of kernel names to search for
functions.sh::suseStripKernel() provides a normalized
file so that we do not have to search for many different
names in this code
"""
def __init__(self, root_dir: str):
self.root_dir = root_dir
self.kernel_names = self._setup_kernel_names_for_lookup()
def get_kernel(
self, raise_on_not_found: bool = False
) -> Optional[kernel_type]:
"""
Lookup kernel files and provide filename and version
:param bool raise_on_not_found: sets the method to raise an exception
if the kernel is not found
:raises KiwiKernelLookupError: if raise_on_not_found flag is active
and kernel is not found
:return: tuple with filename, kernelname and version
:rtype: tuple|None
"""
for kernel_name in self.kernel_names:
kernel_file = os.sep.join(
[self.root_dir, 'boot', kernel_name]
)
if os.path.exists(kernel_file):
version_match = re.match(
'.*?-(.*)', os.path.basename(kernel_file)
)
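                # e.g. 'vmlinuz-5.3.18-59-default' -> version '5.3.18-59-default'
                # (everything after the first '-' in the basename)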
if version_match:
version = version_match.group(1)
return kernel_type(
name=os.path.basename(os.path.realpath(kernel_file)),
filename=kernel_file,
version=version
)
if raise_on_not_found:
raise KiwiKernelLookupError(
'No kernel found in {0}, searched for {1}'.format(
os.sep.join([self.root_dir, 'boot']),
','.join(self.kernel_names)
)
)
return None
def get_xen_hypervisor(self) -> Optional[xen_hypervisor_type]:
"""
Lookup xen hypervisor and provide filename and hypervisor name
:return: tuple with filename and hypervisor name
:rtype: tuple|None
"""
xen_hypervisor = self.root_dir + '/boot/xen.gz'
if os.path.exists(xen_hypervisor):
return xen_hypervisor_type(
filename=xen_hypervisor,
name='xen.gz'
)
return None
def copy_kernel(self, target_dir: str, file_name: str = None) -> None:
"""
Copy kernel to specified target
If no file_name is given the target filename is set
as kernel-<kernel.version>.kernel
:param str target_dir: target path name
        :param str file_name: base filename in target
"""
kernel = self.get_kernel()
if kernel:
if not file_name:
file_name = 'kernel-' + kernel.version + '.kernel'
target_file = ''.join(
[target_dir, '/', file_name]
)
Command.run(['cp', kernel.filename, target_file])
def copy_xen_hypervisor(
self, target_dir: str, file_name: str = None
) -> None:
"""
Copy xen hypervisor to specified target
If no file_name is given the target filename is set
as hypervisor-<xen.name>
:param str target_dir: target path name
        :param str file_name: base filename in target
"""
xen = self.get_xen_hypervisor()
if xen:
if not file_name:
file_name = 'hypervisor-' + xen.name
target_file = ''.join(
[target_dir, '/', file_name]
)
Command.run(['cp', xen.filename, target_file])
def _setup_kernel_names_for_lookup(self):
"""
The kernel image name is different per arch and distribution
This method returns a list of possible kernel image names in
order to search and find one of them
:return: list of kernel image names
:rtype: list
"""
kernel_names = []
kernel_dirs = sorted(
os.listdir(''.join([self.root_dir, '/lib/modules']))
)
if kernel_dirs:
# append lookup for the real kernel image names
# depending on the arch and os they are different
# in their prefix
kernel_prefixes = [
'uImage', 'Image', 'zImage', 'vmlinuz', 'image', 'vmlinux'
]
kernel_name_pattern = '{prefix}-{name}'
for kernel_prefix in kernel_prefixes:
for kernel_dir in kernel_dirs:
kernel_names.append(
kernel_name_pattern.format(
prefix=kernel_prefix, name=kernel_dir
)
)
return kernel_names
|
SUSE/kiwi
|
kiwi/system/kernel.py
|
Python
|
gpl-3.0
| 5,997
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import csv
from copy import copy
from typing import List, Dict
import typesystem
from .types import PartialActor, ActorIssue, PartialIssue, IssuePosition, IssueDescription, Comment
types = {
PartialActor.starts_with: PartialActor,
ActorIssue.starts_with: ActorIssue,
PartialIssue.starts_with: PartialIssue,
IssuePosition.starts_with: IssuePosition,
IssueDescription.starts_with: IssueDescription,
Comment.starts_with: Comment,
}
class InputDataFile:
def __init__(self):
self.errors = {}
self.rows = {}
self.data = {}
def add_typed_object(self, obj):
klass = obj.__class__
if klass in self.data:
self.data[klass][obj] = obj
else:
self.data[klass] = {obj: obj}
@classmethod
def open(cls, filename: str) -> "InputDataFile":
"""
Transforms a file with comma separated values to a dictionary where the key is the row number
"""
data = cls()
with open(filename, "rt", encoding="utf-8", errors="replace") as csv_file:
# guess the document format
dialect = csv.Sniffer().sniff(csv_file.read(1024))
csv_file.seek(0)
reader = csv.reader(csv_file, dialect=dialect)
InputDataFile.open_reader(reader, data)
return data
@classmethod
def open_reader(cls, reader, data=None):
if not data:
data = cls()
data.parse_rows(reader)
data.update_issues_with_positions()
if data.is_valid:
data.validate_actor_issue_positions()
return data
def parse_rows(self, items):
for index, row in enumerate(items):
# keep the original data
self.rows[index] = row
try:
type_obj = csv_row_to_type(row)
self.add_typed_object(type_obj)
except typesystem.ValidationError as e:
self.errors[index] = e # collect the error for displaying purpose
@property
def is_valid(self):
return len(self.errors) == 0
@property
def actors(self) -> Dict[str, PartialActor]:
return self.data[PartialActor]
@property
def issues(self) -> Dict[str, PartialIssue]:
return self.data[PartialIssue]
@property
def actor_issues(self) -> Dict[str, ActorIssue]:
return self.data[ActorIssue]
@property
def issue_positions(self) -> Dict[str, IssuePosition]:
return self.data[IssuePosition]
def update_issues_with_positions(self):
"""
        Once the file is complete, we can update the lower and upper positions of the issue
"""
if IssuePosition in self.data:
for issue_position in self.issue_positions.values():
if issue_position.issue in self.issues:
issue = self.issues[issue_position.issue]
if issue.lower is None:
issue.lower = issue_position.position
elif issue_position.position < issue.lower:
                        issue.lower = issue_position.position
if issue.upper is None:
issue.upper = issue_position.position
elif issue_position.position > issue.upper:
issue.upper = issue_position.position
self.set_default_issue_positions()
def set_default_issue_positions(self):
for issue in self.issues.values():
if issue.lower is None:
issue.lower = 0
if issue.upper is None:
issue.upper = 100
def validate_actor_issue_positions(self):
"""
Validate the positions of the actor issues against the lower & upper issue bounds
"""
# find the starting position of the actor issues, so we can show the error at the correct position
row_index_correction = 0
for type_class in types.values():
if type_class in self.data and type_class != ActorIssue:
row_index_correction += len(self.data[type_class])
for index, actor_issue in enumerate(self.actor_issues.values(), row_index_correction + 1):
if actor_issue.actor not in self.actors:
self.errors[index] = typesystem.ValidationError(
key='actor',
text='{} not found in document'.format(actor_issue.actor)
)
if actor_issue.issue in self.issues:
issue = self.issues[actor_issue.issue]
try:
actor_issue.validate_position(issue)
except typesystem.ValidationError as e:
                    self.errors[index] = e
else:
self.errors[index] = typesystem.ValidationError(
key='issue',
                    text='{} not found in document'.format(actor_issue.issue)
)
def csv_row_to_type(row: List[str]):
"""
Translate a list of values to the corresponding object
"""
key = row[0] # the first element contains the #id field
row = row[1:] # the rest the row
if key not in types.keys():
        raise Exception(f"Add key {key} to Reader.types (raw row: {row})")
row_type = types[key]
field_names = row_type.fields.keys()
row = squash(len(row_type.fields), row)
obj = row_type.validate(dict(zip(field_names, row)))
return obj
def squash(fields: int, data: List[str], delimiter=" ") -> List[str]:
"""
    Finds out how many fields there are and joins the overhead into the last field.
    e.g.:
    The object x, y, z contains 3 fields.
    The row x,y,z,a,b has 5 values.
    The values a & b will be squashed into z with the given delimiter.
"""
if fields >= len(data):
return data
output = copy(data)
del output[-1]
output[-1] = delimiter.join(data[fields - 1:])
return output
|
foarsitter/equal-gain-python
|
decide/data/reader.py
|
Python
|
gpl-3.0
| 6,159
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 30 09:53:39 2018
@author: mayank
"""
from forms import SignupForm
from flask import Flask, request, render_template
from flask_login import LoginManager, login_user, login_required, logout_user
app = Flask(__name__)
app.secret_key = 'gMALVWEuxBSxQ44bomDOsWniejrPbhDV'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db/database.sqlite'
login_manager = LoginManager()
login_manager.init_app(app)
@app.route('/')
def index():
return "Welcome to Home Page"
@app.route('/signup', methods=['GET', 'POST'])
def signup():
form = SignupForm()
if request.method == 'GET':
return render_template('signup.html', form=form)
elif request.method == 'POST':
if form.validate_on_submit():
if User.query.filter_by(email=form.email.data).first():
return "!!! Email address already exists !!!"
newuser = User(form.email.data, form.password.data)
db.session.add(newuser)
db.session.flush()
db.session.commit()
login_user(newuser)
return "User created!!!"
else:
return "Form didn't validate"
@login_manager.user_loader
def load_user(email):
return User.query.filter_by(email=email).first()
@app.route('/login', methods=['GET', 'POST'])
def login():
form = SignupForm()
if request.method == 'GET':
return render_template('login.html', form=form)
elif request.method == 'POST' and form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and user.password == form.password.data:
login_user(user)
return "User logged in"
return "<h1>Wrong username or password</h1>"
return "form not validated or invalid request"
@app.route("/logout")
@login_required
def logout():
logout_user()
return "Logged out"
@app.route('/protected')
@login_required
def protected():
return "protected area"
def init_db():
db.init_app(app)
db.app = app
db.create_all()
if __name__ == '__main__':
from models import db, User
init_db()
app.run(port=5000, host='localhost')
|
mayankjohri/LetsExplorePython
|
Section 2 - Advance Python/Chapter S2.06 - Web Development/code/flask/flask_login/sample_2/app.py
|
Python
|
gpl-3.0
| 2,215
|
import unittest
import numpy as np
from numpy.testing import assert_array_equal
from dgw.evaluation.resampling import extend_point, shrink_to_a_single_point
class TestExtending(unittest.TestCase):
def test_extend_point(self):
a = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
ans = np.array([0, 1, 2, 3, 4, 4, 4, 5, 6, 7, 8, 9, 10])
assert_array_equal(ans, extend_point(a, 4, 3))
# multi-dim
a = np.array([[1, 2], [2, 3], [3, 4]])
ans = np.array([[1, 2], [2, 3], [2, 3], [3, 4]])
assert_array_equal(ans, extend_point(a, 1, 2))
def test_extend_point_left_boundary(self):
a = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
ans = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8])
assert_array_equal(ans, extend_point(a, 0, 4))
def test_extend_point_right_boundary(self):
a = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
ans = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 8, 8])
assert_array_equal(ans, extend_point(a, 8, 4))
def test_shrink_to_single_point(self):
a = np.array([0, 1, 2, 3, 3, 3, 3, 4, 5, 6])
ans = np.array([0, 1, 2, 3, 4, 5, 6])
assert_array_equal(ans, shrink_to_a_single_point(a, 3, 4))
b = np.array([0, 1, 2, 3, 8, 9, 10, 4, 5, 6])
ans = np.array([0, 1, 2, np.mean([3, 8, 9, 10]), 4, 5, 6])
assert_array_equal(ans, shrink_to_a_single_point(b, 3, 4))
# multi-dim
a = np.array([[1, 2], [2, 3], [2, 3], [3, 4]])
ans = np.array([[1, 2], [2, 3], [3, 4]])
assert_array_equal(ans, shrink_to_a_single_point(a, 1, 2))
def test_shrink_to_single_point_boundary(self):
a = np.array([0, 1, 2, 3, 4, 5, 6, 6])
ans = np.array([0, 1, 2, 3, 4, 5, 6]) # Ignore points that go out of bound
assert_array_equal(ans, shrink_to_a_single_point(a, 6, 4))
|
lukauskas/dgw
|
dgw/evaluation/tests/test_resampling.py
|
Python
|
gpl-3.0
| 1,877
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a dialog for editing IRC network definitions.
"""
from __future__ import unicode_literals
import copy
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QDialog, QDialogButtonBox, QTreeWidgetItem
from E5Gui import E5MessageBox
from .Ui_IrcNetworkEditDialog import Ui_IrcNetworkEditDialog
import UI.PixmapCache
class IrcNetworkEditDialog(QDialog, Ui_IrcNetworkEditDialog):
"""
Class implementing a dialog for editing IRC network definitions.
"""
def __init__(self, manager, networkName, parent=None):
"""
Constructor
@param manager reference to the IRC network manager object
(IrcNetworkManager)
@param networkName name of the network to work on (string)
@param parent reference to the parent widget (QWidget)
"""
super(IrcNetworkEditDialog, self).__init__(parent)
self.setupUi(self)
self.__manager = manager
self.editIdentitiesButton.setIcon(
UI.PixmapCache.getIcon("ircConfigure.png"))
self.editServerButton.setIcon(
UI.PixmapCache.getIcon("ircConfigure.png"))
self.editChannelButton.setIcon(
UI.PixmapCache.getIcon("ircConfigure.png"))
self.addChannelButton.setIcon(UI.PixmapCache.getIcon("plus.png"))
self.deleteChannelButton.setIcon(UI.PixmapCache.getIcon("minus.png"))
self.__okButton = self.buttonBox.button(QDialogButtonBox.Ok)
if networkName:
self.__network = copy.deepcopy(
self.__manager.getNetwork(networkName))
else:
from .IrcNetworkManager import IrcNetwork
self.__network = IrcNetwork("")
# network name
self.networkEdit.setText(networkName)
# identities
self.__refreshIdentityCombo(self.__network.getIdentityName())
# server
self.serverEdit.setText(self.__network.getServerName())
# channels
for channelName in sorted(self.__network.getChannelNames()):
channel = self.__network.getChannel(channelName)
if channel.autoJoin():
autoJoin = self.tr("Yes")
else:
autoJoin = self.tr("No")
QTreeWidgetItem(self.channelList, [channelName, autoJoin])
self.__updateOkButton()
self.on_channelList_itemSelectionChanged()
def __updateOkButton(self):
"""
Private method to update the OK button state.
"""
enable = True
enable &= self.networkEdit.text() != ""
enable &= self.serverEdit.text() != ""
self.__okButton.setEnabled(enable)
@pyqtSlot(str)
def on_networkEdit_textChanged(self, txt):
"""
Private slot to handle changes of the network name.
@param txt text entered into the network name edit (string)
"""
self.__updateOkButton()
def __refreshIdentityCombo(self, currentIdentity):
"""
Private method to refresh the identity combo.
@param currentIdentity name of the identity to select (string)
"""
self.identityCombo.clear()
from .IrcNetworkManager import IrcIdentity
identities = list(sorted(self.__manager.getIdentityNames()))
identities[identities.index(IrcIdentity.DefaultIdentityName)] = \
IrcIdentity.DefaultIdentityDisplay
self.identityCombo.addItems(identities)
if currentIdentity == IrcIdentity.DefaultIdentityName:
currentIdentity = IrcIdentity.DefaultIdentityDisplay
index = self.identityCombo.findText(currentIdentity)
if index == -1:
index = 0
self.identityCombo.setCurrentIndex(index)
@pyqtSlot(str)
def on_identityCombo_currentIndexChanged(self, identity):
"""
Private slot to handle the selection of an identity.
        @param identity selected identity (string)
"""
from .IrcNetworkManager import IrcIdentity
if identity == IrcIdentity.DefaultIdentityDisplay:
identity = IrcIdentity.DefaultIdentityName
self.__network.setIdentityName(identity)
@pyqtSlot()
def on_editIdentitiesButton_clicked(self):
"""
Private slot to edit the identities.
"""
from .IrcIdentitiesEditDialog import IrcIdentitiesEditDialog
currentIdentity = self.identityCombo.currentText()
dlg = IrcIdentitiesEditDialog(self.__manager, currentIdentity, self)
dlg.exec_()
self.__refreshIdentityCombo(currentIdentity)
@pyqtSlot(str)
def on_serverEdit_textChanged(self, txt):
"""
Private slot to handle changes of the server name.
@param txt text entered into the server name edit (string)
"""
self.__updateOkButton()
@pyqtSlot()
def on_editServerButton_clicked(self):
"""
Private slot to edit the server configuration.
"""
from .IrcServerEditDialog import IrcServerEditDialog
dlg = IrcServerEditDialog(self.__network.getServer())
if dlg.exec_() == QDialog.Accepted:
self.__network.setServer(dlg.getServer())
self.serverEdit.setText(self.__network.getServerName())
@pyqtSlot()
def on_addChannelButton_clicked(self):
"""
Private slot to add a channel.
"""
self.__editChannel(None)
@pyqtSlot()
def on_editChannelButton_clicked(self):
"""
Private slot to edit the selected channel.
"""
itm = self.channelList.selectedItems()[0]
if itm:
self.__editChannel(itm)
@pyqtSlot()
def on_deleteChannelButton_clicked(self):
"""
Private slot to delete the selected channel.
"""
itm = self.channelList.selectedItems()[0]
if itm:
res = E5MessageBox.yesNo(
self,
self.tr("Delete Channel"),
self.tr(
"""Do you really want to delete channel <b>{0}</b>?""")
.format(itm.text(0)))
if res:
self.__network.deleteChannel(itm.text(0))
index = self.channelList.indexOfTopLevelItem(itm)
self.channelList.takeTopLevelItem(index)
del itm
@pyqtSlot(QTreeWidgetItem, int)
def on_channelList_itemActivated(self, item, column):
"""
Private slot to handle the activation of a channel entry.
@param item reference to the activated item (QTreeWidgetItem)
@param column column the activation occurred in (integer)
"""
self.__editChannel(item)
@pyqtSlot()
def on_channelList_itemSelectionChanged(self):
"""
Private slot to handle changes of the selection of channels.
"""
selectedItems = self.channelList.selectedItems()
if len(selectedItems) == 0:
enable = False
else:
enable = True
self.editChannelButton.setEnabled(enable)
self.deleteChannelButton.setEnabled(enable)
def __editChannel(self, itm):
"""
Private method to edit a channel.
@param itm reference to the item to be edited (QTreeWidgetItem)
"""
if itm:
channel = self.__network.getChannel(itm.text(0))
name = channel.getName()
key = channel.getKey()
autoJoin = channel.autoJoin()
else:
# add a new channel
name = ""
key = ""
autoJoin = False
from .IrcChannelEditDialog import IrcChannelEditDialog
dlg = IrcChannelEditDialog(name, key, autoJoin, itm is not None, self)
if dlg.exec_() == QDialog.Accepted:
from .IrcNetworkManager import IrcChannel
name, key, autoJoin = dlg.getData()
channel = IrcChannel(name)
channel.setKey(key)
channel.setAutoJoin(autoJoin)
if itm:
if autoJoin:
itm.setText(1, self.tr("Yes"))
else:
itm.setText(1, self.tr("No"))
self.__network.setChannel(channel)
else:
if autoJoin:
autoJoinTxt = self.tr("Yes")
else:
autoJoinTxt = self.tr("No")
QTreeWidgetItem(self.channelList, [name, autoJoinTxt])
self.__network.addChannel(channel)
def getNetwork(self):
"""
Public method to get the network object.
@return edited network object (IrcNetwork)
"""
self.__network.setName(self.networkEdit.text())
return self.__network
|
testmana2/test
|
Network/IRC/IrcNetworkEditDialog.py
|
Python
|
gpl-3.0
| 9,059
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Damien Churchill <damoxc@gmail.com>
#
# Basic plugin template created by:
# Copyright (C) 2008 Martijn Voncken <mvoncken@gmail.com>
# Copyright (C) 2007-2009 Andrew Resch <andrewresch@gmail.com>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
from setuptools import find_packages, setup
__plugin_name__ = "WebUi"
__author__ = "Damien Churchill"
__author_email__ = "damoxc@gmail.com"
__version__ = "0.1"
__url__ = "http://deluge-torrent.org"
__license__ = "GPLv3"
__description__ = "Allows starting the web interface within the daemon."
__long_description__ = """"""
__pkg_data__ = {"deluge.plugins." + __plugin_name__.lower(): ["template/*", "data/*"]}
setup(
name=__plugin_name__,
version=__version__,
description=__description__,
author=__author__,
author_email=__author_email__,
url=__url__,
license=__license__,
long_description=__long_description__ if __long_description__ else __description__,
packages=find_packages(),
namespace_packages=["deluge", "deluge.plugins"],
package_data=__pkg_data__,
entry_points="""
[deluge.plugin.core]
%s = deluge.plugins.%s:CorePlugin
[deluge.plugin.gtkui]
%s = deluge.plugins.%s:GtkUIPlugin
""" % ((__plugin_name__, __plugin_name__.lower()) * 2)
)
|
bendykst/deluge
|
deluge/plugins/WebUi/setup.py
|
Python
|
gpl-3.0
| 1,489
|
__doc__ = """External interface to the BeautifulSoup HTML parser.
"""
__all__ = ["fromstring", "parse", "convert_tree"]
from lxml import etree, html
from calibre.ebooks.BeautifulSoup import \
BeautifulSoup, Tag, Comment, ProcessingInstruction, NavigableString
def fromstring(data, beautifulsoup=None, makeelement=None, **bsargs):
"""Parse a string of HTML data into an Element tree using the
BeautifulSoup parser.
Returns the root ``<html>`` Element of the tree.
You can pass a different BeautifulSoup parser through the
    `beautifulsoup` keyword, and a different Element factory function
through the `makeelement` keyword. By default, the standard
``BeautifulSoup`` class and the default factory of `lxml.html` are
used.
"""
return _parse(data, beautifulsoup, makeelement, **bsargs)
def parse(file, beautifulsoup=None, makeelement=None, **bsargs):
    """Parse a file into an ElementTree using the BeautifulSoup parser.
You can pass a different BeautifulSoup parser through the
    `beautifulsoup` keyword, and a different Element factory function
through the `makeelement` keyword. By default, the standard
``BeautifulSoup`` class and the default factory of `lxml.html` are
used.
"""
if not hasattr(file, 'read'):
file = open(file)
root = _parse(file, beautifulsoup, makeelement, **bsargs)
return etree.ElementTree(root)
def convert_tree(beautiful_soup_tree, makeelement=None):
"""Convert a BeautifulSoup tree to a list of Element trees.
Returns a list instead of a single root Element to support
HTML-like soup with more than one root element.
You can pass a different Element factory through the `makeelement`
keyword.
"""
if makeelement is None:
makeelement = html.html_parser.makeelement
root = _convert_tree(beautiful_soup_tree, makeelement)
children = root.getchildren()
for child in children:
root.remove(child)
return children
# helpers
def _parse(source, beautifulsoup, makeelement, **bsargs):
if beautifulsoup is None:
beautifulsoup = BeautifulSoup
if makeelement is None:
makeelement = html.html_parser.makeelement
if 'convertEntities' not in bsargs:
bsargs['convertEntities'] = 'xhtml' # Changed by Kovid, otherwise ' is mangled, see https://bugs.launchpad.net/calibre/+bug/1197585
tree = beautifulsoup(source, **bsargs)
root = _convert_tree(tree, makeelement)
# from ET: wrap the document in a html root element, if necessary
if len(root) == 1 and root[0].tag == "html":
return root[0]
root.tag = "html"
return root
def _convert_tree(beautiful_soup_tree, makeelement):
root = makeelement(beautiful_soup_tree.name,
attrib=dict(beautiful_soup_tree.attrs))
_convert_children(root, beautiful_soup_tree, makeelement)
return root
def _convert_children(parent, beautiful_soup_tree, makeelement):
SubElement = etree.SubElement
et_child = None
for child in beautiful_soup_tree:
if isinstance(child, Tag):
et_child = SubElement(parent, child.name, attrib=dict(
[(k, unescape(v)) for (k,v) in child.attrs]))
_convert_children(et_child, child, makeelement)
elif type(child) is NavigableString:
_append_text(parent, et_child, unescape(child))
else:
if isinstance(child, Comment):
parent.append(etree.Comment(child))
elif isinstance(child, ProcessingInstruction):
parent.append(etree.ProcessingInstruction(
*child.split(' ', 1)))
else: # CData
_append_text(parent, et_child, unescape(child))
def _append_text(parent, element, text):
if element is None:
parent.text = (parent.text or '') + text
else:
element.tail = (element.tail or '') + text
# copied from ET's ElementSoup
try:
from html.entities import name2codepoint # Python 3
name2codepoint
except ImportError:
from htmlentitydefs import name2codepoint
import re
handle_entities = re.compile(r"&(\w+);").sub
def unescape(string):
if not string:
return ''
# work around oddities in BeautifulSoup's entity handling
def unescape_entity(m):
try:
return unichr(name2codepoint[m.group(1)])
except KeyError:
return m.group(0) # use as is
return handle_entities(unescape_entity, string)
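# Illustrative example (assuming the Python 2 semantics used above):
#   unescape("Fish &amp; chips") -> u"Fish & chips"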
|
jelly/calibre
|
src/calibre/utils/soupparser.py
|
Python
|
gpl-3.0
| 4,507
|
#!/usr/bin/env python
# encoding:utf-8
"""
Filter Xroar trace files.
see README for more information.
:created: 2014 by Jens Diemer - www.jensdiemer.de
:license: GNU GPL v3 or above, see LICENSE for more details.
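Typical invocation (illustrative; see get_cli_args() below for all options):
    python filter_xroar_trace.py trace.txt filtered.txt --filter 10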
"""
import os
import time
import sys
import argparse
class XroarTraceFilter(object):
def __init__(self, infile, outfile):
self.infile = infile
self.outfile = outfile
def load_tracefile(self, f):
sys.stderr.write(
"\nRead %s...\n\n" % f.name
)
addr_stat = {} # TODO: Use collections.Counter
next_update = time.time() + 0.5
line_no = 0 # e.g. empty file
for line_no, line in enumerate(f):
if time.time() > next_update:
sys.stderr.write(
"\rAnalyzed %i op calls..." % line_no
)
sys.stderr.flush()
next_update = time.time() + 0.5
addr = line[:4]
addr_stat.setdefault(addr, 0)
addr_stat[addr] += 1
f.seek(0) # if also used in self.filter()
sys.stderr.write(
"\rAnalyzed %i op calls, complete.\n" % line_no
)
sys.stderr.write(
"\nThe tracefile contains %i unique addresses.\n" % len(addr_stat)
)
return addr_stat
def unique(self):
sys.stderr.write(
"\nunique %s in %s...\n\n" % (self.infile.name, self.outfile.name)
)
unique_addr = set()
total_skiped_lines = 0
skip_count = 0
last_line_no = 0
next_update = time.time() + 1
stat_out = False
for line_no, line in enumerate(self.infile):
if time.time() > next_update:
self.outfile.flush()
if stat_out:
sys.stderr.write("\r")
else:
sys.stderr.write("\n")
sys.stderr.write(
"In %i lines (%i/sec.) are %i unique address calls..." % (
line_no, (line_no - last_line_no), len(unique_addr)
)
)
stat_out = True
sys.stderr.flush()
last_line_no = line_no
next_update = time.time() + 1
addr = line[:4]
if addr in unique_addr:
total_skiped_lines += 1
skip_count += 1
continue
unique_addr.add(addr)
if skip_count != 0:
if stat_out:
                    # Skip info should not be on the same line after stat info
sys.stderr.write("\n")
self.outfile.write(
"... [Skip %i lines] ...\n" % skip_count
)
skip_count = 0
self.outfile.write(line)
stat_out = False
self.outfile.close()
        sys.stderr.write(
            "%i lines were filtered.\n" % total_skiped_lines
        )
def display_addr_stat(self, addr_stat, display_max=None):
if display_max is None:
sys.stdout.write(
"\nList of all called addresses:\n"
)
else:
sys.stdout.write(
"List of the %i most called addresses:\n" % display_max
)
        for no, data in enumerate(sorted(addr_stat.items(), key=lambda x: x[1], reverse=True)):
if display_max is not None and no >= display_max:
break
sys.stdout.write(
"\tAddress %s called %s times.\n" % data
)
def get_max_count_filter(self, addr_stat, max_count=10):
sys.stderr.write(
"Filter addresses with more than %i calls:\n" % max_count
)
addr_filter = {}
        for addr, count in addr_stat.items():
if count >= max_count:
addr_filter[addr] = count
return addr_filter
def filter(self, addr_filter):
sys.stderr.write(
"Filter %i addresses.\n" % len(addr_filter)
)
total_skiped_lines = 0
skip_count = 0
last_line_no = 0
next_update = time.time() + 1
for line_no, line in enumerate(self.infile):
if time.time() > next_update:
sys.stderr.write(
"\rFilter %i lines (%i/sec.)..." % (
line_no, (line_no - last_line_no)
)
)
sys.stderr.flush()
last_line_no = line_no
next_update = time.time() + 1
addr = line[:4]
if addr in addr_filter:
total_skiped_lines += 1
skip_count += 1
continue
if skip_count != 0:
self.outfile.write(
"... [Skip %i lines] ...\n" % skip_count
)
skip_count = 0
self.outfile.write(line)
self.outfile.close()
        sys.stderr.write(
            "%i lines were filtered.\n" % total_skiped_lines
        )
def start_stop(self, start_addr, stop_addr):
sys.stderr.write(
"\nFilter starts with $%x and ends with $%x from %s in %s...\n\n" % (
start_addr, stop_addr,
self.infile.name, self.outfile.name
)
)
all_addresses = set()
passed_addresses = set()
start_seperator = "\n ---- [ START $%x ] ---- \n" % start_addr
end_seperator = "\n ---- [ END $%x ] ---- \n" % stop_addr
last_line_no = 0
next_update = time.time() + 1
stat_out = False
in_area = False
for line_no, line in enumerate(self.infile):
try:
addr = int(line[:4], 16)
except ValueError:
continue
passed_addresses.add(addr)
if in_area:
self.outfile.write(line)
stat_out = False
if addr == stop_addr:
sys.stderr.flush()
self.outfile.flush()
sys.stderr.write(end_seperator)
self.outfile.write(end_seperator)
sys.stderr.flush()
self.outfile.flush()
in_area = False
continue
else:
if addr == start_addr:
sys.stderr.flush()
self.outfile.flush()
sys.stderr.write(start_seperator)
self.outfile.write(start_seperator)
in_area = True
self.outfile.write(line)
sys.stderr.flush()
self.outfile.flush()
stat_out = False
continue
if time.time() > next_update:
self.outfile.flush()
if stat_out:
sys.stderr.write("\r")
else:
sys.stderr.write("\n")
sys.stderr.write(
"process %i lines (%i/sec.), wait for $%x..." % (
line_no, (line_no - last_line_no), start_addr,
)
)
passed_addresses -= all_addresses
if passed_addresses:
all_addresses.update(passed_addresses)
passed_addresses = ",".join(["$%x" % i for i in passed_addresses])
sys.stderr.write(
"\nPassed unique addresses: %s\n" % passed_addresses
)
passed_addresses = set()
else:
stat_out = True
sys.stderr.flush()
last_line_no = line_no
next_update = time.time() + 1
self.outfile.close()
def main(args):
xt = XroarTraceFilter(args.infile, args.outfile)
if args.unique:
xt.unique()
return
if args.start_stop:
xt.start_stop(*args.start_stop)
return
if args.loop_filter:
addr_stat = xt.load_tracefile(args.loop_filter)
xt.filter(addr_filter=addr_stat)
if "display" in args:
addr_stat = xt.load_tracefile(args.infile)
xt.display_addr_stat(addr_stat,
display_max=args.display
)
if args.filter:
addr_stat = xt.load_tracefile(args.infile)
addr_filter = xt.get_max_count_filter(addr_stat,
max_count=args.filter
)
xt.filter(addr_filter)
def start_stop_value(arg):
start_raw, stop_raw = arg.split("-")
start = int(start_raw.strip("$ "), 16)
stop = int(stop_raw.strip("$ "), 16)
sys.stderr.write("Use: $%x-$%x" % (start, stop))
return (start, stop)
def get_cli_args():
parser = argparse.ArgumentParser(description="Filter Xroar traces")
parser.add_argument("infile", nargs="?",
type=argparse.FileType("r"),
default=sys.stdin,
help="Xroar trace file or stdin"
)
parser.add_argument("outfile", nargs="?",
type=argparse.FileType("w"),
default=sys.stdout,
help="If given: write output in a new file else: Display it."
)
parser.add_argument("--display", metavar="MAX",
type=int, default=argparse.SUPPRESS,
nargs="?",
help="Display statistics how often a address is called.",
)
parser.add_argument("--filter", metavar="MAX",
type=int,
nargs="?",
help="Filter the trace: skip addresses that called more than given count.",
)
parser.add_argument("--unique",
action="store_true",
help="Read infile and store in outfile only unique addresses.",
)
parser.add_argument("--loop-filter", metavar="FILENAME",
type=argparse.FileType("r"),
nargs="?",
help="Live Filter with given address file.",
)
parser.add_argument("--start-stop", metavar="START-STOP",
type=start_stop_value,
nargs="?",
help="Enable trace only from $START to $STOP e.g.: --area=$4000-$5000",
)
args = parser.parse_args()
return args
if __name__ == '__main__':
# sys.argv += ["--area=broken"]
# sys.argv += ["--area=1234-5678"]
args = get_cli_args()
main(args)
|
JuhaniImberg/DragonPy
|
misc/filter_xroar_trace.py
|
Python
|
gpl-3.0
| 10,452
|
#
#
# Copyright 2011,2013 Luis Ariel Vega Soliz, Uremix (http://www.uremix.org) and contributors.
#
#
# This file is part of UADH (Uremix App Developer Helper).
#
# UADH is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UADH is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with UADH. If not, see <http://www.gnu.org/licenses/>.
#
#
'''
Created on 08/09/2012
@author: Luis Ariel Vega Soliz (ariel.vega@uremix.org)
@contact: Uremix Team (http://uremix.org)
'''
|
arielvega/uremix-app-developer-helper
|
src/uadh/gui/tkinter/__init__.py
|
Python
|
gpl-3.0
| 936
|
../../../../../../share/pyshared/mx/DateTime/mxDateTime/test.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/mx/DateTime/mxDateTime/test.py
|
Python
|
gpl-3.0
| 63
|
from spec.python import db_connection
import sam.common
import sam.constants
import web
app = web.application(sam.constants.urls, globals(), autoreload=False)
sam.common.session_store = web.session.DBStore(db_connection.db, 'sessions')
sam.common.session = web.session.Session(app, sam.common.session_store)
# TODO: these commands ping the prod server instead of the test server for the session table.
# If the prod server is missing, these fail.
# I'm not sure why they do that.
def test_404():
with db_connection.env(login_active=False):
req = app.request('/invalidendpoint', method='GET')
assert req.status == "404 Not Found"
req = app.request('/invalidendpoint', method='POST')
assert req.status == "404 Not Found"
def test_exists_map():
with db_connection.env(login_active=False):
req = app.request('/map', method='POST')
assert req.status == "405 Method Not Allowed"
req = app.request('/map?q=42', method='GET')
assert req.status == "200 OK"
def test_exists_stats():
with db_connection.env(login_active=False):
req = app.request('/stats', 'GET')
assert req.status == "200 OK"
req = app.request('/stats', 'POST')
assert req.status == "405 Method Not Allowed"
def test_exists_nodes():
with db_connection.env(login_active=False):
req = app.request('/nodes', 'GET')
assert req.status == "200 OK"
req = app.request('/nodes', 'POST')
assert req.status == "200 OK"
def test_exists_links():
with db_connection.env(login_active=False):
req = app.request('/links', 'GET')
assert req.status == "200 OK"
req = app.request('/links', 'POST')
assert req.status == "405 Method Not Allowed"
def test_exists_details():
with db_connection.env(login_active=False):
req = app.request('/details', 'GET')
assert req.status == "200 OK"
req = app.request('/details', 'POST')
assert req.status == "405 Method Not Allowed"
def test_exists_portinfo():
with db_connection.env(login_active=False):
req = app.request('/portinfo', 'GET')
assert req.status == "200 OK"
req = app.request('/portinfo', 'POST')
assert req.status == "200 OK"
def test_exists_metadata():
with db_connection.env(login_active=False):
req = app.request('/metadata', 'GET')
assert req.status == "200 OK"
req = app.request('/metadata', 'POST')
assert req.status == "405 Method Not Allowed"
def test_exists_table():
with db_connection.env(login_active=False):
req = app.request('/table', 'GET')
assert req.status == "200 OK"
req = app.request('/table', 'POST')
assert req.status == "405 Method Not Allowed"
def test_exists_settings():
with db_connection.env(login_active=False):
req = app.request('/settings', 'GET')
assert req.status == "200 OK"
req = app.request('/settings', 'POST')
assert req.status == "200 OK"
def test_exists_settings_page():
with db_connection.env(login_active=False):
req = app.request('/settings_page', 'GET')
assert req.status == "200 OK"
req = app.request('/settings_page', 'POST')
assert req.status == "405 Method Not Allowed"
def test_exists_login():
with db_connection.env(login_active=True):
req = app.request('/login', 'GET')
assert req.status == "200 OK"
req = app.request('/login', 'POST')
assert req.status == "200 OK"
def test_exists_logout():
with db_connection.env(login_active=True, mock_session=True):
req = app.request('/logout', 'GET')
assert req.status == "303 See Other"
req = app.request('/logout', 'POST')
assert req.status == "405 Method Not Allowed"
|
riolet/SAM
|
spec/python/test_server.py
|
Python
|
gpl-3.0
| 3,851
|
async def ev_mention(ev, message):
def_stat_data = {
'event': 'mention',
'count': 0
}
collection = 'EventStats'
database = ev.bot.cfg.db.database
check = ev.db[database][collection].find_one({"event": 'mention'})
if not check:
ev.db[database][collection].insert_one(def_stat_data)
ev_count = 0
else:
ev_count = check['count']
ev_count += 1
update_target = {"event": 'mention'}
update_data = {"$set": {'count': ev_count}}
ev.db[database][collection].update_one(update_target, update_data)
|
lu-ci/apex-sigma-plugins
|
core_functions/stats/ev_mention.py
|
Python
|
gpl-3.0
| 572
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from setuptools import setup
PACKAGE_NAME = 'mozdevice'
PACKAGE_VERSION = '0.44'
deps = ['mozfile >= 1.0',
'mozlog >= 2.1',
'moznetwork >= 0.24',
'mozprocess >= 0.19',
]
setup(name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description="Mozilla-authored device management",
long_description="see http://mozbase.readthedocs.org/",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='',
author='Mozilla Automation and Testing Team',
author_email='tools@lists.mozilla.org',
url='https://wiki.mozilla.org/Auto-tools/Projects/Mozbase',
license='MPL',
packages=['mozdevice'],
include_package_data=True,
zip_safe=False,
install_requires=deps,
entry_points="""
# -*- Entry points: -*-
[console_scripts]
dm = mozdevice.dmcli:cli
sutini = mozdevice.sutini:main
""",
)
|
kostaspl/SpiderMonkey38
|
testing/mozbase/mozdevice/setup.py
|
Python
|
mpl-2.0
| 1,156
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
By default, this module uses the pre-built Ember model from
https://pubdata.endgame.com/ember/ember_dataset.tar.bz2.
Documentation about training a new model can be found on the Ember GitHub page
(https://github.com/endgameinc/ember).
After training a new model, place the resulting txt file in
`multiscanner/etc` and update `config.ini` with the new filename.
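Example config.ini entry (illustrative; the section name is assumed to match NAME below):
    [EndgameEmber]
    ENABLED = True
    path-to-model = /path/to/ember_model_2017.txt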
"""
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
import os
from pathlib import Path
from multiscanner import CONFIG
__authors__ = "Patrick Copeland"
__license__ = "MPL 2.0"
TYPE = "MachineLearning"
NAME = "EndgameEmber"
REQUIRES = ['libmagic']
DEFAULTCONF = {
'ENABLED': False,
'path-to-model': os.path.join(os.path.split(CONFIG)[0], 'etc', 'ember', 'ember_model_2017.txt'),
}
LGBM_MODEL = None
try:
import ember
has_ember = True
except ImportError as e:
print("ember module not installed...")
has_ember = False
try:
import lightgbm as lgb
except ImportError as e:
print("lightgbm module needed for ember. Not installed...")
has_ember = False
def check(conf=DEFAULTCONF):
if not conf['ENABLED']:
return False
if not has_ember:
return False
if not Path(conf['path-to-model']).is_file():
print("'{}' does not exist. Check config.ini for model location.".format(conf['path-to-model']))
return False
try:
global LGBM_MODEL
LGBM_MODEL = lgb.Booster(model_file=conf['path-to-model'])
except lgb.LightGBMError as e:
print("Unable to load model, {}. ({})".format(conf['path-to-model'], e))
return False
return True
def scan(filelist, conf=DEFAULTCONF):
results = []
for fname in filelist:
# Ensure libmagic returns results
if REQUIRES[0] is not None:
            # only run the analytic if libmagic identifies the file as a PE executable
file_type = _get_libmagicresults(REQUIRES[0][0], fname)
if file_type.startswith('PE32'):
with open(fname, 'rb') as fh:
ember_result = ember.predict_sample(LGBM_MODEL, fh.read())
results.append(
(fname, {'Prediction': ember_result})
)
metadata = {}
metadata["Name"] = NAME
metadata["Type"] = TYPE
return (results, metadata)
def _get_libmagicresults(results, fname):
libmagicdict = dict(results)
return libmagicdict.get(fname)
|
mitre/multiscanner
|
multiscanner/modules/MachineLearning/EndgameEmber.py
|
Python
|
mpl-2.0
| 2,656
|
# -*- coding: utf-8 -*-
import json
from . import check_input_attribute, standard_error_message
from pyipasnhistory import IPASNHistory
from pymisp import MISPAttribute, MISPEvent, MISPObject
misperrors = {'error': 'Error'}
mispattributes = {'input': ['ip-src', 'ip-dst'], 'format': 'misp_standard'}
moduleinfo = {'version': '0.2', 'author': 'Raphaël Vinot',
'description': 'Query an IP ASN history service (https://github.com/CIRCL/IP-ASN-history.git)',
'module-type': ['expansion', 'hover']}
def parse_result(attribute, values):
event = MISPEvent()
initial_attribute = MISPAttribute()
initial_attribute.from_dict(**attribute)
event.add_attribute(**initial_attribute)
mapping = {'asn': ('AS', 'asn'), 'prefix': ('ip-src', 'subnet-announced')}
print(values)
for last_seen, response in values['response'].items():
asn = MISPObject('asn')
asn.add_attribute('last-seen', **{'type': 'datetime', 'value': last_seen})
for feature, attribute_fields in mapping.items():
attribute_type, object_relation = attribute_fields
asn.add_attribute(object_relation, **{'type': attribute_type, 'value': response[feature]})
asn.add_reference(initial_attribute.uuid, 'related-to')
event.add_object(**asn)
event = json.loads(event.to_json())
return {key: event[key] for key in ('Attribute', 'Object')}
def handler(q=False):
if q is False:
return False
request = json.loads(q)
if not request.get('attribute') or not check_input_attribute(request['attribute']):
return {'error': f'{standard_error_message}, which should contain at least a type, a value and an uuid.'}
if request['attribute']['type'] not in mispattributes['input']:
return {'error': 'Unsupported attribute type.'}
toquery = request['attribute']['value']
ipasn = IPASNHistory()
values = ipasn.query(toquery)
if not values:
misperrors['error'] = 'Unable to find the history of this IP'
return misperrors
return {'results': parse_result(request['attribute'], values)}
def introspection():
return mispattributes
def version():
return moduleinfo
|
VirusTotal/misp-modules
|
misp_modules/modules/expansion/ipasn.py
|
Python
|
agpl-3.0
| 2,208
|
# Generated by Django 2.2.13 on 2020-07-30 15:02
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0526_unique_together_groupelementyear'),
]
operations = [
migrations.AlterField(
model_name='groupelementyear',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT,
to='base.EducationGroupYear'),
),
]
|
uclouvain/OSIS-Louvain
|
base/migrations/0527_auto_20200730_1502.py
|
Python
|
agpl-3.0
| 560
|
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import models
|
Comunitea/CMNT_004_15
|
project-addons/prepaid_order_discount/__init__.py
|
Python
|
agpl-3.0
| 867
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import re
from . import unit_base
from decimal import Decimal
from flask_babel import lazy_gettext
def convert(feet, inches):
"""Converts from feet and inches to cm or m
If feet contains '-' then inches won't have '-'
If inches contains '-' then feet value will be 0
:param feet: Feet value in string
:param inches: Inch value in string
:return: cm or m value in string, and the symbol as 'm' or 'cm'
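    Illustrative examples (exact strings depend on unit_base.format_converted):
        convert("0", "8") is expected to return roughly ("20.32", "cm")
        convert("5", "10") is expected to return roughly ("1.78", "m")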
"""
foot_to_cm_rate = Decimal(30.48)
inch_to_cm_rate = Decimal(2.54)
total_centimeters = []
symbol = "cm"
if "-" in feet:
feet_list = feet.split("-")
total_centimeters = [(Decimal(m) * foot_to_cm_rate) + (Decimal(inches) * inch_to_cm_rate) for m in feet_list]
elif "-" in inches:
inches_list = inches.split("-")
total_centimeters = [(Decimal(i) * inch_to_cm_rate) for i in inches_list]
else:
# no multi values
total_centimeters = [(Decimal(feet) * foot_to_cm_rate) + (Decimal(inches) * inch_to_cm_rate)]
if any(c for c in total_centimeters if c > Decimal(100)):
# if the value is greater than 100 then convert it to meter
total_centimeters = [unit_base.format_converted((c / Decimal(100)), precision=2) for c in total_centimeters]
symbol = "m"
else:
total_centimeters = [unit_base.format_converted(c, precision=2) for c in total_centimeters]
return "-".join(total_centimeters), symbol
def do_conversion(item, converter, formatter, search_param):
"""Performs the conversion"""
diff = {}
# Group indexes
match_index = 0 # Index of complete match i.e. 5' 10"
value_index = 1 # Index of the value: contains feet if feet is in the match else inches if there's no feet
feet_symbol_index = 7 # Index of feet symbol ', ft, feet, foot
inches_with_feet_value_index = 11 # When there is a feet and inch value matched together
inches_symbol_index = 5 # Index of inches symbol ", in, inch(es)
def convert(match):
match_item = match.group(match_index).strip()
from_value = match.group(value_index)
inches_from_value = "0"
feet_symbol = match.group(feet_symbol_index)
inches_symbol = match.group(inches_symbol_index)
multi_values = "-" in from_value and from_value[-1:] != "-"
if match_item and from_value:
if feet_symbol:
# check if any inches matched
inches_from_value = match.group(inches_with_feet_value_index) or "0"
elif inches_symbol:
# no feet matching
inches_from_value = from_value
from_value = "0"
else:
return {}
if not multi_values:
from_value = re.sub(r"[^\d.]", "", from_value)
inches_from_value = re.sub(r"[^\d.]", "", inches_from_value)
to_value, symbol = converter(from_value, inches_from_value)
diff.setdefault(match_item.strip(), formatter(match_item.strip(), to_value, symbol))
return diff[match_item]
for field in unit_base.macro_replacement_fields:
if item.get(field, None):
re.sub(search_param, convert, item[field])
return (item, diff)
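# Illustrative (assumed) behaviour of feet_inches_to_metric below: text such as
# 5' 10" in one of the macro_replacement_fields is matched by its regex and reported
# in the returned diff as roughly 1.78 m, formatted by unit_base.format_output.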
def feet_inches_to_metric(item, **kwargs):
"""Converts distance values from feet and inches to metric"""
regex = (
r"(\d+-?,?\.?\d*)((\s*)|(-))(((\'|ft\.?|[fF]eet|[fF]oot)"
r'((-)|(\s*))(\d+)?\s?("|in)?)|(\"|[iI]nches|[iI]nch|in))'
)
return do_conversion(item, convert, unit_base.format_output, regex)
name = "feet_inches_to_metric"
label = lazy_gettext("Length feet-inches to metric")
callback = feet_inches_to_metric
access_type = "frontend"
action_type = "interactive"
group = lazy_gettext("length")
|
petrjasek/superdesk-core
|
superdesk/macros/imperial/length_feet_and_inches_to_metric.py
|
Python
|
agpl-3.0
| 4,093
|
from yaml import dump
from twisted.internet.defer import succeed, fail
from txaws.s3.exception import S3Error
from juju.lib.testing import TestCase
from juju.providers.ec2.tests.common import EC2TestMixin
class EC2StateTest(TestCase, EC2TestMixin):
def setUp(self):
EC2TestMixin.setUp(self)
super(EC2StateTest, self).setUp()
def test_save(self):
"""
        When passed some juju ec2 machine instances and asked to save
        the machine state, it will serialize the data to an S3 bucket.
"""
instances = [self.get_instance("i-foobar", dns_name="x1.example.com")]
state = dump(
{"zookeeper-instances":
[[i.instance_id, i.dns_name] for i in instances]})
self.s3.put_object(
self.env_name, "provider-state", state),
self.mocker.result(succeed(state))
self.mocker.replay()
provider = self.get_provider()
d = provider.save_state(
{"zookeeper-instances":
[[i.instance_id, i.dns_name] for i in instances]})
def assert_state(saved_state):
self.assertEqual(saved_state, state)
d.addCallback(assert_state)
return d
def test_save_non_existant_bucket(self):
"""
When saving instance information to S3 the EC2 provider will create a
namespaced bucket specific to the provider instance, if it does not
already exist.
"""
instances = [self.get_instance("i-foobar", dns_name="x1.example.com")]
state = dump(
{"zookeeper-instances":
[[i.instance_id, i.dns_name] for i in instances]})
self.s3.put_object(
self.env_name, "provider-state", state),
error = S3Error("<error/>", 404)
error.errors = [{"Code": "NoSuchBucket"}]
self.mocker.result(fail(error))
self.s3.create_bucket(self.env_name)
self.mocker.result(succeed({}))
self.s3.put_object(
self.env_name, "provider-state", state),
self.mocker.result(succeed(state))
self.mocker.replay()
provider = self.get_provider()
d = provider.save_state(
{"zookeeper-instances":
[[i.instance_id, i.dns_name] for i in instances]})
def assert_state(saved_state):
self.assertEqual(saved_state, state)
d.addCallback(assert_state)
return d
def test_load(self):
"""
The provider bootstrap will load and deserialize any saved state from
s3.
"""
self.s3.get_object(self.env_name, "provider-state")
self.mocker.result(succeed(dump({"zookeeper-instances": []})))
self.mocker.replay()
provider = self.get_provider()
d = provider.load_state()
def assert_load_value(value):
self.assertEqual(value, {"zookeeper-instances": []})
d.addCallback(assert_load_value)
return d
def test_load_nonexistant_bucket(self):
"""
When loading saved state from s3, the system returns False if the
s3 control bucket does not exist.
"""
self.s3.get_object(self.env_name, "provider-state")
error = S3Error("<error/>", 404)
error.errors = [{"Code": "NoSuchBucket"}]
self.mocker.result(fail(error))
self.mocker.replay()
provider = self.get_provider()
d = provider.load_state()
def assert_load_value(value):
self.assertIdentical(value, False)
d.addCallback(assert_load_value)
return d
def test_load_nonexistant(self):
"""
When loading saved state from S3, the provider bootstrap gracefully
handles the scenario where there is no saved state.
"""
self.s3.get_object(self.env_name, "provider-state")
self.mocker.result(succeed(dump([])))
self.mocker.replay()
provider = self.get_provider()
d = provider.load_state()
def assert_load_value(value):
self.assertIdentical(value, False)
d.addCallback(assert_load_value)
return d
|
anbangr/trusted-juju
|
juju/providers/ec2/tests/test_state.py
|
Python
|
agpl-3.0
| 4,134
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import json
import unittest
try:
# NOTE need to import capnp first to activate the magic necessary for
# PythonDummyRegion_capnp, etc.
import capnp
except ImportError:
capnp = None
else:
from nupic.proto.NetworkProto_capnp import NetworkProto
import nupic.bindings.engine_internal as engine
from nupic.bindings.tools.serialization_test_py_region import \
SerializationTestPyRegion
class NetworkTest(unittest.TestCase):
@unittest.skipUnless(
capnp, "pycapnp is not installed, skipping serialization test.")
def testCapnpSerializationWithPyRegion(self):
"""Test capnp (de)serialization of network containing a python region"""
engine.Network.registerPyRegion(__name__,
SerializationTestPyRegion.__name__)
try:
srcNet = engine.Network()
srcNet.addRegion(SerializationTestPyRegion.__name__,
"py." + SerializationTestPyRegion.__name__,
json.dumps({
"dataWidth": 128,
"randomSeed": 99,
}))
# Serialize
builderProto = NetworkProto.new_message()
srcNet.write(builderProto)
# Construct NetworkProto reader from populated builder
readerProto = NetworkProto.from_bytes(builderProto.to_bytes())
# Deserialize
destNet = engine.Network.read(readerProto)
destRegion = destNet.getRegions().getByName(
SerializationTestPyRegion.__name__)
self.assertEqual(destRegion.getParameterUInt32("dataWidth"), 128)
self.assertEqual(destRegion.getParameterUInt32("randomSeed"), 99)
finally:
engine.Network.unregisterPyRegion(SerializationTestPyRegion.__name__)
def testSimpleTwoRegionNetworkIntrospection(self):
# Create Network instance
network = engine.Network()
# Add two TestNode regions to network
network.addRegion("region1", "TestNode", "")
network.addRegion("region2", "TestNode", "")
# Set dimensions on first region
region1 = network.getRegions().getByName("region1")
region1.setDimensions(engine.Dimensions([1, 1]))
# Link region1 and region2
network.link("region1", "region2", "UniformLink", "")
# Initialize network
network.initialize()
for linkName, link in network.getLinks():
# Compare Link API to what we know about the network
self.assertEqual(link.toString(), linkName)
self.assertEqual(link.getDestRegionName(), "region2")
self.assertEqual(link.getSrcRegionName(), "region1")
self.assertEqual(link.getLinkType(), "UniformLink")
self.assertEqual(link.getDestInputName(), "bottomUpIn")
self.assertEqual(link.getSrcOutputName(), "bottomUpOut")
break
else:
self.fail("Unable to iterate network links.")
|
scottpurdy/nupic.core
|
bindings/py/tests/network_test.py
|
Python
|
agpl-3.0
| 3,782
|
#Parsing program to sort through Investopedia
import urllib2
import re
#This is the code to parse the List of Terms
def get_glossary(res_num):
html_lowered = res_num.lower();
begin = html_lowered.find('<!-- .alphabet -->')
end = html_lowered.find('<!-- .idx-1 -->')
if begin == -1 or end == -1:
return None
else:
return res_num[begin+len('<!-- .alphabet -->'):end].strip()
#This is the code to parse the Title
def get_title(res_num):
html_lowered = res_num.lower();
begin = html_lowered.find('<title>')
end = html_lowered.find('</title>')
if begin == -1 or end == -1:
return None
else:
return res_num[begin+len('<title>'):end].strip()
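# Illustrative example: get_title("<title>Foo</title>") would return "Foo"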
#We start with the numbers section of Investopedia
url = "http://www.investopedia.com/terms/1/"
res_num=""
for line in urllib2.urlopen(url):
res_num+=line
title_num = get_title(res_num)
glossary_num = get_glossary(res_num)
##Find all hyperlinks in list then eliminate duplicates
glossary_parsed_num = re.findall(r'href=[\'"]?([^\'" >]+)', glossary_num)
glossary_parsed_num = list(set(glossary_parsed_num))
parent_url = 'http://www.investopedia.com'
tail = ' Definition | Investopedia'
short_tail = ' | Investopedia'
print title_num
gp_list = []
for x in glossary_parsed_num:
gpn = parent_url + x
res_num=""
for line in urllib2.urlopen(gpn):
res_num+=line
gpn_title = get_title(res_num)
gpn_penult = gpn_title.replace(tail,'')
gpn_final = gpn_penult.replace(short_tail,'')
gp_list.append(gpn_final)
#The alphabet section of Investopedia terms begins here
alfa = [chr(i) for i in xrange(ord('a'), ord('z')+1)]
for i, v in enumerate(alfa):
u = 'http://www.investopedia.com/terms/'
w = '/'
invest_alfa_url = u + v + w
# get url info
res_alfa=""
for line in urllib2.urlopen(invest_alfa_url):
res_alfa+=line
glossary_alfa = get_glossary(res_alfa)
title_alfa = get_title(res_alfa)
glossary_parsed_alfa = re.findall(r'href=[\'"]?([^\'" >]+)', glossary_alfa)
glossary_parsed_alfa = list(set(glossary_parsed_alfa))
print title_alfa
for x in glossary_parsed_alfa:
gpa = parent_url + x
res_num=""
for line in urllib2.urlopen(gpa):
res_num+=line
gpa_title = get_title(res_num)
gpa_penult = gpa_title.replace(tail,'')
gpa_final = gpa_penult.replace(short_tail,'')
gp_list.append(gpa_final)
#Write the new list to the file
with open('dict.dat','w') as f:
for item in gp_list:
f.write('%s\n' % item)
#Read back file to check the stock was added correctly
with open('dict.dat') as f:
gp_list = f.readlines()
gp_list = map(lambda s: s.strip(), gp_list)
gp_list = list(set(gp_list))
print gp_list
print ''
|
secondfoundation/Second-Foundation-Src
|
src/haruspex/python/echelon/investopedia_generator.py
|
Python
|
lgpl-2.1
| 2,630
|
# ***************************************************************************
# * *
# * Copyright (c) 2016 - Bernd Hahnebach <bernd@bimstatik.org> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "Command GMSH Mesh From Shape"
__author__ = "Bernd Hahnebach"
__url__ = "http://www.freecadweb.org"
## @package CommandFemMeshGmshFromShape
# \ingroup FEM
import FreeCAD
from FemCommands import FemCommands
import FreeCADGui
import FemGui
from PySide import QtCore
class _CommandFemMeshGmshFromShape(FemCommands):
# the FEM_MeshGmshFromShape command definition
def __init__(self):
super(_CommandFemMeshGmshFromShape, self).__init__()
self.resources = {'Pixmap': 'fem-femmesh-gmsh-from-shape',
'MenuText': QtCore.QT_TRANSLATE_NOOP("FEM_MeshGmshFromShape", "FEM mesh from shape by GMSH"),
'ToolTip': QtCore.QT_TRANSLATE_NOOP("FEM_MeshGmshFromShape", "Create a FEM mesh from a shape by GMSH mesher")}
self.is_active = 'with_part_feature'
def Activated(self):
FreeCAD.ActiveDocument.openTransaction("Create FEM mesh by GMSH")
FreeCADGui.addModule("FemGui")
sel = FreeCADGui.Selection.getSelection()
if (len(sel) == 1):
if(sel[0].isDerivedFrom("Part::Feature")):
mesh_obj_name = sel[0].Name + "_Mesh"
FreeCADGui.addModule("ObjectsFem")
FreeCADGui.doCommand("ObjectsFem.makeMeshGmsh('" + mesh_obj_name + "')")
FreeCADGui.doCommand("App.ActiveDocument.ActiveObject.Part = App.ActiveDocument." + sel[0].Name)
if FemGui.getActiveAnalysis():
FreeCADGui.addModule("FemGui")
FreeCADGui.doCommand("FemGui.getActiveAnalysis().Member = FemGui.getActiveAnalysis().Member + [App.ActiveDocument.ActiveObject]")
FreeCADGui.doCommand("Gui.ActiveDocument.setEdit(App.ActiveDocument.ActiveObject.Name)")
FreeCADGui.Selection.clearSelection()
FreeCADGui.addCommand('FEM_MeshGmshFromShape', _CommandFemMeshGmshFromShape())
|
bblacey/FreeCAD-MacOS-CI
|
src/Mod/Fem/PyGui/_CommandFemMeshGmshFromShape.py
|
Python
|
lgpl-2.1
| 3,534
|
#!/usr/bin/env python
import glob
import os
import sys
import unittest
import common
if len(sys.argv) > 1:
builddir = sys.argv[1]
no_import_hooks = True
else:
builddir = '..'
no_import_hooks = False
common.run_import_tests(builddir, no_import_hooks)
SKIP_FILES = ['common', 'runtests']
dir = os.path.split(os.path.abspath(__file__))[0]
os.chdir(dir)
def gettestnames():
files = [fname[:-3] for fname in glob.glob('test*.py')
if fname not in SKIP_FILES]
return files
suite = unittest.TestSuite()
loader = unittest.TestLoader()
for name in gettestnames():
suite.addTest(loader.loadTestsFromName(name))
testRunner = unittest.TextTestRunner()
testRunner.run(suite)
|
mate-desktop/python-mate-desktop
|
tests/runtests.py
|
Python
|
lgpl-2.1
| 722
|
from yandextank.plugins.Aggregator import SecondAggregateData
from yandextank.plugins.Autostop import AutostopPlugin
from Tank_Test import TankTestCase
import tempfile
import unittest
class AutostopTestCase(TankTestCase):
def setUp(self):
core = self.get_core()
core.load_configs(['config/autostop.conf'])
core.load_plugins()
core.plugins_configure()
self.foo = AutostopPlugin(core)
def tearDown(self):
del self.foo
self.foo = None
def test_run(self):
data = SecondAggregateData()
data.overall.avg_response_time = 11
self.foo.core.set_option(self.foo.SECTION, "autostop", "time(1,10)")
self.foo.configure()
self.foo.prepare_test()
self.foo.start_test()
for n in range(1, 15):
self.foo.aggregate_second(data)
if self.foo.is_test_finished() < 0:
raise RuntimeError()
self.foo.end_test(0)
def test_run_http(self):
data = SecondAggregateData()
data.overall.http_codes = {'200':11}
self.foo.core.set_option(self.foo.SECTION, "autostop", "http (200, 10, 5 )\nhttp (3xx, 1.5%, 10m)")
self.foo.configure()
self.foo.prepare_test()
self.foo.start_test()
for n in range(1, 15):
self.foo.aggregate_second(data)
if self.foo.is_test_finished() < 0:
raise RuntimeError()
self.foo.end_test(0)
def test_run_net(self):
data = SecondAggregateData()
data.overall.net_codes = {71:11}
self.foo.core.set_option(self.foo.SECTION, "autostop", "net (71, 1, 5)\nnet (xx, 1.5%, 10m )")
self.foo.configure()
self.foo.prepare_test()
self.foo.start_test()
for n in range(1, 15):
self.foo.aggregate_second(data)
if self.foo.is_test_finished() < 0:
raise RuntimeError()
self.foo.end_test(0)
def test_run_quan(self):
data = SecondAggregateData()
data.overall.quantiles = {99.0:11}
self.foo.core.set_option(self.foo.SECTION, "autostop", "quantile(99,2,3)")
self.foo.configure()
self.foo.prepare_test()
self.foo.start_test()
for n in range(1, 15):
self.foo.aggregate_second(data)
if self.foo.is_test_finished() < 0:
raise RuntimeError()
self.foo.end_test(0)
def test_run_false_trigger_bug(self):
data = SecondAggregateData()
data.overall.http_codes = {}
self.foo.core.set_option(self.foo.SECTION, "autostop", "http (5xx, 100%, 1)")
self.foo.configure()
self.foo.prepare_test()
self.foo.start_test()
for n in range(1, 15):
self.foo.aggregate_second(data)
if self.foo.is_test_finished() >= 0:
raise RuntimeError()
self.foo.end_test(0)
if __name__ == '__main__':
unittest.main()
|
asekretenko/yandex-tank
|
tests/Autostop_Test.py
|
Python
|
lgpl-2.1
| 3,041
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2007 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
## USA.
##
## Author(s): Johan Dahlin <jdahlin@async.com.br>
##
#
# Documentation references:
#
# http://en.wikipedia.org/wiki/ESC/P
# http://www.epson.co.uk/support/manuals/pdf/ESCP/Part_1.pdf
# http://www.epson.co.uk/support/manuals/pdf/ESCP/Part_2.pdf
#
""" Driver for EPSON Esc/P and Esc/P2 printers. """
import struct
ESC = '\x1b'
CMD_INIT = '@'
CMD_PRINT_QUALITY = 'x'
CMD_PROPORTIONAL = 'p'
CMD_FORM_FEED = '\xff'
CMD_EJECT = '\x19'
QUALITY_DRAFT = '0'
QUALITY_LQ = '1'
QUALITY_NLQ = '1'
class EscPPrinter(object):
def __init__(self, device):
self.device = device
self.fp = open(device, 'w')
self._command(CMD_INIT)
def _command(self, command, *args):
chars = command
for arg in args:
if arg is True:
v = '1'
elif arg is False:
v = '0'
else:
v = arg
chars += v
cmd = '%s%s' % (ESC, chars)
self.send(cmd)
def send(self, data):
self.fp.write(data)
self.fp.flush()
def set_draft_mode(self):
self._command(CMD_PRINT_QUALITY, QUALITY_DRAFT)
def set_proportional(self, proportional):
self._command(CMD_PROPORTIONAL, proportional)
def done(self):
self._command(CMD_INIT)
def form_feed(self):
self._command(CMD_FORM_FEED)
def set_vertical_position(self, position):
args = struct.pack('b', position)
self._command('J', *args)
def test():
printer = EscPPrinter('/dev/lp0')
printer.send(
'Lorem ipsum dolor sit amet, consectetuer adipiscing elit. '
'Ut a velit sit amet nisl hendrerit lacinia. Nunc eleifend '
'cursus risus. Vivamus libero libero, dignissim ut, pulvinar id, '
'blandit a, leo amet.\n'.upper())
printer.done()
if __name__ == '__main__':
test()
|
Vauxoo/stoqdrivers
|
stoqdrivers/escp.py
|
Python
|
lgpl-2.1
| 2,767
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('slice_hound')
mobileTemplate.setLevel(28)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setMeatType("Carnivore Meat")
mobileTemplate.setMeatAmount(65)
mobileTemplate.setHideType("Bristly Hide")
mobileTemplate.setHideAmount(35)
mobileTemplate.setBoneType("Animal Bones")
mobileTemplate.setBoneAmount(30)
mobileTemplate.setSocialGroup("slice hound")
mobileTemplate.setAssistRange(2)
mobileTemplate.setStalker(False)
mobileTemplate.setOptionsBitmask(Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_corellian_slice_hound.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_bite_2')
attacks.add('bm_hamstring_2')
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('slice_hound', mobileTemplate)
return
|
agry/NGECore2
|
scripts/mobiles/corellia/slice_hound.py
|
Python
|
lgpl-3.0
| 1,618
|
import sys
# Project SWG: Jabba TP Romo Vax Bunker: Static Spawns
# (C)2014 ProjectSWG
from resources.datatables import Options
from resources.datatables import State
def addPlanetSpawns(core, planet):
stcSvc = core.staticService
objSvc = core.objectService
stcSvc.spawnObject('romovax_henchman', 'tatooine', long(0), float(-5079.1), float(47.9), float(-6970.5), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', long(0), float(-5063.5), float(49.2), float(-6998.4), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', long(0), float(-5051.8), float(45.7), float(-6989.7), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', long(0), float(-5030.7), float(46.5), float(-6972.5), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', long(0), float(-5019), float(48.3), float(-6946.9), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', long(0), float(-5045.8), float(42.6), float(-6936), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', long(0), float(-5053.7), float(43.6), float(-6961.9), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', long(0), float(-5057.5), float(43.9), float(-6961.5), float(0), float(0), float(0), float(0), 45)
# TODO Check all NPCs for personalized scripting, change format.
bunker = core.objectService.getObject(long(-466404037494797872))
if bunker is not None:
stcSvc.spawnObject('romovax_henchman', 'tatooine', bunker.getCellByCellNumber(2), float(-3.8), float(0.3), float(2.9), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', bunker.getCellByCellNumber(3), float(3.6), float(0.3), float(-3.7), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', bunker.getCellByCellNumber(5), float(29.8), float(-12), float(25.4), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', bunker.getCellByCellNumber(5), float(32.7), float(-12), float(35), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', bunker.getCellByCellNumber(5), float(22.9), float(-12), float(30.9), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', bunker.getCellByCellNumber(4), float(3.9), float(-12), float(21), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', bunker.getCellByCellNumber(4), float(3.9), float(-12), float(38.1), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', bunker.getCellByCellNumber(6), float(3.5), float(-16), float(53), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', bunker.getCellByCellNumber(8), float(58.8), float(-16), float(61), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', bunker.getCellByCellNumber(8), float(74.4), float(-16), float(66.6), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', bunker.getCellByCellNumber(8), float(68.3), float(-16), float(79.2), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', bunker.getCellByCellNumber(8), float(44.6), float(-16), float(82.9), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', bunker.getCellByCellNumber(7), float(26), float(-16), float(79), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', bunker.getCellByCellNumber(7), float(6.9), float(-16), float(78.1), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', bunker.getCellByCellNumber(7), float(-5.2), float(-16), float(77.5), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', bunker.getCellByCellNumber(7), float(-19.9), float(-16), float(78.8), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('romovax_henchman', 'tatooine', bunker.getCellByCellNumber(9), float(32.4), float(-14), float(78.7), float(0), float(0), float(0), float(0), 45)
stcSvc.spawnObject('fighting_romo_vax', 'tatooine', bunker.getCellByCellNumber(9), float(-43.5), float(-14), float(-78.9), float(0), float(0), float(0), float(0), 45)
return
|
agry/NGECore2
|
scripts/static_spawns/tatooine/jabba_tp_romo_vax_bunker.py
|
Python
|
lgpl-3.0
| 4,612
|
# Copyright 2009-2014 Justin Riley
#
# This file is part of TethysCluster.
#
# TethysCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# TethysCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with TethysCluster. If not, see <http://www.gnu.org/licenses/>.
"""
Azure Utility Classes
"""
import os
import re
import time
import base64
import string
import tempfile
import fnmatch
from azure import *
from azure.servicemanagement import *
from pprint import pprint
from tethyscluster import image
from tethyscluster import utils
from tethyscluster import static
from tethyscluster import spinner
from tethyscluster import sshutils
from tethyscluster import webtools
from tethyscluster import exception
from tethyscluster import progressbar
from tethyscluster.utils import print_timing
from tethyscluster.logger import log
class EasyAzure(object):
def __init__(self, subscription_id, certificate_path,
connection_authenticator, **kwargs):
"""
Create an EasyAzure object.
Requires an Azure subscription_id, the path to a management certificate,
and a connection_authenticator callable that returns an authenticated
Azure service management connection object.
kwargs are passed to the connection_authenticator's constructor
"""
self.subscription_id = subscription_id
self.certificate_path = certificate_path
self.connection_authenticator = connection_authenticator
self._conn = None
self._subscription_name = None
self._kwargs = kwargs
def reload(self):
self._conn = None
return self.conn
@property
def conn(self):
if self._conn is None:
log.debug('creating self._conn w/ connection_authenticator ' +
'kwargs = %s' % self._kwargs)
# validate_certs = self._kwargs.get('validate_certs', True)
# if validate_certs:
# # if not HAVE_HTTPS_CONNECTION:
# raise exception.AWSError(
# "Failed to validate AWS SSL certificates. "
# "SSL certificate validation is only supported "
# "on Python>=2.6.\n\nSet AWS_VALIDATE_CERTS=False in "
# "the [aws info] section of your config to skip SSL "
# "certificate verification and suppress this error AT "
# "YOUR OWN RISK.")
# if not boto_config.has_section('Boto'):
# boto_config.add_section('Boto')
# # Hack to get around the fact that boto ignores validate_certs
# # if https_validate_certificates is declared in the boto config
# boto_config.setbool('Boto', 'https_validate_certificates',
# validate_certs)
self._conn = self.connection_authenticator(
self.subscription_id, self.certificate_path,
**self._kwargs)
# self._conn.https_validate_certificates = validate_certs
return self._conn
@property
def subscription_name(self):
if not self._subscription_name:
subscription_name = self.conn.get_subscription().subscription_name.replace(' ', '-')
self._subscription_name = subscription_name
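# NOTE: the name cached above is never returned; the base64-encoded
# subscription id is used instead (it is what _azurify() appends to
# service names, keeping them unique per subscription).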
return base64.b64encode(self.subscription_id)
class EasySMS(EasyAzure):
def __init__(self, subscription_id, certificate_path,
host=None, request_session=None, location='West US', **kwargs):
kwds = dict(request_session=request_session)
super(EasySMS, self).__init__(subscription_id, certificate_path,
azure.servicemanagement.ServiceManagementService, **kwds)
self._conn = kwargs.get('connection')
# kwds = dict(aws_s3_host=aws_s3_host, aws_s3_path=aws_s3_path,
# aws_port=aws_port, aws_is_secure=aws_is_secure,
# aws_proxy=aws_proxy, aws_proxy_port=aws_proxy_port,
# aws_proxy_user=aws_proxy_user,
# aws_proxy_pass=aws_proxy_pass,
# aws_validate_certs=aws_validate_certs)
# self.s3 = EasyS3(aws_access_key_id, aws_secret_access_key, **kwds)
self._regions = None
self._region = self.get_region(location)
self._account_attrs = None
self._account_attrs_region = None
def __repr__(self):
return '<EasySMS: %s (%s)>' % (self.region.name, ' '.join(self.region.available_services))
def _fetch_account_attrs(self):
raise NotImplementedError()
@property
def supported_platforms(self):
raise NotImplementedError()
@property
def default_vpc(self):
raise NotImplementedError()
def connect_to_region(self, region_name):
"""
Connects to a given region if it exists, raises RegionDoesNotExist
otherwise. Once connected, this object will return only data from the
given region.
"""
self._region = self.get_region(region_name)
self._platforms = None
self._default_vpc = None
self.reload()
return self
@property
def region(self):
"""
Returns the current Azure location used by this EasySMS object
"""
return self._region
@property
def regions(self):
"""
This property returns all Azure Locations, caching the results the first
time a request is made to Azure
"""
if not self._regions:
self._regions = {}
regions = self.conn.list_locations()
for region in regions:
self._regions[region.name] = region
return self._regions
def get_region(self, region_name):
"""
Returns Azure Location object if it exists, raises RegionDoesNotExist
otherwise.
"""
if region_name not in self.regions:
raise exception.RegionDoesNotExist(region_name)
return self.regions.get(region_name)
def list_regions(self):
"""
Print name/services for all Azure locations
"""
regions = self.regions.items()
regions.sort(reverse=True)
for name, region in regions:
print 'name: ', name
print 'services: ', ', '.join(region.available_services)
print
@property
def registered_images(self):
raise NotImplementedError()
@property
def executable_images(self):
raise NotImplementedError()
def get_registered_image(self, image_id):
raise NotImplementedError()
def _wait_for_group_deletion_propagation(self, group):
raise NotImplementedError()
def get_subnet(self, subnet_id):
raise NotImplementedError()
def get_subnets(self, filters=None):
raise NotImplementedError()
def get_internet_gateways(self, filters=None):
raise NotImplementedError()
def get_route_tables(self, filters=None):
raise NotImplementedError()
def get_network_spec(self, *args, **kwargs):
raise NotImplementedError()
def get_network_collection(self, *args, **kwargs):
raise NotImplementedError()
def delete_group(self, group, max_retries=60, retry_delay=5):
"""
Delete a security or placement group by deleting its underlying Azure
hosted service. The intent is to retry on transient errors every
retry_delay seconds up to max_retries times, although the retry branches
below are currently commented out, so errors are re-raised immediately.
"""
if isinstance(group, SecurityGroup):
label = 'security'
elif isinstance(group, PlacementGroup):
label = 'placement'
s = utils.get_spinner("Removing %s group: %s" % (label, group.name))
try:
for i in range(max_retries):
try:
self.conn.delete_hosted_service(group.id)
return
except azure.WindowsAzureError as e:
if i == max_retries - 1:
raise
# if e.error_code == 'DependencyViolation':
# log.debug('DependencyViolation error - retrying in 5s',
# exc_info=True)
# time.sleep(retry_delay)
# elif e.error_code == 'InvalidPlacementGroup.InUse':
# log.debug('Placement group in use - retrying in 5s',
# exc_info=True)
# time.sleep(retry_delay)
else:
raise
finally:
s.stop()
def create_group(self, name, description, auth_ssh=False, auth_rdp=False,
auth_group_traffic=False, vpc_id=None):
"""
Create security group with name/description. auth_ssh=True
will open port 22 to world (0.0.0.0/0). auth_group_traffic
will allow all traffic between instances in the same security
group
"""
log.info("Creating security group %s..." % name)
# sg = self.conn.create_security_group(name, description, vpc_id=vpc_id)
# if not self.get_group_or_none(name):
# s = utils.get_spinner("Waiting for security group %s..." % name)
# try:
# while not self.get_group_or_none(name):
# time.sleep(3)
# finally:
# s.stop()
# if auth_ssh:
# ssh_port = static.DEFAULT_SSH_PORT
# sg.authorize(ip_protocol='tcp', from_port=ssh_port,
# to_port=ssh_port, cidr_ip=static.WORLD_CIDRIP)
# if auth_rdp:
# rdp_port = static.DEFAULT_RDP_PORT
# sg.authorize(ip_protocol='tcp', from_port=rdp_port,
# to_port=rdp_port, cidr_ip=static.WORLD_CIDRIP)
# if auth_group_traffic:
# sg.authorize(src_group=sg, ip_protocol='icmp', from_port=-1,
# to_port=-1)
# sg.authorize(src_group=sg, ip_protocol='tcp', from_port=1,
# to_port=65535)
# sg.authorize(src_group=sg, ip_protocol='udp', from_port=1,
# to_port=65535)
# return sg
pg = self.get_or_create_placement_group(name)
if not pg:
raise exception.PlacementGroupDoesNotExist(name)
sg = SecurityGroup(pg, self)
return sg
def get_all_security_groups(self, groupnames=[]):
"""
Returns all security groups
groupnames - optional list of group names to retrieve
"""
filters = {}
if groupnames:
filters = {'group-name': groupnames}
return self.get_security_groups(filters=filters)
def get_group_or_none(self, name):
"""
Returns group with name if it exists otherwise returns None
"""
try:
return self.get_security_group(name)
except exception.SecurityGroupDoesNotExist:
pass
def get_or_create_group(self, name, description, auth_ssh=True,
auth_group_traffic=False, vpc_id=None):
"""
Try to return a security group by name. If the group is not found,
attempt to create it. Description only applies to creation.
auth_ssh - authorize ssh traffic from world
auth_group_traffic - authorizes all traffic between members of the
group
"""
sg = self.get_group_or_none(name)
if not sg:
sg = self.create_group(name, description, auth_ssh=auth_ssh,
auth_group_traffic=auth_group_traffic,
vpc_id=vpc_id)
return sg
def get_security_group(self, groupname):
try:
return self.get_security_groups(
filters={'group-name': groupname})[0]
except azure.WindowsAzureError as e:
# if e.error_code == "InvalidGroup.NotFound":
# raise exception.SecurityGroupDoesNotExist(groupname)
raise
except IndexError:
raise exception.SecurityGroupDoesNotExist(groupname)
def get_security_groups(self, filters=None):
"""
Returns all security groups on this cloud account
"""
#return self.conn.get_all_security_groups(filters=filters)
pgs = self.get_placement_groups(filters)
sgs = [SecurityGroup(pg, self) for pg in pgs]
return sgs
def get_permission_or_none(self, group, ip_protocol, from_port, to_port,
cidr_ip=None):
raise NotImplementedError()
def has_permission(self, group, ip_protocol, from_port, to_port, cidr_ip):
raise NotImplementedError()
def _azurify(self, name):
return '%s-%s' % (name.strip('@').replace('_', '-'), self.subscription_name)
@classmethod
def _unazurify(self, name):
return '@tc-%s' % ('_'.join(name.split('-')[1:-1])) #TODO this will not restore '-' if it was originally present
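# Illustrative round trip (cluster name is hypothetical): _azurify('@tc-my_cluster')
# yields 'tc-my-cluster-<subscription_name>', and _unazurify() maps that back to
# '@tc-my_cluster'; a hyphen that was in the original name would come back as an
# underscore, which is the TODO noted above.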
def create_placement_group(self, name):
"""
Create a new placement group for your account.
This will create the placement group within the region you
are currently connected to.
"""
log.info("Creating placement group %s..." % name)
# success = self.conn.create_placement_group(name)
# if not success:
# log.debug(
# "failed to create placement group '%s' (error = %s)" %
# (name, success))
# raise exception.AWSError(
# "failed to create placement group '%s'" % name)
# pg = self.get_placement_group_or_none(name)
# while not pg:
# log.info("Waiting for placement group %s..." % name)
# time.sleep(3)
# pg = self.get_placement_group_or_none(name)
# return pg
name = self._azurify(name)
available = self.conn.check_hosted_service_name_availability(name).result
if available:
self.conn.create_hosted_service(service_name=name,
label=name,
description='TethysCluster-%s' % static.VERSION.replace('.', '_'),
location=self.region.name)
service = self.conn.get_hosted_service_properties(name)
pg = PlacementGroup(service, self)
return pg
else:
raise azure.WindowsAzureError('Hosted Service already exists')
def get_placement_groups(self, filters=None):
"""
Returns PlacementGroup wrappers for hosted services whose names match the 'group-name' filter patterns
"""
#return self.conn.get_all_placement_groups(filters=filters)
hosted_services = self.list_all_hosted_services()
group_names = filters['group-name']
group_names = group_names if isinstance(group_names, list) else [group_names]
group_names = [self._azurify(name) for name in group_names]
#'''
def match(name, filters):
for filter in filters:
if fnmatch.fnmatch(name, filter):
return True
return False
services = [self.conn.get_hosted_service_properties(service_name) for service_name in hosted_services if
match(service_name, group_names)]
'''
services = []
for group_name in group_names:
srvs = fnmatch.filter(hosted_services, group_name)
services.extend([self.conn.get_hosted_service_properties(service_name) for service_name in srvs])
#'''
pgs = [PlacementGroup(service, self) for service in services]
return pgs
def get_placement_group(self, groupname=None):
try:
return self.get_placement_groups(filters={'group-name':
groupname})[0]
except azure.WindowsAzureError as e:
# if e.error_code == "InvalidPlacementGroup.Unknown":
# raise exception.PlacementGroupDoesNotExist(groupname)
raise
except IndexError:
raise exception.PlacementGroupDoesNotExist(groupname)
def get_placement_group_or_none(self, name):
"""
Returns placement group with name if it exists otherwise returns None
"""
try:
return self.get_placement_group(name)
except exception.PlacementGroupDoesNotExist:
pass
def get_or_create_placement_group(self, name):
"""
Try to return a placement group by name.
If the group is not found, attempt to create it.
"""
try:
return self.get_placement_group(name)
except exception.PlacementGroupDoesNotExist:
pg = self.create_placement_group(name)
return pg
def list_all_hosted_services(self):
services = []
hosted_services = self.conn.list_hosted_services()
for service in hosted_services:
services.append(service.service_name)
return services
def request_instances(self, image_id, price=None, instance_type='Small',
min_count=1, max_count=1, count=1, key_name=None,
security_groups=None, security_group_ids=None,
launch_group=None,
availability_zone_group=None, placement=None,
user_data=None, placement_group=None,
block_device_map=None, subnet_id=None,
network_interfaces=None, **kwargs):
"""
Convenience method for running spot or flat-rate instances
"""
#I just deleted a bunch of code that handled block device maps. I'm not sure how this applies in Azure
# kwargs = dict(min_count=min_count,
# max_count=max_count,
# security_groups=security_groups,
# instance_type=instance_type,
# key_name=key_name,
# subnet_id=subnet_id,
# placement=placement,
# placement_group=placement_group,
# user_data=user_data,
# block_device_map=block_device_map,
# network_interfaces=network_interfaces)
kwargs = dict(aliases=kwargs['aliases'], #TODO can I get the aliases from binary user_data?
placement_group=self._azurify(placement_group),
instance_type=instance_type,
key_name=key_name,
user_data=user_data)
instances = self.run_instances(image_id, **kwargs)
return Reservation(instances)
def request_spot_instances(self, price, image_id, instance_type='m1.small',
count=1, launch_group=None, key_name=None,
availability_zone_group=None,
security_group_ids=None, subnet_id=None,
placement=None, placement_group=None,
user_data=None, block_device_map=None,
network_interfaces=None):
raise NotImplementedError()
def _wait_for_propagation(self, obj_ids, fetch_func, id_filter, obj_name,
max_retries=60, interval=5):
"""
Wait for a list of object ids to appear in the Azure API. Requires a
function that fetches the objects and also takes a filters kwarg. The
id_filter specifies the id filter to use for the objects and
obj_name describes the objects for log messages.
"""
filters = {id_filter: obj_ids}
num_objs = len(obj_ids)
num_reqs = 0
reqs_ids = []
max_retries = max(1, max_retries)
interval = max(1, interval)
widgets = ['', progressbar.Fraction(), ' ',
progressbar.Bar(marker=progressbar.RotatingMarker()), ' ',
progressbar.Percentage(), ' ', ' ']
log.info("Waiting for %s to propagate..." % obj_name)
pbar = progressbar.ProgressBar(widgets=widgets,
maxval=num_objs).start()
try:
for i in range(max_retries + 1):
reqs = fetch_func(filters=filters)
reqs_ids = [req.id for req in reqs]
num_reqs = len(reqs)
pbar.update(num_reqs)
if num_reqs != num_objs:
log.debug("%d: only %d/%d %s have "
"propagated - sleeping..." %
(i, num_reqs, num_objs, obj_name))
if i != max_retries:
time.sleep(interval)
else:
return
finally:
if not pbar.finished:
pbar.finish()
missing = [oid for oid in obj_ids if oid not in reqs_ids]
raise exception.PropagationException(
"Failed to fetch %d/%d %s after %d seconds: %s" %
(num_reqs, num_objs, obj_name, max_retries * interval,
', '.join(missing)))
def wait_for_propagation(self, instances=None, spot_requests=None,
max_retries=60, interval=5):
"""
Wait for newly created instances to register in
the Azure API by repeatedly calling get_all_instances.
Calling this method directly after creating new instances or spot
requests before operating on them helps to avoid eventual consistency
errors about instances not existing.
"""
if instances:
instance_ids = [getattr(i, 'id', i) for i in instances]
self._wait_for_propagation(
instance_ids, self.get_all_instances, 'instance-id',
'instances', max_retries=max_retries, interval=interval)
def run_instances(self, image_id, aliases=None, placement_group=None, instance_type='Small', key_name=None,
security_groups=None, user_data=None, **kwargs):
def add_key_to_service(service_name, key_name):
from tethyscluster import config
SERVICE_CERT_FORMAT = 'pfx'
cfg = config.get_config()
key_location = cfg.get_key(key_name).get('key_location')
cert = sshutils.get_or_generate_signed_certificate_from_key(key_location)
service_cert_file_data = sshutils.get_64base_encoded_certificate(cert)
fingerprint = sshutils.get_certificate_fingerprint(cert)
result = self.conn.add_service_certificate(service_name,
service_cert_file_data, SERVICE_CERT_FORMAT, '')
self.conn.wait_for_operation_status(result.request_id,
timeout=300,
progress_callback=lambda x: sys.stdout.write(''),
success_callback=lambda x: sys.stdout.write(''))
properties = self.conn.get_hosted_service_properties(service_name, True).hosted_service_properties
properties.extended_properties['key_name'] = key_name
self.conn.update_hosted_service(service_name, properties.label, properties.description,
properties.extended_properties)
return fingerprint, key_location
def get_endpoints(rdp_port, ssh_port):
endpoint_config = ConfigurationSet()
endpoint_config.configuration_set_type = 'NetworkConfiguration'
endpoint1 = ConfigurationSetInputEndpoint(name='rdp',
protocol='tcp',
port=rdp_port,
local_port='3389',
load_balanced_endpoint_set_name=None,
enable_direct_server_return=False)
endpoint2 = ConfigurationSetInputEndpoint(name='ssh',
protocol='tcp',
port=ssh_port,
local_port='22',
load_balanced_endpoint_set_name=None,
enable_direct_server_return=False)
#endpoints must be specified as elements in a list
endpoint_config.input_endpoints.input_endpoints.append(endpoint1)
endpoint_config.input_endpoints.input_endpoints.append(endpoint2)
return endpoint_config
user_name='tethysadmin'
password = '@tc-tethysadmin1'
image = self.get_image(image_id)
os = image.os_disk_configuration.os
if os == 'Windows':
hostname = 'computer_name'
system_config = WindowsConfigurationSet(admin_password=password,
reset_password_on_first_logon=False,
enable_automatic_updates=True,
time_zone=None,
admin_username=user_name,
custom_data=user_data)
system_config.domain_join = None #I don't know what this does or why it is needed
system_config.win_rm = None #I don't know what this does or why it is needed
elif os == 'Linux':
hostname = 'host_name'
password = None
system_config = LinuxConfigurationSet(user_name=user_name,
user_password=password,
disable_ssh_password_authentication=False,
custom_data=user_data)
if key_name:
fingerprint, key_location = add_key_to_service(placement_group, key_name)
thumbprint = fingerprint.replace(':', '')
ssh = SSH()
public_key = PublicKey(thumbprint, key_location)
key_pairs = KeyPair(thumbprint, key_location)
ssh.public_keys.public_keys.append(public_key)
ssh.key_pairs.key_pairs.append(key_pairs)
system_config.ssh = ssh
else:
raise Exception('%s is not a supported os' % (os,))
from userdata import unbundle_userdata
user_data = unbundle_userdata(user_data)
aliases = user_data['_tc_aliases.txt'].split('\n')[2:]
for alias in aliases:
# print alias
ssh_port = static.DEFAULT_SSH_PORT
rdp_port = static.DEFAULT_RDP_PORT
alias_parts = re.split('node', alias)
if len(alias_parts) == 2:
index = alias_parts[1]
rdp_port = '33' + index
ssh_port = str(ssh_port) + index
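# Illustrative example (assuming the usual defaults of 22 and 3389): an alias
# of 'node001' yields index '001', so its public endpoints become
# ssh_port '22001' and rdp_port '33001', while 'master' keeps the defaults.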
system_config.__dict__[hostname] = alias
kwargs = dict(service_name=placement_group,
deployment_name=placement_group,
role_name=alias,
system_config=system_config,
os_virtual_hard_disk=None,
network_config=get_endpoints(rdp_port, ssh_port),
role_size=instance_type,
vm_image_name=image_id,
)
try:
deployment = self.conn.get_deployment_by_name(placement_group, placement_group)
except WindowsAzureMissingResourceError as e:
deployment = None
if not deployment:
result = self.conn.create_virtual_machine_deployment(deployment_slot='production',
label=alias, **kwargs)
else:
result = self.conn.add_role(**kwargs)
self.conn.wait_for_operation_status(result.request_id,
timeout=300,
progress_callback=lambda x: sys.stdout.write(''),
success_callback=lambda x: sys.stdout.write(''))
ids = [(placement_group, placement_group, alias) for alias in aliases]
return self.get_all_instances(instance_ids=ids,
filters={'instance.group-name': self._unazurify(placement_group)})
def create_image(self, instance_id, name, description=None,
no_reboot=False):
raise NotImplementedError()
def register_image(self, name, description=None, image_location=None,
architecture=None, kernel_id=None, ramdisk_id=None,
root_device_name=None, block_device_map=None,
virtualization_type=None, sriov_net_support=None,
snapshot_id=None):
raise NotImplementedError()
def delete_keypair(self, name):
raise NotImplementedError()
def import_keypair(self, name, rsa_key_file):
raise NotImplementedError()
def create_keypair(self, name, output_file=None):
raise NotImplementedError()
def get_keypairs(self, filters={}):
certs = self.conn.list_management_certificates().subscription_certificates
for cert in certs:
cert.fingerprint = cert.subscription_certificate_thumbprint
if 'key-name' in filters.keys():
certs = [cert for cert in certs if cert.fingerprint == filters['key-name']]
return certs
def get_keypair(self, keypair):
try:
return self.get_keypairs(filters={'key-name': keypair})[0]
except azure.WindowsAzureError as e:
# if e.error_code == "InvalidKeyPair.NotFound":
# raise exception.KeyPairDoesNotExist(keypair)
raise
except IndexError:
raise exception.KeyPairDoesNotExist(keypair)
def get_keypair_or_none(self, keypair):
try:
return self.get_keypair(keypair)
except exception.KeyPairDoesNotExist:
pass
def __print_header(self, msg):
raise NotImplementedError()
def get_image_name(self, img):
raise NotImplementedError()
def get_instance_user_data(self, instance_id):
try:
from tethyscluster import config
# attrs = self.conn.get_instance_attribute(instance_id, 'userData')
# user_data = attrs.get('userData', '') or ''
instance = self.get_instance(instance_id)
cfg = config.get_config()
key_location = cfg.get_key(instance.key_name).get('key_location')
ssh = sshutils.SSHClient(instance.ip_address,
username='root',
port = instance.ports['ssh'],
private_key=key_location)
user_data_file = ssh.remote_file('/var/lib/waagent/ovf-env.xml', 'r')
text = user_data_file.read()
match = re.search('<CustomData>(.*?)</CustomData>', text)
raw = match.group(1)
user_data = base64.b64decode(raw)
return user_data
except azure.WindowsAzureError as e:
# if e.error_code == "InvalidInstanceID.NotFound":
# raise exception.InstanceDoesNotExist(instance_id)
raise e
except Exception, e:
raise e
def get_securityids_from_names(self, groupnames):
raise NotImplementedError()
def get_all_instances(self, instance_ids=[], filters={}):
if 'instance.group-name' in filters.keys():
hosted_services = [self._azurify(filters['instance.group-name'])]
else:
hosted_services = self.list_all_hosted_services()
instances = []
for name in hosted_services:
try:
service = self.conn.get_hosted_service_properties(name, True)
for deployment in service.deployments.deployments:
insts = deployment.role_instance_list.role_instances
rols = deployment.role_list.roles
assert len(insts) == len(rols)
for i in range(0,len(insts)):
role = rols[i]
if role.role_type == 'PersistentVMRole':
instance = Instance(service, deployment, insts[i], role, self)
instances.append(instance)
except WindowsAzureMissingResourceError as e:
pass
if instance_ids:
instances = [instance for instance in instances if instance.id in instance_ids]
if filters:
# filters = {'instance-state-name': states,
# 'instance.group-name': self._security_group}
if 'instance-state-name' in filters.keys():
states = filters['instance-state-name']
states = states if isinstance(states, list) else [states]
instances = [instance for instance in instances if instance.state in states]
if 'instance-id' in filters.keys():
instance_ids = filters['instance-id']
instance_ids = instance_ids if isinstance(instance_ids, list) else [instance_ids]
instances = [instance for instance in instances if instance.id in instance_ids]
return instances
def get_instance(self, instance_id):
try:
return self.get_all_instances(
filters={'instance-id': instance_id})[0]
except azure.WindowsAzureError as e:
# if e.error_code == "InvalidInstanceID.NotFound":
# raise exception.InstanceDoesNotExist(instance_id)
raise
except IndexError:
raise exception.InstanceDoesNotExist(instance_id)
def is_valid_conn(self):
try:
self.get_all_instances()
return True
except azure.WindowsAzureError as e:
cred_errs = [] #add error codes for Azure authorization errors here
# if e.error_code in cred_errs:
# return False
raise
def get_all_spot_requests(self, spot_ids=[], filters=None):
return []
def list_all_spot_instances(self, show_closed=False):
log.info("No spot instance requests found...")
return
def show_instance(self, instance):
raise NotImplementedError()
def list_all_instances(self, show_terminated=False):
raise NotImplementedError()
def list_images(self, images, sort_key=None, reverse=False):
raise NotImplementedError()
def list_registered_images(self):
raise NotImplementedError()
def list_executable_images(self):
raise NotImplementedError()
def __list_images(self, msg, imgs):
raise NotImplementedError()
def remove_image_files(self, image_name, pretend=True):
raise NotImplementedError()
@print_timing("Removing image")
def remove_image(self, image_name, pretend=True, keep_image_data=True):
raise NotImplementedError()
def list_starcluster_public_images(self):
raise NotImplementedError()
def create_volume(self, size, zone, snapshot_id=None):
raise NotImplementedError()
def remove_volume(self, volume_id):
raise NotImplementedError()
def list_keypairs(self):
raise NotImplementedError()
def list_zones(self, region=None):
raise NotImplementedError()
def get_zones(self, filters=None):
raise NotImplementedError()
def get_zone(self, zone):
return None
def get_zone_or_none(self, zone):
return None
def create_s3_image(self, instance_id, key_location, aws_user_id,
ec2_cert, ec2_private_key, bucket, image_name="image",
description=None, kernel_id=None, ramdisk_id=None,
remove_image_files=False, **kwargs):
raise NotImplementedError()
def create_ebs_image(self, instance_id, key_location, name,
description=None, snapshot_description=None,
kernel_id=None, ramdisk_id=None, root_vol_size=15,
**kwargs):
raise NotImplementedError()
def get_images(self, filters=None):
# start = time.time()
# image = self.conn.list_vm_images(filters={'name':image_id})[0]
# print time.time()-start
# start = time.time()
image_id = filters['image-id']
all_images = self.conn.list_vm_images()
images = []
for image in all_images:
if image.name == image_id:
image.id = image.name
image.state = 'available' #required for cluster validation. Are Azure images ever not available?
image.architecture = 'x86_64'
image.virtualization_type = None
image.root_device_type = None
images.append(image)
# print time.time()-start
return images
def get_image(self, image_id):
"""
Return an image object representing an Azure VM image.
Raises exception.AMIDoesNotExist if unsuccessful
"""
try:
return self.get_images(filters={'image-id': image_id})[0]
except azure.WindowsAzureError as e:
# if e.error_code == "InvalidAMIID.NotFound":
# raise exception.AMIDoesNotExist(image_id)
raise
except IndexError:
raise exception.AMIDoesNotExist(image_id)
def get_image_or_none(self, image_id):
"""
Return an image object representing an Azure VM image.
Returns None if unsuccessful
"""
try:
return self.get_image(image_id)
except exception.AMIDoesNotExist:
pass
def get_image_files(self, image):
raise NotImplementedError()
def get_image_bucket(self, image):
raise NotImplementedError()
def get_image_manifest(self, image):
raise NotImplementedError()
@print_timing("Migrating image")
def migrate_image(self, image_id, destbucket, migrate_manifest=False,
kernel_id=None, ramdisk_id=None, region=None, cert=None,
private_key=None):
raise NotImplementedError()
def copy_image(self, source_region, source_image_id, name=None,
description=None, client_token=None, wait_for_copy=False):
raise NotImplementedError()
def wait_for_ami(self, ami):
raise NotImplementedError()
def copy_image_to_all_regions(self, source_region, source_image_id,
name=None, description=None,
client_token=None, add_region_to_desc=False,
wait_for_copies=False):
raise NotImplementedError()
def create_block_device_map(self, root_snapshot_id=None,
root_device_name='/dev/sda1',
add_ephemeral_drives=False,
num_ephemeral_drives=24, instance_store=False):
raise NotImplementedError()
@print_timing("Downloading image")
def download_image_files(self, image_id, destdir):
raise NotImplementedError()
def list_image_files(self, image_id):
raise NotImplementedError()
@property
def instances(self):
raise NotImplementedError()
@property
def keypairs(self):
raise NotImplementedError()
def terminate_instances(self, instances=None):
raise NotImplementedError()
def get_volumes(self, filters=None):
raise NotImplementedError()
def get_volume(self, volume_id):
raise NotImplementedError()
def get_volume_or_none(self, volume_id):
raise NotImplementedError()
def wait_for_volume(self, volume, status=None, state=None,
refresh_interval=5, log_func=log.info):
raise NotImplementedError()
def wait_for_snapshot(self, snapshot, refresh_interval=30):
raise NotImplementedError()
def create_snapshot(self, vol, description=None, wait_for_snapshot=False,
refresh_interval=30):
raise NotImplementedError()
def get_snapshots(self, volume_ids=[], filters=None, owner='self'):
raise NotImplementedError()
def get_snapshot(self, snapshot_id, owner='self'):
raise NotImplementedError()
def list_volumes(self, volume_id=None, status=None, attach_status=None,
size=None, zone=None, snapshot_id=None,
show_deleted=False, tags=None, name=None):
raise NotImplementedError()
def get_spot_history(self, instance_type, start=None, end=None, zone=None,
plot=False, plot_server_interface="localhost",
plot_launch_browser=True, plot_web_browser=None,
plot_shutdown_server=True, classic=False, vpc=False):
raise NotImplementedError()
def show_console_output(self, instance_id):
raise NotImplementedError()
class EasyAzureStorage(EasyAzure):
DefaultHost = ''
def __init__(self, aws_access_key_id, certificate_path,
aws_s3_path='/', aws_port=None, aws_is_secure=True,
aws_s3_host=DefaultHost, aws_proxy=None, aws_proxy_port=None,
aws_proxy_user=None, aws_proxy_pass=None,
aws_validate_certs=True, **kwargs):
raise NotImplementedError()
def __repr__(self):
raise NotImplementedError()
def create_bucket(self, bucket_name):
raise NotImplementedError()
def bucket_exists(self, bucket_name):
raise NotImplementedError()
def get_or_create_bucket(self, bucket_name):
raise NotImplementedError()
def get_bucket_or_none(self, bucket_name):
raise NotImplementedError()
def get_bucket(self, bucketname):
raise NotImplementedError()
def list_bucket(self, bucketname):
raise NotImplementedError()
def get_buckets(self):
raise NotImplementedError()
def get_bucket_files(self, bucketname):
raise NotImplementedError()
class PlacementGroup(object):
def __init__(self, service, easy_sms):
self.name = easy_sms._unazurify(service.service_name)
self.id = service.service_name
self._properties = easy_sms.conn.get_hosted_service_properties(self.id, True).hosted_service_properties
class SecurityGroup(object):
def __init__(self, pg, easy_sms):
self.name = pg.name
self.id = pg.id
self.connection = easy_sms
self.vpc_id = None
self._properties = pg._properties
self._service_tags = self._properties.extended_properties
self.tags = self._load_tags()
def instances(self):
return self.connection.get_all_instances(filters={'instance.group-name': self.name})
def add_tag(self, key, value):
self.tags[key] = value
self._update_service_tags(key, value)
def _update_service_tags(self, key, value):
k = key.replace('@tc-', 'tc_')
self._service_tags[k] = value
self.connection.conn.update_hosted_service(self.id, self._properties.label, self._properties.description,
self._service_tags)
def _load_tags(self):
tags = dict()
for key,value in self._service_tags.iteritems():
k = key.replace('tc_', '@tc-')
tags[k] = value
return tags
class Reservation(object):
def __init__(self, instances):
self.instances = instances
def __str__(self):
return self.instances.__str__()
class Instance(object):
POWER_STATES = {'Starting': 'pending', 'Started': 'running', 'Stopping': 'stopping', 'Stopped': 'stopped',
'Unknown': 'terminated'}
def __init__(self, service, deployment, role_instance, role, easy_sms):
self.role_instance = role_instance
self.role = role
self.service_properties = easy_sms.conn.get_hosted_service_properties(service.service_name,
True).hosted_service_properties
self.id = (service.service_name, deployment.name, role.role_name)
self.public_dns_name = deployment.url
self.private_dns_name = deployment.url
self.state = self.POWER_STATES[role_instance.power_state]
self.state_code = None
self.previous_state = None
self.previous_state_code = None
self.key_name = None #TODO look at CertificateStore on Azure api
self.instance_type = role.role_size
self.launch_time = deployment.created_time
self.image_id = role.os_virtual_hard_disk.source_image_name
self.placement = None
self.placement_group = service.service_name
self.placement_tenancy = None
self.kernel = None
self.ramdisk = None
self.architecture = None
self.hypervisor = None
self.virtualization_type = None
self.product_codes = None
self.ami_launch_index = None
self.monitored = None
self.monitoring_state = None
self.spot_instance_request_id = None
self.subnet_id = None
self.vpc_id = None
self.private_ip_address = role_instance.ip_address
self.ip_address = None
self.platform = role.os_virtual_hard_disk.os.lower()
self.root_device_name = None
self.root_device_type = None
self.block_device_mapping = None
self.state_reason = role_instance.instance_state_details
self.groups = None
self.interfaces = None
self.ebs_optimized = None
self.instance_profile = None
self.ports = dict()
if role_instance.instance_endpoints:
for endpoint in role_instance.instance_endpoints:
self.ports[endpoint.name] = int(endpoint.public_port)
self.ip_address = role_instance.instance_endpoints[0].vip
self.connection = easy_sms
self.dns_name = self.ip_address #for some reason ssh not working with: deployment.url
self.tags = dict()
self.add_tag('alias', role.role_name)
self.add_tag('Name', role.role_name)
if 'key_name' in self.service_properties.extended_properties.keys():
self.key_name = self.service_properties.extended_properties['key_name']
def __repr__(self):
return '<Azure Instance: %s>' % (self.id,)
def add_tag(self, k, v):
self.tags[k]=v
def terminate(self):
try:
self._terminate_role(max_tries=5, timeout=20)
except azure.WindowsAzureError, e:
try:
self.connection.conn.delete_deployment(self.id[0], self.id[1])
except WindowsAzureMissingResourceError, e:
pass
def _terminate_role(self, max_tries=1, timeout=30):
try:
self.connection.conn.delete_role(*self.id)
except WindowsAzureConflictError, e:
max_tries -= 1
if max_tries < 1:
raise
log.info('Waiting for instance to be available...')
time.sleep(timeout)
self._terminate_role(max_tries, timeout)
if __name__ == "__main__":
# from tethyscluster.config import get_easy_ec2
# ec2 = get_easy_ec2()
# ec2.list_all_instances()
# ec2.list_registered_images()
import unittest
class TestAzureUtils(unittest.TestCase):
from tethyscluster.config import TethysClusterConfig
from tethyscluster.logger import configure_tc_logging
configure_tc_logging(use_console=True)
cfg = TethysClusterConfig().load()
easySMS = cfg.get_easy_sms()
def test_regions(self):
# print self.easySMS.list_regions()
# print self.easySMS.region.name
region = 'East US 2'
self.easySMS.connect_to_region(region)
expected = region
actual = self.easySMS.region.name
msg = 'checking region gets set properly'
self.assertEqual(expected, actual, '%s\nExpected: %s\nActual: %s\n' % (msg, expected, actual))
def test_invalid_region(self):
method = self.easySMS.connect_to_region
args = ('Invalid Region')
self.assertRaises(exception.RegionDoesNotExist, method, args)
#'''
def run_instance(self):
'''
def test_run_instances(self):
#'''
# subscription = self.easySMS.conn.list_subscriptions()[0]
service_name = 'tc-test-cluster-%s' % (self.easySMS.subscription_id,)
service_desc = 'TethysCluster-%s' % static.VERSION.replace('.', '_')
available = self.easySMS.conn.check_hosted_service_name_availability(service_name).result
if available:
service = self.easySMS.conn.create_hosted_service(service_name=service_name,
label=service_name,
description=service_desc,
location=self.easySMS.region.name)
else:
print 'hosted service already exists'
service = self.easySMS.conn.get_hosted_service_properties(service_name, True)
master_alias = 'master'
image_id = 'tc-linux12-2'
# id = self.easySMS.run_instances(image_id, master_alias, service_name).request_id
# print id
# self.easySMS.conn.wait_for_operation_status(id)
aliases = [master_alias]
for node in range(1,2):
alias = 'node00%s' % (node,)
aliases.append(alias)
# id = self.easySMS.run_instances(image_id, alias, service_name).request_id
# self.easySMS.conn.wait_for_operation_status(id)
self.easySMS.run_instances(image_id, aliases, service_name)
pprint(service.hosted_service_properties.__dict__)
print service.deployments.deployments
def test_vm_with_ssh(self):
image_id = 'tc-linux12-2'
pg = self.easySMS.get_or_create_placement_group('ssh_key-test')
self.easySMS.run_instances(image_id, ['master'], pg.id, key_name='tethyscert')
def test_get_all_instances(self):
instances = self.easySMS.get_all_instances()
print [instance.role_name for instance in instances]
suite = unittest.TestLoader().loadTestsFromTestCase(TestAzureUtils)
unittest.TextTestRunner(verbosity=2).run(suite)
# props = sms.get_hosted_service_properties(service.service_name, True)
# if len(props.deployments) > 0 and len(props.deployments[0].role_list) > 0:
# if props.deployments[0].role_list[0].role_type == 'PersistentVMRole':
# print(props.deployments[0].role_list[0].role_name)
# sms = ServiceManagementService(subscription_id, certificate_path)
# pprint(sms.__dict__)
# '''
# {'_filter': <bound method _HTTPClient.perform_request of <azure.http.httpclient._HTTPClient object at 0x1034486d0>>,
# '_httpclient': <azure.http.httpclient._HTTPClient object at 0x1034486d0>,
# 'cert_file': '/Users/sdc50/.tethyscluster/Azpas300EF16037.pem',
# 'content_type': 'application/atom+xml;type=entry;charset=utf-8',
# 'host': 'management.core.windows.net',
# 'request_session': None,
# 'requestid': None,
# 'subscription_id': '4477d6f7-b8e4-4bcd-a7ff-c34d1d37238c',
# 'x_ms_version': '2014-06-01'}
# '''
# services = sms.list_hosted_services()
# for k,v in services[0].hosted_service_properties.__dict__.iteritems():
# print '%s:%s' % (k, v)
# """
# status:Created
# description:
# label:BYU-RAPID
# location:North Europe
# affinity_group:
# date_created:2014-10-24T20:28:38Z
# extended_properties:{u'ResourceGroup': u'BYU-RAPID', u'ResourceLocation': u'North Europe'}
# date_last_modified:2014-10-24T20:29:03Z
# """
# services = [sms.get_hosted_service_properties('ciwater-condorm', True), sms.get_hosted_service_properties(name, True)]
#
# for service in services:
# name = service.service_name
# print('Service name: ' + name)
# print('Management URL: ' + service.url)
# print 'Deployments: ', [[role.__dict__ for role in deployment.role_instance_list.role_instances] for
# deployment in service.deployments.deployments]
# print('Location: ' + service.hosted_service_properties.location)
# print('Properties: ' + str(service.hosted_service_properties.__dict__))
# print('')
# images = sms.list_vm_images()
# for k,v in images[0].__dict__.iteritems():
# print '%s:%s' % (k, v)
# '''
# pricing_detail_link:None
# eula:None
# deployment_name:ciwater-condorm
# service_name:ciwater-condorm
# is_premium:False
# created_time:2015-01-07T22:45:19.3314472Z
# publisher_name:None
# category:User
# os_disk_configuration:<azure.servicemanagement.OSDiskConfiguration object at 0x103d2d5d0>
# icon_uri:None
# label:condor-image
# show_in_gui:False
# location:West US
# recommended_vm_size:None
# description:None
# data_disk_configurations:<azure.servicemanagement.DataDiskConfigurations object at 0x103d2dd10>
# image_family:None
# modified_time:2015-01-07T22:45:19.3314472Z
# role_name:ciwater-condorm
# affinity_group:None
# privacy_uri:None
# name:condor-image
# language:None
# small_icon_uri:None
# published_date:None
# Image name: condor-image
# Location: West US
# '''
# for image in images:
# print('Image name: ' + image.name)
# print('OS: ' + image.os_disk_configuration.os)
# print('Location: ' + image.location)
# print('')
#'''
# # Name of an os image as returned by list_os_images
# image_name = 'b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_10-amd64-server-20150202-en-us-30GB'
# media_link = 'https://ciwater.blob.core.windows.net/vhds/tethys1.tethys1.tethys1.status'
# os_hd = OSVirtualHardDisk(image_name, media_link)
#from OS Image
# sms.create_virtual_machine_deployment(service_name=name,
# deployment_name=name,
# deployment_slot='production',
# label=name,
# role_name=name,
# system_config=linux_config,
# os_virtual_hard_disk=os_hd,
# role_size='Small')
#'''
|
tethysplatform/TethysCluster
|
tethyscluster/azureutils.py
|
Python
|
lgpl-3.0
| 56,078
|
import logging
from django.core.management.base import BaseCommand
from django.apps import apps
from django.db import connection
from django_pgviews.view import clear_view, View, MaterializedView
log = logging.getLogger('django_pgviews.sync_pgviews')
class Command(BaseCommand):
help = """Clear Postgres views. Use this before running a migration"""
def handle(self, **options):
"""
"""
for view_cls in apps.get_models():
if not (isinstance(view_cls, type) and
issubclass(view_cls, View) and
hasattr(view_cls, 'sql')):
continue
python_name = '{}.{}'.format(view_cls._meta.app_label, view_cls.__name__)
status = clear_view(
connection, view_cls._meta.db_table,
materialized=isinstance(view_cls(), MaterializedView))
if status == 'DROPPED':
msg = 'dropped'
else:
msg = 'not dropped'
log.info("%(python_name)s (%(view_name)s): %(msg)s" % {
'python_name': python_name,
'view_name': view_cls._meta.db_table,
'msg': msg})
|
kennydude/django-pgviews
|
django_pgviews/management/commands/clear_pgviews.py
|
Python
|
unlicense
| 1,195
|
import numpy as np
import pylab
# elephant parameters
p1, p2, p3, p4 = (50 - 30j, 18 + 8j, 12 - 10j, -14 - 60j )
p5 = 40 + 20j # eyepiece
def fourier(t, C):
f = np.zeros(t.shape)
A, B = C.real, C.imag
for k in range(len(C)):
f = f + A[k]*np.cos(k*t) + B[k]*np.sin(k*t)
return f
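# fourier() evaluates a truncated Fourier series with complex-packed
# coefficients C[k] = A[k] + i*B[k]:
#   f(t) = sum_k ( A[k]*cos(k*t) + B[k]*sin(k*t) )
# so the four complex parameters above pack the handful of coefficients
# needed to trace the elephant outline.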
def elephant(t, p1, p2, p3, p4, p5):
npar = 6
Cx = np.zeros((npar,), dtype='complex')
Cy = np.zeros((npar,), dtype='complex')
Cx[1] = p1.real*1j
Cx[2] = p2.real*1j
Cx[3] = p3.real
Cx[5] = p4.real
Cy[1] = p4.imag + p1.imag*1j
Cy[2] = p2.imag*1j
Cy[3] = p3.imag*1j
x = np.append(fourier(t,Cx), [-p5.imag])
y = np.append(fourier(t,Cy), [p5.imag])
return x,y
x, y = elephant(np.linspace(0,2*np.pi,1000), p1, p2, p3, p4, p5)
pylab.plot(y,-x,'.')
pylab.show()
|
tejaskhot/How-to-fit-an-elephant
|
elephant.py
|
Python
|
unlicense
| 824
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import layers
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def GetShrunkInceptionShapes(shrink=10):
"""Iterator for smaller versions of convolution shapes in 2015 Inception.
Relative to inception, each depth value is `depth // shrink`.
Args:
shrink: Factor to shrink each depth value by relative to Inception.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the convolution
parameters of Inception layers.
"""
input_sizes = [[4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384],
[4, 8, 8, 2048], [4, 8, 8, 448], [4, 8, 8, 2048],
[4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 1760],
[4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1248],
[4, 17, 17, 128], [4, 17, 17, 1248], [4, 17, 17, 224],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1216],
[4, 17, 17, 1216], [4, 17, 17, 224], [4, 17, 17, 192],
[4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152],
[4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 1152],
[4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024],
[4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128],
[4, 17, 17, 768], [4, 17, 17, 128], [4, 17, 17, 128],
[4, 17, 17, 768], [4, 17, 17, 768], [4, 35, 35, 96],
[4, 35, 35, 288], [4, 35, 35, 64], [4, 35, 35, 288],
[4, 35, 35, 256], [4, 35, 35, 48], [4, 35, 35, 256],
[4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192],
[4, 35, 35, 192], [4, 73, 73, 64], [4, 73, 73, 64],
[4, 147, 147, 24]]
filter_sizes = [[1, 1, 1248, 128], [1, 3, 384, 384], [3, 1, 384, 384],
[1, 1, 2048, 192], [3, 3, 448, 384], [1, 1, 2048, 320],
[1, 1, 2048, 448], [1, 1, 2048, 384], [1, 1, 1760, 384],
[1, 1, 1760, 192], [1, 1, 1760, 448], [1, 1, 1760, 320],
[3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192],
[3, 3, 128, 320], [1, 1, 1248, 128], [1, 3, 224, 224],
[3, 1, 192, 256], [1, 3, 192, 256], [1, 1, 1216, 192],
[1, 1, 1216, 96], [3, 1, 224, 224], [3, 3, 192, 224],
[1, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128],
[3, 1, 192, 192], [3, 3, 160, 192], [1, 1, 1152, 160],
[1, 1, 1024, 128], [1, 3, 128, 192], [1, 1, 1024, 160],
[3, 1, 128, 192], [1, 1, 1024, 256], [3, 1, 128, 128],
[1, 1, 768, 192], [1, 3, 128, 128], [3, 3, 128, 128],
[1, 1, 768, 128], [1, 1, 768, 320], [3, 3, 96, 96],
[3, 3, 288, 384], [3, 3, 64, 96], [1, 1, 288, 64],
[1, 1, 256, 64], [5, 5, 48, 64], [1, 1, 256, 48],
[3, 3, 96, 96], [1, 1, 192, 32], [1, 1, 192, 64],
[1, 1, 192, 48], [3, 3, 64, 192], [1, 1, 64, 64],
[1, 1, 24, 64]]
out_sizes = [[4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384],
[4, 8, 8, 192], [4, 8, 8, 384], [4, 8, 8, 320],
[4, 8, 8, 448], [4, 8, 8, 384], [4, 8, 8, 384],
[4, 8, 8, 192], [4, 8, 8, 448], [4, 8, 8, 320],
[4, 8, 8, 192], [4, 17, 17, 192], [4, 17, 17, 192],
[4, 8, 8, 320], [4, 17, 17, 128], [4, 17, 17, 224],
[4, 17, 17, 256], [4, 17, 17, 256], [4, 17, 17, 192],
[4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 128],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 160],
[4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 160],
[4, 17, 17, 192], [4, 17, 17, 256], [4, 17, 17, 128],
[4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128],
[4, 17, 17, 128], [4, 17, 17, 320], [4, 17, 17, 96],
[4, 17, 17, 384], [4, 35, 35, 96], [4, 35, 35, 64],
[4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48],
[4, 35, 35, 96], [4, 35, 35, 32], [4, 35, 35, 64],
[4, 35, 35, 48], [4, 71, 71, 192], [4, 73, 73, 64],
[4, 147, 147, 64]]
strides = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1
]
# Shrink sizes to make the test faster
for i in input_sizes:
i[3] //= shrink
for f in filter_sizes:
f[2] //= shrink
f[3] //= shrink
for o in out_sizes:
o[3] //= shrink
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
paddings = [
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
VALID, SAME, SAME, VALID, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, VALID, VALID, VALID
]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
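# Illustrative first item with the default shrink=10: the leading entry above
# becomes ([4, 5, 5, 124], [1, 1, 124, 12], [4, 5, 5, 12], 1, 'SAME'), since
# every depth value is floor-divided by the shrink factor.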
def GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NHWC", False), ("NHWC", True)]
if test.is_gpu_available(cuda_only=True):
# "NCHW" format is only supported on CUDA.
test_configs += [("NCHW", True)]
return test_configs
class Conv2DTest(test.TestCase):
def _DtypesToTest(self, use_gpu):
if use_gpu and not test_util.CudaSupportsHalfMatMulAndConv():
return [dtypes.float32, dtypes.float64]
else:
# It is important that float32 comes before float16 here,
# as we will be using its gradients as reference for fp16 gradients.
return [dtypes.float32, dtypes.float16, dtypes.float64]
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, dilations,
strides, padding, data_format, dtype, use_gpu):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
dilations: Dilated rate: [col_dilation, row_dilation]
strides: Stride: [col_stride, row_stride]
padding: Padding type.
data_format: Format of the data tensors.
dtype: Data type for inputs and outputs.
use_gpu: True if the operations should be run on GPU
Returns:
Symbolic tensor value that can be used to execute the computation
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with test_util.device(use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
dilations = test_util.NHWCToNCHW(dilations)
conv = nn_ops.conv2d(
t1,
t2,
dilations=dilations,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
return conv
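# Note added for clarity (not part of the original test): the NHWC->NCHW helper
# permutes length-4 attribute lists with [0, 3, 1, 2], so a padded stride list
# such as [1, 2, 2, 1] (batch, rows, cols, depth) becomes [1, 1, 2, 2]
# (batch, depth, rows, cols) before being passed to conv2d with
# data_format="NCHW".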
def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides,
padding):
"""Verifies that CPU and GPU produce the same values.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
conv_strides: [row_stride, col_stride] for the convolution;
padding: Padding type.
"""
x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
def _SetupVal(data_format, use_gpu):
with test_util.device(use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv2d(
t1, t2, strides=strides, padding=padding, data_format=data_format)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
return conv
tensors = []
for (data_format, use_gpu) in GetTestConfigs():
tensors.append(_SetupVal(data_format, use_gpu))
values = self.evaluate(tensors)
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-5, atol=1e-5)
def _ComputeReferenceDilatedConv(self, tensor_in_sizes, filter_in_sizes,
stride, dilation, padding, data_format,
use_gpu):
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with test_util.device(use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
if isinstance(stride, collections.Iterable):
strides = list(stride)
else:
strides = [stride, stride]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
full_strides = [1, 1] + strides
full_dilation = [1, 1] + dilation
else:
full_strides = [1] + strides + [1]
full_dilation = [1] + dilation + [1]
expected = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilation,
data_format=data_format)
computed = nn_ops.conv2d(
t1,
t2,
strides=full_strides,
dilations=full_dilation,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
expected = test_util.NCHWToNHWC(expected)
computed = test_util.NCHWToNHWC(computed)
return expected, computed
def _VerifyDilatedConvValues(self, tensor_in_sizes, filter_in_sizes, strides,
padding, dilations):
expected_results = []
computed_results = []
for data_format, use_gpu in GetTestConfigs():
expected, computed = self._ComputeReferenceDilatedConv(
tensor_in_sizes, filter_in_sizes, strides, dilations, padding,
data_format, use_gpu)
expected_results.append(expected)
computed_results.append(computed)
tolerance = 1e-2 if use_gpu else 1e-5
expected_values = self.evaluate(expected_results)
computed_values = self.evaluate(computed_results)
for e_value, c_value in zip(expected_values, computed_values):
tf_logging.info("expected = ", e_value)
tf_logging.info("actual = ", c_value)
self.assertAllClose(
e_value.flatten(), c_value.flatten(), atol=tolerance, rtol=1e-4)
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, strides, padding,
expected):
tensors = []
dilations = [1, 1]
for (data_format, use_gpu) in GetTestConfigs():
for dtype in self._DtypesToTest(use_gpu):
result = self._SetupValuesForDevice(
tensor_in_sizes,
filter_in_sizes,
dilations,
strides,
padding,
data_format,
dtype,
use_gpu=use_gpu)
tensors.append(result)
values = self.evaluate(tensors)
for i in range(len(tensors)):
conv = tensors[i]
value = values[i]
tf_logging.info("expected = ", expected)
tf_logging.info("actual = ", value)
tol = 1e-5
if value.dtype == np.float16:
tol = 1e-3
self.assertAllClose(expected, np.ravel(value), atol=tol, rtol=tol)
self.assertShapeEqual(value, conv)
@test_util.run_in_graph_and_eager_modes
def testConv2D1x1Filter(self):
expected_output = [
30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Filter2x1Dilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID")
@test_util.run_in_graph_and_eager_modes
def testConv2DEmpty(self):
expected_output = []
self._VerifyValues(
tensor_in_sizes=[0, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[0, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID")
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Filter(self):
# The outputs are computed using third_party/py/IPython/notebook.
expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
dilations=[1, 2],
padding="VALID")
@test_util.run_in_graph_and_eager_modes
def testConv2D1x2Filter(self):
# The outputs are computed using third_party/py/IPython/notebook.
expected_output = [
231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,
936.0, 1029.0
]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 2, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D1x2FilterDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 2, 3, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID")
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride2(self):
expected_output = [2271.0, 2367.0, 2463.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[2, 2],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride2Same(self):
expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[2, 2],
padding="SAME",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride1x2(self):
expected_output = [58.0, 78.0, 98.0, 118.0, 138.0, 158.0]
self._VerifyValues(
tensor_in_sizes=[1, 3, 6, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[1, 2],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSmallerThanStrideValid(self):
expected_output = [65, 95, 275, 305]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[3, 3],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSmallerThanStrideSame(self):
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1],
strides=[2, 2],
padding="SAME",
expected=[1, 3, 7, 9])
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[1, 1, 1, 1],
strides=[2, 2],
padding="SAME",
expected=[1, 3, 9, 11])
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[3, 3],
padding="SAME",
expected=[44, 28, 41, 16])
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSize(self):
self._VerifyValues(
tensor_in_sizes=[1, 2, 2, 1],
filter_in_sizes=[2, 2, 1, 2],
strides=[1, 1],
padding="VALID",
expected=[50, 60])
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 3, 3, 1],
filter_in_sizes=[2, 2, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID")
# TODO(yzhwang): this currently fails.
# self._VerifyValues(tensor_in_sizes=[1, 8, 8, 1],
# filter_in_sizes=[2, 2, 1, 1],
# strides=[4, 4], padding="SAME",
# expected=[72, 112, 392, 432])
# Testing for backprops
def _RunAndVerifyBackpropInput(self, input_sizes, filter_sizes, output_sizes,
strides, padding, expected, data_format,
use_gpu, err):
total_output_size = 1
total_filter_size = 1
for s in output_sizes:
total_output_size *= s
for s in filter_sizes:
total_filter_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_filter_size + 1)]
x2 = [f * 1.0 for f in range(1, total_output_size + 1)]
with test_util.device(use_gpu):
if data_format == "NCHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + strides + [1]
if data_format == "NCHW":
t2 = test_util.NHWCToNCHW(t2)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv2d_backprop_input(
t0, t1, t2, strides=strides, padding=padding, data_format=data_format)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
# "values" consists of two tensors for two backprops
value = self.evaluate(conv)
self.assertShapeEqual(value, conv)
tf_logging.info("expected = ", expected)
tf_logging.info("actual = ", value)
self.assertArrayNear(expected, value.flatten(), err)
def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
conv_strides, padding):
x1 = np.random.rand(*filter_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(data_format, use_gpu):
with test_util.device(use_gpu):
if data_format == "NCHW":
new_input_sizes = test_util.NHWCToNCHW(input_sizes)
else:
new_input_sizes = input_sizes
t0 = constant_op.constant(new_input_sizes, shape=[len(new_input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t2 = test_util.NHWCToNCHW(t2)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv2d_backprop_input(
t0,
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
ret = self.evaluate(conv)
self.assertShapeEqual(ret, conv)
return ret
values = []
for (data_format, use_gpu) in GetTestConfigs():
values.append(_GetVal(data_format, use_gpu))
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-2, atol=1e-2)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth1ValidBackpropInput(self):
expected_output = [1.0, 4.0, 4.0, 3.0, 10.0, 8.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyBackpropInput(self):
expected_output = []
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropInput(self):
expected_output = [
14.0, 32.0, 50.0, 100.0, 163.0, 226.0, 167.0, 212.0, 257.0, 122.0,
140.0, 158.0, 478.0, 541.0, 604.0, 437.0, 482.0, 527.0
]
for (data_format, use_gpu) in GetTestConfigs():
# The GPU version of this test is not very stable. So adjusting the
# error threshold to 1e-4.
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropInputStride1x2(self):
expected_output = [
1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 7.0, 12.0, 11.0, 18.0, 15.0, 24.0, 12.0,
16.0, 15.0, 20.0, 18.0, 24.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 2, 3, 1],
strides=[1, 2],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DStrideTwoFilterOneSameBackpropInput(self):
expected_output = [
1.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.0, 0.0,
0.0, 0.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 4, 4, 1],
filter_sizes=[1, 1, 1, 1],
output_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="SAME",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeBackpropInput(self):
expected_output = [5.0, 11.0, 17.0, 23.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 2, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
# Testing for backprops
def _RunAndVerifyBackpropFilter(self, input_sizes, filter_sizes, output_sizes,
strides, padding, expected, data_format,
use_gpu):
total_input_size = 1
total_output_size = 1
for s in input_sizes:
total_input_size *= s
for s in output_sizes:
total_output_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x0 = [f * 1.0 for f in range(1, total_input_size + 1)]
x2 = [f * 1.0 for f in range(1, total_output_size + 1)]
for dtype in self._DtypesToTest(use_gpu=use_gpu):
with test_util.device(use_gpu):
t0 = constant_op.constant(x0, shape=input_sizes, dtype=dtype)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = constant_op.constant(x2, shape=output_sizes, dtype=dtype)
explicit_strides = [1] + strides + [1]
if data_format == "NCHW":
t0 = test_util.NHWCToNCHW(t0)
t2 = test_util.NHWCToNCHW(t2)
explicit_strides = test_util.NHWCToNCHW(explicit_strides)
conv = nn_ops.conv2d_backprop_filter(
t0,
t1,
t2,
strides=explicit_strides,
padding=padding,
data_format=data_format)
value = self.evaluate(conv)
self.assertShapeEqual(value, conv)
tf_logging.info("expected = ", expected)
tf_logging.info("actual = ", value)
self.assertArrayNear(expected, value.flatten(), 1e-5)
def _CompareBackFilter(self, input_sizes, filter_sizes, output_sizes,
conv_strides, padding):
x0 = np.random.rand(*input_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(data_format, use_gpu):
with test_util.device(use_gpu):
t0 = constant_op.constant(x0, shape=input_sizes)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t0 = test_util.NHWCToNCHW(t0)
t2 = test_util.NHWCToNCHW(t2)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv2d_backprop_filter(
t0,
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format)
ret = self.evaluate(conv)
self.assertShapeEqual(ret, conv)
return ret
values = []
for (data_format, use_gpu) in GetTestConfigs():
values.append(_GetVal(data_format, use_gpu))
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-4, atol=1e-4)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth1ValidBackpropFilter(self):
expected = [5.0, 8.0, 14.0, 17.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyBackpropFilter(self):
expected = []
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 0],
output_sizes=[1, 1, 2, 0],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DBackpropFilterWithEmptyInput(self):
expected = [0, 0, 0, 0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropFilter(self):
expected = [
17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0, 32.0, 43.0, 54.0,
37.0, 50.0, 63.0, 42.0, 57.0, 72.0, 62.0, 85.0, 108.0, 67.0, 92.0,
117.0, 72.0, 99.0, 126.0, 77.0, 106.0, 135.0, 82.0, 113.0, 144.0, 87.0,
120.0, 153.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropFilterStride1x2(self):
expected = [161.0, 182.0, 287.0, 308.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 2, 3, 1],
strides=[1, 2],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DStrideTwoFilterOneSameBackpropFilter(self):
expected_output = [78.]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 4, 4, 1],
filter_sizes=[1, 1, 1, 1],
output_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="SAME",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeBackpropFilter(self):
expected_output = [1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 4.0, 8.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 2, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu)
# Testing for backprops
def _RunAndVerifyBackpropInputDilation(self, input_sizes, filter_sizes,
output_sizes, strides, dilations,
padding, data_format, use_gpu, err):
total_input_size = 1
total_filter_size = 1
for s in input_sizes:
total_input_size *= s
for s in filter_sizes:
total_filter_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_input_size + 1)]
x2 = [f * 1.0 for f in range(1, total_filter_size + 1)]
default_dilations = (dilations[0] == 1 and dilations[1] == 1)
if default_dilations or use_gpu:
with self.cached_session(use_gpu=use_gpu) as sess:
if data_format == "NCHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t1 = constant_op.constant(x1, shape=input_sizes)
t2 = constant_op.constant(x2, shape=filter_sizes)
full_strides = [1] + strides + [1]
full_dilations = [1] + dilations + [1]
if data_format == "NCHW":
full_strides = test_util.NHWCToNCHW(full_strides)
full_dilations = test_util.NHWCToNCHW(full_dilations)
conv_forward = nn_ops.conv2d(
t1,
t2,
strides=full_strides,
dilations=full_dilations,
padding=padding,
data_format=data_format)
conv_forward_2 = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilations,
data_format=data_format)
if data_format == "NCHW":
conv_forward = test_util.NCHWToNHWC(conv_forward)
conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)
conv = gradients_impl.gradients(conv_forward, t1)[0]
conv_2 = gradients_impl.gradients(conv_forward_2, t1)[0]
# "values" consists of two tensors for two backprops
value = sess.run(conv)
value_2 = sess.run(conv_2)
self.assertShapeEqual(value, conv)
self.assertShapeEqual(value_2, conv_2)
tf_logging.info("expected = ", value_2)
tf_logging.info("actual = ", value)
self.assertArrayNear(value_2.flatten(), value.flatten(), err)
# Testing for backprops
def _RunAndVerifyBackpropFilterDilation(self, input_sizes, filter_sizes,
output_sizes, strides, dilations,
padding, data_format, use_gpu, err):
total_input_size = 1
total_filter_size = 1
for s in input_sizes:
total_input_size *= s
for s in filter_sizes:
total_filter_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_input_size + 1)]
x2 = [f * 1.0 for f in range(1, total_filter_size + 1)]
default_dilations = (dilations[0] == 1 and dilations[1] == 1)
if default_dilations or use_gpu:
with self.cached_session(use_gpu=use_gpu) as sess:
if data_format == "NCHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t1 = constant_op.constant(x1, shape=input_sizes)
t2 = constant_op.constant(x2, shape=filter_sizes)
full_strides = [1] + strides + [1]
full_dilations = [1] + dilations + [1]
if data_format == "NCHW":
full_strides = test_util.NHWCToNCHW(full_strides)
full_dilations = test_util.NHWCToNCHW(full_dilations)
conv_forward = nn_ops.conv2d(
t1,
t2,
strides=full_strides,
dilations=full_dilations,
padding=padding,
data_format=data_format)
conv_forward_2 = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilations,
data_format=data_format)
if data_format == "NCHW":
conv_forward = test_util.NCHWToNHWC(conv_forward)
conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)
conv = gradients_impl.gradients(conv_forward, t2)[0]
conv_2 = gradients_impl.gradients(conv_forward_2, t2)[0]
value = sess.run(conv)
value_2 = sess.run(conv_2)
self.assertShapeEqual(value, conv)
self.assertShapeEqual(value_2, conv_2)
tf_logging.info("expected = ", value_2)
tf_logging.info("actual = ", value)
self.assertArrayNear(value_2.flatten(), value.flatten(), err)
def testConv2D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 5, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2D2x2Depth1ValidBackpropFilterDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2DEmptyBackpropFilterDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 0],
output_sizes=[1, 1, 2, 0],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2D2x2Depth3ValidBackpropFilterDilation2x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 4, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2DKernelSizeMatchesInputSizeBackpropFilterDilation2x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2D2x2Depth3ValidBackpropInputStride1x1Dilation2x1(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 5, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2D2x2Depth1ValidBackpropInputDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2DEmptyBackpropInputDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2D2x2Depth3ValidBackpropInputDilation2x1(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
# The GPU version of this test is not very stable. So adjusting the
# error threshold to 1e-4.
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 2, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
def testConv2DKernelSizeMatchesInputSizeBackpropInputDilation2x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
# Gradient checkers
def ConstructAndTestGradient(self, batch, input_rows, input_cols, filter_rows,
filter_cols, in_depth, out_depth, stride_rows,
stride_cols, padding, test_input, data_format,
use_gpu):
input_shape = [batch, input_rows, input_cols, in_depth]
filter_shape = [filter_rows, filter_cols, in_depth, out_depth]
# TODO(yangke): re-factor the computation of output shape.
if padding == "VALID":
output_rows = (input_rows - filter_rows + stride_rows) // stride_rows
output_cols = (input_cols - filter_cols + stride_cols) // stride_cols
else:
output_rows = (input_rows + stride_rows - 1) // stride_rows
output_cols = (input_cols + stride_cols - 1) // stride_cols
output_shape = [batch, output_rows, output_cols, out_depth]
input_size = 1
for x in input_shape:
input_size *= x
filter_size = 1
for x in filter_shape:
filter_size *= x
input_data = [x * 1.0 / input_size for x in range(0, input_size)]
filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
# Conv2DGrad functions are not compiled for double due to
# a problem in the way Eigen's Conv2DGrad works for double.
# So we disable the DOUBLE path. We should re-enable this
# when double support returns for CPU and/or GPU.
for dtype in self._DtypesToTest(use_gpu=use_gpu):
with self.cached_session(use_gpu=use_gpu):
input_tensor = constant_op.constant(
input_data, shape=input_shape, dtype=dtype, name="input")
filter_tensor = constant_op.constant(
filter_data, shape=filter_shape, dtype=dtype, name="filter")
strides = [1, stride_rows, stride_cols, 1]
if data_format == "NCHW":
new_input_tensor = test_util.NHWCToNCHW(input_tensor)
strides = test_util.NHWCToNCHW(strides)
else:
new_input_tensor = input_tensor
conv = nn_ops.conv2d(
new_input_tensor,
filter_tensor,
strides,
padding,
data_format=data_format,
name="conv")
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
self.assertEqual(output_shape, conv.get_shape())
if test_input:
jacob_t, jacob_n = gradient_checker.compute_gradient(input_tensor,
input_shape,
conv,
output_shape)
else:
jacob_t, jacob_n = gradient_checker.compute_gradient(filter_tensor,
filter_shape,
conv,
output_shape)
if dtype == dtypes.float32:
reference_jacob_t = jacob_t
err = np.fabs(jacob_t - jacob_n).max()
else:
# Compare fp16 theoretical gradients to fp32 theoretical gradients,
# since fp16 numerical gradients are too imprecise.
err = np.fabs(jacob_t - reference_jacob_t).max()
tf_logging.info("conv_2d gradient error = ", err)
self.assertLess(err, 0.002)
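# Worked example of the output-shape formulas above (added for illustration):
# with input_rows=5, filter_rows=3 and stride_rows=1, VALID padding yields
# (5 - 3 + 1) // 1 = 3 output rows and SAME padding yields (5 + 1 - 1) // 1 = 5;
# with stride_rows=2 the same input yields (5 - 3 + 2) // 2 = 2 (VALID) and
# (5 + 2 - 1) // 2 = 3 (SAME) output rows.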
def testInputGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=5,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=4,
out_depth=5,
stride_rows=3,
stride_cols=3,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=3,
stride_cols=3,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=3,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=4,
out_depth=5,
stride_rows=3,
stride_cols=3,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=3,
stride_cols=3,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientSamePaddingStride2x1(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=1,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientKernelSizeMatchesInputSize(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=3,
filter_rows=4,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientKernelSizeMatchesInputSize(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=3,
filter_rows=4,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testShapeFunctionEdgeCases(self):
# All shapes unknown.
c1 = nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME")
self.assertEqual([None, None, None, None], c1.get_shape().as_list())
# Incorrect input shape.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[1, 3]),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME")
# Incorrect filter shape.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(
dtypes.float32, shape=[1, 3]),
strides=[1, 1, 1, 1],
padding="SAME")
# Depth mismatch.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(
dtypes.float32, shape=[4, 4, 2, 2]),
strides=[1, 1, 1, 1],
padding="SAME")
def testOpEdgeCases(self):
with self.cached_session() as sess:
# Illegal strides.
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"strides in the batch and depth"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[2, 1, 1, 1],
padding="SAME"))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"strides in the batch and depth"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 2],
padding="SAME"))
# Filter larger than input.
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(
dtypes.float32, shape=[20, 21, 3, 2]),
strides=[1, 1, 1, 1],
padding="VALID"))
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(
dtypes.float32, shape=[21, 20, 3, 2]),
strides=[1, 1, 1, 1],
padding="VALID"))
class DepthwiseConv2DTest(test.TestCase):
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.cached_session() as sess:
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t1.set_shape(tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
conv = nn_impl.depthwise_conv2d(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
value = sess.run(conv)
tf_logging.info("value = ", value)
self.assertArrayNear(expected, np.ravel(value), 1e-5)
self.assertShapeEqual(value, conv)
def testConv2D2x2Filter(self):
# The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
#
# [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
# [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
# We can view this as two inputs
#
# input depth 0:
#
# [ 1.0, 3.0, 5.0 ]
# [ 7.0, 9.0, 11.0 ]
#
# input depth 1:
#
# [ 2.0, 4.0, 6.0 ]
# [ 8.0, 10.0, 12.0 ]
#
# The filter looks like this (it has two 2 x 2 patches, each generating 2
# depths):
#
# filter #0:
#
# [ (1.0, 3.0), ( 5.0, 7.0)]
# [ (9.0, 11.0), (13.0, 15.0)]
#
# filter #1:
#
# [ ( 2.0, 4.0), ( 6.0, 8.0)]
# [ (10.0, 12.0), (14.0, 16.0)]
#
# So the outputs are:
#
# (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
# 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
# (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
# 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
# (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
# 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
# (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
# 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
#
# (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
# 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
# (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
# 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
# (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
# 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
# (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
# 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output)
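# Illustrative NumPy check of the first expected value above (added, not part
# of the original test): the (0, 0) output for input depth 0 with filter #0 is
# the elementwise product of the depth-0 input patch and filter[:, :, 0, 0]:
#   patch = np.array([[1., 3.], [7., 9.]])
#   kern = np.array([[1., 5.], [9., 13.]])
#   (patch * kern).sum()  # -> 196.0, matching expected_output[0]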
class SeparableConv2DTest(test.TestCase):
def _InitValues(self, sizes):
"""Initializes values for input tensors.
Args:
sizes: Tensor dimensions.
Returns:
Tensor initialized to values.
"""
total_size = 1
for s in sizes:
total_size *= s
x = [f * 0.5 for f in range(1, total_size + 1)]
return constant_op.constant(x, shape=sizes)
def _VerifyValues(self,
tensor_in_sizes,
depthwise_filter_in_sizes,
pointwise_filter_in_sizes,
stride,
padding,
expected,
data_format="NHWC"):
"""Verifies the output values of the separable convolution function.
Args:
tensor_in_sizes: Input tensor dimensions.
depthwise_filter_in_sizes: Depthwise filter tensor dimensions.
pointwise_filter_in_sizes: Pointwise filter tensor dimensions.
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
data_format: string data format for input tensor.
"""
with self.cached_session(use_gpu=True) as sess:
t1 = self._InitValues(tensor_in_sizes)
f1 = self._InitValues(depthwise_filter_in_sizes)
f1.set_shape(depthwise_filter_in_sizes)
f2 = self._InitValues(pointwise_filter_in_sizes)
real_t1 = t1
strides = [1, stride, stride, 1]
if data_format == "NCHW":
real_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
conv = nn_impl.separable_conv2d(
real_t1,
f1,
f2,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
conv = array_ops.transpose(conv, [0, 2, 3, 1])
value = sess.run(conv)
tf_logging.info("value = ", value)
self.assertArrayNear(expected, np.ravel(value), 1e-3)
self.assertShapeEqual(value, conv)
def _testSeparableConv2D(self, data_format):
# The output is the result of two convolutions:
# First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 2, 3].
# Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 7].
# Complexity is O(2*3*2*2 + 6*7*1*1) as opposed to O(2*7*2*2).
expected_output = [
6644.5, 6971.5, 7298.5, 7625.5, 7952.5, 8279.5, 8606.5, 8154.5, 8556.5,
8958.5, 9360.5, 9762.5, 10164.5, 10566.5, 9664.5, 10141.5, 10618.5,
11095.5, 11572.5, 12049.5, 12526.5, 4145.5, 4346.5, 4547.5, 4748.5,
4949.5, 5150.5, 5351.5, 12684.5, 13311.5, 13938.5, 14565.5, 15192.5,
15819.5, 16446.5, 14194.5, 14896.5, 15598.5, 16300.5, 17002.5, 17704.5,
18406.5, 15704.5, 16481.5, 17258.5, 18035.5, 18812.5, 19589.5, 20366.5,
6499.5, 6814.5, 7129.5, 7444.5, 7759.5, 8074.5, 8389.5, 18724.5,
19651.5, 20578.5, 21505.5, 22432.5, 23359.5, 24286.5, 20234.5, 21236.5,
22238.5, 23240.5, 24242.5, 25244.5, 26246.5, 21744.5, 22821.5, 23898.5,
24975.5, 26052.5, 27129.5, 28206.5, 8853.5, 9282.5, 9711.5, 10140.5,
10569.5, 10998.5, 11427.5, 5746.75, 6010.75, 6274.75, 6538.75, 6802.75,
7066.75, 7330.75, 6168.75, 6452.25, 6735.75, 7019.25, 7302.75, 7586.25,
7869.75, 6590.75, 6893.75, 7196.75, 7499.75, 7802.75, 8105.75, 8408.75,
2036.25, 2119.5, 2202.75, 2286.0, 2369.25, 2452.5, 2535.75
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 2],
depthwise_filter_in_sizes=[2, 2, 2, 3],
pointwise_filter_in_sizes=[1, 1, 6, 7],
stride=1,
padding="SAME",
expected=expected_output,
data_format=data_format)
def testSeparableConv2D(self):
self._testSeparableConv2D("NHWC")
def disabledtestSeparableConv2DNCHW(self):
if not test.is_gpu_available():
return
self._testSeparableConv2D("NCHW")
def _testSeparableConv2DEqualInputOutputDepth(self, data_format):
# The output is the result of two convolutions:
# First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 3, 3].
# Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 6].
# Complexity is O(2*3*2*2 + 6*6*1*1) as opposed to O(2*6*2*2).
expected_output = [
5742.0, 6069.0, 6396.0, 6723.0, 7050.0, 7377.0, 7047.0, 7449.0, 7851.0,
8253.0, 8655.0, 9057.0, 8352.0, 8829.0, 9306.0, 9783.0, 10260.0,
10737.0, 3582.0, 3783.0, 3984.0, 4185.0, 4386.0, 4587.0, 10962.0,
11589.0, 12216.0, 12843.0, 13470.0, 14097.0, 12267.0, 12969.0, 13671.0,
14373.0, 15075.0, 15777.0, 13572.0, 14349.0, 15126.0, 15903.0, 16680.0,
17457.0, 5616.0, 5931.0, 6246.0, 6561.0, 6876.0, 7191.0, 16182.0,
17109.0, 18036.0, 18963.0, 19890.0, 20817.0, 17487.0, 18489.0, 19491.0,
20493.0, 21495.0, 22497.0, 18792.0, 19869.0, 20946.0, 22023.0, 23100.0,
24177.0, 7650.0, 8079.0, 8508.0, 8937.0, 9366.0, 9795.0, 4963.5, 5227.5,
5491.5, 5755.5, 6019.5, 6283.5, 5328.0, 5611.5, 5895.0, 6178.5, 6462.0,
6745.5, 5692.5, 5995.5, 6298.5, 6601.5, 6904.5, 7207.5, 1757.25, 1840.5,
1923.75, 2007.0, 2090.25, 2173.5
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 2],
depthwise_filter_in_sizes=[2, 2, 2, 3],
pointwise_filter_in_sizes=[1, 1, 6, 6],
stride=1,
padding="SAME",
expected=expected_output,
data_format=data_format)
def testSeparableConv2DEqualInputOutputDepth(self):
self._testSeparableConv2DEqualInputOutputDepth("NHWC")
def testSeparableConv2DEqualInputOutputDepthNCHW(self):
if not test.is_gpu_available():
return
self._testSeparableConv2DEqualInputOutputDepth("NCHW")
class DeepConv2DTest(test.TestCase):
def _CompareFwdConv2D(self, tensor_in_sizes, filter_in_sizes, conv_strides,
padding):
"""Verifies that DeepConv2D and Conv2D produce the same values.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
conv_strides: [row_stride, col_stride] for the convolution;
padding: Padding type.
"""
x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
with self.cached_session(use_gpu=False) as sess:
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
strides = [1] + conv_strides + [1]
conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding)
os.environ["TF_USE_DEEP_CONV2D"] = "0"
values_expect = sess.run([conv])
os.environ["TF_USE_DEEP_CONV2D"] = "1"
values_test = sess.run([conv])
self.assertAllClose(values_expect, values_test, rtol=1e-5, atol=1e-5)
def _RunTestCases(self, conv_strides, padding):
input_sizes = [[5, 5, 5, 1248], [3, 17, 17, 192], [2, 35, 35, 288],
[2, 6, 8, 517], [2, 7, 4, 81], [3, 11, 3, 77]]
filter_sizes = [[3, 3, 1248, 128], [3, 3, 192, 192], [3, 3, 288, 384],
[3, 3, 517, 64], [3, 3, 81, 77], [3, 3, 77, 181]]
for input_shape, filter_shape in zip(input_sizes, filter_sizes):
self._CompareFwdConv2D(input_shape, filter_shape, conv_strides, padding)
def testConv2D3x3FilterStride1x1Valid(self):
self._RunTestCases([1, 1], "VALID")
def testConv2D3x3FilterStride1x1Same(self):
self._RunTestCases([1, 1], "SAME")
class Conv2DBenchmark(test.Benchmark):
def benchmarkGPUConvStackFirst(self):
# Benchmark the first iteration of a conv-net with many identical conv
# operations.
if not test.is_gpu_available():
return
with ops.Graph().as_default(), session_lib.Session() as session:
batch_size = 1
timesteps = 600
features = 1
inputs = random_ops.random_uniform(
[batch_size, 1, timesteps, features], seed=1234)
num_outputs_list = [512] * 40 + [1]
kernel_w = 3
x = inputs
for num_outputs in num_outputs_list:
x = layers.convolution2d(x, num_outputs, [1, kernel_w])
outputs = x
variables.global_variables_initializer().run()
num_iterations = 4
for iter_index in xrange(num_iterations):
start = time.time()
session.run(outputs)
wall_time = time.time() - start
self.report_benchmark(
name="conv_stack_iter_%d" % iter_index, wall_time=wall_time)
tf_logging.info("conv_stack_iter_%d: %.4f" % (iter_index, wall_time))
def GetInceptionFwdTest(input_size, filter_size, stride, padding,
gpu_only=False):
def Test(self):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping InceptionFwd %s", (input_size, filter_size,
stride, padding))
return
tf_logging.info("Testing InceptionFwd %s", (input_size, filter_size, stride,
padding))
self._CompareFwdValues(input_size, filter_size, [stride, stride], padding)
return Test
def GetInceptionFwdDilatedConvTest(input_size, filter_size, stride, padding):
def Test(self):
if stride == 1:
tf_logging.info("Testing InceptionFwd with dilations %s",
(input_size, filter_size, stride, padding))
self._VerifyDilatedConvValues(
tensor_in_sizes=input_size,
filter_in_sizes=filter_size,
strides=[stride, stride],
dilations=[2, 2],
padding=padding)
return Test
def GetInceptionBackInputTest(input_size, filter_size, output_size, stride,
padding,
gpu_only=False):
def Test(self):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping InceptionBackInput %s",
(input_size, filter_size, output_size, stride, padding))
return
tf_logging.info("Testing InceptionBackInput %s",
(input_size, filter_size, output_size, stride, padding))
self._CompareBackpropInput(input_size, filter_size, output_size,
[stride, stride], padding)
return Test
def GetInceptionBackFilterTest(input_size, filter_size, output_size, strides,
padding, gpu_only=False):
def Test(self):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping InceptionBackFilter %s",
(input_size, filter_size, output_size, strides, padding))
return
tf_logging.info("Testing InceptionBackFilter %s",
(input_size, filter_size, output_size, strides, padding))
self._CompareBackFilter(input_size, filter_size, output_size, strides,
padding)
return Test
if __name__ == "__main__":
for index, (input_size_, filter_size_, output_size_, stride_,
padding_) in enumerate(GetShrunkInceptionShapes()):
setattr(Conv2DTest, "testInceptionFwd_" + str(index),
test_util.run_in_graph_and_eager_modes(
GetInceptionFwdTest(input_size_, filter_size_, stride_,
padding_)))
setattr(
Conv2DTest, "testInceptionFwdDilatedConv_" + str(index),
test_util.run_in_graph_and_eager_modes(GetInceptionFwdDilatedConvTest(
input_size_, filter_size_, stride_, padding_)))
setattr(Conv2DTest, "testInceptionBackInput_" + str(index),
test_util.run_in_graph_and_eager_modes(
GetInceptionBackInputTest(input_size_, filter_size_,
output_size_, stride_, padding_)))
setattr(Conv2DTest, "testInceptionBackFilter_" + str(index),
test_util.run_in_graph_and_eager_modes(
GetInceptionBackFilterTest(input_size_, filter_size_,
output_size_, [stride_, stride_],
padding_)))
# TODO(b/35359731)
# Fwd, BackInput, and BackFilter tests check that, for certain input parameter
# sets, the winograd nonfused algorithm is excluded from conv autotune. If the
# winograd nonfused algorithm were instead offered as an autotune option while
# the cuDNN version is smaller than 7, the following tests would fail.
ishape = [1, 400, 400, 1]
fshape = [1, 1, 1, 256]
oshape = [1, 400, 400, 256]
setattr(Conv2DTest, "testInceptionFwd_No_Winograd_Nonfused",
test_util.run_in_graph_and_eager_modes(
GetInceptionFwdTest(ishape, fshape, 1, "SAME", gpu_only=True)))
setattr(Conv2DTest, "testInceptionFwdDilatedConv_No_Winograd_Nonfused",
test_util.run_in_graph_and_eager_modes(
GetInceptionFwdDilatedConvTest(ishape, fshape, 1, "SAME")))
setattr(Conv2DTest, "testInceptionBackInput_No_Winograd_Nonfused",
test_util.run_in_graph_and_eager_modes(
GetInceptionBackInputTest(ishape, fshape, oshape, 1, "SAME",
gpu_only=True)))
setattr(Conv2DTest, "testInceptionBackFilter_No_Winograd_Nonfused",
test_util.run_in_graph_and_eager_modes(
GetInceptionBackFilterTest(ishape, fshape, oshape, [1, 1], "SAME",
gpu_only=True)))
test.main()
|
hehongliang/tensorflow
|
tensorflow/python/kernel_tests/conv_ops_test.py
|
Python
|
apache-2.0
| 73,074
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.LOW
def dependencies():
pass
def tamper(payload, **kwargs):
"""
Replaces space character (' ') with comments '/**/'
Tested against:
* Microsoft SQL Server 2005
* MySQL 4, 5.0 and 5.5
* Oracle 10g
* PostgreSQL 8.3, 8.4, 9.0
Notes:
* Useful to bypass weak and bespoke web application firewalls
>>> tamper('SELECT id FROM users')
'SELECT/**/id/**/FROM/**/users'
"""
retVal = payload
if payload:
retVal = ""
quote, doublequote, firstspace = False, False, False
for i in xrange(len(payload)):
if not firstspace:
if payload[i].isspace():
firstspace = True
retVal += "/**/"
continue
elif payload[i] == '\'':
quote = not quote
elif payload[i] == '"':
doublequote = not doublequote
elif payload[i] == " " and not doublequote and not quote:
retVal += "/**/"
continue
retVal += payload[i]
return retVal
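# Behaviour note added for illustration (not part of the original tamper
# script): spaces inside quoted strings are preserved because the quote and
# doublequote flags suppress the replacement, e.g.
#   tamper("SELECT 'a b' FROM users")  # -> "SELECT/**/'a b'/**/FROM/**/users"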
|
glaudsonml/kurgan-ai
|
tools/sqlmap/tamper/space2comment.py
|
Python
|
apache-2.0
| 1,319
|
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.db.sqlalchemy.models.cluster_plugin_link import ClusterPluginLink
from nailgun.test.base import BaseIntegrationTest
from nailgun.utils import reverse
from oslo_serialization import jsonutils
class TestHandlers(BaseIntegrationTest):
def setUp(self):
super(TestHandlers, self).setUp()
self.cluster = self.env.create_cluster(api=False)
self.cluster_plugin_link = self.env \
.create_cluster_plugin_link(cluster_id=self.cluster.id)
def test_cluster_plugin_link_update(self):
cluster_plugin_link_update = {
'title': 'new title 2',
'description': 'new description 2'
}
resp = self.app.put(
reverse(
'ClusterPluginLinkHandler',
kwargs={'cluster_id': self.cluster['id'],
'obj_id': self.cluster_plugin_link.id}
),
jsonutils.dumps(cluster_plugin_link_update),
headers=self.default_headers
)
self.assertEqual(self.cluster_plugin_link.id, resp.json_body['id'])
self.assertEqual('new title 2', resp.json_body['title'])
self.assertEqual('new description 2', resp.json_body['description'])
self.assertEqual(self.cluster_plugin_link.url, resp.json_body['url'])
def test_cluster_plugin_link_get_with_cluster(self):
resp = self.app.get(
reverse(
'ClusterPluginLinkHandler',
kwargs={'cluster_id': self.cluster['id'],
'obj_id': self.cluster_plugin_link.id}
),
headers=self.default_headers
)
self.assertEqual(200, resp.status_code)
self.assertEqual(self.cluster_plugin_link.id, resp.json_body['id'])
self.assertEqual(self.cluster_plugin_link.title,
resp.json_body['title'])
self.assertEqual(self.cluster_plugin_link.url, resp.json_body['url'])
self.assertEqual(self.cluster_plugin_link.description,
resp.json_body['description'])
self.assertEqual(self.cluster_plugin_link.hidden,
resp.json_body['hidden'])
def test_cluster_plugin_link_not_found(self):
resp = self.app.get(
reverse(
'ClusterPluginLinkHandler',
kwargs={'cluster_id': self.cluster['id'],
'obj_id': self.cluster_plugin_link.id + 1}
),
headers=self.default_headers,
expect_errors=True
)
self.assertEqual(404, resp.status_code)
def test_cluster_plugin_link_delete(self):
resp = self.app.delete(
reverse(
'ClusterPluginLinkHandler',
kwargs={'cluster_id': self.cluster['id'],
'obj_id': self.cluster_plugin_link.id}
),
headers=self.default_headers,
)
self.assertEqual(204, resp.status_code)
d_e_query = self.db.query(ClusterPluginLink) \
.filter_by(cluster_id=self.cluster.id)
self.assertEqual(d_e_query.count(), 0)
def test_cluster_plugin_link_patch(self):
cluster_plugin_link_update = {
'title': 'new title 3',
'description': 'new description 3',
'hidden': True
}
resp = self.app.patch(
reverse(
'ClusterPluginLinkHandler',
kwargs={'cluster_id': self.cluster['id'],
'obj_id': self.cluster_plugin_link.id}
),
jsonutils.dumps(cluster_plugin_link_update),
headers=self.default_headers
)
self.assertEqual(self.cluster_plugin_link.id, resp.json_body['id'])
self.assertEqual('new title 3', resp.json_body['title'])
self.assertEqual('new description 3', resp.json_body['description'])
self.assertEqual(self.cluster_plugin_link.url, resp.json_body['url'])
self.assertEqual(True, resp.json_body['hidden'])
| huntxu/fuel-web | nailgun/nailgun/test/integration/test_cluster_plugin_links_handler.py | Python | apache-2.0 | 4,662 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter_nd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
GRADIENT_TESTS_DTYPES = (dtypes.float16, dtypes.float32, dtypes.float64)
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _FlatInnerDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape([
functools.reduce(lambda x, y: x * y, shape[:-ndims + 1], 1)
] + shape[-ndims + 1:])
def _FlatOuterDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape(shape[:ndims - 1] + [
functools.reduce(lambda x, y: x * y, shape[ndims - 1:], 1)
])
def _NumpyScatterNd(ref, indices, updates, op):
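  """Reference NumPy implementation of a scatter_nd-style op: for each index
  vector in `indices`, combines the addressed slice of `ref` with the matching
  slice of `updates` using `op`, and returns the updated array."""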
ixdim = indices.shape[-1]
num_updates = indices.size // ixdim
total_nd = len(ref.shape)
slice_size = 1
for i in range(ixdim, total_nd):
slice_size *= ref.shape[i]
flat_indices = _FlatInnerDims(indices)
flat_updates = updates.reshape((num_updates, slice_size))
output_flat = _FlatOuterDims(ref, ixdim + 1)
for ix_updates, ix_output in enumerate(flat_indices):
ix_output = tuple(ix_output)
output_flat[ix_output] = op(output_flat[ix_output],
flat_updates[ix_updates])
return output_flat.reshape(ref.shape)
def _NumpyUpdate(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: u)
def _NumpyAdd(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p + u)
def _NumpySub(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p - u)
def _NumpyMul(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p * u)
def _NumpyDiv(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p / u)
class StatefulScatterNdTest(test.TestCase):
def _VariableRankTest(self,
np_scatter,
tf_scatter,
vtype,
itype,
repeat_indices=False):
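    """Checks `tf_scatter` against the NumPy reference `np_scatter` over a
    range of ref/indices shapes for the given value and index types."""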
np.random.seed(8)
ref_shapes = [(3, 6), (3, 6), (3, 6, 9), (3, 6, 9), (3, 6, 9), (3, 6, 9)]
indices_shapes = [(2,), (2, 2), (2,), (2, 2), (2, 3), (2, 3, 3)]
with self.cached_session(use_gpu=True):
for ref_shape, indices_shape in zip(ref_shapes, indices_shapes):
num_updates = indices_shape[0]
ixdim = indices_shape[-1]
indexable_area_shape = ()
for i in range(ixdim):
indexable_area_shape += (ref_shape[i],)
all_indices = [
list(coord)
for coord, _ in np.ndenumerate(
np.empty(indexable_area_shape, vtype))
]
np.random.shuffle(all_indices)
indices = np.array(all_indices[:num_updates])
if num_updates > 1 and repeat_indices:
indices = indices[:num_updates // 2]
for _ in range(num_updates - num_updates // 2):
indices = np.append(
indices, [indices[np.random.randint(num_updates // 2)]], axis=0)
np.random.shuffle(indices)
indices = _AsType(indices[:num_updates], itype)
updates_shape = (num_updates,)
for i in range(ixdim, len(ref_shape)):
updates_shape += (ref_shape[i],)
updates = _AsType(np.random.randn(*(updates_shape)), vtype)
ref = _AsType(np.random.randn(*(ref_shape)), vtype)
# Scatter via numpy
new = ref.copy()
np_scatter(new, indices, updates)
# Scatter via tensorflow
ref_var = variables.VariableV1(ref)
ref_var.initializer.run()
tf_scatter(ref_var, indices, updates).eval()
# Compare
self.assertAllClose(new, self.evaluate(ref_var))
def _VariableRankTests(self, np_scatter, tf_scatter):
for vtype in (np.int32, np.float16, np.float32, np.float64, np.complex64,
np.complex128):
for itype in (np.int32, np.int64):
self._VariableRankTest(np_scatter, tf_scatter, vtype, itype)
def testSimple(self):
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
ref = variables.Variable([0, 0, 0, 0, 0, 0, 0, 0], dtype=dtypes.float32)
expected = np.array([0, 11, 0, 10, 9, 0, 0, 12])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
sess.run(init)
result = sess.run(scatter)
self.assertAllClose(result, expected)
def testSimpleResource(self):
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
ref = resource_variable_ops.ResourceVariable(
[0, 0, 0, 0, 0, 0, 0, 0], dtype=dtypes.float32)
expected = np.array([0, 11, 0, 10, 9, 0, 0, 12])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
sess.run(init)
sess.run(scatter)
self.assertAllClose(ref.eval(), expected)
def testSimple2(self):
indices = constant_op.constant([[1, 0], [1, 1]], dtype=dtypes.int32)
updates = constant_op.constant([11., 12.], dtype=dtypes.float32)
ref = variables.Variable(
[[0., 0.], [0., 0.], [0., 0.]], dtype=dtypes.float32)
expected = np.array([[0., 0.], [11., 12.], [0., 0.]])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
sess.run(init)
result = sess.run(scatter)
self.assertAllClose(result, expected)
def testSimple3(self):
indices = constant_op.constant([[1]], dtype=dtypes.int32)
updates = constant_op.constant([[11., 12.]], dtype=dtypes.float32)
ref = variables.Variable(
[[0., 0.], [0., 0.], [0., 0.]], dtype=dtypes.float32)
expected = np.array([[0., 0.], [11., 12.], [0., 0.]])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
sess.run(init)
result = sess.run(scatter)
self.assertAllClose(result, expected)
def testVariableRankUpdate(self):
self._VariableRankTests(_NumpyUpdate, state_ops.scatter_nd_update)
def testVariableRankAdd(self):
self._VariableRankTests(_NumpyAdd, state_ops.scatter_nd_add)
def testVariableRankSub(self):
self._VariableRankTests(_NumpySub, state_ops.scatter_nd_sub)
# TODO(ebrevdo): Re-enable when we need ScatterNdMul.
# def testVariableRankMul(self):
# self._VariableRankTests(_NumpyMul, state_ops.scatter_nd_mul)
# TODO(ebrevdo): Re-enable when we need ScatterNdDiv.
# def testVariableRankDiv(self):
# self._VariableRankTests(_NumpyDiv, state_ops.scatter_nd_div)
def _ScatterRepeatIndicesTest(self, np_scatter, tf_scatter):
for vtype in (np.int32, np.float16, np.float32, np.float64):
for itype in (np.int32, np.int64):
self._VariableRankTest(
np_scatter, tf_scatter, vtype, itype, repeat_indices=True)
def testScatterRepeatIndices(self):
"""This tests scatter_add using indices that repeat."""
self._ScatterRepeatIndicesTest(_NumpyAdd, state_ops.scatter_nd_add)
self._ScatterRepeatIndicesTest(_NumpySub, state_ops.scatter_nd_sub)
# TODO(ebrevdo): Re-enable when we need ScatterNdMul and ScatterNdDiv.
# self._ScatterRepeatIndicesTest(_NumpyMul, state_ops.scatter_nd_mul)
# self._ScatterRepeatIndicesTest(_NumpyDiv, state_ops.scatter_nd_div)
# TODO(simister): Re-enable once binary size increase due to
# extra templating is back under control and this op is re-enabled
# def testBooleanScatterUpdate(self):
# with self.session(use_gpu=False) as session:
# var = tf.Variable([True, False])
# update0 = tf.scatter_nd_update(var, [[1]], [True])
# update1 = tf.scatter_nd_update(
# var, tf.constant(
# [[0]], dtype=tf.int64), [False])
# var.initializer.run()
# session.run([update0, update1])
# self.assertAllEqual([False, True], self.evaluate(var))
def testScatterOutOfRangeCpu(self):
# TODO(simister): Re-enable once binary size increase due to
# scatter_nd ops is under control.
# tf.scatter_nd_mul, tf.scatter_nd_div,
for op in (state_ops.scatter_nd_add, state_ops.scatter_nd_sub,
state_ops.scatter_nd_update):
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
with self.cached_session(use_gpu=False):
ref = variables.VariableV1(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([[2], [0], [5]])
op(ref, indices, updates).eval()
# Test some out of range errors.
indices = np.array([[-1], [0], [5]])
with self.assertRaisesOpError(
r"indices\[0\] = \[-1\] does not index into shape \[6\]"):
op(ref, indices, updates).eval()
indices = np.array([[2], [0], [6]])
with self.assertRaisesOpError(
r"indices\[2\] = \[6\] does not index into shape \[6\]"):
op(ref, indices, updates).eval()
def testRank3ValidShape(self):
indices = array_ops.zeros([2, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
self.assertAllEqual(
state_ops.scatter_nd_update(ref, indices,
updates).get_shape().as_list(), shape)
def testResVarInvalidOutputShape(self):
res = variables.Variable(
initial_value=lambda: array_ops.zeros(shape=[], dtype=dtypes.float32),
dtype=dtypes.float32)
with self.cached_session():
res.initializer.run()
with self.assertRaisesOpError("Output must be at least 1-D"):
state_ops.scatter_nd_update(res, [[0]], [0.22]).eval()
def testExtraIndicesDimensions(self):
indices = array_ops.zeros([1, 1, 2], dtypes.int32)
updates = array_ops.zeros([1, 1], dtypes.int32)
shape = np.array([2, 2])
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
scatter_update = state_ops.scatter_nd_update(ref, indices, updates)
self.assertAllEqual(scatter_update.get_shape().as_list(), shape)
expected_result = np.zeros([2, 2], dtype=np.int32)
with self.cached_session():
ref.initializer.run()
self.assertAllEqual(expected_result, self.evaluate(scatter_update))
def testRank3InvalidShape1(self):
indices = array_ops.zeros([3, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
with self.assertRaisesWithPredicateMatch(
ValueError, "The outer \\d+ dimensions of indices\\.shape="):
state_ops.scatter_nd_update(ref, indices, updates)
def testRank3InvalidShape2(self):
indices = array_ops.zeros([2, 2, 1], dtypes.int32)
updates = array_ops.zeros([2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
with self.assertRaisesWithPredicateMatch(
ValueError, "The inner \\d+ dimensions of input\\.shape="):
state_ops.scatter_nd_update(ref, indices, updates)
def testConcurrentUpdates(self):
num_updates = 10000
update_values = np.random.rand(num_updates)
ref = variables.Variable(np.zeros([2, 2]), dtype=dtypes.float64)
indices = constant_op.constant([[0, 1]] * num_updates, dtype=dtypes.int32)
updates = constant_op.constant(update_values, dtype=dtypes.float64)
expected_result = np.zeros([2, 2], dtype=np.float64)
expected_result[0, 1] = np.sum(update_values)
scatter = state_ops.scatter_nd_add(ref, indices, updates)
init = variables.global_variables_initializer()
with session.Session() as sess:
sess.run(init)
result = sess.run(scatter)
assert np.allclose(result, expected_result)
# TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.
def _disabledTestScatterOutOfRangeGpu(self):
if not test.IsBuiltWithCuda():
return
# TODO(simister): Re-enable once binary size increase due to
# scatter_nd ops is under control.
# tf.scatter_nd_mul, tf.scatter_nd_div,
for op in (state_ops.scatter_nd_add, state_ops.scatter_nd_sub,
state_ops.scatter_nd_update):
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
# With GPU, the code ignores indices that are out of range.
# We don't test the implementation; just test there's no failures.
with self.cached_session(force_gpu=True):
ref = variables.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
op(ref, indices, updates).eval()
# Indices out of range should not fail.
indices = np.array([-1, 0, 5])
op(ref, indices, updates).eval()
indices = np.array([2, 0, 6])
op(ref, indices, updates).eval()
class ScatterNdTest(test.TestCase):
non_aliasing_add_test = False
def scatter_nd(self, indices, updates, shape, input_=None):
del input_ # input_ is not used in scatter_nd
return array_ops.scatter_nd(indices, updates, shape)
@test_util.run_in_graph_and_eager_modes
def testBool(self):
indices = constant_op.constant(
[[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant(
[False, True, False, True], dtype=dtypes.bool)
expected = np.array(
[False, False, False, True, False, False, False, True])
scatter = self.scatter_nd(indices, updates, shape=(8,))
result = self.evaluate(scatter)
self.assertAllEqual(expected, result)
    # Same index is updated twice with the same value.
indices = constant_op.constant(
[[4], [3], [3], [7]], dtype=dtypes.int32)
updates = constant_op.constant(
[False, True, True, True], dtype=dtypes.bool)
expected = np.array([
False, False, False, True, False, False, False, True])
scatter = self.scatter_nd(indices, updates, shape=(8,))
result = self.evaluate(scatter)
self.assertAllEqual(expected, result)
@test_util.run_in_graph_and_eager_modes
def testInvalidShape(self):
# TODO(apassos) figure out how to unify these errors
with self.assertRaises(errors.InvalidArgumentError
if context.executing_eagerly() else ValueError):
array_ops.scatter_nd(indices=[0], # this should be indices=[[0]]
updates=[0.0],
shape=[1])
def testString(self):
indices = constant_op.constant([[4], [3], [1], [7]],
dtype=dtypes.int32)
updates = constant_op.constant(["four", "three", "one", "seven"],
dtype=dtypes.string)
expected = np.array([b"", b"one", b"", b"three", b"four",
b"", b"", b"seven"])
scatter = self.scatter_nd(indices, updates, shape=(8,))
with self.cached_session() as sess:
result = sess.run(scatter)
self.assertAllEqual(expected, result)
    # Same index is updated twice with the same value.
indices = constant_op.constant([[4], [3], [3], [7]],
dtype=dtypes.int32)
updates = constant_op.constant(["a", "b", "b", "c"],
dtype=dtypes.string)
expected = np.array([b"", b"", b"", b"bb", b"a", b"", b"", b"c"])
scatter = self.scatter_nd(indices, updates, shape=(8,))
with self.cached_session() as sess:
result = sess.run(scatter)
self.assertAllEqual(expected, result)
    # Same index is updated twice with different values.
indices = constant_op.constant([[4], [3], [3], [7]],
dtype=dtypes.int32)
updates = constant_op.constant(["a", "b", "c", "d"],
dtype=dtypes.string)
expected = [np.array([b"", b"", b"", b"bc", b"a", b"", b"", b"d"]),
np.array([b"", b"", b"", b"cb", b"a", b"", b"", b"d"])]
scatter = self.scatter_nd(indices, updates, shape=(8,))
with self.cached_session() as sess:
result = sess.run(scatter)
self.assertTrue(np.array_equal(result, expected[0]) or
np.array_equal(result, expected[1]))
def testRank3ValidShape(self):
indices = array_ops.zeros([2, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
self.assertAllEqual(
self.scatter_nd(indices, updates, shape).get_shape().as_list(), shape)
def testExtraIndicesDimensions(self):
indices = array_ops.zeros([1, 1, 2], dtypes.int32)
updates = array_ops.zeros([1, 1], dtypes.int32)
shape = np.array([2, 2])
scatter = self.scatter_nd(indices, updates, shape)
self.assertAllEqual(scatter.get_shape().as_list(), shape)
expected_result = np.zeros([2, 2], dtype=np.int32)
with self.cached_session():
self.assertAllEqual(expected_result, self.evaluate(scatter))
def testUndefinedIndicesShape(self):
indices = array_ops.placeholder(dtypes.int32, shape=None)
updates = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
shape = constant_op.constant([2, 2, 2], dtypes.int32)
self.scatter_nd(indices, updates, shape)
def testUndefinedUpdatesShape(self):
indices = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
updates = array_ops.placeholder(dtypes.int32, shape=None)
shape = constant_op.constant([2, 2, 2], dtypes.int32)
self.scatter_nd(indices, updates, shape)
def testUndefinedOutputShape(self):
indices = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
updates = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
shape = array_ops.placeholder(dtypes.int32, shape=[None])
self.scatter_nd(indices, updates, shape)
def testEmptyOutputShape1(self):
indices = array_ops.zeros([2, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = constant_op.constant([0, 3, 2], dtypes.int32)
with self.assertRaisesWithPredicateMatch(
ValueError, "Indices and updates specified for empty output shape"):
self.scatter_nd(indices, updates, shape)
def testEmptyOutputShape2(self):
indices = array_ops.placeholder(dtypes.int32, shape=None)
updates = array_ops.placeholder(dtypes.int32, shape=None)
shape = constant_op.constant([0, 3, 2], dtypes.int32)
with self.cached_session():
with self.assertRaisesOpError(
"Indices and updates specified for empty output"):
self.scatter_nd(indices, updates, shape).eval(feed_dict={
indices: np.zeros([2, 2, 2], dtype=np.int32),
updates: np.zeros([2, 2, 2], dtype=np.int32)
})
def testEmptyOutputShape3(self):
indices = array_ops.zeros([0], dtypes.int32)
updates = array_ops.zeros([0], dtypes.int32)
shape = constant_op.constant([0], dtypes.int32)
scatter = self.scatter_nd(indices, updates, shape)
with self.cached_session():
self.assertEqual(scatter.eval().size, 0)
def testRank3InvalidShape1(self):
indices = array_ops.zeros([3, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
with self.assertRaisesWithPredicateMatch(
ValueError, "The outer \\d+ dimensions of indices\\.shape="):
self.scatter_nd(indices, updates, shape)
def testRank3InvalidShape2(self):
indices = array_ops.zeros([2, 2, 1], dtypes.int32)
updates = array_ops.zeros([2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
with self.assertRaisesWithPredicateMatch(
ValueError, "The inner \\d+ dimensions of (input|output)\\.shape="):
self.scatter_nd(indices, updates, shape)
def testGradientsRank2ElementUpdate(self):
for dtype in GRADIENT_TESTS_DTYPES:
indices = constant_op.constant([[0, 0], [1, 1]], dtype=dtypes.int32)
updates = constant_op.constant([1, 4], dtype=dtype)
shape = constant_op.constant([2, 2], dtype=dtypes.int32)
input_ = array_ops.zeros(shape, dtype=dtype)
outputs = self.scatter_nd(indices, updates, shape, input_)
grad_vals = constant_op.constant([[1, 2], [3, 4]], dtype=dtype)
updates_grad, input_grad = gradients_impl.gradients(
[outputs], [updates, input_], [grad_vals])
expected_updates_grad = np.array([1, 4], dtype=dtype.as_numpy_dtype())
expected_input_grad = np.array([[1, 2], [3, 4]],
dtype=dtype.as_numpy_dtype())
with self.cached_session():
self.assertAllEqual(expected_updates_grad, self.evaluate(updates_grad))
if self.non_aliasing_add_test:
self.assertAllEqual(expected_input_grad, self.evaluate(input_grad))
def testGradientsRank2SliceUpdate(self):
for dtype in GRADIENT_TESTS_DTYPES:
indices = constant_op.constant([[1], [0]], dtype=dtypes.int32)
updates = constant_op.constant([[3, 4], [1, 2]], dtype=dtype)
shape = constant_op.constant([2, 2], dtype=dtypes.int32)
input_ = array_ops.zeros(shape, dtype=dtype)
outputs = self.scatter_nd(indices, updates, shape, input_)
grad_vals = constant_op.constant([[3, 4], [1, 2]], dtype=dtype)
updates_grad, input_grad = gradients_impl.gradients(
[outputs], [updates, input_], [grad_vals])
expected_updates_grad = np.array([[1, 2], [3, 4]],
dtype=dtype.as_numpy_dtype())
expected_input_grad = np.array([[3, 4], [1, 2]],
dtype=dtype.as_numpy_dtype())
with self.cached_session():
self.assertAllEqual(expected_updates_grad, self.evaluate(updates_grad))
if self.non_aliasing_add_test:
self.assertAllEqual(expected_input_grad, self.evaluate(input_grad))
def testGradientsRank3SliceUpdate(self):
for dtype in GRADIENT_TESTS_DTYPES:
indices = constant_op.constant([[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
dtype=dtypes.int32)
updates = constant_op.constant([[[5, 7], [2, 4]], [[1, 3], [6, 8]]],
dtype=dtype)
shape = constant_op.constant([2, 2, 2], dtype=dtypes.int32)
input_ = array_ops.zeros(shape, dtype=dtype)
outputs = self.scatter_nd(indices, updates, shape, input_)
grad_vals = constant_op.constant([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
dtype=dtype)
updates_grad, input_grad = gradients_impl.gradients(
[outputs], [updates, input_], [grad_vals])
expected_updates_grad = np.array([[[3, 4], [5, 6]], [[1, 2], [7, 8]]],
dtype=dtype.as_numpy_dtype())
expected_input_grad = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
dtype=dtype.as_numpy_dtype())
with self.cached_session():
self.assertAllEqual(expected_updates_grad, self.evaluate(updates_grad))
if self.non_aliasing_add_test:
self.assertAllEqual(expected_input_grad, self.evaluate(input_grad))
def testGradientsRank7SliceUpdate(self):
for dtype in GRADIENT_TESTS_DTYPES:
indices = constant_op.constant(
[[[[[[[0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 0]]]],
[[[[0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 1]]]]]]],
dtype=dtypes.int32)
updates = constant_op.constant(
[[[[[[[5, 6], [2, 4]]]], [[[[1, 3], [6, 8]]]]]]], dtype=dtype)
shape = constant_op.constant([1, 1, 2, 1, 1, 2, 2], dtype=dtypes.int32)
input_ = array_ops.zeros(shape, dtype=dtype)
outputs = self.scatter_nd(indices, updates, shape, input_)
grad_vals = constant_op.constant(
[[[[[[[1, 2], [3, 4]]]], [[[[5, 6], [7, 8]]]]]]], dtype=dtype)
updates_grad, input_grad = gradients_impl.gradients(
[outputs], [updates, input_], [grad_vals])
expected_updates_grad = np.array(
[[[[[[[3, 4], [5, 6]]]], [[[[1, 2], [7, 8]]]]]]],
dtype=dtype.as_numpy_dtype())
expected_input_grad = np.array(
[[[[[[[1, 2], [3, 4]]]], [[[[5, 6], [7, 8]]]]]]],
dtype=dtype.as_numpy_dtype())
with self.cached_session():
self.assertAllEqual(expected_updates_grad, self.evaluate(updates_grad))
if self.non_aliasing_add_test:
self.assertAllEqual(expected_input_grad, self.evaluate(input_grad))
  def testScatterNdRepeatedIndicesAdd(self):
indices = array_ops.zeros([100000, 1], dtypes.int32)
values = np.random.randn(100000)
shape = [1]
with self.cached_session():
val = self.scatter_nd(indices, values, shape).eval()
self.assertAllClose([np.sum(values)], val)
def testSmokeScatterNdBatch2DSliceDim2(self):
with self.cached_session():
indices = array_ops.zeros([3, 5, 2], dtype=dtypes.int32)
values = array_ops.zeros([3, 5, 7])
shape = [4, 6, 7]
self.scatter_nd(indices, values, shape).eval()
def testSmokeScatterNdBatch1DSliceDim2(self):
with self.cached_session():
indices = array_ops.zeros([0, 2], dtype=dtypes.int32)
values = array_ops.zeros([0, 7])
shape = [4, 6, 7]
self.scatter_nd(indices, values, shape).eval()
def testSmokeScatterNdBatch1DSliceDim3ShapeRank7(self):
with self.cached_session():
indices = array_ops.zeros([1, 3], dtype=dtypes.int32)
values = array_ops.zeros([1, 6, 7, 8, 9])
shape = [3, 4, 5, 6, 7, 8, 9]
self.scatter_nd(indices, values, shape).eval()
def testSmokeScatterNdBatch2DSliceDim3ShapeRank7(self):
with self.cached_session():
indices = array_ops.zeros([1, 2, 3], dtype=dtypes.int32)
values = array_ops.zeros([1, 2, 6, 7, 8, 9])
shape = [3, 4, 5, 6, 7, 8, 9]
self.scatter_nd(indices, values, shape).eval()
class ScatterNdNonAliasingAddTest(ScatterNdTest):
non_aliasing_add_test = True
def scatter_nd(self, indices, updates, shape, input_=None):
input_ = (input_ if input_ is not None else array_ops.zeros(
shape, dtype=updates.dtype))
return array_ops.scatter_nd_non_aliasing_add(input_, indices, updates)
def testString(self):
# Not supported yet.
pass
if __name__ == "__main__":
test.main()
| hehongliang/tensorflow | tensorflow/python/kernel_tests/scatter_nd_ops_test.py | Python | apache-2.0 | 28,025 |
from luigi_bigquery import ResultProxy
import os
import shutil
import tempfile
class MockClient(object):
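    """In-memory stand-in for the BigQuery client, backed by the dataset,
    table and job fixtures passed to the constructor."""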
def __init__(self, datasets, tables, jobs):
self._datasets = datasets
self._tables = tables
self._jobs = jobs
def create_dataset(self, dataset_id, friendly_name=None, description=None, access=None):
        dataset_data = self._dataset_resource(dataset_id, friendly_name, description, access)
self._datasets.append(dataset_data)
return dataset_data
def get_datasets(self):
return self._datasets
def check_dataset(self, dataset_id):
return dataset_id in [ds['datasetReference']['datasetId'] for ds in self.get_datasets()]
def get_table(self, dataset_id, table_id):
for table in self._tables:
ref = table['tableReference']
if ref['datasetId'] == dataset_id and ref['tableId'] == table_id:
return table
return {}
def delete_table(self, dataset_id, table_id):
pass
def check_job(self, job_id):
job = self._job(job_id)
return (job.get('job_complete', False), int(job.get('total_rows', 0)))
def get_query_schema(self, job_id):
job = self._job(job_id)
return job['schema']
def get_query_rows(self, job_id):
job = self._job(job_id)
return job['rows']
def query(self, query):
return (self._jobs[0]['job_id'], None)
def _job(self, job_id):
for job in self._jobs:
if job['job_id'] == job_id:
return job
return {}
def _dataset_resource(self, dataset_id, friendly_name=None, description=None, access=None):
data = {
"datasetReference": {
"datasetId": dataset_id,
"projectId": 'test-project-id'
}
}
if friendly_name:
data["friendlyName"] = friendly_name
if description:
data["description"] = description
if access:
data["access"] = access
return data
class MockGCSClient(object):
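    """In-memory stand-in for the GCS client; looks objects up by bucket name and path."""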
def __init__(self, objects):
self._objects = objects
def get_file(self, bucket_name, path):
for obj in self._objects:
if obj['bucket'] == bucket_name and obj['name'] == path:
return obj
return {}
def check_file(self, bucket_name, path):
file = self.get_file(bucket_name, path)
return bool(file)
class TestConfig(object):
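    """Bundles the mock BigQuery/GCS clients and a temporary working directory for tests."""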
def __init__(self, datasets=[], tables=[], jobs=[], objects=[]):
self.datasets = datasets
self.tables = tables
self.objects = objects
self._jobs = jobs
self.tmp_dir = None
def setUp(self):
if not self.tmp_dir:
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
if self.tmp_dir:
shutil.rmtree(self.tmp_dir)
self.tmp_dir = None
def get_tmp_path(self, filename):
return os.path.join(self.tmp_dir, filename)
def get_client(self):
return MockClient(datasets=self.datasets, tables=self.tables, jobs=self._jobs)
def get_gcs_client(self):
return MockGCSClient(objects=self.objects)
| kamatama41/luigi-bigquery | luigi_bigquery/tests/test_helper.py | Python | apache-2.0 | 3,217 |
# -*- coding: utf-8 -*-
'''
Wheel system wrapper for key system
'''
from __future__ import absolute_import
# Import python libs
import os
import hashlib
# Import salt libs
import salt.key
import salt.crypt
import salt.utils
__func_alias__ = {
'list_': 'list'
}
def list_(match):
'''
List all the keys under a named status
'''
skey = salt.key.Key(__opts__)
return skey.list_status(match)
def list_all():
'''
List all the keys
'''
skey = salt.key.Key(__opts__)
return skey.all_keys()
def accept(match, include_rejected=False, include_denied=False):
'''
Accept keys based on a glob match
'''
skey = salt.key.Key(__opts__)
return skey.accept(match, include_rejected=include_rejected, include_denied=include_denied)
def accept_dict(match):
'''
Accept keys based on a dict of keys
Example to move a list of keys from the `minions_pre` (pending) directory
to the `minions` (accepted) directory:
.. code-block:: python
{
'minions_pre': [
'jerry',
'stuart',
'bob',
],
}
'''
skey = salt.key.Key(__opts__)
return skey.accept(match_dict=match)
def delete(match):
'''
Delete keys based on a glob match
'''
skey = salt.key.Key(__opts__)
return skey.delete_key(match)
def delete_dict(match):
'''
Delete keys based on a dict of keys
'''
skey = salt.key.Key(__opts__)
return skey.delete_key(match_dict=match)
def reject(match, include_accepted=False, include_denied=False):
'''
Reject keys based on a glob match
'''
skey = salt.key.Key(__opts__)
return skey.reject(match, include_accepted=include_accepted, include_denied=include_denied)
def reject_dict(match):
'''
Reject keys based on a dict of keys
'''
skey = salt.key.Key(__opts__)
return skey.reject(match_dict=match)
def key_str(match):
'''
Return the key strings
'''
skey = salt.key.Key(__opts__)
return skey.key_str(match)
def finger(match):
'''
Return the matching key fingerprints
'''
skey = salt.key.Key(__opts__)
return skey.finger(match)
def gen(id_=None, keysize=2048):
'''
    Generate a key pair. No keys are stored on the master; the keypair is
    returned as a dict containing the pub and priv keys.
'''
if id_ is None:
id_ = hashlib.sha512(os.urandom(32)).hexdigest()
ret = {'priv': '',
'pub': ''}
priv = salt.crypt.gen_keys(__opts__['pki_dir'], id_, keysize)
pub = '{0}.pub'.format(priv[:priv.rindex('.')])
with salt.utils.fopen(priv) as fp_:
ret['priv'] = fp_.read()
with salt.utils.fopen(pub) as fp_:
ret['pub'] = fp_.read()
os.remove(priv)
os.remove(pub)
return ret
def gen_accept(id_, keysize=2048, force=False):
'''
    Generate a key pair, then accept the public key. This function returns the
    key pair in a dict; only the public key is preserved on the master.
'''
ret = gen(id_, keysize)
acc_path = os.path.join(__opts__['pki_dir'], 'minions', id_)
if os.path.isfile(acc_path) and not force:
return {}
with salt.utils.fopen(acc_path, 'w+') as fp_:
fp_.write(ret['pub'])
return ret
| stephane-martin/salt-debian-packaging | salt-2016.3.3/salt/wheel/key.py | Python | apache-2.0 | 3,278 |
# -*- coding: utf-8 -*-
import os
import time
import json
import Image, StringIO
import allura
from mock import patch
from nose.tools import assert_true, assert_false, assert_equal, assert_in
from formencode.variabledecode import variable_encode
from alluratest.controller import TestController
from allura import model as M
from forgewiki import model as wm
from forgetracker import model as tm
from allura.lib.security import has_access
from allura.lib import helpers as h
from allura.tests import decorators as td
from ming.orm.ormsession import ThreadLocalORMSession
class TrackerTestController(TestController):
def setUp(self):
super(TrackerTestController, self).setUp()
self.setup_with_tools()
@td.with_tracker
def setup_with_tools(self):
pass
def new_ticket(self, mount_point='/bugs/', extra_environ=None, **kw):
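        """Fill in and submit the new-ticket form; returns the (redirect) response."""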
extra_environ = extra_environ or {}
response = self.app.get(mount_point + 'new/',
extra_environ=extra_environ)
form = response.forms[1]
for k, v in kw.iteritems():
form['ticket_form.%s' % k] = v
resp = form.submit()
assert resp.status_int != 200, resp
return resp
class TestMilestones(TrackerTestController):
def test_milestone_list(self):
r = self.app.get('/bugs/milestones')
assert '1.0' in r, r.showbrowser()
def test_milestone_list_progress(self):
self.new_ticket(summary='foo', _milestone='1.0')
self.new_ticket(summary='bar', _milestone='1.0', status='closed')
r = self.app.get('/bugs/milestones')
assert '1 / 2' in r, r.showbrowser()
def test_default_milestone_created_if_missing(self):
p = M.Project.query.get(shortname='test')
app = p.app_instance('bugs')
app.globals.custom_fields = []
ThreadLocalORMSession.flush_all()
d = {
'field_name':'_milestone',
'milestones-0.old_name':'',
'milestones-0.new_name':'1.0',
'milestones-0.description':'Version 1',
'milestones-0.complete':'Open',
'milestones-0.due_date':''
}
r = self.app.post('/bugs/update_milestones', d)
r = self.app.get('/bugs/milestones')
assert 'Version 1' in r
# make sure _milestone doesn't get created again if it already exists
r = self.app.post('/bugs/update_milestones', d)
p = M.Project.query.get(shortname='test')
app = p.app_instance('bugs')
assert len(app.globals.custom_fields) == 1, len(app.globals.custom_fields)
def post_install_create_ticket_permission(app):
"""Set to authenticated permission to create tickets but not update"""
role = M.ProjectRole.by_name('*authenticated')._id
create_permission = M.ACE.allow(role, 'create')
update_permission = M.ACE.allow(role, 'update')
acl = app.config.acl
acl.append(create_permission)
if update_permission in acl:
acl.remove(update_permission)
def post_install_update_ticket_permission(app):
"""Set to anonymous permission to create and update tickets"""
role = M.ProjectRole.by_name('*anonymous')._id
app.config.acl.append(M.ACE.allow(role, 'create'))
app.config.acl.append(M.ACE.allow(role, 'update'))
class TestFunctionalController(TrackerTestController):
def test_bad_ticket_number(self):
self.app.get('/bugs/input.project_user_select', status=404)
def test_invalid_ticket(self):
self.app.get('/bugs/2/', status=404)
@patch('forgetracker.tracker_main.g.director.create_activity')
def test_activity(self, create_activity):
self.new_ticket(summary='my ticket', description='my description')
assert create_activity.call_count == 1
assert create_activity.call_args[0][1] == 'created'
create_activity.reset_mock()
self.app.post('/bugs/1/update_ticket',{
'summary':'my ticket',
'description':'new description',
})
# create_activity is called twice here:
# - once for the ticket modification
# - once for the auto-comment that's created for the ticket diff
assert create_activity.call_count == 2
assert create_activity.call_args[0][1] == 'modified'
def test_new_ticket(self):
summary = 'test new ticket'
ticket_view = self.new_ticket(summary=summary).follow()
assert_true(summary in ticket_view)
assert 'class="artifact_subscribe' in ticket_view
def test_new_with_milestone(self):
ticket_view = self.new_ticket(summary='test new with milestone', **{'_milestone':'1.0'}).follow()
assert 'Milestone' in ticket_view
assert '1.0' in ticket_view
def test_milestone_count(self):
self.new_ticket(summary='test new with milestone', **{'_milestone':'1.0'})
self.new_ticket(summary='test new with milestone', **{'_milestone':'1.0',
'private': '1'})
r = self.app.get('/bugs/')
assert '<small>2</small>' in r
# Private tickets shouldn't be included in counts if user doesn't
# have read access to private tickets.
r = self.app.get('/bugs/', extra_environ=dict(username='*anonymous'))
assert '<small>1</small>' in r
def test_milestone_progress(self):
self.new_ticket(summary='Ticket 1', **{'_milestone':'1.0'})
self.new_ticket(summary='Ticket 2', **{'_milestone':'1.0',
'status': 'closed',
'private': '1'}).follow()
r = self.app.get('/bugs/milestone/1.0/')
assert '1 / 2' in r
# Private tickets shouldn't be included in counts if user doesn't
# have read access to private tickets.
r = self.app.get('/bugs/milestone/1.0/',
extra_environ=dict(username='*anonymous'))
assert '0 / 1' in r
def test_new_ticket_form(self):
response = self.app.get('/bugs/new/')
form = response.forms[1]
form['ticket_form.summary'] = 'test new ticket form'
form['ticket_form.assigned_to'] = 'test_admin'
response = form.submit().follow()
assert 'Test Admin' in response
def test_private_ticket(self):
ticket_view = self.new_ticket(summary='Public Ticket').follow()
assert_true('<label class="simple">Private:</label> No' in ticket_view)
ticket_view = self.new_ticket(summary='Private Ticket',
private=True).follow()
assert_true('<label class="simple">Private:</label> Yes' in ticket_view)
M.MonQTask.run_ready()
# Creator sees private ticket on list page...
index_response = self.app.get('/p/test/bugs/')
assert '2 results' in index_response
assert 'Public Ticket' in index_response
assert 'Private Ticket' in index_response
# ...and in search results.
search_response = self.app.get('/p/test/bugs/search/?q=ticket')
assert '2 results' in search_response
assert 'Private Ticket' in search_response
# Unauthorized user doesn't see private ticket on list page...
env = dict(username='*anonymous')
r = self.app.get('/p/test/bugs/', extra_environ=env)
assert '1 results' in r
assert 'Private Ticket' not in r
# ...or in search results...
r = self.app.get('/p/test/bugs/search/?q=ticket', extra_environ=env)
assert '1 results' in r
assert 'Private Ticket' not in r
# ... or in search feed...
r = self.app.get('/p/test/bugs/search_feed?q=ticket', extra_environ=env)
assert 'Private Ticket' not in r
# ...and can't get to the private ticket directly.
r = self.app.get(ticket_view.request.url, extra_environ=env)
assert 'Private Ticket' not in r
# ... and it doesn't appear in the feed
r = self.app.get('/p/test/bugs/feed.atom')
assert 'Private Ticket' not in r
# ... or in the API ...
r = self.app.get('/rest/p/test/bugs/2/')
assert 'Private Ticket' not in r
assert '/auth/?return_to' in r.headers['Location']
r = self.app.get('/rest/p/test/bugs/')
assert 'Private Ticket' not in r
@td.with_tool('test', 'Tickets', 'doc-bugs')
def test_two_trackers(self):
summary = 'test two trackers'
ticket_view = self.new_ticket('/doc-bugs/', summary=summary, _milestone='1.0').follow()
ThreadLocalORMSession.flush_all()
M.MonQTask.run_ready()
ThreadLocalORMSession.flush_all()
assert_true(summary in ticket_view)
index_view = self.app.get('/doc-bugs/')
assert_true(summary in index_view)
assert_true(sidebar_contains(index_view, '<span class="has_small">1.0</span><small>1</small>'))
index_view = self.app.get('/bugs/')
assert_false(sidebar_contains(index_view, '<span class="has_small">1.0</span><small>1</small>'))
assert_false(summary in index_view)
def test_render_ticket(self):
summary = 'test render ticket'
ticket_view = self.new_ticket(summary=summary).follow()
ticket_view.mustcontain(summary, 'Discussion')
def test_render_index(self):
admin = M.User.query.get(username='test-admin')
anon = M.User.query.get(username="*anonymous")
for app in M.AppConfig.query.find({'options.mount_point': 'bugs'}):
assert has_access(app, 'create', admin)
assert not has_access(app, 'create', anon)
index_view = self.app.get('/bugs/')
assert 'No open tickets found.' in index_view
assert 'Create Ticket' in index_view
# No 'Create Ticket' button for user without 'create' perm
r = self.app.get('/bugs/', extra_environ=dict(username='*anonymous'))
assert 'Create Ticket' not in r
def test_render_markdown_syntax(self):
r = self.app.get('/bugs/markdown_syntax')
assert_true('Markdown Syntax' in r)
def test_ticket_diffs(self):
self.new_ticket(summary='difftest', description='1\n2\n3\n')
self.app.post('/bugs/1/update_ticket',{
'summary':'difftest',
'description':'1\n3\n4\n',
})
r = self.app.get('/bugs/1/')
assert '<span class="gd">-2</span>' in r, r.showbrowser()
assert '<span class="gi">+4</span>' in r, r.showbrowser()
def test_ticket_label_unlabel(self):
summary = 'test labeling and unlabeling a ticket'
self.new_ticket(summary=summary)
self.app.post('/bugs/1/update_ticket',{
'summary':'aaa',
'description':'bbb',
'status':'ccc',
'_milestone':'',
'assigned_to':'',
'labels':u'yellow,greén'.encode('utf-8'),
'labels_old':u'yellow,greén'.encode('utf-8'),
'comment': ''
})
response = self.app.get('/bugs/1/')
assert_true('yellow' in response)
assert_true(u'greén' in response)
self.app.post('/bugs/1/update_ticket',{
'summary':'zzz',
'description':'bbb',
'status':'ccc',
'_milestone':'',
'assigned_to':'',
'labels':'yellow',
'labels_old':'yellow',
'comment': ''
})
response = self.app.get('/bugs/1/')
assert_true('yellow' in response)
# the following assert is no longer true since "green" is shown in changelog
# assert_true('green' not in response)
def test_new_attachment(self):
file_name = 'test_root.py'
file_data = file(__file__).read()
upload = ('attachment', file_name, file_data)
self.new_ticket(summary='test new attachment')
ticket_editor = self.app.post('/bugs/1/update_ticket',{
'summary':'zzz'
}, upload_files=[upload]).follow()
assert_true(file_name in ticket_editor)
def test_delete_attachment(self):
file_name = 'test_root.py'
file_data = file(__file__).read()
upload = ('attachment', file_name, file_data)
self.new_ticket(summary='test new attachment')
ticket_editor = self.app.post('/bugs/1/update_ticket',{
'summary':'zzz'
}, upload_files=[upload]).follow()
assert file_name in ticket_editor, ticket_editor.showbrowser()
req = self.app.get('/bugs/1/')
file_link = req.html.findAll('form')[1].findAll('a')[7]
assert_equal(file_link.string, file_name)
self.app.post(str(file_link['href']),{
'delete':'True'
})
deleted_form = self.app.get('/bugs/1/')
assert file_name not in deleted_form
def test_new_text_attachment_content(self):
file_name = 'test_root.py'
file_data = file(__file__).read()
upload = ('attachment', file_name, file_data)
self.new_ticket(summary='test new attachment')
ticket_editor = self.app.post('/bugs/1/update_ticket',{
'summary':'zzz'
}, upload_files=[upload]).follow()
download = self.app.get(str(ticket_editor.html.findAll('form')[1].findAll('a')[7]['href']))
assert_equal(download.body, file_data)
def test_new_image_attachment_content(self):
h.set_context('test', 'bugs', neighborhood='Projects')
file_name = 'neo-icon-set-454545-256x350.png'
file_path = os.path.join(allura.__path__[0],'nf','allura','images',file_name)
file_data = file(file_path).read()
upload = ('attachment', file_name, file_data)
self.new_ticket(summary='test new attachment')
ticket_editor = self.app.post('/bugs/1/update_ticket',{
'summary':'zzz'
}, upload_files=[upload]).follow()
ticket = tm.Ticket.query.find({'ticket_num':1}).first()
filename = ticket.attachments.first().filename
uploaded = Image.open(file_path)
r = self.app.get('/bugs/1/attachment/'+filename)
downloaded = Image.open(StringIO.StringIO(r.body))
assert uploaded.size == downloaded.size
r = self.app.get('/bugs/1/attachment/'+filename+'/thumb')
thumbnail = Image.open(StringIO.StringIO(r.body))
assert thumbnail.size == (100,100)
def test_sidebar_static_page(self):
admin = M.User.query.get(username='test-admin')
for app in M.AppConfig.query.find({'options.mount_point': 'bugs'}):
assert has_access(app, 'create', admin)
response = self.app.get('/bugs/search/')
assert 'Create Ticket' in response
assert 'Related Pages' not in response
def test_related_artifacts(self):
summary = 'test sidebar logic for a ticket page'
self.new_ticket(summary=summary)
response = self.app.get('/p/test/bugs/1/')
assert 'Related Pages' not in response
self.app.post('/wiki/aaa/update', params={
'title':'aaa',
'text':'',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'})
self.new_ticket(summary='bbb')
ThreadLocalORMSession.flush_all()
M.MonQTask.run_ready()
ThreadLocalORMSession.flush_all()
h.set_context('test', 'wiki', neighborhood='Projects')
a = wm.Page.query.find(dict(title='aaa')).first()
a.text = '\n[bugs:#1]\n'
ThreadLocalORMSession.flush_all()
M.MonQTask.run_ready()
ThreadLocalORMSession.flush_all()
b = tm.Ticket.query.find(dict(ticket_num=2)).first()
b.description = '\n[#1]\n'
ThreadLocalORMSession.flush_all()
M.MonQTask.run_ready()
ThreadLocalORMSession.flush_all()
response = self.app.get('/p/test/bugs/1/')
assert 'Related' in response
assert 'Wiki: aaa' in response
assert 'Ticket: #2' in response
def test_ticket_view_editable(self):
summary = 'test ticket view page can be edited'
self.new_ticket(summary=summary)
response = self.app.get('/p/test/bugs/1/')
assert response.html.find('input', {'name': 'ticket_form.summary'})
assert response.html.find('input', {'name': 'ticket_form.assigned_to'})
assert response.html.find('textarea', {'name': 'ticket_form.description'})
assert response.html.find('select', {'name': 'ticket_form.status'})
assert response.html.find('select', {'name': 'ticket_form._milestone'})
assert response.html.find('input', {'name': 'ticket_form.labels'})
assert response.html.find('textarea', {'name': 'ticket_form.comment'})
def test_assigned_to_nobody(self):
summary = 'test default assignment'
self.new_ticket(summary=summary)
response = self.app.get('/p/test/bugs/1/')
assert 'nobody' in str(response.html.find('div', {'class': 'grid-5 ticket-assigned-to'}))
def test_assign_ticket(self):
summary = 'test assign ticket'
self.new_ticket(summary=summary)
response = self.app.get('/p/test/bugs/1/')
assert 'nobody' in str(response.html.find('div', {'class': 'grid-5 ticket-assigned-to'}))
response = self.app.post('/bugs/1/update_ticket',{
'summary':'zzz',
'description':'bbb',
'status':'ccc',
'_milestone':'',
'assigned_to':'test-admin',
'labels':'',
'labels_old':'',
'comment': ''
}).follow()
assert 'test-admin' in str(response.html.find('div', {'class': 'grid-5 ticket-assigned-to'}))
assert '<li><strong>summary</strong>: test assign ticket --> zzz' in response
assert '<li><strong>status</strong>: open --> ccc' in response
def test_custom_fields(self):
params = dict(
custom_fields=[
dict(name='_priority', label='Priority', type='select',
options='normal urgent critical'),
dict(name='_category', label='Category', type='string',
options=''),
dict(name='_code_review', label='Code Review', type='user')],
open_status_names='aa bb',
closed_status_names='cc',
)
self.app.post(
'/admin/bugs/set_custom_fields',
params=variable_encode(params))
kw = {'custom_fields._priority':'normal',
'custom_fields._category':'helloworld',
'custom_fields._code_review':'test-admin'}
ticket_view = self.new_ticket(summary='test custom fields', **kw).follow()
assert 'Priority:' in ticket_view
assert 'normal' in ticket_view
assert 'Test Admin' in ticket_view
def test_custom_field_update_comments(self):
params = dict(
custom_fields=[
dict(label='Number', type='number', options='')],
open_status_names='aa bb',
closed_status_names='cc',
)
r = self.app.post('/admin/bugs/set_custom_fields',
params=variable_encode(params))
kw = {'custom_fields._number':''}
ticket_view = self.new_ticket(summary='test custom fields', **kw).follow()
assert '<strong>number</strong>: -->' not in ticket_view
ticket_view = self.app.post('/bugs/1/update_ticket',params={
'summary':'zzz',
'description':'bbb',
'status':'ccc',
'_milestone':'aaa',
'assigned_to':'',
'labels':'',
'labels_old':'',
'custom_fields._number':'',
'comment': ''
}).follow()
assert '<strong>number</strong>: -->' not in ticket_view
ticket_view = self.app.post('/bugs/1/update_ticket',params={
'summary':'zzz',
'description':'bbb',
'status':'ccc',
'_milestone':'aaa',
'assigned_to':'',
'labels':'',
'labels_old':'',
'custom_fields._number':'4',
'comment': ''
}).follow()
assert '<strong>number</strong>: -->' in ticket_view
def test_milestone_names(self):
params = {
'open_status_names': 'aa bb',
'closed_status_names': 'cc',
'custom_fields': [dict(
label='Milestone',
show_in_search='on',
type='milestone',
milestones=[
dict(name='aaaé'),
dict(name='bbb'),
dict(name='ccc')])] }
self.app.post('/admin/bugs/set_custom_fields',
variable_encode(params),
status=302)
self.new_ticket(summary='test milestone names')
self.app.post('/bugs/1/update_ticket',{
'summary':'zzz',
'description':'bbb',
'status':'ccc',
'_milestone':'aaaé',
'assigned_to':'',
'labels':'',
'labels_old':'',
'comment': ''
})
ticket_view = self.app.get('/p/test/bugs/1/')
assert 'Milestone' in ticket_view
assert 'aaaé' in ticket_view
def test_milestone_rename(self):
self.new_ticket(summary='test milestone rename')
self.app.post('/bugs/1/update_ticket',{
'summary':'test milestone rename',
'description':'',
'status':'',
'_milestone':'1.0',
'assigned_to':'',
'labels':'',
'labels_old':'',
'comment': ''
})
ThreadLocalORMSession.flush_all()
M.MonQTask.run_ready()
ThreadLocalORMSession.flush_all()
ticket_view = self.app.get('/p/test/bugs/1/')
assert 'Milestone' in ticket_view
assert '1.0' in ticket_view
assert 'zzzé' not in ticket_view
r = self.app.post('/bugs/update_milestones',{
'field_name':'_milestone',
'milestones-0.old_name':'1.0',
'milestones-0.new_name':'zzzé',
'milestones-0.description':'',
'milestones-0.complete':'Open',
'milestones-0.due_date':''
})
ticket_view = self.app.get('/p/test/bugs/1/')
assert '1.0' not in ticket_view
assert 'zzzé' in ticket_view
def test_milestone_close(self):
self.new_ticket(summary='test milestone close')
r = self.app.get('/bugs/milestones')
assert 'view closed' not in r
r = self.app.post('/bugs/update_milestones',{
'field_name':'_milestone',
'milestones-0.old_name':'1.0',
'milestones-0.new_name':'1.0',
'milestones-0.description':'',
'milestones-0.complete':'Closed',
'milestones-0.due_date':''
})
r = self.app.get('/bugs/milestones')
assert 'view closed' in r
def test_edit_all_button(self):
admin = M.User.query.get(username='test-admin')
for app in M.AppConfig.query.find({'options.mount_point': 'bugs'}):
assert has_access(app, 'update', admin)
response = self.app.get('/p/test/bugs/search/')
assert 'Edit All' not in response
def test_new_ticket_validation(self):
summary = 'ticket summary'
response = self.app.get('/bugs/new/')
assert not response.html.find('div', {'class':'error'})
form = response.forms[1]
form['ticket_form.labels'] = 'foo'
# try submitting with no summary set and check for error message
error_form = form.submit()
assert error_form.forms[1]['ticket_form.labels'].value == 'foo'
error_message = error_form.html.find('div', {'class':'error'})
assert error_message
assert (error_message.string == 'You must provide a Title' or \
error_message.string == 'Missing value')
assert error_message.findPreviousSibling('input').get('name') == 'ticket_form.summary'
# set a summary, submit, and check for success
error_form.forms[1]['ticket_form.summary'] = summary
success = error_form.forms[1].submit().follow().html
assert success.findAll('form')[1].get('action') == '/p/test/bugs/1/update_ticket_from_widget'
assert success.find('input', {'name':'ticket_form.summary'})['value'] == summary
def test_edit_ticket_validation(self):
old_summary = 'edit ticket test'
new_summary = "new summary"
self.new_ticket(summary=old_summary)
response = self.app.get('/bugs/1/')
# check that existing form is valid
assert response.html.find('input', {'name':'ticket_form.summary'})['value'] == old_summary
assert not response.html.find('div', {'class':'error'})
form = response.forms[1]
# try submitting with no summary set and check for error message
form['ticket_form.summary'] = ""
error_form = form.submit()
error_message = error_form.html.find('div', {'class':'error'})
assert error_message
assert error_message.string == 'You must provide a Title'
assert error_message.findPreviousSibling('input').get('name') == 'ticket_form.summary'
# set a summary, submit, and check for success
error_form.forms[1]['ticket_form.summary'] = new_summary
r = error_form.forms[1].submit()
assert r.status_int == 302, r.showbrowser()
success = r.follow().html
assert success.findAll('form')[1].get('action') == '/p/test/bugs/1/update_ticket_from_widget'
assert success.find('input', {'name':'ticket_form.summary'})['value'] == new_summary
def test_home(self):
self.new_ticket(summary='test first ticket')
self.new_ticket(summary='test second ticket')
self.new_ticket(summary='test third ticket')
ThreadLocalORMSession.flush_all()
M.MonQTask.run_ready()
ThreadLocalORMSession.flush_all()
response = self.app.get('/p/test/bugs/')
assert 'test third ticket' in response
def test_search(self):
self.new_ticket(summary='test first ticket')
self.new_ticket(summary='test second ticket')
self.new_ticket(summary='test third ticket')
ThreadLocalORMSession.flush_all()
M.MonQTask.run_ready()
ThreadLocalORMSession.flush_all()
response = self.app.get('/p/test/bugs/search/?q=test')
assert '3 results' in response, response.showbrowser()
assert 'test third ticket' in response, response.showbrowser()
def test_search_feed(self):
self.new_ticket(summary='test first ticket')
ThreadLocalORMSession.flush_all()
M.MonQTask.run_ready()
ThreadLocalORMSession.flush_all()
response = self.app.get('/p/test/bugs/search_feed?q=test')
assert '<title>test first ticket</title>' in response
response = self.app.get('/p/test/bugs/search_feed.atom?q=test')
assert '<title>test first ticket</title>' in response
def test_touch(self):
self.new_ticket(summary='test touch')
h.set_context('test', 'bugs', neighborhood='Projects')
ticket = tm.Ticket.query.get(ticket_num=1)
old_date = ticket.mod_date
ticket.summary = 'changing the summary'
time.sleep(1)
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
ticket = tm.Ticket.query.get(ticket_num=1)
new_date = ticket.mod_date
assert new_date > old_date
@patch('forgetracker.tracker_main.search_artifact')
def test_save_invalid_search(self, search_artifact):
err = 'Error running search query: [Reason: undefined field label]'
search_artifact.side_effect = ValueError(err)
r = self.app.post('/admin/bugs/bins/save_bin',{
'summary': 'This is not too long.',
'terms': 'label:foo',
'old_summary': '',
'sort': ''})
assert err in r
r = self.app.get('/admin/bugs/bins/')
edit_form = r.form
edit_form['bins-2.summary'] = 'Original'
edit_form['bins-2.terms'] = 'label:foo'
r = edit_form.submit()
assert err in r
def test_saved_search_labels_truncated(self):
r = self.app.post('/admin/bugs/bins/save_bin',{
'summary': 'This is not too long.',
'terms': 'aaa',
'old_summary': '',
'sort': ''}).follow()
r = self.app.get('/bugs/')
assert sidebar_contains(r, 'This is not too long.')
r = self.app.post('/admin/bugs/bins/save_bin',{
'summary': 'This will be truncated because it is too long to show in the sidebar without being ridiculous.',
'terms': 'aaa',
'old_summary': '',
'sort': ''}).follow()
r = self.app.get('/bugs/')
assert sidebar_contains(r, 'This will be truncated because it is too long to show in the sidebar ...')
def test_edit_saved_search(self):
r = self.app.get('/admin/bugs/bins/')
edit_form = r.form
edit_form['bins-2.summary'] = 'Original'
edit_form['bins-2.terms'] = 'aaa'
edit_form.submit()
r = self.app.get('/bugs/')
assert sidebar_contains(r, 'Original')
assert not sidebar_contains(r, 'New')
r = self.app.get('/admin/bugs/bins/')
edit_form = r.form
edit_form['bins-2.summary'] = 'New'
edit_form.submit()
r = self.app.get('/bugs/')
assert sidebar_contains(r, 'New')
assert not sidebar_contains(r, 'Original')
def test_discussion_paging(self):
summary = 'test discussion paging'
ticket_view = self.new_ticket(summary=summary).follow()
for f in ticket_view.html.findAll('form'):
if f.get('action', '').endswith('/post'):
break
post_content = 'ticket discussion post content'
params = dict()
inputs = f.findAll('input')
for field in inputs:
if field.has_key('name'):
params[field['name']] = field.has_key('value') and field['value'] or ''
params[f.find('textarea')['name']] = post_content
r = self.app.post(f['action'].encode('utf-8'), params=params,
headers={'Referer': '/bugs/1/'.encode("utf-8")})
r = self.app.get('/bugs/1/', dict(page=-1))
assert_true(summary in r)
r = self.app.get('/bugs/1/', dict(page=1))
assert_true(post_content in r)
# no pager if just one page
assert_false('Page 1 of 1' in r)
# add some more posts and check for pager
for i in range(2):
r = self.app.post(f['action'].encode('utf-8'), params=params,
headers={'Referer': '/bugs/1/'.encode("utf-8")})
r = self.app.get('/bugs/1/', dict(page=1, limit=2))
assert_true('Page 2 of 2' in r)
def test_bulk_edit_index(self):
self.new_ticket(summary='test first ticket', status='open')
self.new_ticket(summary='test second ticket', status='accepted')
self.new_ticket(summary='test third ticket', status='closed')
ThreadLocalORMSession.flush_all()
M.MonQTask.run_ready()
ThreadLocalORMSession.flush_all()
response = self.app.get('/p/test/bugs/?sort=summary+asc')
ticket_rows = response.html.find('table', {'class':'ticket-list'}).find('tbody')
assert_in('test first ticket', str(ticket_rows))
assert_in('test second ticket', str(ticket_rows))
edit_link = response.html.find('a',{'title':'Bulk Edit'})
expected_link = "/p/test/bugs/edit/?q=%21status%3Awont-fix+%26%26+%21status%3Aclosed&sort=snippet_s+asc&limit=25&page=0"
assert_equal(expected_link, edit_link['href'])
response = self.app.get(edit_link['href'])
ticket_rows = response.html.find('tbody', {'class':'ticket-list'})
assert_in('test first ticket', str(ticket_rows))
assert_in('test second ticket', str(ticket_rows))
def test_bulk_edit_milestone(self):
self.new_ticket(summary='test first ticket', status='open', _milestone='1.0')
self.new_ticket(summary='test second ticket', status='accepted', _milestone='1.0')
self.new_ticket(summary='test third ticket', status='closed', _milestone='1.0')
ThreadLocalORMSession.flush_all()
M.MonQTask.run_ready()
ThreadLocalORMSession.flush_all()
response = self.app.get('/p/test/bugs/milestone/1.0/?sort=ticket_num+asc')
ticket_rows = response.html.find('table', {'class':'ticket-list'}).find('tbody')
assert_in('test first ticket', str(ticket_rows))
assert_in('test second ticket', str(ticket_rows))
assert_in('test third ticket', str(ticket_rows))
edit_link = response.html.find('a',{'title':'Bulk Edit'})
expected_link = "/p/test/bugs/edit/?q=_milestone%3A1.0&sort=ticket_num_i+asc&limit=25&page=0"
assert_equal(expected_link, edit_link['href'])
response = self.app.get(edit_link['href'])
ticket_rows = response.html.find('tbody', {'class':'ticket-list'})
assert_in('test first ticket', str(ticket_rows))
assert_in('test second ticket', str(ticket_rows))
assert_in('test third ticket', str(ticket_rows))
def test_bulk_edit_search(self):
self.new_ticket(summary='test first ticket', status='open')
self.new_ticket(summary='test second ticket', status='open')
self.new_ticket(summary='test third ticket', status='closed', _milestone='1.0')
ThreadLocalORMSession.flush_all()
M.MonQTask.run_ready()
ThreadLocalORMSession.flush_all()
response = self.app.get('/p/test/bugs/search/?q=status%3Aopen')
ticket_rows = response.html.find('table', {'class':'ticket-list'}).find('tbody')
assert_in('test first ticket', str(ticket_rows))
assert_in('test second ticket', str(ticket_rows))
assert_false('test third ticket' in str(ticket_rows))
edit_link = response.html.find('a',{'title':'Bulk Edit'})
expected_link = "/p/test/bugs/edit/?q=status%3Aopen&limit=25&page=0"
assert_equal(expected_link, edit_link['href'])
response = self.app.get(edit_link['href'])
ticket_rows = response.html.find('tbody', {'class':'ticket-list'})
assert_in('test first ticket', str(ticket_rows))
assert_in('test second ticket', str(ticket_rows))
assert_false('test third ticket' in str(ticket_rows))
def test_vote(self):
r = self.new_ticket(summary='test vote').follow()
assert_false(r.html.find('div', {'id': 'vote'}))
# enable voting
self.app.post('/admin/bugs/set_options',
params={'EnableVoting': 'true'})
r = self.app.get('/bugs/1/')
votes_up = r.html.find('span', {'class': 'votes-up'})
votes_down = r.html.find('span', {'class': 'votes-down'})
assert_in('0', str(votes_up))
assert_in('0', str(votes_down))
# invalid vote
r = self.app.post('/bugs/1/vote', dict(vote='invalid'))
expected_resp = json.dumps(
dict(status='error', votes_up=0, votes_down=0, votes_percent=0))
assert r.response.content == expected_resp
# vote up
r = self.app.post('/bugs/1/vote', dict(vote='u'))
expected_resp = json.dumps(
dict(status='ok', votes_up=1, votes_down=0, votes_percent=100))
assert r.response.content == expected_resp
# vote down by another user
r = self.app.post('/bugs/1/vote', dict(vote='d'),
extra_environ=dict(username='test-user-0'))
expected_resp = json.dumps(
dict(status='ok', votes_up=1, votes_down=1, votes_percent=50))
assert r.response.content == expected_resp
# make sure that on the page we see the same result
r = self.app.get('/bugs/1/')
votes_up = r.html.find('span', {'class': 'votes-up'})
votes_down = r.html.find('span', {'class': 'votes-down'})
assert_in('1', str(votes_up))
assert_in('1', str(votes_down))
r = self.app.get('/bugs/')
assert "Votes" in r
self.app.post(
'/admin/bugs/set_options',
params={'EnableVoting': 'false'})
r = self.app.get('/bugs/')
assert "Votes" not in r
@td.with_tool('test', 'Tickets', 'tracker',
post_install_hook=post_install_create_ticket_permission)
def test_create_permission(self):
"""Test that user with `create` permission can create ticket,
but can't edit it without `update` permission.
"""
response = self.app.get('/p/test/tracker/',
extra_environ=dict(username='test-user-0'))
assert 'Create Ticket' in response
response = self.new_ticket(summary='test create, not update',
mount_point='/tracker/',
extra_environ=dict(username='test-user-0'))
ticket_url = response.headers['Location']
response = self.app.get(ticket_url,
extra_environ=dict(username='test-user-0'))
assert not response.html.find('div',{'class': 'error'})
assert not response.html.find('a', {'class': 'edit_ticket'})
@td.with_tool('test', 'Tickets', 'tracker',
post_install_hook=post_install_update_ticket_permission)
def test_update_permission(self):
r = self.app.get('/p/test/tracker/',
extra_environ=dict(username='*anonymous'))
assert 'Create Ticket' in r
r = self.new_ticket(summary='test', mount_point='/tracker/',
extra_environ=dict(username='*anonymous'))
ticket_url = r.headers['Location']
r = self.app.get(ticket_url, extra_environ=dict(username='*anonymous'))
a = r.html.find('a', {'class': 'edit_ticket'})
assert a.text == 'Edit'
class TestMilestoneAdmin(TrackerTestController):
def _post(self, params, **kw):
params['open_status_names'] = 'aa bb'
params['closed_status_names'] = 'cc'
self.app.post('/admin/bugs/set_custom_fields',
params=variable_encode(params), **kw)
return self.app.get('/admin/bugs/fields')
def _post_milestones(self, milestones):
params = {'custom_fields': [
dict(label=mf['label'],
show_in_search='on',
type='milestone',
milestones=[
dict((k, v) for k, v in d.iteritems()) for d in mf['milestones']])
for mf in milestones]}
return self._post(params)
def test_create_milestone_field(self):
r = self._post_milestones([
dict(label='releases', milestones=[dict(name='1.0/beta')])
])
assert 'releases' in r
assert '1.0-beta' in r
def test_delete_milestone_field(self):
r = self._post_milestones([
dict(label='releases', milestones=[dict(name='1.0/beta')])
])
self.new_ticket(summary='test new milestone',
**{'custom_fields._releases':'1.0-beta'})
assert tm.Ticket.query.find({
'custom_fields._releases': '1.0-beta'}).count() == 1
r = self._post_milestones([])
assert 'Releases' not in r
assert '1.0-beta' not in r
assert tm.Ticket.query.find({
'custom_fields._releases': '1.0-beta'}).count() == 0
def test_rename_milestone_field(self):
r = self._post_milestones([
dict(label='releases', milestones=[dict(name='1.0/beta')])
])
self.new_ticket(summary='test new milestone',
**{'custom_fields._releases':'1.0-beta'})
r = self._post_milestones([
dict(label='versions', milestones=[dict(name='1.0/beta')])
])
assert 'Releases' not in r
assert 'versions' in r
assert '1.0-beta' in r
# TODO: This doesn't work - need to make milestone custom fields
# renameable.
#assert tm.Ticket.query.find({
# 'custom_fields._versions': '1.0-beta'}).count() == 1
def test_create_milestone(self):
r = self._post_milestones([
dict(label='releases', milestones=[dict(name='1.0/beta')])
])
r = self._post_milestones([
dict(label='releases', milestones=[dict(name='1.0/beta'),
dict(name='2.0')])
])
assert '1.0-beta' in r
assert '2.0' in r
def test_delete_milestone(self):
r = self._post_milestones([
dict(label='releases', milestones=[dict(name='1.0/beta')])
])
self.new_ticket(summary='test new milestone',
**{'custom_fields._releases':'1.0-beta'})
assert tm.Ticket.query.find({
'custom_fields._releases': '1.0-beta'}).count() == 1
r = self._post_milestones([
dict(label='releases', milestones=[])
])
assert 'releases' in r
assert '1.0-beta' not in r
assert tm.Ticket.query.find({
'custom_fields._releases': '1.0-beta'}).count() == 0
def test_rename_milestone(self):
r = self._post_milestones([
dict(label='releases', milestones=[dict(name='1.0')])
])
self.new_ticket(summary='test new milestone',
**{'custom_fields._releases':'1.0'})
r = self._post_milestones([
dict(label='releases', milestones=[
dict(name='1.1', old_name='1.0')])
])
        assert 'releases' in r
assert '1.0' not in r
assert '1.1' in r
assert tm.Ticket.query.find({
'custom_fields._releases': '1.0'}).count() == 0
assert tm.Ticket.query.find({
'custom_fields._releases': '1.1'}).count() == 1
def post_install_hook(app):
role_anon = M.ProjectRole.by_name('*anonymous')._id
app.config.acl.append(M.ACE.allow(role_anon, 'post'))
app.config.acl.append(M.ACE.allow(role_anon, 'create'))
app.config.acl.append(M.ACE.allow(role_anon, 'update'))
class TestEmailMonitoring(TrackerTestController):
def __init__(self):
super(TestEmailMonitoring, self).__init__()
self.test_email = 'mailinglist@example.com'
def _set_options(self, monitoring_type='AllTicketChanges'):
r = self.app.post('/admin/bugs/set_options', params={
'TicketMonitoringEmail': self.test_email,
'TicketMonitoringType': monitoring_type,
})
return r
def test_set_options(self):
r = self._set_options()
r = self.app.get('/admin/bugs/options')
email = r.html.findAll(attrs=dict(name='TicketMonitoringEmail'))
mtype = r.html.findAll('option', attrs=dict(value='AllTicketChanges'))
assert email[0]['value'] == self.test_email
assert mtype[0]['selected'] == 'selected'
@td.with_tool('test', 'Tickets', 'doc-bugs', post_install_hook=post_install_hook)
@patch('forgetracker.model.ticket.Notification.send_direct')
def test_notifications_moderators(self, send_direct):
self.new_ticket(summary='test moderation', mount_point='/doc-bugs/')
self.app.post('/doc-bugs/1/update_ticket',{
'summary':'test moderation',
'comment':'test unmoderated post'
}, extra_environ=dict(username='*anonymous'))
send_direct.assert_called_with(str(M.User.query.get(username='test-admin')._id))
@patch('forgetracker.model.ticket.Notification.send_simple')
def test_notifications_new(self, send_simple):
self._set_options('NewTicketsOnly')
self.new_ticket(summary='test')
self.app.post('/bugs/1/update_ticket',{
'summary':'test',
'description':'update',
})
send_simple.assert_called_once_with(self.test_email)
@patch('forgetracker.tracker_main.M.Notification.send_simple')
def test_notifications_all(self, send_simple):
self._set_options()
self.new_ticket(summary='test')
send_simple.assert_called_once_with(self.test_email)
send_simple.reset_mock()
response = self.app.post(
'/bugs/1/update_ticket',
{'summary': 'test',
'description': 'update'})
assert send_simple.call_count == 1, send_simple.call_count
send_simple.assert_called_with(self.test_email)
send_simple.reset_mock()
response = response.follow()
for f in response.html.findAll('form'):
# Dirty way to find comment form
if (('thread' in f['action']) and ('post' in f['action'])):
params = {i['name']: i.get('value', '')
for i in f.findAll('input')
if i.has_key('name')}
params[f.find('textarea')['name']] = 'foobar'
self.app.post(str(f['action']), params)
                break # Only post once, even if multiple forms are found
assert send_simple.call_count == 1, send_simple.call_count
send_simple.assert_called_with(self.test_email)
@patch('forgetracker.tracker_main.M.Notification.send_simple')
def test_notifications_off(self, send_simple):
"""Test that tracker notification email is not sent if notifications
are disabled at the project level.
"""
p = M.Project.query.get(shortname='test')
p.notifications_disabled = True
ThreadLocalORMSession.flush_all()
self._set_options()
self.new_ticket(summary='test')
assert send_simple.call_count == 0, send_simple.call_count
class TestCustomUserField(TrackerTestController):
def setUp(self):
super(TestCustomUserField, self).setUp()
params = dict(
custom_fields=[
dict(name='_code_review', label='Code Review', type='user',
show_in_search='on')],
open_status_names='aa bb',
closed_status_names='cc',
)
self.app.post(
'/admin/bugs/set_custom_fields',
params=variable_encode(params))
def test_blank_user(self):
kw = {'custom_fields._code_review': ''}
ticket_view = self.new_ticket(summary='test custom fields', **kw).follow()
# summary header shows 'nobody'
assert ticket_view.html.findAll('label', 'simple',
text='Code Review:')[1].parent.parent.text == 'Code Review:nobody'
# form input is blank
assert ticket_view.html.find('input',
dict(name='ticket_form.custom_fields._code_review'))['value'] == ''
def test_non_project_member(self):
""" Test that you can't put a non-project-member user in a custom
user field.
"""
kw = {'custom_fields._code_review': 'test-user-0'}
ticket_view = self.new_ticket(summary='test custom fields', **kw).follow()
# summary header shows 'nobody'
assert ticket_view.html.findAll('label', 'simple',
text='Code Review:')[1].parent.parent.text == 'Code Review:nobody'
# form input is blank
assert ticket_view.html.find('input',
dict(name='ticket_form.custom_fields._code_review'))['value'] == ''
def test_project_member(self):
kw = {'custom_fields._code_review': 'test-admin'}
ticket_view = self.new_ticket(summary='test custom fields', **kw).follow()
        # summary header shows the user's display name
assert ticket_view.html.findAll('label', 'simple',
text='Code Review:')[1].parent.parent.text == 'Code Review:Test Admin'
        # form input is set to the username
assert ticket_view.html.find('input',
dict(name='ticket_form.custom_fields._code_review'))['value'] == 'test-admin'
def test_change_user_field(self):
kw = {'custom_fields._code_review': ''}
r = self.new_ticket(summary='test custom fields', **kw).follow()
f = r.forms[1]
f['ticket_form.custom_fields._code_review'] = 'test-admin'
r = f.submit().follow()
assert '<li><strong>code_review</strong>: Test Admin' in r
def test_search_results(self):
kw = {'custom_fields._code_review': 'test-admin'}
self.new_ticket(summary='test custom fields', **kw)
r = self.app.get('/bugs/')
assert r.html.find('table', 'ticket-list').findAll('th')[5].text == 'Code Review'
assert r.html.find('table', 'ticket-list').tbody.tr.findAll('td')[5].text == 'Test Admin'
class TestHelpTextOptions(TrackerTestController):
def _set_options(self, new_txt='', search_txt=''):
r = self.app.post('/admin/bugs/set_options', params={
'TicketHelpNew': new_txt,
'TicketHelpSearch': search_txt,
})
return r
def test_help_text(self):
self._set_options(
new_txt='**foo**',
search_txt='*bar*')
r = self.app.get('/bugs/')
assert '<em>bar</em>' in r
r = self.app.get('/bugs/search', params=dict(q='test'))
assert '<em>bar</em>' in r
r = self.app.get('/bugs/milestone/1.0/')
assert '<em>bar</em>' in r
r = self.app.get('/bugs/new/')
assert '<strong>foo</strong>' in r
self._set_options()
r = self.app.get('/bugs/')
assert len(r.html.findAll(attrs=dict(id='search-ticket-help-msg'))) == 0
r = self.app.get('/bugs/search', params=dict(q='test'))
assert len(r.html.findAll(attrs=dict(id='search-ticket-help-msg'))) == 0
r = self.app.get('/bugs/milestone/1.0/')
assert len(r.html.findAll(attrs=dict(id='search-ticket-help-msg'))) == 0
r = self.app.get('/bugs/new/')
assert len(r.html.findAll(attrs=dict(id='new-ticket-help-msg'))) == 0
def sidebar_contains(response, text):
sidebar_menu = response.html.find('div', attrs={'id': 'sidebar'})
return text in str(sidebar_menu)
|
pombredanne/SourceForge-Allura
|
ForgeTracker/forgetracker/tests/functional/test_root.py
|
Python
|
apache-2.0
| 50,244
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for learn.estimators.tensor_signature."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
class TensorSignatureTest(tf.test.TestCase):
def testTensorSignatureCompatible(self):
placeholder_a = tf.placeholder(name='test',
shape=[None, 100],
dtype=tf.int32)
placeholder_b = tf.placeholder(name='another',
shape=[256, 100],
dtype=tf.int32)
placeholder_c = tf.placeholder(name='mismatch',
shape=[256, 100],
dtype=tf.float32)
placeholder_d = tf.placeholder(name='mismatch',
shape=[128, 100],
dtype=tf.int32)
signatures = tensor_signature.create_signatures(placeholder_a)
self.assertTrue(tensor_signature.tensors_compatible(placeholder_a,
signatures))
self.assertTrue(tensor_signature.tensors_compatible(placeholder_b,
signatures))
self.assertFalse(tensor_signature.tensors_compatible(placeholder_c,
signatures))
self.assertTrue(tensor_signature.tensors_compatible(placeholder_d,
signatures))
inputs = {'a': placeholder_a}
signatures = tensor_signature.create_signatures(inputs)
self.assertTrue(tensor_signature.tensors_compatible(inputs, signatures))
self.assertFalse(tensor_signature.tensors_compatible(placeholder_a,
signatures))
self.assertFalse(tensor_signature.tensors_compatible(placeholder_b,
signatures))
self.assertFalse(tensor_signature.tensors_compatible(
{'b': placeholder_b}, signatures))
self.assertTrue(tensor_signature.tensors_compatible(
{'a': placeholder_b,
'c': placeholder_c}, signatures))
self.assertFalse(tensor_signature.tensors_compatible(
{'a': placeholder_c}, signatures))
def testSparseTensorCompatible(self):
t = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], shape=[3, 4])
signatures = tensor_signature.create_signatures(t)
self.assertTrue(tensor_signature.tensors_compatible(t, signatures))
def testTensorSignaturePlaceholders(self):
placeholder_a = tf.placeholder(name='test',
shape=[None, 100],
dtype=tf.int32)
signatures = tensor_signature.create_signatures(placeholder_a)
placeholder_out = tensor_signature.create_placeholders_from_signatures(
signatures)
self.assertEqual(placeholder_out.dtype, placeholder_a.dtype)
self.assertTrue(placeholder_out.get_shape().is_compatible_with(
placeholder_a.get_shape()))
self.assertTrue(tensor_signature.tensors_compatible(placeholder_out,
signatures))
inputs = {'a': placeholder_a}
signatures = tensor_signature.create_signatures(inputs)
placeholders_out = tensor_signature.create_placeholders_from_signatures(
signatures)
self.assertEqual(placeholders_out['a'].dtype, placeholder_a.dtype)
self.assertTrue(
placeholders_out['a'].get_shape().is_compatible_with(
placeholder_a.get_shape()))
self.assertTrue(tensor_signature.tensors_compatible(placeholders_out,
signatures))
def testSparseTensorSignaturePlaceholders(self):
tensor = tf.SparseTensor(values=[1.0, 2.0], indices=[[0, 2], [0, 3]],
shape=[5, 5])
signature = tensor_signature.create_signatures(tensor)
placeholder = tensor_signature.create_placeholders_from_signatures(
signature)
self.assertTrue(isinstance(placeholder, tf.SparseTensor))
self.assertEqual(placeholder.values.dtype, tensor.values.dtype)
if __name__ == '__main__':
tf.test.main()
|
TakayukiSakai/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/tensor_signature_test.py
|
Python
|
apache-2.0
| 5,029
|
import os
import re
import cx_Oracle
import collections
import datetime
earContentionCode = [2200,2210,2220,3140,3150,4130,4210,4700,4920,5000,5010,5710,6850]
#Primary query, Look for all claims/contentions where the participant has at least one contention with an ear-related contention code.
#Organize them first by participant id (descending), then claim id, and finally by profile date (ascending).
SQL="select rcc.ptcpnt_vet_id, \
bnft_claim_id, \
date_of_claim, \
prfil_dt, \
claim_ro_number, \
cntntn_id, \
cntntn_clsfcn_id, \
cntntn_clmant_txt, \
p.dob, \
end_prdct_type_cd \
from combo_rating_corp_claim rcc \
left join ah2626_person p on p.ptcpnt_vet_id = rcc.ptcpnt_vet_id \
inner join v_ear_claim_source cs on cs.vet_id = rcc.ptcpnt_vet_id and cs.claim_id = rcc.bnft_claim_id \
where prfil_dt >= date_of_claim \
order by rcc.ptcpnt_vet_id desc,bnft_claim_id,prfil_dt"
class AggregateContention:
def __init__(self):
self.VET_ID = None
self.CLAIM_ID = None
self.DOB = 0
self.END_PRODUCT_CODE = None
self.RO_NUMBER = 0
self.CLAIM_DATE = None
self.MAX_PROFILE_DATE = None
self.CONTENTION_COUNT = 0
self.EAR_CONTENTION_COUNT = 0
self.C2200 = 0
self.C2210 = 0
self.C2220 = 0
self.C3140 = 0
self.C3150 = 0
self.C4130 = 0
self.C4210 = 0
self.C4700 = 0
self.C4920 = 0
self.C5000 = 0
self.C5010 = 0
self.C5710 = 0
self.C6850 = 0
self.TXT_LOSS = 0
self.TXT_TINITU = 0
def __str__(self):
from pprint import pprint
return str(vars(self))
class Contention:
def __init__(self, ptcpnt_vet_id, bnft_claim_id, claim_date, prfil_dt, claim_ro_number, cntntn_id, cntntn_clsfcn_id, cntntn_clmant_txt, dob, end_prdct_type_cd):
self.ptcpnt_vet_id = ptcpnt_vet_id
self.bnft_claim_id = bnft_claim_id
self.claim_date = claim_date
self.prfil_dt = prfil_dt
self.claim_ro_number = claim_ro_number
self.cntntn_id = cntntn_id
self.cntntn_clsfcn_id = cntntn_clsfcn_id
self.cntntn_clmant_txt = cntntn_clmant_txt
if not dob is None:
self.dob = int(dob)
else:
self.dob = None
self.end_prdct_type_cd = end_prdct_type_cd
def __str__(self):
from pprint import pprint
return str(vars(self))
connection = cx_Oracle.connect('developer/D3vVV0Rd@127.0.0.1:1521/DEV.BCDSS')
writeCursor = connection.cursor()
writeCursor.prepare('INSERT INTO DEVELOPER.V_EAR_AGGREGATE_CONTENTION (VET_ID, CLAIM_ID, END_PRODUCT_CODE, CLAIM_DATE, CONTENTION_COUNT, EAR_CONTENTION_COUNT, C2200,C2210, C2220,C3140,C3150,C4130,C4210,C4700,C4920,C5000,C5010,C5710, C6850, TXT_LOSS, TXT_TINITU, DOB, RO_NUMBER, MAX_PROFILE_DATE) \
VALUES (:VET_ID, :CLAIM_ID, :END_PRODUCT_CODE, :CLAIM_DATE, :CONTENTION_COUNT, :EAR_CONTENTION_COUNT, \
:C2200, :C2210, :C2220, :C3140, :C3150, :C4130 , :C4210, :C4700, :C4920, :C5000, :C5010, :C5710, :C6850, \
:TXT_LOSS, :TXT_TINITU, \
:DOB, :RO_NUMBER, :MAX_PROFILE_DATE)')
print(str(datetime.datetime.now()))
cursor = connection.cursor()
cursor.execute(SQL)
aggregateContention = None
counterAggregateContention = None
totalContentions = None
totalEarContentions = None
maxProfileDate = None
currBenefitClaim = -1
currParticipant = -1
counter = 0
for row in cursor:
if counter == 1000: #Commit every 1000 records. Improvement would be to look into aggregate inserts
connection.commit()
counter=0
contention = Contention(row[0],row[1],row[2],row[3],row[4],row[5],row[6], row[7], row[8], row[9]) #Map loose fields into a Contention object. (Contention is a convenience object)
if currBenefitClaim != contention.bnft_claim_id: #Process insert statement and reset aggregation variables when claim id changes
if currBenefitClaim != -1: #Skip if first time through
#Perform all aggregation calculations before inserting and resetting
aggregateContention.CONTENTION_COUNT = sum(totalContentions.values())
aggregateContention.EAR_CONTENTION_COUNT = sum(totalEarContentions.values())
aggregateContention.MAX_PROFILE_DATE = maxProfileDate[currBenefitClaim]
writeCursor.execute(None, {'VET_ID' :aggregateContention.VET_ID, 'CLAIM_ID' :aggregateContention.CLAIM_ID, 'END_PRODUCT_CODE' :aggregateContention.END_PRODUCT_CODE, 'CLAIM_DATE' :aggregateContention.CLAIM_DATE, 'CONTENTION_COUNT' :aggregateContention.CONTENTION_COUNT, 'EAR_CONTENTION_COUNT' :aggregateContention.EAR_CONTENTION_COUNT,
'C2200' :counterAggregateContention.C2200, 'C2210' :counterAggregateContention.C2210, 'C2220' :counterAggregateContention.C2220, 'C3140' :counterAggregateContention.C3140, 'C3150' :counterAggregateContention.C3150, 'C4130' :counterAggregateContention.C4130, 'C4210' :counterAggregateContention.C4210, 'C4700' :counterAggregateContention.C4700, 'C4920' :counterAggregateContention.C4920, 'C5000' :counterAggregateContention.C5000, 'C5010' :counterAggregateContention.C5010, 'C5710' :counterAggregateContention.C5710, 'C6850' :counterAggregateContention.C6850,
'TXT_LOSS' :counterAggregateContention.TXT_LOSS, 'TXT_TINITU' :counterAggregateContention.TXT_TINITU,
'DOB' :aggregateContention.DOB, 'RO_NUMBER' :aggregateContention.RO_NUMBER, 'MAX_PROFILE_DATE' :aggregateContention.MAX_PROFILE_DATE})
counter += 1
currBenefitClaim = contention.bnft_claim_id #Reset claim id
if currParticipant != contention.ptcpnt_vet_id :
currParticipant = contention.ptcpnt_vet_id #Reset participant id
counterAggregateContention = AggregateContention()
#Capture all claim/person level items that do not change per contention
aggregateContention = AggregateContention()
aggregateContention.VET_ID = contention.ptcpnt_vet_id
aggregateContention.CLAIM_ID = currBenefitClaim
aggregateContention.RO_NUMBER = contention.claim_ro_number
aggregateContention.DOB = contention.dob
aggregateContention.CLAIM_DATE = contention.claim_date
aggregateContention.END_PRODUCT_CODE = contention.end_prdct_type_cd
#Reset the counters
        totalContentions = collections.Counter()
        totalEarContentions = collections.Counter()
        maxProfileDate = collections.Counter()
maxProfileDate[currBenefitClaim] = contention.prfil_dt #If a claim has multiple profile dates, because of the sorting, we always end up with the most recent profile date
totalContentions[currBenefitClaim] += 1 #For every contention add one
if contention.cntntn_clsfcn_id in earContentionCode:
totalEarContentions[currBenefitClaim] +=1 #For any contention that is ear-related, add one
    #Use regex to check the claimant text and increment the counter for each contention that matches
if re.search("Loss",contention.cntntn_clmant_txt,re.IGNORECASE):
counterAggregateContention.TXT_LOSS += 1
if re.search("Tinnitus",contention.cntntn_clmant_txt,re.IGNORECASE):
counterAggregateContention.TXT_TINITU += 1
    #Test the classification codes and increment the matching per-code counter
if contention.cntntn_clsfcn_id == 2200:
counterAggregateContention.C2200 += 1
if contention.cntntn_clsfcn_id == 2210:
counterAggregateContention.C2210 += 1
if contention.cntntn_clsfcn_id == 2220:
counterAggregateContention.C2220 += 1
if contention.cntntn_clsfcn_id == 3140:
counterAggregateContention.C3140 += 1
if contention.cntntn_clsfcn_id == 3150:
counterAggregateContention.C3150 += 1
if contention.cntntn_clsfcn_id == 4130:
counterAggregateContention.C4130 += 1
if contention.cntntn_clsfcn_id == 4210:
counterAggregateContention.C4210 += 1
if contention.cntntn_clsfcn_id == 4700:
counterAggregateContention.C4700 += 1
if contention.cntntn_clsfcn_id == 4920:
counterAggregateContention.C4920 += 1
if contention.cntntn_clsfcn_id == 5000:
counterAggregateContention.C5000 += 1
if contention.cntntn_clsfcn_id == 5010:
counterAggregateContention.C5010 += 1
if contention.cntntn_clsfcn_id == 5710:
counterAggregateContention.C5710 += 1
if contention.cntntn_clsfcn_id == 6850:
counterAggregateContention.C6850 += 1
#A bit strange looking but due to Python's indentation approach this occurs after the for loop in order to capture the last claim.
aggregateContention.CONTENTION_COUNT = sum(totalContentions.values())
aggregateContention.EAR_CONTENTION_COUNT = sum(totalEarContentions.values())
aggregateContention.MAX_PROFILE_DATE = maxProfileDate[currBenefitClaim]
writeCursor.execute(None, {'VET_ID' :aggregateContention.VET_ID, 'CLAIM_ID' :aggregateContention.CLAIM_ID, 'END_PRODUCT_CODE' :aggregateContention.END_PRODUCT_CODE, 'CLAIM_DATE' :aggregateContention.CLAIM_DATE, 'CONTENTION_COUNT' :aggregateContention.CONTENTION_COUNT, 'EAR_CONTENTION_COUNT' :aggregateContention.EAR_CONTENTION_COUNT,
'C2200' :counterAggregateContention.C2200, 'C2210' :counterAggregateContention.C2210, 'C2220' :counterAggregateContention.C2220, 'C3140' :counterAggregateContention.C3140, 'C3150' :counterAggregateContention.C3150, 'C4130' :counterAggregateContention.C4130, 'C4210' :counterAggregateContention.C4210, 'C4700' :counterAggregateContention.C4700, 'C4920' :counterAggregateContention.C4920, 'C5000' :counterAggregateContention.C5000, 'C5010' :counterAggregateContention.C5010, 'C5710' :counterAggregateContention.C5710, 'C6850' :counterAggregateContention.C6850,
'TXT_LOSS' :counterAggregateContention.TXT_LOSS, 'TXT_TINITU' :counterAggregateContention.TXT_TINITU,
'DOB' :aggregateContention.DOB, 'RO_NUMBER' :aggregateContention.RO_NUMBER, 'MAX_PROFILE_DATE' :aggregateContention.MAX_PROFILE_DATE})
connection.commit()
print(str(datetime.datetime.now()))
writeCursor.close()
cursor.close()
connection.close()
|
VHAINNOVATIONS/BCDS
|
Model/scripts/ear_validation/python/aggregateContention.py
|
Python
|
apache-2.0
| 9,448
|
import logging
from xml.dom.minidom import *
from jinja2 import Environment, Template
from edge.dateutility import DateUtility
from edge.opensearch.response import Response
class TemplateResponse(Response):
def __init__(self):
super(TemplateResponse, self).__init__()
self.env = Environment()
self.env.trim_blocks = True
self.env.autoescape = True
self.variables = {}
self.env.filters['convertISOTime'] = DateUtility.convertISOTime
def setTemplate(self, template):
self.template = self.env.from_string(template)
def generate(self, pretty=False):
logging.debug('TemplateResponse.generate is called.')
if pretty:
            try:
xmlStr = self.template.render(self.variables).encode('utf-8').replace('\n', '')
except Exception as e:
logging.debug("Problem generating template " + str(e))
xmlStr = self.template.render({}).encode('utf-8').replace('\n', '')
document = xml.dom.minidom.parseString(xmlStr)
return document.toprettyxml()
else:
return self.template.render(self.variables).replace('\n', '')
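# A minimal usage sketch (added for illustration; the template string and
# variable values below are made up, not taken from the original project):
#
#     response = TemplateResponse()
#     response.setTemplate('<title>{{ title }}</title>')
#     response.variables = {'title': 'example'}
#     xml_str = response.generate(pretty=True)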
|
dataplumber/edge
|
src/main/python/libraries/edge/opensearch/templateresponse.py
|
Python
|
apache-2.0
| 1,202
|
# Copyright 2017 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from keystoneauth1 import exceptions as kse
import webob
from nova.api.openstack import identity
from nova import test
class FakeResponse(object):
"""A basic response constainer that simulates requests.Response.
One of the critical things is that a success error code makes the
object return true.
"""
def __init__(self, status_code, content=""):
self.status_code = status_code
self.content = content
def __bool__(self):
# python 3
return self.__nonzero__()
def __nonzero__(self):
# python 2
return self.status_code < 400
@property
def text(self):
return self.content
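# Illustrative note (not part of the original test module): the cases below
# rely on FakeResponse being truthy only for success codes, mirroring how
# requests.Response behaves, e.g.
#
#     bool(FakeResponse(200))   # True  -> verify_project_id returns True
#     bool(FakeResponse(404))   # False -> treated as "project not found"
#     bool(FakeResponse(403))   # False -> cannot verify, so assumed valid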
class IdentityValidationTest(test.NoDBTestCase):
@mock.patch('keystoneauth1.session.Session.get')
def test_good_id(self, get):
get.return_value = FakeResponse(200)
self.assertTrue(identity.verify_project_id(mock.MagicMock(), "foo"))
get.assert_called_once_with(
'/v3/projects/foo',
endpoint_filter={'service_type': 'identity'},
raise_exc=False)
@mock.patch('keystoneauth1.session.Session.get')
def test_no_project(self, get):
get.return_value = FakeResponse(404)
self.assertRaises(webob.exc.HTTPBadRequest,
identity.verify_project_id,
mock.MagicMock(), "foo")
get.assert_called_once_with(
'/v3/projects/foo',
endpoint_filter={'service_type': 'identity'},
raise_exc=False)
@mock.patch('keystoneauth1.session.Session.get')
def test_unknown_id(self, get):
get.return_value = FakeResponse(403)
self.assertTrue(identity.verify_project_id(mock.MagicMock(), "foo"))
get.assert_called_once_with(
'/v3/projects/foo',
endpoint_filter={'service_type': 'identity'},
raise_exc=False)
@mock.patch('keystoneauth1.session.Session.get')
def test_unknown_error(self, get):
get.return_value = FakeResponse(500, "Oh noes!")
self.assertTrue(identity.verify_project_id(mock.MagicMock(), "foo"))
get.assert_called_once_with(
'/v3/projects/foo',
endpoint_filter={'service_type': 'identity'},
raise_exc=False)
@mock.patch('keystoneauth1.session.Session.get')
def test_early_fail(self, get):
get.side_effect = kse.EndpointNotFound()
self.assertTrue(identity.verify_project_id(mock.MagicMock(), "foo"))
|
rajalokan/nova
|
nova/tests/unit/test_identity.py
|
Python
|
apache-2.0
| 3,137
|
#!/usr/bin/env python3
from socket import inet_pton, inet_ntop, AF_INET, AF_INET6
import unittest
from framework import VppTestCase, VppTestRunner
from vpp_ip import DpoProto
from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsLabel, \
VppIpTable, FibPathProto
from vpp_acl import AclRule, VppAcl
from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
from scapy.layers.inet6 import IPv6
from ipaddress import IPv4Network, IPv6Network
from vpp_object import VppObject
NUM_PKTS = 67
def find_abf_policy(test, id):
policies = test.vapi.abf_policy_dump()
for p in policies:
if id == p.policy.policy_id:
return True
return False
def find_abf_itf_attach(test, id, sw_if_index):
attachs = test.vapi.abf_itf_attach_dump()
for a in attachs:
if id == a.attach.policy_id and \
sw_if_index == a.attach.sw_if_index:
return True
return False
class VppAbfPolicy(VppObject):
def __init__(self,
test,
policy_id,
acl,
paths):
self._test = test
self.policy_id = policy_id
self.acl = acl
self.paths = paths
self.encoded_paths = []
for path in self.paths:
self.encoded_paths.append(path.encode())
def add_vpp_config(self):
self._test.vapi.abf_policy_add_del(
1,
{'policy_id': self.policy_id,
'acl_index': self.acl.acl_index,
'n_paths': len(self.paths),
'paths': self.encoded_paths})
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.abf_policy_add_del(
0,
{'policy_id': self.policy_id,
'acl_index': self.acl.acl_index,
'n_paths': len(self.paths),
'paths': self.encoded_paths})
def query_vpp_config(self):
return find_abf_policy(self._test, self.policy_id)
def object_id(self):
return ("abf-policy-%d" % self.policy_id)
class VppAbfAttach(VppObject):
def __init__(self,
test,
policy_id,
sw_if_index,
priority,
is_ipv6=0):
self._test = test
self.policy_id = policy_id
self.sw_if_index = sw_if_index
self.priority = priority
self.is_ipv6 = is_ipv6
def add_vpp_config(self):
self._test.vapi.abf_itf_attach_add_del(
1,
{'policy_id': self.policy_id,
'sw_if_index': self.sw_if_index,
'priority': self.priority,
'is_ipv6': self.is_ipv6})
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.abf_itf_attach_add_del(
0,
{'policy_id': self.policy_id,
'sw_if_index': self.sw_if_index,
'priority': self.priority,
'is_ipv6': self.is_ipv6})
def query_vpp_config(self):
return find_abf_itf_attach(self._test,
self.policy_id,
self.sw_if_index)
def object_id(self):
return ("abf-attach-%d-%d" % (self.policy_id, self.sw_if_index))
class TestAbf(VppTestCase):
""" ABF Test Case """
@classmethod
def setUpClass(cls):
super(TestAbf, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestAbf, cls).tearDownClass()
def setUp(self):
super(TestAbf, self).setUp()
self.create_pg_interfaces(range(5))
for i in self.pg_interfaces[:4]:
i.admin_up()
i.config_ip4()
i.resolve_arp()
i.config_ip6()
i.resolve_ndp()
def tearDown(self):
for i in self.pg_interfaces:
i.unconfig_ip4()
i.unconfig_ip6()
i.admin_down()
super(TestAbf, self).tearDown()
def test_abf4(self):
""" IPv4 ACL Based Forwarding
"""
#
# We are not testing the various matching capabilities
        # of ACLs, that's done elsewhere. Here we are testing
# the application of ACLs to a forwarding path to achieve
# ABF
# So we construct just a few ACLs to ensure the ABF policies
# are correctly constructed and used. And a few path types
# to test the API path decoding.
#
#
# Rule 1
#
rule_1 = AclRule(is_permit=1, proto=17, ports=1234,
src_prefix=IPv4Network("1.1.1.1/32"),
dst_prefix=IPv4Network("1.1.1.2/32"))
acl_1 = VppAcl(self, rules=[rule_1])
acl_1.add_vpp_config()
#
# ABF policy for ACL 1 - path via interface 1
#
abf_1 = VppAbfPolicy(self, 10, acl_1,
[VppRoutePath(self.pg1.remote_ip4,
self.pg1.sw_if_index)])
abf_1.add_vpp_config()
#
# Attach the policy to input interface Pg0
#
attach_1 = VppAbfAttach(self, 10, self.pg0.sw_if_index, 50)
attach_1.add_vpp_config()
#
# fire in packet matching the ACL src,dst. If it's forwarded
# then the ABF was successful, since default routing will drop it
#
p_1 = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src="1.1.1.1", dst="1.1.1.2") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_expect(self.pg0, p_1*NUM_PKTS, self.pg1)
#
# Attach a 'better' priority policy to the same interface
#
abf_2 = VppAbfPolicy(self, 11, acl_1,
[VppRoutePath(self.pg2.remote_ip4,
self.pg2.sw_if_index)])
abf_2.add_vpp_config()
attach_2 = VppAbfAttach(self, 11, self.pg0.sw_if_index, 40)
attach_2.add_vpp_config()
self.send_and_expect(self.pg0, p_1*NUM_PKTS, self.pg2)
#
# Attach a policy with priority in the middle
#
abf_3 = VppAbfPolicy(self, 12, acl_1,
[VppRoutePath(self.pg3.remote_ip4,
self.pg3.sw_if_index)])
abf_3.add_vpp_config()
attach_3 = VppAbfAttach(self, 12, self.pg0.sw_if_index, 45)
attach_3.add_vpp_config()
self.send_and_expect(self.pg0, p_1*NUM_PKTS, self.pg2)
#
# remove the best priority
#
attach_2.remove_vpp_config()
self.send_and_expect(self.pg0, p_1*NUM_PKTS, self.pg3)
#
# Attach one of the same policies to Pg1
#
attach_4 = VppAbfAttach(self, 12, self.pg1.sw_if_index, 45)
attach_4.add_vpp_config()
p_2 = (Ether(src=self.pg1.remote_mac,
dst=self.pg1.local_mac) /
IP(src="1.1.1.1", dst="1.1.1.2") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_expect(self.pg1, p_2 * NUM_PKTS, self.pg3)
#
# detach the policy from PG1, now expect traffic to be dropped
#
attach_4.remove_vpp_config()
self.send_and_assert_no_replies(self.pg1, p_2 * NUM_PKTS, "Detached")
#
# Swap to route via a next-hop in the non-default table
#
table_20 = VppIpTable(self, 20)
table_20.add_vpp_config()
self.pg4.set_table_ip4(table_20.table_id)
self.pg4.admin_up()
self.pg4.config_ip4()
self.pg4.resolve_arp()
abf_13 = VppAbfPolicy(self, 13, acl_1,
[VppRoutePath(self.pg4.remote_ip4,
0xffffffff,
nh_table_id=table_20.table_id)])
abf_13.add_vpp_config()
attach_5 = VppAbfAttach(self, 13, self.pg0.sw_if_index, 30)
attach_5.add_vpp_config()
self.send_and_expect(self.pg0, p_1*NUM_PKTS, self.pg4)
self.pg4.unconfig_ip4()
self.pg4.set_table_ip4(0)
def test_abf6(self):
""" IPv6 ACL Based Forwarding
"""
#
# Simple test for matching IPv6 packets
#
#
# Rule 1
#
rule_1 = AclRule(is_permit=1, proto=17, ports=1234,
src_prefix=IPv6Network("2001::2/128"),
dst_prefix=IPv6Network("2001::1/128"))
acl_1 = VppAcl(self, rules=[rule_1])
acl_1.add_vpp_config()
#
# ABF policy for ACL 1 - path via interface 1
#
abf_1 = VppAbfPolicy(self, 10, acl_1,
[VppRoutePath("3001::1",
0xffffffff)])
abf_1.add_vpp_config()
attach_1 = VppAbfAttach(self, 10, self.pg0.sw_if_index,
45, is_ipv6=True)
attach_1.add_vpp_config()
#
# a packet matching the rule
#
p = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IPv6(src="2001::2", dst="2001::1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
#
# packets are dropped because there is no route to the policy's
# next hop
#
self.send_and_assert_no_replies(self.pg1, p * NUM_PKTS, "no route")
#
# add a route resolving the next-hop
#
route = VppIpRoute(self, "3001::1", 32,
[VppRoutePath(self.pg1.remote_ip6,
self.pg1.sw_if_index)])
route.add_vpp_config()
#
# now expect packets forwarded.
#
self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg1)
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
|
FDio/vpp
|
test/test_abf.py
|
Python
|
apache-2.0
| 10,057
|
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from functools import partial
import testtools
from tempest import config
from tempest.exceptions import InvalidServiceTag
from tempest.lib.common.utils import data_utils as lib_data_utils
from tempest.lib import decorators
CONF = config.CONF
class DataUtils(object):
def __getattr__(self, attr):
if attr == 'rand_name':
# NOTE(flwang): This is a proxy to generate a random name that
# includes a random number and a prefix 'tempest'
attr_obj = partial(lib_data_utils.rand_name,
prefix='tempest')
else:
attr_obj = getattr(lib_data_utils, attr)
self.__dict__[attr] = attr_obj
return attr_obj
data_utils = DataUtils()
def get_service_list():
service_list = {
'compute': CONF.service_available.nova,
'image': CONF.service_available.glance,
'volume': CONF.service_available.cinder,
# NOTE(masayukig): We have two network services which are neutron and
# nova-network. And we have no way to know whether nova-network is
# available or not. After the pending removal of nova-network from
# nova, we can treat the network/neutron case in the same manner as
# the other services.
'network': True,
# NOTE(masayukig): Tempest tests always require the identity service.
# So we should set this True here.
'identity': True,
'object_storage': CONF.service_available.swift,
}
return service_list
def services(*args):
"""A decorator used to set an attr for each service used in a test case
This decorator applies a testtools attr for each service that gets
exercised by a test case.
"""
def decorator(f):
known_services = get_service_list()
for service in args:
if service not in known_services:
raise InvalidServiceTag('%s is not a valid service' % service)
decorators.attr(type=list(args))(f)
@functools.wraps(f)
def wrapper(*func_args, **func_kwargs):
service_list = get_service_list()
for service in args:
if not service_list[service]:
msg = 'Skipped because the %s service is not available' % (
service)
raise testtools.TestCase.skipException(msg)
return f(*func_args, **func_kwargs)
return wrapper
return decorator
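# A hedged usage sketch (the test class and method below are hypothetical,
# not part of tempest): applying @services tags the test with testtools
# attrs for each service and skips it when any listed service is disabled.
#
#     class ExampleTest(tempest.test.BaseTestCase):
#         @services('compute', 'image')
#         def test_boot_server_from_image(self):
#             ...  # skipped unless nova and glance are both available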
def requires_ext(**kwargs):
"""A decorator to skip tests if an extension is not enabled
@param extension
@param service
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*func_args, **func_kwargs):
if not is_extension_enabled(kwargs['extension'],
kwargs['service']):
msg = "Skipped because %s extension: %s is not enabled" % (
kwargs['service'], kwargs['extension'])
raise testtools.TestCase.skipException(msg)
return func(*func_args, **func_kwargs)
return wrapper
return decorator
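# Similarly, a hedged sketch for requires_ext (the extension name here is
# only an example): the wrapped test is skipped unless the extension is
# listed (or 'all' is set) in the service's api_extensions config option.
#
#     @requires_ext(extension='os-volumes', service='compute')
#     def test_volume_extension_behaviour(self):
#         ...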
def is_extension_enabled(extension_name, service):
"""A function that will check the list of enabled extensions from config
"""
config_dict = {
'compute': CONF.compute_feature_enabled.api_extensions,
'volume': CONF.volume_feature_enabled.api_extensions,
'network': CONF.network_feature_enabled.api_extensions,
'object': CONF.object_storage_feature_enabled.discoverable_apis,
'identity': CONF.identity_feature_enabled.api_extensions
}
if not config_dict[service]:
return False
if config_dict[service][0] == 'all':
return True
if extension_name in config_dict[service]:
return True
return False
|
cisco-openstack/tempest
|
tempest/common/utils/__init__.py
|
Python
|
apache-2.0
| 4,477
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Gathers components according to their history.
This algorithm is a test: it can be a memory hog
:author: Thomas Calmant
:license: Apache Software License 2.0
:version: 3.0.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import logging
import operator
# iPOPO Decorators
from pelix.ipopo.decorators import ComponentFactory, Provides, Instantiate, \
Invalidate, Validate, Requires
# Composer
import cohorte.composer
# ------------------------------------------------------------------------------
# Bundle version
import cohorte.version
__version__ = cohorte.version.__version__
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
@ComponentFactory()
@Provides(cohorte.composer.SERVICE_NODE_CRITERION_DISTANCE)
@Requires('_status', cohorte.composer.SERVICE_STATUS_NODE)
@Instantiate('cohorte-composer-node-criterion-compatibility')
class HistoryCriterion(object):
"""
Gathers components which never crashed when they were in the same isolate
"""
def __init__(self):
"""
Sets up members
"""
# A set of tuples: each tuple contains the components which were in an
# isolate that crashed
self._crashes = set()
# Injected
self._status = None
def __str__(self):
"""
String representation
"""
return "Components gathering based on history"
@Validate
def validate(self, context):
"""
Component validated
"""
# TODO: load previous crashes from a file/db...
self._crashes.clear()
@Invalidate
def invalidate(self, context):
"""
Component invalidated
"""
# TODO: store crashes to a file/db...
self._crashes.clear()
def handle_event(self, event):
"""
        Handles composer events: when an isolate is lost, records the
        components it contained so that grouping can be avoided later
"""
# Get the implicated components
components = set(component.name for component in event.components)
if event.kind == 'isolate.lost':
self.on_crash(components)
def on_crash(self, components):
"""
An isolate has been lost
:param components: Names of the components in the crashed isolate
"""
if len(components) == 1:
# Do not forbid a group of 1 component
# (or it would never be instantiated again)
return
# Consolidate history
crash = frozenset(components)
to_remove = []
for old_crash in self._crashes:
if crash.issubset(old_crash):
to_remove.append(old_crash)
for old_crash in to_remove:
self._crashes.remove(old_crash)
# Store the isolate composition at the time of crash
self._crashes.add(tuple(sorted(components)))
_logger.info("%d crash(es) in history:\n%s", len(self._crashes),
'\n'.join('- ' + ', '.join(crash)
for crash in self._crashes))
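    # Illustrative note (added for clarity, not in the original source):
    # the consolidation above keeps only minimal crashing sets. For example,
    # if ('A', 'B', 'C') is already recorded and a new crash {'A', 'B'}
    # arrives, the new set is a subset of the old one, so the old tuple is
    # dropped and ('A', 'B') is stored; vote() then votes against any
    # isolate whose future content would be a superset of ('A', 'B').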
def vote(self, candidates, subject, ballot):
"""
Votes for the isolate(s) with the minimal compatibility distance
:param candidates: Isolates to vote for
:param subject: The component to place
:param ballot: The vote ballot
"""
# Subject component name
component_name = subject.name
# Preference for candidate: (number of components, candidate)
preference = []
# Neutral isolate (last resort)
neutral_candidate = None
# Prepare a dictionary: candidate -> components
all_components = {}
for candidate in candidates:
components = sorted(component.name
for component in candidate.components)
if not components and not candidate.name:
# Found the neutral isolate (do not add it to 'all_components')
neutral_candidate = candidate
else:
if component_name in components:
                    # Found the isolate where the component already is
components.remove(component_name)
# Store information
all_components[candidate] = components
# Sort candidates by number of components already there
sorted_candidates = [(len(content), candidate)
for candidate, content in all_components.items()]
sorted_candidates.sort(key=lambda x: (-x[0], x[1].name))
# Compute candidate preference (empty or OK)
for _, candidate in sorted_candidates:
# Analyze each candidate
components = all_components[candidate]
if not components:
# No components, we're OK with it
preference.append((0, candidate))
else:
# Ensure that the content of this isolate won't be a known
# crashing solution
future_content = set(components)
future_content.add(component_name)
for crash in self._crashes:
if future_content.issuperset(crash):
# Solution is (a superset of) a crashing solution
_logger.info(
"Known bad solution for %s:\n%s\ndue to:\n%s",
component_name,
', '.join(name for name in sorted(future_content)),
', '.join(name for name in sorted(crash)))
ballot.append_against(candidate)
break
else:
# Not a crashing solution
preference.append((len(components), candidate))
# TODO: tweak vote preferences to reduce the number of moves
if preference:
# Sort results (greater is better: it gathers components)
preference.sort(key=operator.itemgetter(0), reverse=True)
_logger.info("Vote preference for %s: %s",
component_name, ', '.join(item[1].name or "Neutral"
for item in preference))
# Vote
for _, candidate in preference:
ballot.append_for(candidate)
elif neutral_candidate is not None:
# We voted for no one: vote for neutral
_logger.info("Using neutral candidate for %s", component_name)
ballot.append_for(neutral_candidate)
# Lock our vote
ballot.lock()
|
isandlaTech/cohorte-devtools
|
qualifier/deploy/cohorte-home/repo/cohorte/composer/node/criteria/distance/history.py
|
Python
|
apache-2.0
| 7,358
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Juliano Martinez, Locaweb.
# based on
# https://github.com/openstack/nova/blob/master/nova/network/linux_net.py
"""Implements iptables rules using linux utilities."""
import inspect
import os
import re
from neutron.agent.linux import utils as linux_utils
from neutron.common import utils
from neutron.openstack.common import excutils
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
# add up to 12 characters to binary_name which is used as a prefix,
# so we limit it to 16 characters.
# (max_chain_name_length - len('-POSTROUTING') == 16)
def get_binary_name():
"""Grab the name of the binary we're running in."""
return os.path.basename(inspect.stack()[-1][1])[:16]
binary_name = get_binary_name()
# A length of a chain name must be less than or equal to 11 characters.
# <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11
MAX_CHAIN_LEN_WRAP = 11
MAX_CHAIN_LEN_NOWRAP = 28
# Number of iptables rules to print before and after a rule that causes a
# failure during iptables-restore
IPTABLES_ERROR_LINES_OF_CONTEXT = 5
def get_chain_name(chain_name, wrap=True):
if wrap:
return chain_name[:MAX_CHAIN_LEN_WRAP]
else:
return chain_name[:MAX_CHAIN_LEN_NOWRAP]
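# Illustrative examples (not part of the original module): wrapped names are
# truncated to 11 characters so the '<binary_name>-' prefix (up to 16 chars)
# still fits within iptables' 28-character chain-name limit, e.g.
#
#     get_chain_name('PREROUTING')                  -> 'PREROUTING'
#     get_chain_name('neutron-postrouting-bottom')  -> 'neutron-pos'
#     get_chain_name('neutron-postrouting-bottom', wrap=False)
#                                                   -> name cut at 28 chars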
class IptablesRule(object):
"""An iptables rule.
You shouldn't need to use this class directly, it's only used by
IptablesManager.
"""
def __init__(self, chain, rule, wrap=True, top=False,
binary_name=binary_name, tag=None):
self.chain = get_chain_name(chain, wrap)
self.rule = rule
self.wrap = wrap
self.top = top
self.wrap_name = binary_name[:16]
self.tag = tag
def __eq__(self, other):
return ((self.chain == other.chain) and
(self.rule == other.rule) and
(self.top == other.top) and
(self.wrap == other.wrap))
def __ne__(self, other):
return not self == other
def __str__(self):
if self.wrap:
chain = '%s-%s' % (self.wrap_name, self.chain)
else:
chain = self.chain
return '-A %s %s' % (chain, self.rule)
class IptablesTable(object):
"""An iptables table."""
def __init__(self, binary_name=binary_name):
self.rules = []
self.remove_rules = []
self.chains = set()
self.unwrapped_chains = set()
self.remove_chains = set()
self.wrap_name = binary_name[:16]
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
it, so different components of Nova can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <binary name>-<chain name>,
so if nova-compute creates a chain named 'OUTPUT', it'll actually
end up named 'nova-compute-OUTPUT'.
"""
name = get_chain_name(name, wrap)
if wrap:
self.chains.add(name)
else:
self.unwrapped_chains.add(name)
def _select_chain_set(self, wrap):
if wrap:
return self.chains
else:
return self.unwrapped_chains
def ensure_remove_chain(self, name, wrap=True):
"""Ensure the chain is removed.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
"""
name = get_chain_name(name, wrap)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
return
self.remove_chain(name, wrap)
def remove_chain(self, name, wrap=True):
"""Remove named chain.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found, this is merely logged.
"""
name = get_chain_name(name, wrap)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
LOG.warn(_('Attempted to remove chain %s which does not exist'),
name)
return
chain_set.remove(name)
if not wrap:
# non-wrapped chains and rules need to be dealt with specially,
# so we keep a list of them to be iterated over in apply()
self.remove_chains.add(name)
# first, add rules to remove that have a matching chain name
self.remove_rules += [r for r in self.rules if r.chain == name]
# next, remove rules from list that have a matching chain name
self.rules = [r for r in self.rules if r.chain != name]
if not wrap:
jump_snippet = '-j %s' % name
# next, add rules to remove that have a matching jump chain
self.remove_rules += [r for r in self.rules
if jump_snippet in r.rule]
else:
jump_snippet = '-j %s-%s' % (self.wrap_name, name)
# finally, remove rules from list that have a matching jump chain
self.rules = [r for r in self.rules
if jump_snippet not in r.rule]
def add_rule(self, chain, rule, wrap=True, top=False, tag=None):
"""Add a rule to the table.
This is just like what you'd feed to iptables, just without
the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
chain = get_chain_name(chain, wrap)
if wrap and chain not in self.chains:
raise LookupError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(
self._wrap_target_chain(e, wrap) for e in rule.split(' '))
self.rules.append(IptablesRule(chain, rule, wrap, top, self.wrap_name,
tag))
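    # Illustrative sketch (added for clarity; the wrap name is hypothetical):
    # with a wrap name of 'neutron-l3-agent',
    #     table.add_rule('snat', '-j $float-snat')
    # is rendered by IptablesRule.__str__ as
    #     '-A neutron-l3-agent-snat -j neutron-l3-agent-float-snat'
    # because the '$' target is expanded by _wrap_target_chain below.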
def _wrap_target_chain(self, s, wrap):
if s.startswith('$'):
s = ('%s-%s' % (self.wrap_name, get_chain_name(s[1:], wrap)))
return s
def remove_rule(self, chain, rule, wrap=True, top=False):
"""Remove a rule from a chain.
Note: The rule must be exactly identical to the one that was added.
You cannot switch arguments around like you can with the iptables
CLI tool.
"""
chain = get_chain_name(chain, wrap)
try:
if '$' in rule:
rule = ' '.join(
self._wrap_target_chain(e, wrap) for e in rule.split(' '))
self.rules.remove(IptablesRule(chain, rule, wrap, top,
self.wrap_name))
if not wrap:
self.remove_rules.append(IptablesRule(chain, rule, wrap, top,
self.wrap_name))
except ValueError:
LOG.warn(_('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
def empty_chain(self, chain, wrap=True):
"""Remove all rules from a chain."""
chain = get_chain_name(chain, wrap)
chained_rules = [rule for rule in self.rules
if rule.chain == chain and rule.wrap == wrap]
for rule in chained_rules:
self.rules.remove(rule)
def clear_rules_by_tag(self, tag):
if not tag:
return
rules = [rule for rule in self.rules if rule.tag == tag]
for rule in rules:
self.rules.remove(rule)
class IptablesManager(object):
"""Wrapper for iptables.
See IptablesTable for some usage docs
A number of chains are set up to begin with.
First, neutron-filter-top. It's added at the top of FORWARD and OUTPUT. Its
name is not wrapped, so it's shared between the various nova workers. It's
intended for rules that need to live at the top of the FORWARD and OUTPUT
chains. It's in both the ipv4 and ipv6 set of tables.
For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
"local" which is jumped to from neutron-filter-top.
For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
    wrapped in the same way as the built-in filter chains. Additionally,
there's a snat chain that is applied after the POSTROUTING chain.
"""
def __init__(self, _execute=None, state_less=False,
root_helper=None, use_ipv6=False, namespace=None,
binary_name=binary_name):
if _execute:
self.execute = _execute
else:
self.execute = linux_utils.execute
self.use_ipv6 = use_ipv6
self.root_helper = root_helper
self.namespace = namespace
self.iptables_apply_deferred = False
self.wrap_name = binary_name[:16]
self.ipv4 = {'filter': IptablesTable(binary_name=self.wrap_name)}
self.ipv6 = {'filter': IptablesTable(binary_name=self.wrap_name)}
# Add a neutron-filter-top chain. It's intended to be shared
# among the various nova components. It sits at the very top
# of FORWARD and OUTPUT.
for tables in [self.ipv4, self.ipv6]:
tables['filter'].add_chain('neutron-filter-top', wrap=False)
tables['filter'].add_rule('FORWARD', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_rule('OUTPUT', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_chain('local')
tables['filter'].add_rule('neutron-filter-top', '-j $local',
wrap=False)
# Wrap the built-in chains
builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']},
6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
if not state_less:
self.ipv4.update(
{'nat': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[4].update({'nat': ['PREROUTING',
'OUTPUT', 'POSTROUTING']})
for ip_version in builtin_chains:
if ip_version == 4:
tables = self.ipv4
elif ip_version == 6:
tables = self.ipv6
for table, chains in builtin_chains[ip_version].iteritems():
for chain in chains:
tables[table].add_chain(chain)
tables[table].add_rule(chain, '-j $%s' %
(chain), wrap=False)
if not state_less:
# Add a neutron-postrouting-bottom chain. It's intended to be
            # shared among the various neutron components. We set it as the
            # last chain of the POSTROUTING chain.
self.ipv4['nat'].add_chain('neutron-postrouting-bottom',
wrap=False)
self.ipv4['nat'].add_rule('POSTROUTING',
'-j neutron-postrouting-bottom',
wrap=False)
# We add a snat chain to the shared neutron-postrouting-bottom
# chain so that it's applied last.
self.ipv4['nat'].add_chain('snat')
self.ipv4['nat'].add_rule('neutron-postrouting-bottom',
'-j $snat', wrap=False)
            # And then we add a float-snat chain and jump to it as the first
            # thing in the snat chain.
self.ipv4['nat'].add_chain('float-snat')
self.ipv4['nat'].add_rule('snat', '-j $float-snat')
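    # defer_apply_on()/defer_apply_off() let callers batch a series of rule
    # changes and push them to the kernel once, instead of re-running
    # iptables-restore after every individual change.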
def defer_apply_on(self):
self.iptables_apply_deferred = True
def defer_apply_off(self):
self.iptables_apply_deferred = False
self._apply()
def apply(self):
if self.iptables_apply_deferred:
return
self._apply()
def _apply(self):
lock_name = 'iptables'
if self.namespace:
lock_name += '-' + self.namespace
try:
with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
LOG.debug(_('Got semaphore / lock "%s"'), lock_name)
return self._apply_synchronized()
finally:
LOG.debug(_('Semaphore / lock released "%s"'), lock_name)
def _apply_synchronized(self):
"""Apply the current in-memory set of iptables rules.
This will blow away any rules left over from previous runs of the
        same component of Neutron, and replace them with our current set of
rules. This happens atomically, thanks to iptables-restore.
"""
s = [('iptables', self.ipv4)]
if self.use_ipv6:
s += [('ip6tables', self.ipv6)]
for cmd, tables in s:
args = ['%s-save' % (cmd,), '-c']
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
all_tables = self.execute(args, root_helper=self.root_helper)
all_lines = all_tables.split('\n')
for table_name, table in tables.iteritems():
start, end = self._find_table(all_lines, table_name)
all_lines[start:end] = self._modify_rules(
all_lines[start:end], table, table_name)
args = ['%s-restore' % (cmd,), '-c']
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
try:
self.execute(args, process_input='\n'.join(all_lines),
root_helper=self.root_helper)
except RuntimeError as r_error:
with excutils.save_and_reraise_exception():
try:
line_no = int(re.search(
'iptables-restore: line ([0-9]+?) failed',
str(r_error)).group(1))
context = IPTABLES_ERROR_LINES_OF_CONTEXT
log_start = max(0, line_no - context)
log_end = line_no + context
except AttributeError:
# line error wasn't found, print all lines instead
log_start = 0
log_end = len(all_lines)
log_lines = ('%7d. %s' % (idx, l)
for idx, l in enumerate(
all_lines[log_start:log_end],
log_start + 1)
)
LOG.error(_("IPTablesManager.apply failed to apply the "
"following set of iptables rules:\n%s"),
'\n'.join(log_lines))
LOG.debug(_("IPTablesManager.apply completed with success"))
def _find_table(self, lines, table_name):
if len(lines) < 3:
            # fewer than 3 lines means there is no real table output
            # (e.g. fake/empty iptables data)
return (0, 0)
try:
start = lines.index('*%s' % table_name) - 1
except ValueError:
# Couldn't find table_name
LOG.debug(_('Unable to find table %s'), table_name)
return (0, 0)
end = lines[start:].index('COMMIT') + start + 2
return (start, end)
def _find_rules_index(self, lines):
seen_chains = False
rules_index = 0
for rules_index, rule in enumerate(lines):
if not seen_chains:
if rule.startswith(':'):
seen_chains = True
else:
if not rule.startswith(':'):
break
if not seen_chains:
rules_index = 2
return rules_index
def _find_last_entry(self, filter_list, match_str):
# find a matching entry, starting from the bottom
for s in reversed(filter_list):
s = s.strip()
if match_str in s:
return s
def _modify_rules(self, current_lines, table, table_name):
unwrapped_chains = table.unwrapped_chains
chains = table.chains
remove_chains = table.remove_chains
rules = table.rules
remove_rules = table.remove_rules
if not current_lines:
fake_table = ['# Generated by iptables_manager',
'*' + table_name, 'COMMIT',
'# Completed by iptables_manager']
current_lines = fake_table
# Fill old_filter with any chains or rules we might have added,
# they could have a [packet:byte] count we want to preserve.
# Fill new_filter with any chains or rules without our name in them.
old_filter, new_filter = [], []
for line in current_lines:
(old_filter if self.wrap_name in line else
new_filter).append(line.strip())
rules_index = self._find_rules_index(new_filter)
all_chains = [':%s' % name for name in unwrapped_chains]
all_chains += [':%s-%s' % (self.wrap_name, name) for name in chains]
# Iterate through all the chains, trying to find an existing
# match.
our_chains = []
for chain in all_chains:
chain_str = str(chain).strip()
old = self._find_last_entry(old_filter, chain_str)
if not old:
dup = self._find_last_entry(new_filter, chain_str)
new_filter = [s for s in new_filter if chain_str not in s.strip()]
# if no old or duplicates, use original chain
if old or dup:
chain_str = str(old or dup)
else:
# add-on the [packet:bytes]
chain_str += ' - [0:0]'
our_chains += [chain_str]
# Iterate through all the rules, trying to find an existing
# match.
our_rules = []
bot_rules = []
for rule in rules:
rule_str = str(rule).strip()
# Further down, we weed out duplicates from the bottom of the
# list, so here we remove the dupes ahead of time.
old = self._find_last_entry(old_filter, rule_str)
if not old:
dup = self._find_last_entry(new_filter, rule_str)
new_filter = [s for s in new_filter if rule_str not in s.strip()]
# if no old or duplicates, use original rule
if old or dup:
rule_str = str(old or dup)
# backup one index so we write the array correctly
if not old:
rules_index -= 1
else:
# add-on the [packet:bytes]
rule_str = '[0:0] ' + rule_str
if rule.top:
# rule.top == True means we want this rule to be at the top.
our_rules += [rule_str]
else:
bot_rules += [rule_str]
our_rules += bot_rules
new_filter[rules_index:rules_index] = our_rules
new_filter[rules_index:rules_index] = our_chains
def _strip_packets_bytes(line):
# strip any [packet:byte] counts at start or end of lines
if line.startswith(':'):
# it's a chain, for example, ":neutron-billing - [0:0]"
line = line.split(':')[1]
line = line.split(' - [', 1)[0]
elif line.startswith('['):
# it's a rule, for example, "[0:0] -A neutron-billing..."
line = line.split('] ', 1)[1]
line = line.strip()
return line
seen_chains = set()
def _weed_out_duplicate_chains(line):
# ignore [packet:byte] counts at end of lines
if line.startswith(':'):
line = _strip_packets_bytes(line)
if line in seen_chains:
return False
else:
seen_chains.add(line)
# Leave it alone
return True
seen_rules = set()
def _weed_out_duplicate_rules(line):
if line.startswith('['):
line = _strip_packets_bytes(line)
if line in seen_rules:
return False
else:
seen_rules.add(line)
# Leave it alone
return True
def _weed_out_removes(line):
# We need to find exact matches here
if line.startswith(':'):
line = _strip_packets_bytes(line)
for chain in remove_chains:
if chain == line:
remove_chains.remove(chain)
return False
elif line.startswith('['):
line = _strip_packets_bytes(line)
for rule in remove_rules:
rule_str = _strip_packets_bytes(str(rule))
if rule_str == line:
remove_rules.remove(rule)
return False
# Leave it alone
return True
# We filter duplicates. Go through the chains and rules, letting
        # the *last* occurrence take precedence since it could have a
# non-zero [packet:byte] count we want to preserve. We also filter
# out anything in the "remove" list.
new_filter.reverse()
new_filter = [line for line in new_filter
if _weed_out_duplicate_chains(line) and
_weed_out_duplicate_rules(line) and
_weed_out_removes(line)]
new_filter.reverse()
# flush lists, just in case we didn't find something
remove_chains.clear()
        # Rebuild the list in place rather than removing entries while
        # iterating over it, which would skip every other item.
        remove_rules[:] = []
return new_filter
def _get_traffic_counters_cmd_tables(self, chain, wrap=True):
name = get_chain_name(chain, wrap)
cmd_tables = [('iptables', key) for key, table in self.ipv4.items()
if name in table._select_chain_set(wrap)]
cmd_tables += [('ip6tables', key) for key, table in self.ipv6.items()
if name in table._select_chain_set(wrap)]
return cmd_tables
def get_traffic_counters(self, chain, wrap=True, zero=False):
"""Return the sum of the traffic counters of all rules of a chain."""
cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap)
if not cmd_tables:
LOG.warn(_('Attempted to get traffic counters of chain %s which '
'does not exist'), chain)
return
name = get_chain_name(chain, wrap)
acc = {'pkts': 0, 'bytes': 0}
for cmd, table in cmd_tables:
args = [cmd, '-t', table, '-L', name, '-n', '-v', '-x']
if zero:
args.append('-Z')
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
current_table = (self.execute(args,
root_helper=self.root_helper))
current_lines = current_table.split('\n')
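            # `iptables -L -n -v -x` prints two header lines; every rule
            # line after that starts with the pkts and bytes columns, which
            # is all we accumulate here.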
for line in current_lines[2:]:
if not line:
break
data = line.split()
if (len(data) < 2 or
not data[0].isdigit() or
not data[1].isdigit()):
break
acc['pkts'] += int(data[0])
acc['bytes'] += int(data[1])
return acc
|
subramani95/neutron
|
neutron/agent/linux/iptables_manager.py
|
Python
|
apache-2.0
| 24,792
|
import time
import sqlalchemy as SA
import pushmanager.core.db as db
import pushmanager.core.util
from pushmanager.core.mail import MailQueue
from pushmanager.core.rb import RBQueue
from pushmanager.core.requesthandler import RequestHandler
class LivePushServlet(RequestHandler):
def _arg(self, key):
return pushmanager.core.util.get_str_arg(self.request, key, '')
def post(self):
if not self.current_user:
return self.send_error(403)
self.pushid = pushmanager.core.util.get_int_arg(self.request, 'id')
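        # Mark the push itself as live.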
push_query = db.push_pushes.update().where(db.push_pushes.c.id == self.pushid).values({
'state': 'live',
'modified': time.time(),
})
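        # Promote every blessed request attached to this push to live.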
request_query = db.push_requests.update().where(SA.and_(
db.push_requests.c.state == 'blessed',
SA.exists(
[1],
SA.and_(
db.push_pushcontents.c.push == self.pushid,
db.push_pushcontents.c.request == db.push_requests.c.id,
)
))).values({
'state': 'live',
'modified': time.time(),
})
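        # Requests still in 'pickme' for this push are reset to 'requested'.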
reset_query = db.push_requests.update().where(
SA.exists(
[1],
SA.and_(
db.push_requests.c.state == 'pickme',
db.push_pushcontents.c.push == self.pushid,
db.push_pushcontents.c.request == db.push_requests.c.id,
)
)).values({
'state': 'requested',
})
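        # Drop the push contents rows for requests that are back in
        # 'requested'.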
delete_query = db.push_pushcontents.delete().where(
SA.exists([1], SA.and_(
db.push_pushcontents.c.push == self.pushid,
db.push_pushcontents.c.request == db.push_requests.c.id,
db.push_requests.c.state == 'requested',
)))
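        # Fetch the requests that are now live so on_db_complete() can
        # queue their reviews and notify owners and watchers.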
live_query = db.push_requests.select().where(
SA.and_(db.push_requests.c.state == 'live',
db.push_pushcontents.c.push == self.pushid,
db.push_pushcontents.c.request == db.push_requests.c.id)
)
db.execute_transaction_cb(
[push_query, request_query, reset_query, delete_query, live_query],
self.on_db_complete,
)
def on_db_complete(self, success, db_results):
self.check_db_results(success, db_results)
_, _, _, _, live_requests = db_results
for req in live_requests:
if req['reviewid']:
review_id = int(req['reviewid'])
RBQueue.enqueue_review(review_id)
if req['watchers']:
user_string = '%s (%s)' % (req['user'], req['watchers'])
users = [req['user']] + req['watchers'].split(',')
else:
user_string = req['user']
users = [req['user']]
msg = (
"""
<p>
%(pushmaster)s has certified request for %(user)s as stable in production:
</p>
<p>
<strong>%(user)s - %(title)s</strong><br />
<em>%(repo)s/%(branch)s</em>
</p>
<p>
Regards,<br />
PushManager
</p>"""
) % pushmanager.core.util.EscapedDict({
'pushmaster': self.current_user,
'user': user_string,
'title': req['title'],
'repo': req['repo'],
'branch': req['branch'],
})
subject = "[push] %s - %s" % (user_string, req['title'])
MailQueue.enqueue_user_email(users, msg, subject)
|
Yelp/pushmanager
|
pushmanager/servlets/livepush.py
|
Python
|
apache-2.0
| 3,789
|
import os
from pathlib import Path
import gi
import logging
# gi.require_version() has to run before Gtk is imported, otherwise
# PyGObject warns and may bind an unintended Gtk version.
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
import json_config
from .login_window import LoginWindow
TOP_DIR = os.path.dirname(os.path.abspath(__file__))
config = json_config.connect('config.json')
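# 'config' is the json_config-backed mapping of config.json; the dialogs and
# handlers below read and write keys such as 'default_stt', 'default_tts',
# 'usage_mode' and 'hotword_engine' on it.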
class WatsonCredentialsDialog(Gtk.Dialog):
def __init__(self, parent):
Gtk.Dialog.__init__(self, "Enter Credentials", parent, 0,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK))
self.set_default_size(150, 100)
username_field = Gtk.Entry()
username_field.set_placeholder_text("Username")
password_field = Gtk.Entry()
password_field.set_placeholder_text("Password")
password_field.set_visibility(False)
password_field.set_invisible_char('*')
self.username_field = username_field
self.password_field = password_field
box = self.get_content_area()
box.set_margin_top(10)
box.set_margin_bottom(10)
box.set_margin_left(10)
box.set_margin_right(10)
box.set_spacing(10)
box.add(username_field)
box.add(password_field)
self.show_all()
class BingCredentialDialog(Gtk.Dialog):
def __init__(self, parent):
Gtk.Dialog.__init__(self, "Enter API Key", parent, 0,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK))
self.set_default_size(150, 100)
api_key_field = Gtk.Entry()
api_key_field.set_placeholder_text("API Key")
self.api_key_field = api_key_field
box = self.get_content_area()
box.set_margin_top(10)
box.set_margin_bottom(10)
box.set_margin_left(10)
box.set_margin_right(10)
box.set_spacing(10)
box.add(api_key_field)
self.show_all()
class ConfigurationWindow:
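    # Loads the Glade UI and mirrors the persisted configuration into the
    # widgets; the nested Handler class writes user changes back into
    # 'config'.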
def __init__(self) -> None:
super().__init__()
builder = Gtk.Builder()
builder.add_from_file(os.path.join(
TOP_DIR, "glade_files/configure.glade"))
self.window = builder.get_object("configuration_window")
self.stt_combobox = builder.get_object("stt_combobox")
self.tts_combobox = builder.get_object("tts_combobox")
self.auth_switch = builder.get_object("auth_switch")
self.snowboy_switch = builder.get_object("snowboy_switch")
self.wake_button_switch = builder.get_object("wake_button_switch")
self.init_auth_switch()
self.init_tts_combobox()
self.init_stt_combobox()
self.init_hotword_switch()
self.init_wake_button_switch()
builder.connect_signals(ConfigurationWindow.Handler(self))
self.window.set_resizable(False)
def show_window(self):
self.window.show_all()
Gtk.main()
def exit_window(self):
self.window.destroy()
Gtk.main_quit()
def init_tts_combobox(self):
default_tts = config['default_tts']
if default_tts == 'google':
self.tts_combobox.set_active(0)
elif default_tts == 'flite':
self.tts_combobox.set_active(1)
elif default_tts == 'watson':
self.tts_combobox.set_active(2)
else:
self.tts_combobox.set_active(0)
config['default_tts'] = 'google'
def init_stt_combobox(self):
default_stt = config['default_stt']
if default_stt == 'google':
self.stt_combobox.set_active(0)
elif default_stt == 'watson':
self.stt_combobox.set_active(1)
elif default_stt == 'bing':
self.stt_combobox.set_active(2)
        else:
            # Unrecognized value: fall back to Google STT.
            self.stt_combobox.set_active(0)
            config['default_stt'] = 'google'
def init_auth_switch(self):
usage_mode = config['usage_mode']
if usage_mode == 'authenticated':
self.auth_switch.set_active(True)
else:
self.auth_switch.set_active(False)
def init_hotword_switch(self):
try:
parent_dir = os.path.dirname(TOP_DIR)
snowboyDetectFile = Path(os.path.join(
parent_dir, "hotword_engine/snowboy/_snowboydetect.so"))
print(snowboyDetectFile)
if not snowboyDetectFile.exists():
self.snowboy_switch.set_sensitive(False)
config['hotword_engine'] = 'PocketSphinx'
except Exception as e:
logging.error(e)
config['hotword_engine'] = 'PocketSphinx'
if config['hotword_engine'] == 'Snowboy':
self.snowboy_switch.set_active(True)
else:
self.snowboy_switch.set_active(False)
def init_wake_button_switch(self):
try:
import RPi.GPIO
if config['WakeButton'] == 'enabled':
self.wake_button_switch.set_active(True)
else:
self.wake_button_switch.set_active(False)
except ImportError:
self.wake_button_switch.set_sensitive(False)
class Handler:
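        # Signal handlers wired up through builder.connect_signals(); each
        # on_* method here corresponds to a handler named in
        # configure.glade.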
def __init__(self, config_window):
self.config_window = config_window
def on_delete_window(self, *args):
print('Exiting')
self.config_window.exit_window()
def on_stt_combobox_changed(self, combo: Gtk.ComboBox):
selection = combo.get_active()
if selection == 0:
config['default_stt'] = 'google'
elif selection == 1:
credential_dialog = WatsonCredentialsDialog(
self.config_window.window)
response = credential_dialog.run()
if response == Gtk.ResponseType.OK:
username = credential_dialog.username_field.get_text()
password = credential_dialog.password_field.get_text()
config['default_stt'] = 'watson'
config['watson_stt_config']['username'] = username
config['watson_stt_config']['password'] = password
else:
self.config_window.init_stt_combobox()
credential_dialog.destroy()
elif selection == 2:
credential_dialog = BingCredentialDialog(
self.config_window.window)
response = credential_dialog.run()
if response == Gtk.ResponseType.OK:
api_key = credential_dialog.api_key_field.get_text()
config['default_stt'] = 'bing'
config['bing_speech_api_key']['username'] = api_key
else:
self.config_window.init_stt_combobox()
credential_dialog.destroy()
def on_tts_combobox_changed(self, combo):
selection = combo.get_active()
if selection == 0:
config['default_tts'] = 'google'
elif selection == 1:
config['default_tts'] = 'flite'
elif selection == 2:
credential_dialog = WatsonCredentialsDialog(
self.config_window.window)
response = credential_dialog.run()
if response == Gtk.ResponseType.OK:
username = credential_dialog.username_field.get_text()
password = credential_dialog.password_field.get_text()
config['default_tts'] = 'watson'
config['watson_tts_config']['username'] = username
config['watson_tts_config']['password'] = password
config['watson_tts_config']['voice'] = 'en-US_AllisonVoice'
else:
self.config_window.init_tts_combobox()
credential_dialog.destroy()
def on_auth_switch_active_notify(self, switch, gparam):
if switch.get_active():
login_window = LoginWindow()
login_window.show_window()
if config['usage_mode'] == 'authenticated':
switch.set_active(True)
else:
switch.set_active(False)
def on_snowboy_switch_active_notify(self, switch, gparam):
if switch.get_active():
config['hotword_engine'] = 'Snowboy'
else:
config['hotword_engine'] = 'PocketSphinx'
def on_wake_button_switch_active_notify(self, switch, gparam):
if switch.get_active():
config['wake_button'] = 'enabled'
else:
config['wake_button'] = 'disabled'
|
betterclever/susi_linux
|
main/renderer/configuration_window.py
|
Python
|
apache-2.0
| 8,711
|