text stringlengths 4 1.02M | meta dict |
|---|---|
from setuptools import setup, find_packages
setup(
name='validata',
version='0.0.1',
author='Alex Wiltschko',
description='Continuous Integration for data',
license='MIT',
packages=find_packages(exclude='docs'),
platforms='any',
install_requires=['numpy'],
include_package_data=True,
) | {
"content_hash": "88b454e24248b297dd6d070f82069b57",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 50,
"avg_line_length": 24.76923076923077,
"alnum_prop": 0.6739130434782609,
"repo_name": "alexbw/validata",
"id": "b19014bd05ffcad2e372e5c9e95d3e334f31ec24",
"size": "322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2893"
},
{
"name": "Python",
"bytes": "2827"
}
],
"symlink_target": ""
} |
"""WSGI application factory for Invenio."""
from __future__ import absolute_import, print_function
from werkzeug.contrib.fixers import ProxyFix
from werkzeug.wsgi import DispatcherMiddleware
def create_wsgi_factory(mounts_factories):
    """Create a WSGI application factory.

    Usage example:

    .. code-block:: python

        wsgi_factory = create_wsgi_factory({'/api': create_api})

    :param mounts_factories: Dictionary of mount points per application
        factory.

    .. versionadded:: 1.0.0
    """
    def create_wsgi(app, **kwargs):
        # Instantiate every mounted sub-application up front, then let
        # DispatcherMiddleware route requests to them by URL prefix.
        mounts = {}
        for mount_point, app_factory in mounts_factories.items():
            mounts[mount_point] = app_factory(**kwargs)
        return DispatcherMiddleware(app.wsgi_app, mounts)
    return create_wsgi
def wsgi_proxyfix(factory=None):
    """Fix ``REMOTE_ADDR`` based on ``X-Forwarded-For`` headers.

    .. note::

       ``WSGI_PROXIES`` must be set to the correct number of proxies,
       otherwise your application is susceptible to malicious attacks.

    .. versionadded:: 1.0.0
    """
    def create_wsgi(app, **kwargs):
        # Build the inner WSGI callable first — either via the wrapped
        # factory, or straight from the application object.
        if factory:
            wsgi_app = factory(app, **kwargs)
        else:
            wsgi_app = app.wsgi_app
        # Only wrap with ProxyFix when the app is configured behind proxies.
        num_proxies = app.config.get('WSGI_PROXIES')
        if num_proxies:
            return ProxyFix(wsgi_app, num_proxies=num_proxies)
        return wsgi_app
    return create_wsgi
| {
"content_hash": "7a639d82f2cd8b2078b0c79872e7cbb8",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 77,
"avg_line_length": 28.319148936170212,
"alnum_prop": 0.6513899323816679,
"repo_name": "tiborsimko/invenio-base",
"id": "0b236f3dd96ad8dcabe9008013d4ec3ee6810a70",
"size": "1566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invenio_base/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46013"
},
{
"name": "Shell",
"bytes": "423"
}
],
"symlink_target": ""
} |
from hintsvm import *
def svm_read_problem(data_file_name):
    """
    svm_read_problem(data_file_name) -> [y, x]

    Read LIBSVM-format data from data_file_name and return labels y
    and data instances x.

    Each line looks like ``label index1:value1 index2:value2 ...``;
    labels become floats in y, and each instance becomes a dict mapping
    int feature index -> float value in x.
    """
    prob_y = []
    prob_x = []
    # BUG FIX: the original iterated ``open(...)`` without ever closing the
    # file; ``with`` guarantees the handle is released.
    with open(data_file_name) as data_file:
        for line in data_file:
            parts = line.split(None, 1)
            # Robustness: silently skip blank lines (e.g. a trailing newline
            # at end of file), which would otherwise raise on unpacking.
            if not parts:
                continue
            # In case an instance with all zero features
            if len(parts) == 1:
                parts += ['']
            label, features = parts
            xi = {}
            for e in features.split():
                ind, val = e.split(":")
                xi[int(ind)] = float(val)
            prob_y.append(float(label))
            prob_x.append(xi)
    return (prob_y, prob_x)
def svm_load_model(model_file_name):
    """
    svm_load_model(model_file_name) -> model

    Load a LIBSVM model from model_file_name and return.
    """
    # Ask the C library to parse the model file; a falsy result means the
    # file could not be opened/parsed.
    raw_model = libsvm.svm_load_model(model_file_name)
    if not raw_model:
        print("can't open model file %s" % model_file_name)
        return None
    # Wrap the ctypes handle in the Python-side model object.
    return toPyModel(raw_model)
def svm_save_model(model_file_name, model):
    """
    svm_save_model(model_file_name, model) -> None

    Save a LIBSVM model to the file model_file_name.
    """
    # Thin wrapper: delegates directly to the C library with no
    # Python-side validation or error reporting.
    libsvm.svm_save_model(model_file_name, model)
def evaluations(ty, pv):
    """
    evaluations(ty, pv) -> (ACC, MSE, SCC)

    Calculate accuracy, mean squared error and squared correlation coefficient
    using the true values (ty) and predicted values (pv).

    :param ty: sequence of true values.
    :param pv: sequence of predicted values (same length as ty).
    :returns: tuple (ACC, MSE, SCC); ACC is a percentage, and SCC is NaN
        when the correlation denominator is zero (e.g. constant predictions).
    :raises ValueError: if the two sequences differ in length.
    """
    if len(ty) != len(pv):
        raise ValueError("len(ty) must equal to len(pv)")
    total_correct = total_error = 0
    sumv = sumy = sumvv = sumyy = sumvy = 0
    for v, y in zip(pv, ty):
        if y == v:
            total_correct += 1
        total_error += (v - y) * (v - y)
        sumv += v
        sumy += y
        sumvv += v * v
        sumyy += y * y
        sumvy += v * y
    n = len(ty)
    ACC = 100.0 * total_correct / n
    MSE = float(total_error) / n
    try:
        # Squared Pearson correlation coefficient.
        SCC = ((n * sumvy - sumv * sumy) * (n * sumvy - sumv * sumy)) / \
              ((n * sumvv - sumv * sumv) * (n * sumyy - sumy * sumy))
    # BUG FIX: the original used a bare ``except:``, which would also hide
    # unrelated errors (KeyboardInterrupt, typos, ...). Only a zero
    # denominator should yield NaN.
    except ZeroDivisionError:
        SCC = float('nan')
    return (ACC, MSE, SCC)
def svm_train(arg1, arg2=None, arg3=None, arg4 = None):
    """
    svm_train(W, y, x [, 'options']) -> model | ACC | MSE
    svm_train(prob, [, 'options']) -> model | ACC | MSE
    svm_train(prob, param) -> model | ACC| MSE

    Train an SVM model from weighted data (W, y, x) or an svm_problem prob using
    'options' or an svm_parameter param.
    If '-v' is specified in 'options' (i.e., cross validation)
    either accuracy (ACC) or mean-squared error (MSE) is returned.

    'options':
        -s svm_type : set type of SVM (default 0)
            0 -- C-SVC
            1 -- nu-SVC
            2 -- one-class SVM
            3 -- epsilon-SVR
            4 -- nu-SVR
            5 -- hint SVM
        -t kernel_type : set type of kernel function (default 2)
            0 -- linear: u'*v
            1 -- polynomial: (gamma*u'*v + coef0)^degree
            2 -- radial basis function: exp(-gamma*|u-v|^2)
            3 -- sigmoid: tanh(gamma*u'*v + coef0)
            4 -- precomputed kernel (kernel values in training_set_file)
        -d degree : set degree in kernel function (default 3)
        -g gamma : set gamma in kernel function (default 1/num_features)
        -r coef0 : set coef0 in kernel function (default 0)
        -c cost : set the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1)
        -n nu : set the parameter nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5)
        -p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1)
        -m cachesize : set cache memory size in MB (default 100)
        -e epsilon : set tolerance of termination criterion (default 0.001)
        -h shrinking : whether to use the shrinking heuristics, 0 or 1 (default 1)
        -b probability_estimates : whether to train a SVC or SVR model for probability estimates, 0 or 1 (default 0)
        -wi weight : set the parameter C of class i to weight*C, for C-SVC (default 1)
        -v n: n-fold cross validation mode
        -q : quiet mode (no outputs)
    """
    prob, param = None, None
    # Dispatch on argument types: raw (W, y, x [, options]) lists vs. an
    # already-built svm_problem with options-string or svm_parameter.
    if isinstance(arg1, (list, tuple)):
        assert isinstance(arg2, (list, tuple))
        assert isinstance(arg3, list)
        W, y, x, options = arg1, arg2, arg3, arg4
        prob = svm_problem(W, y, x)
        param = svm_parameter(options)
    elif isinstance(arg1, svm_problem):
        prob = arg1
        if isinstance(arg2, svm_parameter):
            param = arg2
        else:
            param = svm_parameter(arg2)
    if prob == None or param == None:
        raise TypeError("Wrong types for the arguments")
    # Precomputed kernels store the sample serial number in the first node of
    # each instance; validate that convention before handing off to C.
    if param.kernel_type == PRECOMPUTED:
        for xi in prob.x_space:
            idx, val = xi[0].index, xi[0].value
            if xi[0].index != 0:
                raise ValueError('Wrong input format: first column must be 0:sample_serial_number')
            if val <= 0 or val > prob.n:
                raise ValueError('Wrong input format: sample_serial_number out of range')
    # Default gamma to 1/num_features when unset (see -g option above).
    if param.gamma == 0 and prob.n > 0:
        param.gamma = 1.0 / prob.n
    libsvm.svm_set_print_string_function(param.print_func)
    err_msg = libsvm.svm_check_parameter(prob, param)
    if err_msg:
        raise ValueError('Error: %s' % err_msg)
    if param.cross_validation:
        # Cross-validation mode: return a score instead of a model.
        l, nr_fold = prob.l, param.nr_fold
        target = (c_double * l)()
        libsvm.svm_cross_validation(prob, param, nr_fold, target)
        ACC, MSE, SCC = evaluations(prob.y[:l], target[:l])
        if param.svm_type in [EPSILON_SVR, NU_SVR]:
            print("Cross Validation Mean squared error = %g" % MSE)
            print("Cross Validation Squared correlation coefficient = %g" % SCC)
            return MSE
        else:
            print("Cross Validation Accuracy = %g%%" % ACC)
            return ACC
    else:
        m = libsvm.svm_train(prob, param)
        m = toPyModel(m)
        # If prob is destroyed, data including SVs pointed by m can remain.
        m.x_space = prob.x_space
        return m
def svm_predict(y, x, m, options=""):
    """
    svm_predict(y, x, m [, "options"]) -> (p_labels, p_acc, p_vals)

    Predict data (y, x) with the SVM model m.
    "options":
        -b probability_estimates: whether to predict probability estimates,
            0 or 1 (default 0); for one-class SVM only 0 is supported.

    The return tuple contains
    p_labels: a list of predicted labels
    p_acc: a tuple including accuracy (for classification), mean-squared
        error, and squared correlation coefficient (for regression).
    p_vals: a list of decision values or probability estimates (if '-b 1'
        is specified). If k is the number of classes, for decision values,
        each element includes results of predicting k(k-1)/2 binary-class
        SVMs. For probabilities, each element contains k values indicating
        the probability that the testing instance is in each class.
        Note that the order of classes here is the same as 'model.label'
        field in the model structure.
    """
    # Parse the options string; only '-b' is recognised.
    predict_probability = 0
    argv = options.split()
    i = 0
    while i < len(argv):
        if argv[i] == '-b':
            i += 1
            predict_probability = int(argv[i])
        else:
            raise ValueError("Wrong options")
        i+=1
    svm_type = m.get_svm_type()
    is_prob_model = m.is_probability_model()
    nr_class = m.get_nr_class()
    pred_labels = []
    pred_values = []
    if predict_probability:
        if not is_prob_model:
            raise ValueError("Model does not support probabiliy estimates")
        if svm_type in [NU_SVR, EPSILON_SVR]:
            print("Prob. model for test data: target value = predicted value + z,\n"
                  "z: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma=%g" % m.get_svr_probability());
            # Regression models produce no per-class probabilities.
            nr_class = 0
        prob_estimates = (c_double * nr_class)()
        for xi in x:
            # Convert the Python instance to a ctypes svm_node array.
            xi, idx = gen_svm_nodearray(xi)
            label = libsvm.svm_predict_probability(m, xi, prob_estimates)
            values = prob_estimates[:nr_class]
            pred_labels += [label]
            pred_values += [values]
    else:
        if is_prob_model:
            print("Model supports probability estimates, but disabled in predicton.")
        # One decision value for single-output types, otherwise one per
        # pairwise binary classifier: k(k-1)/2.
        if svm_type in (ONE_CLASS, EPSILON_SVR, NU_SVC):
            nr_classifier = 1
        else:
            nr_classifier = nr_class*(nr_class-1)//2
        dec_values = (c_double * nr_classifier)()
        for xi in x:
            xi, idx = gen_svm_nodearray(xi)
            label = libsvm.svm_predict_values(m, xi, dec_values)
            if(nr_class == 1):
                values = [1]
            else:
                values = dec_values[:nr_classifier]
            pred_labels += [label]
            pred_values += [values]
    ACC, MSE, SCC = evaluations(y, pred_labels)
    l = len(y)
    if svm_type in [EPSILON_SVR, NU_SVR]:
        print("Mean squared error = %g (regression)" % MSE)
        print("Squared correlation coefficient = %g (regression)" % SCC)
    else:
        print("Accuracy = %g%% (%d/%d) (classification)" % (ACC, int(l*ACC/100), l))
    return pred_labels, (ACC, MSE, SCC), pred_values
| {
"content_hash": "af431585321fab1e3dc47c8c2fef93f8",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 113,
"avg_line_length": 33.42040816326531,
"alnum_prop": 0.6504640937957987,
"repo_name": "ntucllab/hintsvm",
"id": "1470bc53647da62ff3315b1815473db58ded5bf0",
"size": "8211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/hintsvmutil.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "68994"
},
{
"name": "C++",
"bytes": "104773"
},
{
"name": "HTML",
"bytes": "71294"
},
{
"name": "Java",
"bytes": "97728"
},
{
"name": "Makefile",
"bytes": "3691"
},
{
"name": "Matlab",
"bytes": "799"
},
{
"name": "Python",
"bytes": "17117"
}
],
"symlink_target": ""
} |
"""Made plugin names in kek datum non nullable
Revision ID: 47b69e523451
Revises: cd4106a1a0
Create Date: 2014-06-16 14:05:45.428226
"""
# revision identifiers, used by Alembic.
revision = '47b69e523451'
down_revision = 'cd4106a1a0'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply the migration: disallow NULL in ``kek_data.plugin_name``."""
    op.alter_column('kek_data', 'plugin_name', nullable=False)
def downgrade():
    """Revert the migration: allow NULL in ``kek_data.plugin_name`` again."""
    op.alter_column('kek_data', 'plugin_name', nullable=True)
| {
"content_hash": "04193fddc6e0cf8f32812b8bdb8411bc",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 62,
"avg_line_length": 20.227272727272727,
"alnum_prop": 0.7303370786516854,
"repo_name": "MCDong/barbican",
"id": "88d9c81cb9b96c71c8f40c100410a8e67ae448c1",
"size": "445",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "barbican/model/migration/alembic_migrations/versions/47b69e523451_made_plugin_names_in_kek_datum_non_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "API Blueprint",
"bytes": "1590"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1781853"
},
{
"name": "Shell",
"bytes": "15822"
}
],
"symlink_target": ""
} |
import os
import sys
import logging
import traceback
# related third party imports
import webapp2
import httpagentparser
from webapp2_extras import jinja2
from google.appengine.api import app_identity
from google.appengine.api import taskqueue
# local application/library specific imports
from bp_includes.lib import jinja_bootstrap
import i18n
def handle_error(request, response, exception):
    """Global webapp2 error handler.

    Optionally emails the configured developers a detailed HTML report
    (request, user agent, geo data from ``i18n``, and traceback), then
    renders the error template matching the exception's HTTP status.

    :param request: the webapp2 request being handled when the error occurred.
    :param response: the webapp2 response the error page is written to.
    :param exception: the exception (or webapp2 HTTP error) that was raised.
    """
    exc_type, exc_value, exc_tb = sys.exc_info()
    c = {
        'exception': str(exception),
        'url': request.url,
    }
    if request.app.config.get('send_mail_developer') is not False:
        # send email
        subject = "[{}] {} Error ({})".format(request.app.config.get('environment').upper(),
                                              request.app.config.get('app_name'), exc_type.__name__)
        lines = traceback.format_exception(exc_type, exc_value, exc_tb)
        ua = httpagentparser.detect(request.user_agent)
        # BUG FIX: the original used ``ua.has_key('flavor') and 'flavor' or 'os'``;
        # ``dict.has_key`` does not exist in Python 3, and the and/or idiom is
        # fragile — use a plain conditional expression.
        _os = 'flavor' if 'flavor' in ua else 'os'
        operating_system = str(ua[_os]['name']) if "name" in ua[_os] else "-"
        if 'version' in ua[_os]:
            operating_system += ' ' + str(ua[_os]['version'])
        if 'dist' in ua:
            operating_system += ' ' + str(ua['dist'])
        city = i18n.get_city_code(request)
        region = i18n.get_region_code(request)
        country = i18n.get_country_code(request)
        coordinates = i18n.get_city_lat_long(request)
        browser = ua['browser']['name'] if 'browser' in ua else "-"
        browser_version = ua['browser']['version'] if 'browser' in ua else "-"
        message = '<strong>Application ID:</strong> ' + app_identity.get_application_id() + "<br />" + \
                  '<strong>Application Version:</strong> ' + os.environ['CURRENT_VERSION_ID'] + "<br />" + \
                  '<hr><strong>IP Address:</strong> ' + str(request.remote_addr) + "<br />" + \
                  '<strong>City:</strong> ' + str(city) + "<br />" + \
                  '<strong>Region:</strong> ' + str(region) + "<br />" + \
                  '<strong>Country:</strong> ' + str(country) + "<br />" + \
                  '<strong>Coordinates:</strong> <a href="https://www.google.com.au/maps/preview/@' + str(
                      coordinates) + ',8z">' + str(coordinates) + '</a><br />' + \
                  '<hr><strong>User Agent:</strong> ' + str(request.user_agent) + "<br />" + \
                  '<strong>Operating System:</strong> ' + str(operating_system) + "<br />" + \
                  '<strong>Browser:</strong> ' + str(browser) + "<br />" + \
                  '<strong>Browser Version:</strong> ' + str(browser_version) + "<br />" + \
                  '<hr><strong>Error Type:</strong> ' + exc_type.__name__ + "<br />" + \
                  '<strong>Description:</strong> ' + c['exception'] + "<br />" + \
                  '<strong>Method:</strong> ' + str(os.environ['REQUEST_METHOD']) + "<br />" + \
                  '<strong>URL:</strong> ' + c['url'] + "<br />" + \
                  '<strong>Referrer:</strong> ' + str(request.referer) + "<br />" + \
                  '<strong>Traceback:</strong> <br />' + '<br />'.join(lines)
        # BUG FIX: the original compared strings with ``is not``, which tests
        # object identity (implementation-dependent for string literals), not
        # equality — use ``!=`` so the guard against recursive email-log
        # failures actually works.
        if c['exception'] != 'Error saving Email Log in datastore':
            email_url = webapp2.uri_for('taskqueue-send-email')
            for dev in request.app.config.get('developers'):
                taskqueue.add(url=email_url, params={
                    'to': dev[1],
                    'subject': subject,
                    'body': message,
                    'sender': request.app.config.get('contact_sender'),
                })
    # Fall back to a generic 500 when the exception carries no HTTP status.
    status_int = exception.status_int if hasattr(exception, 'status_int') else 500
    template = request.app.config.get('error_templates')[status_int]
    t = jinja2.get_jinja2(factory=jinja_bootstrap.jinja2_factory, app=webapp2.get_app()).render_template(template, **c)
    logging.error("Error {}: {}".format(status_int, exception))
    response.write(t)
    response.set_status(status_int)
| {
"content_hash": "b9c39e557d56084e19dd10a8a6c32e17",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 119,
"avg_line_length": 48.951219512195124,
"alnum_prop": 0.5493273542600897,
"repo_name": "ThomasMarcel/webapp-course",
"id": "44081ce893b3896936367c751a80aeff5e65b8f9",
"size": "4066",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "resources/gae-boilerplate/bp_includes/lib/error_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "11753"
},
{
"name": "CSS",
"bytes": "9944"
},
{
"name": "Groff",
"bytes": "755"
},
{
"name": "HTML",
"bytes": "67232"
},
{
"name": "JavaScript",
"bytes": "3817"
},
{
"name": "Python",
"bytes": "249658"
},
{
"name": "Shell",
"bytes": "2508"
}
],
"symlink_target": ""
} |
import os
import unittest
import subprocess32 as subprocess
import imath
import IECore
import Gaffer
import GafferTest
import GafferImage
import GafferImageTest
class ColorSpaceTest( GafferImageTest.ImageTestCase ) :

    """Unit tests for the GafferImage.ColorSpace node (colour-space
    conversion of images, driven by OpenColorIO)."""

    # Source image used by most of the tests below.
    fileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/checker.exr" )

    def test( self ) :
        """With no spaces set the node passes the input through; setting
        different input/output spaces changes the image."""
        n = GafferImage.ImageReader()
        n["fileName"].setValue( self.fileName )
        o = GafferImage.ColorSpace()
        o["in"].setInput( n["out"] )
        self.assertImageHashesEqual( n["out"], o["out"] )
        self.assertImagesEqual( n["out"], o["out"] )
        o["inputSpace"].setValue( "linear" )
        o["outputSpace"].setValue( "sRGB" )
        self.assertNotEqual( GafferImage.ImageAlgo.image( n["out"] ), GafferImage.ImageAlgo.image( o["out"] ) )

    def testHashPassThrough( self ) :
        """Disabling the node, or converting between identical spaces, must
        pass hashes and channel data objects through unchanged."""
        n = GafferImage.ImageReader()
        n["fileName"].setValue( self.fileName )
        o = GafferImage.ColorSpace()
        o["in"].setInput( n["out"] )
        self.assertImageHashesEqual( n["out"], o["out"] )
        self.assertImagesEqual( n["out"], o["out"] )
        o["inputSpace"].setValue( "linear" )
        o["outputSpace"].setValue( "sRGB" )
        self.assertNotEqual( GafferImage.ImageAlgo.image( n["out"] ), GafferImage.ImageAlgo.image( o["out"] ) )
        # Disabled node: everything must pass through.
        o["enabled"].setValue( False )
        self.assertImageHashesEqual( n["out"], o["out"] )
        self.assertImagesEqual( n["out"], o["out"] )
        self.assertEqual( n["out"]['format'].hash(), o["out"]['format'].hash() )
        self.assertEqual( n["out"]['dataWindow'].hash(), o["out"]['dataWindow'].hash() )
        self.assertEqual( n["out"]["metadata"].getValue(), o["out"]["metadata"].getValue() )
        self.assertEqual( n["out"]['channelNames'].hash(), o["out"]['channelNames'].hash() )
        self.assertTrue(
            o["out"].channelData( "R", imath.V2i( 0 ), _copy = False ).isSame(
                n["out"].channelData( "R", imath.V2i( 0 ), _copy = False )
            )
        )
        # Identical input/output spaces: also a pass-through.
        o["enabled"].setValue( True )
        o["inputSpace"].setValue( "linear" )
        o["outputSpace"].setValue( "linear" )
        self.assertImageHashesEqual( n["out"], o["out"] )
        self.assertImagesEqual( n["out"], o["out"] )
        self.assertEqual( n["out"]['format'].hash(), o["out"]['format'].hash() )
        self.assertEqual( n["out"]['dataWindow'].hash(), o["out"]['dataWindow'].hash() )
        self.assertEqual( n["out"]["metadata"].getValue(), o["out"]["metadata"].getValue() )
        self.assertEqual( n["out"]['channelNames'].hash(), o["out"]['channelNames'].hash() )
        self.assertTrue(
            o["out"].channelData( "R", imath.V2i( 0 ), _copy = False ).isSame(
                n["out"].channelData( "R", imath.V2i( 0 ), _copy = False )
            )
        )

    def testImageHashPassThrough( self ) :
        """The whole-image hash changes only when a real conversion is set."""
        i = GafferImage.ImageReader()
        i["fileName"].setValue( self.fileName )
        o = GafferImage.ColorSpace()
        o["in"].setInput( i["out"] )
        self.assertEqual( GafferImage.ImageAlgo.imageHash( i["out"] ), GafferImage.ImageAlgo.imageHash( o["out"] ) )
        o["inputSpace"].setValue( "linear" )
        o["outputSpace"].setValue( "sRGB" )
        self.assertNotEqual( GafferImage.ImageAlgo.imageHash( i["out"] ), GafferImage.ImageAlgo.imageHash( o["out"] ) )

    def testChannelsAreSeparate( self ) :
        """Distinct channels must produce distinct hashes and data."""
        i = GafferImage.ImageReader()
        i["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/circles.exr" ) )
        o = GafferImage.ColorSpace()
        o["in"].setInput( i["out"] )
        o["inputSpace"].setValue( "linear" )
        o["outputSpace"].setValue( "sRGB" )
        self.assertNotEqual(
            o["out"].channelDataHash( "R", imath.V2i( 0 ) ),
            o["out"].channelDataHash( "G", imath.V2i( 0 ) )
        )
        self.assertNotEqual(
            o["out"].channelData( "R", imath.V2i( 0 ) ),
            o["out"].channelData( "G", imath.V2i( 0 ) )
        )

    def testPassThrough( self ) :
        """Format, data window and channel names are unaffected by the
        colour conversion (only pixel data changes)."""
        i = GafferImage.ImageReader()
        i["fileName"].setValue( self.fileName )
        o = GafferImage.ColorSpace()
        o["in"].setInput( i["out"] )
        o["inputSpace"].setValue( "linear" )
        o["outputSpace"].setValue( "sRGB" )
        self.assertEqual( i["out"]["format"].hash(), o["out"]["format"].hash() )
        self.assertEqual( i["out"]["dataWindow"].hash(), o["out"]["dataWindow"].hash() )
        self.assertEqual( i["out"]["channelNames"].hash(), o["out"]["channelNames"].hash() )
        self.assertEqual( i["out"]["format"].getValue(), o["out"]["format"].getValue() )
        self.assertEqual( i["out"]["dataWindow"].getValue(), o["out"]["dataWindow"].getValue() )
        self.assertEqual( i["out"]["channelNames"].getValue(), o["out"]["channelNames"].getValue() )

    def testContext( self ) :
        """OCIO context variables — from the environment and from the node's
        ``context`` plug — must drive the transform (exercised through
        ``gaffer execute`` in a subprocess)."""
        scriptFileName = self.temporaryDirectory() + "/script.gfr"
        contextImageFile = self.temporaryDirectory() + "/context.#.exr"
        contextOverrideImageFile = self.temporaryDirectory() + "/context_override.#.exr"
        s = Gaffer.ScriptNode()
        s["reader"] = GafferImage.ImageReader()
        s["reader"]["fileName"].setValue( self.fileName )
        s["cs"] = GafferImage.ColorSpace()
        s["cs"]["in"].setInput( s["reader"]["out"] )
        s["cs"]["inputSpace"].setValue( "linear" )
        s["cs"]["outputSpace"].setValue( "context" )
        s["writer"] = GafferImage.ImageWriter()
        s["writer"]["fileName"].setValue( contextImageFile )
        s["writer"]["in"].setInput( s["cs"]["out"] )
        s["writer"]["channels"].setValue( "R G B A" )
        s["fileName"].setValue( scriptFileName )
        s.save()
        # The OCIO config reads $LUT and $CDL from the environment.
        env = os.environ.copy()
        env["OCIO"] = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/openColorIO/context.ocio" )
        env["LUT"] = "srgb.spi1d"
        env["CDL"] = "cineon.spi1d"
        subprocess.check_call(
            " ".join(["gaffer", "execute", scriptFileName,"-frames", "1"]),
            shell = True,
            stderr = subprocess.PIPE,
            env = env,
        )
        i = GafferImage.ImageReader()
        i["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/checker_ocio_context.exr" ) )
        o = GafferImage.ImageReader()
        o["fileName"].setValue( contextImageFile )
        expected = i["out"]
        context = o["out"]
        # check against expected output
        self.assertImagesEqual( expected, context, ignoreMetadata = True )
        # override context
        s["writer"]["fileName"].setValue( contextOverrideImageFile )
        s["cs"]["context"].addChild( Gaffer.NameValuePlug("LUT", "cineon.spi1d", True, "LUT", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
        s["cs"]["context"].addChild( Gaffer.NameValuePlug("CDL", "rec709.spi1d", True, "CDL", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
        s.save()
        subprocess.check_call(
            " ".join(["gaffer", "execute", scriptFileName,"-frames", "1"]),
            shell = True,
            stderr = subprocess.PIPE,
            env = env
        )
        i = GafferImage.ImageReader()
        i["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/checker_ocio_context_override.exr" ) )
        o = GafferImage.ImageReader()
        o["fileName"].setValue( contextOverrideImageFile )
        expected = i["out"]
        context = o["out"]
        # check override produce expected output
        self.assertImagesEqual( expected, context, ignoreMetadata = True )

    def testSingleChannelImage( self ) :
        """A single-channel image converts identically whether or not the
        missing channels are filled in by a Shuffle first."""
        r = GafferImage.ImageReader()
        r["fileName"].setValue( "${GAFFER_ROOT}/python/GafferImageTest/images/blurRange.exr" )
        self.assertEqual( r["out"]["channelNames"].getValue(), IECore.StringVectorData( [ "R" ] ) )
        s = GafferImage.Shuffle()
        s["in"].setInput( r["out"] )
        s["channels"].addChild( s.ChannelPlug( "G", "R" ) )
        s["channels"].addChild( s.ChannelPlug( "B", "R" ) )
        c1 = GafferImage.ColorSpace()
        c1["in"].setInput( r["out"] )
        c1["inputSpace"].setValue( "linear" )
        c1["outputSpace"].setValue( "sRGB" )
        c2 = GafferImage.ColorSpace()
        c2["in"].setInput( s["out"] )
        c2["inputSpace"].setValue( "linear" )
        c2["outputSpace"].setValue( "sRGB" )
        self.assertEqual( c2["out"].channelData( "R", imath.V2i( 0 ) ), c1["out"].channelData( "R", imath.V2i( 0 ) ) )

    def testUnpremultiplied( self ) :
        """``processUnpremultiplied`` must behave like an explicit
        Unpremultiply / ColorSpace / Premultiply chain, and be a no-op where
        alpha is zero."""
        i = GafferImage.ImageReader()
        i["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/circles.exr" ) )
        # Build an alpha channel from R, then offset it so it is non-zero.
        shuffleAlpha = GafferImage.Shuffle()
        shuffleAlpha["channels"].addChild( GafferImage.Shuffle.ChannelPlug( "channel" ) )
        shuffleAlpha["in"].setInput( i["out"] )
        shuffleAlpha["channels"]["channel"]["out"].setValue( 'A' )
        shuffleAlpha["channels"]["channel"]["in"].setValue( 'R' )
        gradeAlpha = GafferImage.Grade()
        gradeAlpha["in"].setInput( shuffleAlpha["out"] )
        gradeAlpha["channels"].setValue( '[RGBA]' )
        gradeAlpha["offset"].setValue( imath.Color4f( 0, 0, 0, 0.1 ) )
        unpremultipliedColorSpace = GafferImage.ColorSpace()
        unpremultipliedColorSpace["in"].setInput( gradeAlpha["out"] )
        unpremultipliedColorSpace["processUnpremultiplied"].setValue( True )
        unpremultipliedColorSpace["inputSpace"].setValue( 'linear' )
        unpremultipliedColorSpace["outputSpace"].setValue( 'sRGB' )
        unpremultiply = GafferImage.Unpremultiply()
        unpremultiply["in"].setInput( gradeAlpha["out"] )
        bareColorSpace = GafferImage.ColorSpace()
        bareColorSpace["in"].setInput( unpremultiply["out"] )
        bareColorSpace["inputSpace"].setValue( 'linear' )
        bareColorSpace["outputSpace"].setValue( 'sRGB' )
        premultiply = GafferImage.Premultiply()
        premultiply["in"].setInput( bareColorSpace["out"] )
        # Assert that with a non-zero alpha, processUnpremultiplied is identical to:
        # unpremult, colorSpace, and premult
        self.assertImagesEqual( unpremultipliedColorSpace["out"], premultiply["out"] )
        gradeAlpha["multiply"].setValue( imath.Color4f( 1, 1, 1, 0.0 ) )
        gradeAlpha["offset"].setValue( imath.Color4f( 0, 0, 0, 0.0 ) )
        # Assert that when alpha is zero, processUnpremultiplied doesn't affect the result
        defaultColorSpace = GafferImage.ColorSpace()
        defaultColorSpace["in"].setInput( gradeAlpha["out"] )
        defaultColorSpace["inputSpace"].setValue( 'linear' )
        defaultColorSpace["outputSpace"].setValue( 'sRGB' )
        self.assertImagesEqual( unpremultipliedColorSpace["out"], defaultColorSpace["out"] )
if __name__ == "__main__":
    # Allow the test suite to be run directly as a script.
    unittest.main()
| {
"content_hash": "c8b47751290cb21ef6761c96886d9c8a",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 153,
"avg_line_length": 35.16129032258065,
"alnum_prop": 0.6711518858307849,
"repo_name": "hradec/gaffer",
"id": "f827fac64b9759050da4fd1349334995b5de8cb7",
"size": "11675",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "python/GafferImageTest/ColorSpaceTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "54696"
},
{
"name": "C++",
"bytes": "8682649"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6208"
},
{
"name": "Python",
"bytes": "9458935"
},
{
"name": "Ruby",
"bytes": "419"
},
{
"name": "Shell",
"bytes": "14299"
}
],
"symlink_target": ""
} |
"""Sphinx build configuration for the RDR documentation (see
http://www.sphinx-doc.org/en/master/config for all options)."""
import os
import sys

# Make local Sphinx extensions (./_ext) and the project package importable.
sys.path.append(os.path.abspath('./_ext'))
sys.path.insert(0, os.path.abspath('../'))

# -- Project information -----------------------------------------------------
project = 'The All of Us Raw Data Repository (RDR)'
copyright = '2019, RDR team' # pylint: disable=redefined-builtin
author = 'RDR Dev team'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1'

# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.todo',
    'sphinx.ext.githubpages',
    'sphinx.ext.autodoc',
    'readthedocs_ext.readthedocs',
    'rdrhtml'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index' # This explicitly sets master_doc to index.rst
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
    'css/simplification.css'
]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
#import stanford_theme
#html_theme = "stanford_theme"
#html_theme_path = [stanford_theme.get_html_theme_path()]
html_theme = 'sphinx_rtd_theme'
# Keep autodoc members in source-file order rather than alphabetical.
autodoc_member_order = 'bysource'
| {
"content_hash": "714d7b6577ac02898ceb2bfd66f0e62a",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 78,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.6882656350741457,
"repo_name": "all-of-us/raw-data-repository",
"id": "54a8f38ef17eb3c814c04604adb7ffd3933c09d9",
"size": "3128",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "doc/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
} |
"""PubSub verifier used for end-to-end test."""
# pytype: skip-file
import logging
import time
from collections import Counter
from hamcrest.core.base_matcher import BaseMatcher
from apache_beam.io.gcp.pubsub import PubsubMessage
__all__ = ['PubSubMessageMatcher']
# Protect against environments where pubsub library is not available.
try:
from google.cloud import pubsub
except ImportError:
pubsub = None
# Default number of seconds to wait for all expected messages to appear
# (see PubSubMessageMatcher.__init__ ``timeout``).
DEFAULT_TIMEOUT = 5 * 60
# Default pause, in seconds, between successive pulls from the subscription.
DEFAULT_SLEEP_TIME = 1
# Upper bound on the number of messages requested in a single pull.
DEFAULT_MAX_MESSAGES_IN_ONE_PULL = 50
# Seconds after which an individual pull is abandoned and repeated.
DEFAULT_PULL_TIMEOUT = 30.0

_LOGGER = logging.getLogger(__name__)
class PubSubMessageMatcher(BaseMatcher):
"""Matcher that verifies messages from given subscription.
This matcher can block the test and keep pulling messages from given
subscription until all expected messages are shown or timeout.
"""
def __init__(
    self,
    project,
    sub_name,
    expected_msg=None,
    expected_msg_len=None,
    timeout=DEFAULT_TIMEOUT,
    with_attributes=False,
    strip_attributes=None,
    sleep_time=DEFAULT_SLEEP_TIME,
    max_messages_in_one_pull=DEFAULT_MAX_MESSAGES_IN_ONE_PULL,
    pull_timeout=DEFAULT_PULL_TIMEOUT):
    """Initialize PubSubMessageMatcher object.

    Args:
      project: A name string of project.
      sub_name: A name string of subscription which is attached to output.
      expected_msg: A string list that contains expected message data pulled
        from the subscription. See also: with_attributes.
      expected_msg_len: Number of expected messages pulled from the
        subscription.
      timeout: Timeout in seconds to wait for all expected messages appears.
      with_attributes: If True, will match against both message data and
        attributes. If True, expected_msg should be a list of ``PubsubMessage``
        objects. Otherwise, it should be a list of ``bytes``.
      strip_attributes: List of strings. If with_attributes==True, strip the
        attributes keyed by these values from incoming messages.
        If a key is missing, will add an attribute with an error message as
        value to prevent a successful match.
      sleep_time: Time in seconds between which the pulls from pubsub are done.
      max_messages_in_one_pull: Maximum number of messages pulled from pubsub
        at once.
      pull_timeout: Time in seconds after which the pull from pubsub is repeated

    Raises:
      ImportError: if the optional google-cloud-pubsub dependency is missing.
      ValueError: if the project/subscription names are empty, or neither
        expected_msg nor expected_msg_len is provided, or either has the
        wrong type.
    """
    # Fail fast when the optional pubsub dependency was not importable.
    if pubsub is None:
        raise ImportError('PubSub dependencies are not installed.')
    if not project:
        raise ValueError('Invalid project %s.' % project)
    if not sub_name:
        raise ValueError('Invalid subscription %s.' % sub_name)
    # At least one of expected_msg / expected_msg_len must be supplied.
    if not expected_msg_len and not expected_msg:
        raise ValueError(
            'Required expected_msg: {} or expected_msg_len: {}.'.format(
                expected_msg, expected_msg_len))
    if expected_msg and not isinstance(expected_msg, list):
        raise ValueError('Invalid expected messages %s.' % expected_msg)
    if expected_msg_len and not isinstance(expected_msg_len, int):
        raise ValueError('Invalid expected messages %s.' % expected_msg_len)

    self.project = project
    self.sub_name = sub_name
    self.expected_msg = expected_msg
    # Fall back to the explicit message list's length when no count is given
    # (the validation above guarantees at least one of the two is present).
    self.expected_msg_len = expected_msg_len or len(self.expected_msg)
    self.timeout = timeout
    # ``messages`` stays None until the matcher pulls from the subscription.
    self.messages = None
    self.messages_all_details = None
    self.with_attributes = with_attributes
    self.strip_attributes = strip_attributes
    self.sleep_time = sleep_time
    self.max_messages_in_one_pull = max_messages_in_one_pull
    self.pull_timeout = pull_timeout
def _matches(self, _):
if self.messages is None:
self.messages, self.messages_all_details = self._wait_for_messages(
self.expected_msg_len, self.timeout)
if self.expected_msg:
return Counter(self.messages) == Counter(self.expected_msg)
else:
return len(self.messages) == self.expected_msg_len
def _wait_for_messages(self, expected_num, timeout):
"""Wait for messages from given subscription."""
total_messages = []
total_messages_all_details = []
sub_client = pubsub.SubscriberClient()
start_time = time.time()
while time.time() - start_time <= timeout:
response = sub_client.pull(
subscription=self.sub_name,
max_messages=self.max_messages_in_one_pull,
timeout=self.pull_timeout)
for rm in response.received_messages:
msg = PubsubMessage._from_message(rm.message)
full_message = (
msg.data,
msg.attributes,
msg.attributes,
msg.publish_time,
msg.ordering_key)
if not self.with_attributes:
total_messages.append(msg.data)
total_messages_all_details.append(full_message)
continue
if self.strip_attributes:
for attr in self.strip_attributes:
try:
del msg.attributes[attr]
except KeyError:
msg.attributes[attr] = (
'PubSubMessageMatcher error: '
'expected attribute not found.')
total_messages.append(msg)
total_messages_all_details.append(full_message)
ack_ids = [rm.ack_id for rm in response.received_messages]
if ack_ids:
sub_client.acknowledge(subscription=self.sub_name, ack_ids=ack_ids)
if len(total_messages) >= expected_num:
break
time.sleep(self.sleep_time)
if time.time() - start_time > timeout:
_LOGGER.error(
'Timeout after %d sec. Received %d messages from %s.',
timeout,
len(total_messages),
self.sub_name)
return total_messages, total_messages_all_details
def describe_to(self, description):
description.append_text('Expected %d messages.' % self.expected_msg_len)
def describe_mismatch(self, _, mismatch_description):
c_expected = Counter(self.expected_msg)
c_actual = Counter(self.messages)
mismatch_description.append_text("Got %d messages. " % (len(self.messages)))
if self.expected_msg:
expected = (c_expected - c_actual).items()
unexpected = (c_actual - c_expected).items()
unexpected_keys = [repr(item[0]) for item in unexpected]
if self.with_attributes:
unexpected_all_details = [
x for x in self.messages_all_details
if 'PubsubMessage(%s, %s)' % (repr(x[0]), x[1]) in unexpected_keys
]
else:
unexpected_all_details = [
x for x in self.messages_all_details
if repr(x[0]) in unexpected_keys
]
mismatch_description.append_text(
"Diffs (item, count):\n"
" Expected but not in actual: %s\n"
" Unexpected: %s\n"
" Unexpected (with all details): %s" %
(expected, unexpected, unexpected_all_details))
if self.with_attributes and self.strip_attributes:
mismatch_description.append_text(
'\n Stripped attributes: %r' % self.strip_attributes)
| {
"content_hash": "615ec2d2e271f19e77e3734ecdf7dcfb",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 80,
"avg_line_length": 37.53225806451613,
"alnum_prop": 0.6580719094685575,
"repo_name": "apache/beam",
"id": "85836eaf3374615ae65d11038ffc0de757b81974",
"size": "7766",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/io/gcp/tests/pubsub_matcher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "C",
"bytes": "3869"
},
{
"name": "CSS",
"bytes": "4957"
},
{
"name": "Cython",
"bytes": "70760"
},
{
"name": "Dart",
"bytes": "912687"
},
{
"name": "Dockerfile",
"bytes": "59805"
},
{
"name": "FreeMarker",
"bytes": "7933"
},
{
"name": "Go",
"bytes": "5508697"
},
{
"name": "Groovy",
"bytes": "936956"
},
{
"name": "HCL",
"bytes": "103872"
},
{
"name": "HTML",
"bytes": "184151"
},
{
"name": "Java",
"bytes": "41223435"
},
{
"name": "JavaScript",
"bytes": "119576"
},
{
"name": "Jupyter Notebook",
"bytes": "55818"
},
{
"name": "Kotlin",
"bytes": "220768"
},
{
"name": "Lua",
"bytes": "3620"
},
{
"name": "Python",
"bytes": "10728612"
},
{
"name": "Rust",
"bytes": "5168"
},
{
"name": "SCSS",
"bytes": "318364"
},
{
"name": "Sass",
"bytes": "25954"
},
{
"name": "Scala",
"bytes": "1429"
},
{
"name": "Shell",
"bytes": "375834"
},
{
"name": "Smarty",
"bytes": "2618"
},
{
"name": "Thrift",
"bytes": "3260"
},
{
"name": "TypeScript",
"bytes": "1997829"
}
],
"symlink_target": ""
} |
"""mel conversion ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.signal import shape_ops
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
# mel spectrum constants.
# HTK mel-scale formula: mel = 1127 * ln(1 + hz / 700). 700 Hz is the break
# frequency; 1127 (= 2595 / ln(10)) rescales the natural log used below.
_MEL_BREAK_FREQUENCY_HERTZ = 700.0
_MEL_HIGH_FREQUENCY_Q = 1127.0
def _mel_to_hertz(mel_values, name=None):
  """Converts frequencies in `mel_values` from the mel scale to linear scale.

  Args:
    mel_values: A `Tensor` of frequencies in the mel scale.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of the same shape and type as `mel_values` containing linear
    scale frequencies in Hertz.
  """
  with ops.name_scope(name, 'mel_to_hertz', [mel_values]):
    mel_tensor = ops.convert_to_tensor(mel_values)
    # Inverse of the HTK formula: hz = 700 * (exp(mel / 1127) - 1).
    expanded = math_ops.exp(mel_tensor / _MEL_HIGH_FREQUENCY_Q) - 1.0
    return expanded * _MEL_BREAK_FREQUENCY_HERTZ
def _hertz_to_mel(frequencies_hertz, name=None):
  """Converts frequencies in `frequencies_hertz` in Hertz to the mel scale.

  Args:
    frequencies_hertz: A `Tensor` of frequencies in Hertz.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of the same shape and type of `frequencies_hertz` containing
    frequencies in the mel scale.
  """
  with ops.name_scope(name, 'hertz_to_mel', [frequencies_hertz]):
    hertz = ops.convert_to_tensor(frequencies_hertz)
    # HTK formula: mel = 1127 * ln(1 + hz / 700).
    ratio = hertz / _MEL_BREAK_FREQUENCY_HERTZ
    return math_ops.log(1.0 + ratio) * _MEL_HIGH_FREQUENCY_Q
def _validate_arguments(num_mel_bins, sample_rate,
                        lower_edge_hertz, upper_edge_hertz, dtype):
  """Checks the inputs to linear_to_mel_weight_matrix.

  Raises:
    ValueError: If any argument is out of range or has the wrong type.
  """
  # The number of output bands must be strictly positive.
  if num_mel_bins <= 0:
    raise ValueError('num_mel_bins must be positive. Got: %s' % num_mel_bins)
  # Band edges must be non-negative and correctly ordered.
  if lower_edge_hertz < 0.0:
    raise ValueError(
        'lower_edge_hertz must be non-negative. Got: %s' % lower_edge_hertz)
  if lower_edge_hertz >= upper_edge_hertz:
    raise ValueError(
        'lower_edge_hertz %.1f >= upper_edge_hertz %.1f' %
        (lower_edge_hertz, upper_edge_hertz))
  # A static (non-Tensor) sample rate can be range-checked eagerly; a
  # Tensor-valued one cannot be inspected here.
  sample_rate_is_static = not isinstance(sample_rate, ops.Tensor)
  if sample_rate_is_static:
    if sample_rate <= 0.0:
      raise ValueError('sample_rate must be positive. Got: %s' % sample_rate)
    if upper_edge_hertz > sample_rate / 2:
      raise ValueError(
          'upper_edge_hertz must not be larger than the Nyquist '
          'frequency (sample_rate / 2). Got %s for sample_rate: %s'
          % (upper_edge_hertz, sample_rate))
  if not dtype.is_floating:
    raise ValueError('dtype must be a floating point type. Got: %s' % dtype)
@tf_export('signal.linear_to_mel_weight_matrix')
@dispatch.add_dispatch_support
def linear_to_mel_weight_matrix(num_mel_bins=20,
                                num_spectrogram_bins=129,
                                sample_rate=8000,
                                lower_edge_hertz=125.0,
                                upper_edge_hertz=3800.0,
                                dtype=dtypes.float32,
                                name=None):
  r"""Returns a matrix to warp linear scale spectrograms to the [mel scale][mel].

  Returns a weight matrix that can be used to re-weight a `Tensor` containing
  `num_spectrogram_bins` linearly sampled frequency information from
  `[0, sample_rate / 2]` into `num_mel_bins` frequency information from
  `[lower_edge_hertz, upper_edge_hertz]` on the [mel scale][mel].

  This function follows the [Hidden Markov Model Toolkit
  (HTK)](http://htk.eng.cam.ac.uk/) convention, defining the mel scale in
  terms of a frequency in hertz according to the following formula:

  $$\textrm{mel}(f) = 2595 * \textrm{log}_{10}(1 + \frac{f}{700})$$

  In the returned matrix, all the triangles (filterbanks) have a peak value
  of 1.0.

  For example, the returned matrix `A` can be used to right-multiply a
  spectrogram `S` of shape `[frames, num_spectrogram_bins]` of linear
  scale spectrum values (e.g. STFT magnitudes) to generate a "mel spectrogram"
  `M` of shape `[frames, num_mel_bins]`.

      # `S` has shape [frames, num_spectrogram_bins]
      # `M` has shape [frames, num_mel_bins]
      M = tf.matmul(S, A)

  The matrix can be used with `tf.tensordot` to convert an arbitrary rank
  `Tensor` of linear-scale spectral bins into the mel scale.

      # S has shape [..., num_spectrogram_bins].
      # M has shape [..., num_mel_bins].
      M = tf.tensordot(S, A, 1)

  Args:
    num_mel_bins: Python int. How many bands in the resulting mel spectrum.
    num_spectrogram_bins: An integer `Tensor`. How many bins there are in the
      source spectrogram data, which is understood to be `fft_size // 2 + 1`,
      i.e. the spectrogram only contains the nonredundant FFT bins.
    sample_rate: An integer or float `Tensor`. Samples per second of the input
      signal used to create the spectrogram. Used to figure out the frequencies
      corresponding to each spectrogram bin, which dictates how they are mapped
      into the mel scale.
    lower_edge_hertz: Python float. Lower bound on the frequencies to be
      included in the mel spectrum. This corresponds to the lower edge of the
      lowest triangular band.
    upper_edge_hertz: Python float. The desired top edge of the highest
      frequency band.
    dtype: The `DType` of the result matrix. Must be a floating point type.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of shape `[num_spectrogram_bins, num_mel_bins]`.

  Raises:
    ValueError: If `num_mel_bins`/`num_spectrogram_bins`/`sample_rate` are not
      positive, `lower_edge_hertz` is negative, frequency edges are incorrectly
      ordered, `upper_edge_hertz` is larger than the Nyquist frequency.

  [mel]: https://en.wikipedia.org/wiki/Mel_scale
  """
  with ops.name_scope(name, 'linear_to_mel_weight_matrix') as name:
    # Convert Tensor `sample_rate` to float, if possible.
    if isinstance(sample_rate, ops.Tensor):
      maybe_const_val = tensor_util.constant_value(sample_rate)
      if maybe_const_val is not None:
        sample_rate = maybe_const_val
    # Note: As num_spectrogram_bins is passed to `math_ops.linspace`
    # and the validation is already done in linspace (both in shape function
    # and in kernel), there is no need to validate num_spectrogram_bins here.
    _validate_arguments(num_mel_bins, sample_rate,
                        lower_edge_hertz, upper_edge_hertz, dtype)
    # This function can be constant folded by graph optimization since there are
    # no Tensor inputs.
    sample_rate = math_ops.cast(
        sample_rate, dtype, name='sample_rate')
    lower_edge_hertz = ops.convert_to_tensor(
        lower_edge_hertz, dtype, name='lower_edge_hertz')
    upper_edge_hertz = ops.convert_to_tensor(
        upper_edge_hertz, dtype, name='upper_edge_hertz')
    zero = ops.convert_to_tensor(0.0, dtype)
    # HTK excludes the spectrogram DC bin.
    bands_to_zero = 1
    nyquist_hertz = sample_rate / 2.0
    # Center frequency (in Hz, then mel) of every retained spectrogram bin.
    linear_frequencies = math_ops.linspace(
        zero, nyquist_hertz, num_spectrogram_bins)[bands_to_zero:]
    spectrogram_bins_mel = array_ops.expand_dims(
        _hertz_to_mel(linear_frequencies), 1)
    # Compute num_mel_bins triples of (lower_edge, center, upper_edge). The
    # center of each band is the lower and upper edge of the adjacent bands.
    # Accordingly, we divide [lower_edge_hertz, upper_edge_hertz] into
    # num_mel_bins + 2 pieces.
    band_edges_mel = shape_ops.frame(
        math_ops.linspace(_hertz_to_mel(lower_edge_hertz),
                          _hertz_to_mel(upper_edge_hertz),
                          num_mel_bins + 2), frame_length=3, frame_step=1)
    # Split the triples up and reshape them into [1, num_mel_bins] tensors.
    lower_edge_mel, center_mel, upper_edge_mel = tuple(array_ops.reshape(
        t, [1, num_mel_bins]) for t in array_ops.split(
            band_edges_mel, 3, axis=1))
    # Calculate lower and upper slopes for every spectrogram bin.
    # Line segments are linear in the mel domain, not Hertz.
    lower_slopes = (spectrogram_bins_mel - lower_edge_mel) / (
        center_mel - lower_edge_mel)
    upper_slopes = (upper_edge_mel - spectrogram_bins_mel) / (
        upper_edge_mel - center_mel)
    # Intersect the line segments with each other and zero.
    mel_weights_matrix = math_ops.maximum(
        zero, math_ops.minimum(lower_slopes, upper_slopes))
    # Re-add the zeroed lower bins we sliced out above.
    return array_ops.pad(
        mel_weights_matrix, [[bands_to_zero, 0], [0, 0]], name=name)
| {
"content_hash": "808f234f237a3c72b1b4eed9396a881b",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 80,
"avg_line_length": 43.26829268292683,
"alnum_prop": 0.6676437429537768,
"repo_name": "aldian/tensorflow",
"id": "cf0bed9ef1be0661f0db947cc8dc434564d65eec",
"size": "9559",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/signal/mel_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "201402"
},
{
"name": "C++",
"bytes": "29667924"
},
{
"name": "CMake",
"bytes": "647100"
},
{
"name": "Go",
"bytes": "976514"
},
{
"name": "Java",
"bytes": "412117"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38128"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "275733"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26424665"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373109"
}
],
"symlink_target": ""
} |
import argparse
import collections
import glob
import os
import re
import sys
# Command-line interface; both the log directory and the base distro are
# mandatory.
arg_parser = argparse.ArgumentParser(
    description='Parse kolla build logs and extract useful information about '
                'the installed packages.')
arg_parser.add_argument(
    '-l', '--logdir', required=True,
    help='Path to the build log files')
arg_parser.add_argument(
    '-b', '--base', required=True,
    help='The kolla base_distro')
options = vars(arg_parser.parse_args())

if options['base'] not in ('centos',):
    print("Non rpm-based distros are not yet supported.")
    sys.exit()

# Maps an obsoleted package name to its replacement and the list of images
# whose build log mentioned it.
obsolete = {}
# Maps a package name to the set of images that install it.
pkg_installs = collections.defaultdict(set)

# Pre-compiled patterns for the yum output lines we care about.
OBSOLETED_RE = re.compile(r"Package (.+) is obsoleted by (.+),")
INSTALL_RE = re.compile(r"Package (.+)\..+ .+ will be installed")
DEPENDENCY_RE = re.compile(r"Processing Dependency: (.+)\(")

for log_path in glob.glob(os.path.join(options['logdir'], '*.log')):
    image = os.path.splitext(os.path.basename(log_path))[0]
    with open(log_path) as log_file:
        for line in log_file:
            match = OBSOLETED_RE.search(line)
            if match:
                entry = obsolete.setdefault(
                    match.group(1),
                    {'obsoleted_by': match.group(2), 'images': []})
                entry['images'].append(image)
            match = INSTALL_RE.search(line)
            if match:
                pkg_installs[match.group(1)].add(image)
            match = DEPENDENCY_RE.search(line)
            if match:
                pkg_installs[match.group(1)].add(image)

if obsolete:
    print("Found %s obsolete(s) package(s):" % len(obsolete))
    for name, details in obsolete.items():
        print("%s is obsoleted by %s (%s)" %
              (name,
               details['obsoleted_by'],
               ', '.join(details['images'])))
    print('')

# Packages pulled into many images but absent from both base images are
# candidates for promotion to a base image.
move_to_base_candidates = [
    name for name, images in pkg_installs.items()
    if len(images) > 10 and images.isdisjoint({'base', 'openstack-base'})
]
if move_to_base_candidates:
    print("Consider moving the following packages to a base image:")
    for name in move_to_base_candidates:
        print("%s (%s)" %
              (name,
               ', '.join(pkg_installs[name])))
| {
"content_hash": "c4b49488b2349430f75e070ea196f498",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 78,
"avg_line_length": 33.39393939393939,
"alnum_prop": 0.543557168784029,
"repo_name": "openstack/kolla",
"id": "22b7182273e0aa36cccd74cda4925693d51b3b7a",
"size": "2769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/files/process_build_logs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "235221"
},
{
"name": "Python",
"bytes": "227320"
},
{
"name": "Shell",
"bytes": "85926"
}
],
"symlink_target": ""
} |
from dragonfly import (Grammar, AppContext, MappingRule, Dictation, Key, Text, Integer, Mimic)
# The grammar is only active while a window titled "buffergator" has focus.
context = AppContext(title="buffergator")
grammar = Grammar("buffergator", context=context)

# NOTE(review): noSpaceNoCaps is never referenced in this file — presumably
# copied from a sibling macro file; confirm before removing.
noSpaceNoCaps = Mimic("\\no-caps-on") + Mimic("\\no-space-on")

rules = MappingRule(
    name="buffergator",
    mapping={
        # Window-splitting commands.
        "split": Key("c-s"),
        "vertical": Key("c-v"),
    },
    extras=[
        Dictation("text", format=False),
        Integer("n", 1, 20000),
    ],
    defaults={
        "text": '',
        "n": 1,
    },
)

grammar.add_rule(rules)
grammar.load()
def unload():
    """Unload and drop the module-level grammar so the engine releases it."""
    global grammar
    if grammar:
        grammar.unload()
    grammar = None
| {
"content_hash": "b5a8df7bfc52adbad321b8baa2d00cea",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 94,
"avg_line_length": 22.1,
"alnum_prop": 0.5897435897435898,
"repo_name": "simianhacker/code-by-voice",
"id": "4b7a34bbeab6aaf3ade3fa98f107564101c1353e",
"size": "663",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "macros/_buffergator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "427"
},
{
"name": "Python",
"bytes": "89834"
}
],
"symlink_target": ""
} |
'''
PARSER:
-------
This parser will intake dataverse dataset classes and
generate an HDX-like dictionary. It was designed to
register datasets in HDX in a posterior process.
'''
import os
import sys
import json
import requests
from copy import copy
from slugify import slugify
from datetime import datetime
from countrycode.countrycode import countrycode
def parse_dataset(data, private=True, fail_no_country=True):
    '''
    Parse a Dataverse dataset record into an HDX-like dictionary.

    Args:
        data: dict as returned by the Dataverse native API; only the
            'latestVersion' entry is inspected.
        private: value stored in the resulting metadata's 'private' flag.
        fail_no_country: when True, raise ValueError if a
            geographicCoverage entry has no 'country' field; when False,
            silently skip such entries.

    Returns:
        A dict with two keys: 'metadata' (dataset-level fields) and
        'resources' (one entry per recognized data file).

    Raises:
        ValueError: if 'latestVersion' is missing or the record has no
            'geospatial' metadata block.
    '''
    #
    # Check that there is actually metadata to parse.
    #
    if data.get('latestVersion') is None:
        raise ValueError('No data to parse.')
    if data['latestVersion']['metadataBlocks'].get('geospatial') is None:
        raise ValueError('No country entry found.')
    metadata = {
        'name': None,
        'title': None,
        'owner_org': 'ifpri',
        'author': 'ifpridata',
        'author_email': 'ifpri-data@cgiar.org',
        'maintainer': 'ifpridata',
        'maintainer_email': 'ifpri-data@cgiar.org',
        'license_id': 'cc-by-sa',
        'dataset_date': None,  # has to be MM/DD/YYYY
        'subnational': 1,  # has to be 0 or 1. Default 1 for IFPRI.
        'notes': None,
        'caveats': None,
        'data_update_frequency': '0',
        'methodology': 'Other',
        'methodology_other': None,
        'dataset_source': '',
        'package_creator': 'luiscape',
        'private': private,  # has to be True or False
        'url': None,
        'state': 'active',  # always "active".
        'tags': [{ 'name': 'Food' }, { 'name': 'Security' }],  # has to be a list with { 'name': None }
        'groups': []  # has to be ISO-3-letter-code. { 'id': None }
    }
    #
    # Parsing for:
    #
    # - metadata name
    # - metadata title
    # - metadata dataset_date
    # - metadata notes
    # - metadata groups (countries)
    # - metadata source
    #
    # Accumulate authors across all 'author' fields (previously the list was
    # reset on every field iteration, dropping earlier author fields).
    authors = []
    for field in data['latestVersion']['metadataBlocks']['citation']['fields']:
        if field.get('typeName') == 'title':
            metadata['title'] = str(field['value'])
            # HDX dataset names are slugs limited to 90 characters.
            metadata['name'] = str(slugify(field['value']))[:90]
        if field.get('typeName') == 'timePeriodCovered':
            for f in field['value']:
                if f.get('timePeriodCoveredStart') is not None:
                    metadata['dataset_date'] = str(f['timePeriodCoveredStart']['value'])
                else:
                    metadata['dataset_date'] = ''
        if field.get('typeName') == 'author':
            for f in field['value']:
                if f['authorName'].get('value') is not None:
                    authors.append(f['authorName'].get('value'))
            metadata['dataset_source'] = ', '.join(authors)
        if field.get('typeName') == 'dsDescription':
            metadata['notes'] = str(field.get('value')[0].get('dsDescriptionValue').get('value'))
    #
    # Countries are registered as HDX groups (lower-case ISO-3 codes).
    #
    for location in data['latestVersion']['metadataBlocks']['geospatial']['fields']:
        if location.get('typeName') == 'geographicCoverage':
            for country in location['value']:
                if country.get('country') is not None:
                    name = country['country'].get('value')
                    code = countrycode(codes=str(name), origin='country_name', target='iso3c')
                    metadata['groups'].append({ 'id': code.lower() })
                elif fail_no_country:
                    raise ValueError('No country entry found.')
    #
    # Only files with data-like extensions become resources; each resource
    # dict is built fresh (no shared mutable template).
    #
    resources = []
    desired_file_extensions = ['xls', 'xlsx', 'csv', 'zip', 'tsv', 'shp', 'geojson', 'json']
    for file in data['latestVersion']['files']:
        file_name = file.get('datafile').get('name')
        if file_name is None:
            continue
        extension = os.path.splitext(file_name)[1][1:].lower()
        if extension not in desired_file_extensions:
            continue
        resources.append({
            'package_id': metadata['name'],
            'url': 'https://dataverse.harvard.edu/api/access/datafile/' + str(file['datafile'].get('id')),
            'name': file_name,
            'format': extension.upper(),
            'description': None
        })
    return { 'metadata': metadata, 'resources': resources }
| {
"content_hash": "0c779debcc914f0feac46f42eb94290f",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 112,
"avg_line_length": 28.90728476821192,
"alnum_prop": 0.6020618556701031,
"repo_name": "luiscape/hdxscraper-ifpri-dataverse",
"id": "64f157b27b3e64164a1a66214f29a677ff34c586",
"size": "4407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scraper/parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "170"
},
{
"name": "Python",
"bytes": "32589"
},
{
"name": "Shell",
"bytes": "555"
}
],
"symlink_target": ""
} |
import json
import uuid
import unittest
import testtools
from infosystem.tests.functional import test_base
class UserTestCase(test_base.CRUDTest,
                   testtools.TestCase):
    """CRUD tests for the user resource."""

    def load_fixtures(self):
        """Create the domain the users under test will belong to."""
        domain_ref = {'name': uuid.uuid4().hex, 'active': True}
        domain = self.post(resource_ref=domain_ref,
                           resource_name='domain',
                           collection_name='domains')
        self.domain_id = domain['id']

    @property
    def resource_name(self):
        return 'user'

    @property
    def required_attributes(self):
        return ['name', 'domain_id', 'email', 'password']

    @property
    def optional_attributes(self):
        return ['active']

    @property
    def unique_attributes(self):
        # Uniqueness is scoped per domain, not global.
        return [('name', 'domain_id')]

    @property
    def hidden_attributes(self):
        return ['password']

    def new_resource_ref(self):
        """Build a random, valid user reference."""
        ref = {'name': uuid.uuid4().hex,
               'domain_id': self.domain_id,
               'email': uuid.uuid4().hex,
               'password': uuid.uuid4().hex,
               'active': True}
        return ref
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "d16829a8e33b6e9e9da013f02c059b60",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 68,
"avg_line_length": 23.122448979591837,
"alnum_prop": 0.5675198587819947,
"repo_name": "samueldmq/infosystem",
"id": "c8150eacab4d704d361c7ba1aadb07b41d5d47a9",
"size": "1133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "infosystem/tests/functional/test_users.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "90123"
}
],
"symlink_target": ""
} |
from oslo.config import cfg
from nova import db
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
# Global RAM overcommit ratio shared by the filters below; AggregateRamFilter
# falls back to it when no per-aggregate value is configured.
ram_allocation_ratio_opt = cfg.FloatOpt('ram_allocation_ratio',
        default=1.5,
        help='Virtual ram to physical ram allocation ratio which affects '
             'all ram filters. This configuration specifies a global ratio '
             'for RamFilter. For AggregateRamFilter, it will fall back to '
             'this configuration value if no per-aggregate setting found.')
CONF = cfg.CONF
CONF.register_opt(ram_allocation_ratio_opt)
class BaseRamFilter(filters.BaseHostFilter):
    """Shared RAM-capacity check for the concrete RAM filters below."""

    def _get_ram_allocation_ratio(self, host_state, filter_properties):
        """Return the overcommit ratio to apply; subclasses must override."""
        raise NotImplementedError

    def host_passes(self, host_state, filter_properties):
        """Only return hosts with sufficient available RAM."""
        flavor = filter_properties.get('instance_type')
        requested_ram = flavor['memory_mb']
        total_mb = host_state.total_usable_ram_mb
        ratio = self._get_ram_allocation_ratio(host_state, filter_properties)
        # The schedulable limit is physical RAM scaled by the overcommit
        # ratio; usable RAM is that limit minus what is already consumed.
        memory_mb_limit = total_mb * ratio
        consumed_mb = total_mb - host_state.free_ram_mb
        usable_ram = memory_mb_limit - consumed_mb
        fits = usable_ram >= requested_ram
        if not fits:
            LOG.debug("%(host_state)s does not have %(requested_ram)s MB "
                      "usable ram, it only has %(usable_ram)s MB usable ram.",
                      {'host_state': host_state,
                       'requested_ram': requested_ram,
                       'usable_ram': usable_ram})
            return False
        # save oversubscription limit for compute node to test against:
        host_state.limits['memory_mb'] = memory_mb_limit
        return True
class RamFilter(BaseRamFilter):
    """Ram Filter with over subscription flag."""
    # Snapshot of the global option taken once, at class-definition time.
    ram_allocation_ratio = CONF.ram_allocation_ratio
    def _get_ram_allocation_ratio(self, host_state, filter_properties):
        # Always use the global ram_allocation_ratio, ignoring aggregates.
        return self.ram_allocation_ratio
class AggregateRamFilter(BaseRamFilter):
    """AggregateRamFilter with per-aggregate ram subscription flag.

    Fall back to global ram_allocation_ratio if no per-aggregate setting found.
    """
    def _get_ram_allocation_ratio(self, host_state, filter_properties):
        """Return the smallest per-aggregate ratio, or the global default.

        Looks up 'ram_allocation_ratio' in the metadata of the aggregates
        the host belongs to; when several aggregates define it, the
        numerically smallest (most conservative) value wins.
        """
        context = filter_properties['context']
        # TODO(uni): DB query in filter is a performance hit, especially for
        # system with lots of hosts. Will need a general solution here to fix
        # all filters with aggregate DB call things.
        metadata = db.aggregate_metadata_get_by_host(
            context, host_state.host, key='ram_allocation_ratio')
        aggregate_vals = metadata.get('ram_allocation_ratio', set())
        num_values = len(aggregate_vals)
        if num_values == 0:
            return CONF.ram_allocation_ratio
        if num_values > 1:
            LOG.warning(_("%(num_values)d ratio values found, "
                          "of which the minimum value will be used."),
                        {'num_values': num_values})
        try:
            # BUG FIX: the metadata values are strings, and min() on strings
            # compares lexicographically (e.g. '10' < '9'), contradicting the
            # warning above. Convert to float before taking the minimum.
            ratio = min(float(val) for val in aggregate_vals)
        except ValueError as e:
            LOG.warning(_("Could not decode ram_allocation_ratio: '%s'"), e)
            ratio = CONF.ram_allocation_ratio
        return ratio
| {
"content_hash": "2079fecdbc253f2062b76c92b1443573",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 79,
"avg_line_length": 39.53333333333333,
"alnum_prop": 0.6337830241708825,
"repo_name": "afrolov1/nova",
"id": "25a5031afbc1ae13d3980ca4b3238db30200f2e0",
"size": "4232",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/scheduler/filters/ram_filter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14057622"
},
{
"name": "Shell",
"bytes": "17451"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
# Create your views here.
# render template then response
from django.shortcuts import render_to_response
# just response as string
from django.http import HttpResponse
from books.models import Book
def search_form(request):
    """Render the empty book-search form."""
    return render_to_response('search_form.html')
def search(request):
    """Validate the 'q' GET parameter and search books by title.

    Renders the results page for a valid query; otherwise re-renders the
    search form together with any validation errors.
    """
    errors = []
    if 'q' not in request.GET:
        return render_to_response('search_form.html', {'errors': errors})
    query = request.GET['q']
    if not query:
        errors.append('Enter a search term')
    elif len(query) > 20:
        errors.append('Please enter at most 20 characters')
    else:
        matches = Book.objects.filter(title__icontains=query)
        return render_to_response('search_results.html',
                                  {'books': matches, 'query': query})
    return render_to_response('search_form.html', {'errors': errors})
| {
"content_hash": "a0f5a0218da53f8cceb16d02fecfa3bd",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 69,
"avg_line_length": 30.40740740740741,
"alnum_prop": 0.6431181485992692,
"repo_name": "jizhouli/django-project-sample",
"id": "4b0490f73ae484a867d4433cd44c372e5a7c23b4",
"size": "821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "books/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2335"
},
{
"name": "Python",
"bytes": "10334"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from .buffer import Buffer, AcceptAction
from .buffer_mapping import BufferMapping
from .clipboard import Clipboard, InMemoryClipboard
from .enums import DEFAULT_BUFFER, EditingMode
from .filters import CLIFilter, to_cli_filter
from .key_binding.bindings.basic import load_basic_bindings
from .key_binding.bindings.emacs import load_emacs_bindings
from .key_binding.bindings.vi import load_vi_bindings
from .key_binding.registry import BaseRegistry
from .key_binding.defaults import load_key_bindings
from .layout import Window
from .layout.containers import Container
from .layout.controls import BufferControl
from .styles import DEFAULT_STYLE, Style
import six
__all__ = (
'AbortAction',
'Application',
)
class AbortAction(object):
    """
    Actions to take on an Exit or Abort exception.
    """
    RETRY = 'retry'
    RAISE_EXCEPTION = 'raise-exception'
    RETURN_NONE = 'return-none'
    # Accepted values for Application's `on_abort`/`on_exit` parameters
    # (validated there via `on_abort in AbortAction._all`).
    _all = (RETRY, RAISE_EXCEPTION, RETURN_NONE)
class Application(object):
"""
Application class to be passed to a
:class:`~prompt_toolkit.interface.CommandLineInterface`.
This contains all customizable logic that is not I/O dependent.
(So, what is independent of event loops, input and output.)
This way, such an :class:`.Application` can run easily on several
:class:`~prompt_toolkit.interface.CommandLineInterface` instances, each
with a different I/O backends. that runs for instance over telnet, SSH or
any other I/O backend.
:param layout: A :class:`~prompt_toolkit.layout.containers.Container` instance.
:param buffer: A :class:`~prompt_toolkit.buffer.Buffer` instance for the default buffer.
:param initial_focussed_buffer: Name of the buffer that is focussed during start-up.
:param key_bindings_registry:
:class:`~prompt_toolkit.key_binding.registry.BaseRegistry` instance for
the key bindings.
:param clipboard: :class:`~prompt_toolkit.clipboard.base.Clipboard` to use.
:param on_abort: What to do when Control-C is pressed.
:param on_exit: What to do when Control-D is pressed.
:param use_alternate_screen: When True, run the application on the alternate screen buffer.
:param get_title: Callable that returns the current title to be displayed in the terminal.
:param erase_when_done: (bool) Clear the application output when it finishes.
:param reverse_vi_search_direction: Normally, in Vi mode, a '/' searches
forward and a '?' searches backward. In readline mode, this is usually
reversed.
Filters:
:param mouse_support: (:class:`~prompt_toolkit.filters.CLIFilter` or
boolean). When True, enable mouse support.
:param paste_mode: :class:`~prompt_toolkit.filters.CLIFilter` or boolean.
:param ignore_case: :class:`~prompt_toolkit.filters.CLIFilter` or boolean.
:param editing_mode: :class:`~prompt_toolkit.enums.EditingMode`.
Callbacks (all of these should accept a
:class:`~prompt_toolkit.interface.CommandLineInterface` object as input.)
:param on_input_timeout: Called when there is no input for x seconds.
(Fired when any eventloop.onInputTimeout is fired.)
:param on_start: Called when reading input starts.
:param on_stop: Called when reading input ends.
:param on_reset: Called during reset.
:param on_buffer_changed: Called when the content of a buffer has been changed.
:param on_initialize: Called after the
:class:`~prompt_toolkit.interface.CommandLineInterface` initializes.
:param on_render: Called right after rendering.
:param on_invalidate: Called when the UI has been invalidated.
"""
    def __init__(self, layout=None, buffer=None, buffers=None,
                 initial_focussed_buffer=DEFAULT_BUFFER,
                 style=None,
                 key_bindings_registry=None, clipboard=None,
                 on_abort=AbortAction.RAISE_EXCEPTION, on_exit=AbortAction.RAISE_EXCEPTION,
                 use_alternate_screen=False, mouse_support=False,
                 get_title=None,
                 paste_mode=False, ignore_case=False, editing_mode=EditingMode.EMACS,
                 erase_when_done=False,
                 reverse_vi_search_direction=False,
                 on_input_timeout=None, on_start=None, on_stop=None,
                 on_reset=None, on_initialize=None, on_buffer_changed=None,
                 on_render=None, on_invalidate=None):
        # See the class docstring for the meaning of each parameter.

        # The filter-typed options accept plain booleans as well as CLIFilter
        # instances; normalize them before the type asserts below.
        paste_mode = to_cli_filter(paste_mode)
        ignore_case = to_cli_filter(ignore_case)
        mouse_support = to_cli_filter(mouse_support)
        reverse_vi_search_direction = to_cli_filter(reverse_vi_search_direction)

        # Fail fast on wrong argument types rather than deep inside the
        # event loop, where the cause would be much harder to trace.
        assert layout is None or isinstance(layout, Container)
        assert buffer is None or isinstance(buffer, Buffer)
        assert buffers is None or isinstance(buffers, (dict, BufferMapping))
        assert key_bindings_registry is None or isinstance(key_bindings_registry, BaseRegistry)
        assert clipboard is None or isinstance(clipboard, Clipboard)
        assert on_abort in AbortAction._all
        assert on_exit in AbortAction._all
        assert isinstance(use_alternate_screen, bool)
        assert get_title is None or callable(get_title)
        assert isinstance(paste_mode, CLIFilter)
        assert isinstance(ignore_case, CLIFilter)
        assert isinstance(editing_mode, six.string_types)
        assert on_input_timeout is None or callable(on_input_timeout)
        assert style is None or isinstance(style, Style)
        assert isinstance(erase_when_done, bool)

        assert on_start is None or callable(on_start)
        assert on_stop is None or callable(on_stop)
        assert on_reset is None or callable(on_reset)
        assert on_buffer_changed is None or callable(on_buffer_changed)
        assert on_initialize is None or callable(on_initialize)
        assert on_render is None or callable(on_render)
        assert on_invalidate is None or callable(on_invalidate)

        # Default layout: a single window showing a plain buffer control.
        self.layout = layout or Window(BufferControl())

        # Make sure that the 'buffers' dictionary is a BufferMapping.
        # NOTE: If no buffer is given, we create a default Buffer, with IGNORE as
        #       default accept_action. This is what makes sense for most users
        #       creating full screen applications. Doing nothing is the obvious
        #       default. Those creating a REPL would use the shortcuts module that
        #       passes in RETURN_DOCUMENT.
        self.buffer = buffer or Buffer(accept_action=AcceptAction.IGNORE)
        if not buffers or not isinstance(buffers, BufferMapping):
            self.buffers = BufferMapping(buffers, initial=initial_focussed_buffer)
        else:
            self.buffers = buffers

        # An explicitly given `buffer` becomes (or replaces) the default buffer.
        if buffer:
            self.buffers[DEFAULT_BUFFER] = buffer

        self.initial_focussed_buffer = initial_focussed_buffer

        self.style = style or DEFAULT_STYLE

        if key_bindings_registry is None:
            key_bindings_registry = load_key_bindings()

        if get_title is None:
            get_title = lambda: None

        self.key_bindings_registry = key_bindings_registry
        self.clipboard = clipboard or InMemoryClipboard()
        self.on_abort = on_abort
        self.on_exit = on_exit
        self.use_alternate_screen = use_alternate_screen
        self.mouse_support = mouse_support
        self.get_title = get_title

        self.paste_mode = paste_mode
        self.ignore_case = ignore_case
        self.editing_mode = editing_mode
        self.erase_when_done = erase_when_done
        self.reverse_vi_search_direction = reverse_vi_search_direction

        def dummy_handler(cli):
            " Dummy event handler. "

        # Any event callback not supplied falls back to a no-op handler, so
        # the rest of the code can call them unconditionally.
        self.on_input_timeout = on_input_timeout or dummy_handler
        self.on_start = on_start or dummy_handler
        self.on_stop = on_stop or dummy_handler
        self.on_reset = on_reset or dummy_handler
        self.on_initialize = on_initialize or dummy_handler
        self.on_buffer_changed = on_buffer_changed or dummy_handler
        self.on_render = on_render or dummy_handler
        self.on_invalidate = on_invalidate or dummy_handler

        # List of 'extra' functions to execute before a CommandLineInterface.run.
        # Note: It's important to keep this here, and not in the
        #       CommandLineInterface itself. shortcuts.run_application creates
        #       a new Application instance everytime. (Which is correct, it
        #       could be that we want to detach from one IO backend and attach
        #       the UI on a different backend.) But important is to keep as
        #       much state as possible between runs.
        self.pre_run_callables = []
| {
"content_hash": "719417ff50493dd6ff3f5cb7871d9ce5",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 95,
"avg_line_length": 45.557291666666664,
"alnum_prop": 0.6834343203384018,
"repo_name": "Edu-Glez/Bank_sentiment_analysis",
"id": "272d8bbcbb0512eac1ff9c6e38dc33b44d8b4fdf",
"size": "8747",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "env/lib/python3.6/site-packages/prompt_toolkit/application.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lex",
"bytes": "101463"
},
{
"name": "Python",
"bytes": "29876"
},
{
"name": "Shell",
"bytes": "1509"
}
],
"symlink_target": ""
} |
from logbook import FileHandler
from zipline.finance.blotter import ORDER_STATUS
from six import itervalues
def setup_logger(test, path='test.log'):
    """Attach a logbook FileHandler writing to *path* and record it on *test*."""
    handler = FileHandler(path)
    test.log_handler = handler
    handler.push_application()
def teardown_logger(test):
    """Detach and close the log handler that setup_logger installed on *test*."""
    handler = test.log_handler
    handler.pop_application()
    handler.close()
def drain_zipline(test, zipline):
    """Run the *zipline* simulation generator to exhaustion.

    :param test: unused; kept for backward compatibility with existing callers.
    :param zipline: iterable of simulation update dicts.
    :returns: tuple of (list of all updates, total transaction count summed
        over every 'daily_perf' update).
    """
    # The original kept a `msg_counter` that was incremented but never
    # read; it has been removed.
    output = []
    transaction_count = 0

    for update in zipline:
        output.append(update)
        if 'daily_perf' in update:
            transaction_count += \
                len(update['daily_perf']['transactions'])

    return output, transaction_count
def assert_single_position(test, zipline):
    """Drain *zipline* and verify the portfolio ends with one filled position."""
    output, transaction_count = drain_zipline(test, zipline)

    # Prefer an explicit expected transaction count when the test config
    # provides one; otherwise every order must have produced a transaction.
    if 'expected_transactions' in test.zipline_test_config:
        expected_count = test.zipline_test_config['expected_transactions']
    else:
        expected_count = test.zipline_test_config['order_count']
    test.assertEqual(expected_count, transaction_count)

    # The final message is the risk report; the second to last carries the
    # final day's results. Positions is a list of dicts.
    closing_positions = output[-2]['daily_perf']['positions']

    # Collect the latest version of each order (later updates supersede
    # earlier ones), then confirm that every one of them filled.
    orders_by_id = {}
    for update in output:
        daily_perf = update.get('daily_perf')
        if daily_perf:
            for order in daily_perf.get('orders', []):
                orders_by_id[order['id']] = order

    for order in itervalues(orders_by_id):
        test.assertEqual(
            order['status'],
            ORDER_STATUS.FILLED,
            "")

    test.assertEqual(
        len(closing_positions),
        1,
        "Portfolio should have one position."
    )

    sid = test.zipline_test_config['sid']
    test.assertEqual(
        closing_positions[0]['sid'],
        sid,
        "Portfolio should have one position in " + str(sid)
    )

    return output, transaction_count
class ExceptionSource(object):
    """Test fixture: an event source whose iteration always raises.

    Advancing the iterator performs an integer division by zero, so every
    call to next()/__next__() raises ZeroDivisionError.
    """

    def __init__(self):
        pass

    def get_hash(self):
        """Return a stable identifier for this source."""
        return "ExceptionSource"

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol.
        5 / 0

    def __next__(self):
        # Python 3 iterator protocol.
        5 / 0
class ExceptionTransform(object):
    """Test fixture: a transform whose update() always fails an assertion."""

    def __init__(self):
        self.window_length = 1

    def get_hash(self):
        """Return a stable identifier for this transform."""
        return "ExceptionTransform"

    def update(self, event):
        assert False, "An assertion message"
| {
"content_hash": "b0d75b8071f491198b48872550b24bd7",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 65,
"avg_line_length": 24.169642857142858,
"alnum_prop": 0.6039896564462505,
"repo_name": "lsbardel/zipline",
"id": "9f348c3b95792f0d1658197a3b99d392ee959ec6",
"size": "2707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zipline/utils/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""
Lob
The Lob API is organized around REST. Our API is designed to have predictable, resource-oriented URLs and uses HTTP response codes to indicate any API errors. <p> Looking for our [previous documentation](https://lob.github.io/legacy-docs/)? # noqa: E501
The version of the OpenAPI document: 1.3.0
Contact: lob-openapi@lob.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from lob_python.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from lob_python.exceptions import ApiAttributeError
from lob_python.model.check import Check
globals()['Check'] = Check
class CheckList(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-valued properties on this model.
    allowed_values = {
    }

    # No length/range/regex validations on this model.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'data': (list, type(None)),  # noqa: E501
            'object': (str, type(None)),  # noqa: E501
            'next_url': (str, type(None)),  # noqa: E501
            'previous_url': (str, type(None)),  # noqa: E501
            'count': (int, type(None)),  # noqa: E501
            'total_count': (int, type(None)),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # This model is not polymorphic, so there is no discriminator.
        return None

    def getNextPageToken(self):
        # Extract the "after" cursor from next_url for forward pagination.
        # Implicitly returns None when next_url is unset.
        # NOTE(review): str.find returns -1 when "after=" is absent, which
        # would slice from index 5 -- this assumes next_url always carries
        # an "after" query parameter; confirm against actual API responses.
        if (self.next_url):
            after_index = self.next_url.find("after=")
            return self.next_url[after_index+6:]

    def getPreviousPageToken(self):
        # Extract the "before" cursor from previous_url for backward
        # pagination. Implicitly returns None when previous_url is unset.
        # NOTE(review): same str.find == -1 caveat as the "after" token.
        if (self.previous_url):
            before_index = self.previous_url.find("before=")
            return self.previous_url[before_index+7:]

    # Maps pythonic attribute names to the JSON keys in the API payload
    # (identical here, but required by the generated (de)serializers).
    attribute_map = {
        'data': 'data',  # noqa: E501
        'object': 'object',  # noqa: E501
        'next_url': 'next_url',  # noqa: E501
        'previous_url': 'previous_url',  # noqa: E501
        'count': 'count',  # noqa: E501
        'total_count': 'total_count',  # noqa: E501
    }

    # No server-controlled attributes on this model.
    read_only_vars = {
    }

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """CheckList - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            data (list, type(None)): list of checks. [optional]  # noqa: E501
            object (str, type(None)): Value is type of resource.. [optional]  # noqa: E501
            next_url (str, type(None)): url of next page of items in list.. [optional]  # noqa: E501
            previous_url (str, type(None)): url of previous page of items in list.. [optional]  # noqa: E501
            count (int, type(None)): number of resources in a set. [optional]  # noqa: E501
            total_count (int, type(None)): indicates the total number of records. Provided when the request specifies an \"include\" query parameter. [optional]  # noqa: E501
        """

        # Pop the framework-internal keyword arguments before treating the
        # remainder as model properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            # Silently drop unknown keys when the configuration asks for it
            # and the model does not accept additional properties.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """CheckList - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            data (list, type(None)): list of checks. [optional]  # noqa: E501
            object (str, type(None)): Value is type of resource.. [optional]  # noqa: E501
            next_url (str, type(None)): url of next page of items in list.. [optional]  # noqa: E501
            previous_url (str, type(None)): url of previous page of items in list.. [optional]  # noqa: E501
            count (int, type(None)): number of resources in a set. [optional]  # noqa: E501
            total_count (int, type(None)): indicates the total number of records. Provided when the request specifies an \"include\" query parameter. [optional]  # noqa: E501
        """

        # Pop the framework-internal keyword arguments before treating the
        # remainder as model properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            # Silently drop unknown keys when the configuration asks for it
            # and the model does not accept additional properties.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Unlike _from_openapi_data, direct construction rejects
            # read-only attributes (empty for this model).
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
| {
"content_hash": "7c147f5b02f1015a7ecc59017aa1a63a",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 259,
"avg_line_length": 46.37979094076655,
"alnum_prop": 0.553226654646533,
"repo_name": "lob/lob-python",
"id": "77f83749eaa40f2551cd7b1e56a422d1d164357a",
"size": "13311",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "lob_python/model/check_list.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3327426"
},
{
"name": "Shell",
"bytes": "1830"
}
],
"symlink_target": ""
} |
from setuptools import setup

# Packaging metadata for the microsoftbotframework distribution.
setup(
    name='microsoftbotframework',
    version='0.1.17',
    description='A wrapper for the microsoft bot framework API',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
        'Topic :: Communications :: Chat',
    ],
    keywords='microsoft bot framework flask celery',
    url='https://github.com/Grungnie/microsoftbotframework',
    author='Matthew Brown',
    author_email='mbrown1508@outlook.com',
    license='MIT',
    packages=['microsoftbotframework'],
    install_requires=['Flask', 'celery', 'requests', 'redis', 'pyyaml'],
    include_package_data=True,
    zip_safe=False,
    test_suite='nose.collector',
    tests_require=['nose'],
)
| {
"content_hash": "9b420b4d385728732472df52483e0b02",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 66,
"avg_line_length": 30.533333333333335,
"alnum_prop": 0.5655021834061136,
"repo_name": "RaminderSinghSahni/micro-ram-bot",
"id": "cbd762fa189f57048af89e1700b6d3892ff78928",
"size": "916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "146577"
}
],
"symlink_target": ""
} |
import sys
import argparse
from evohomeclient2 import EvohomeClient
# Credentials live in an untracked config.py; exit early with a friendly
# message when it has not been created yet.
try:
    from config import username, password
except ImportError:
    print("Please configure config.py")
    sys.exit()
def do_restore(filename, debug):
    """Restore all Evohome zone schedules from a backup file.

    :param filename: path of the schedule backup to restore.
    :param debug: when True, enable EvohomeClient debug output.
    """
    # Fix: the message previously printed the literal placeholder
    # "(unknown)" instead of interpolating the filename.
    print(f"Restoring schedule from: {filename}")

    client = EvohomeClient(username, password, debug=debug)
    client.zone_schedules_restore(filename)

    print("Finished")
def main():
    """Parse command-line arguments and run the schedule restore."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--debug", action="store_true")
    parser.add_argument("filename")
    opts = parser.parse_args()

    do_restore(opts.filename, opts.debug)


if __name__ == "__main__":
    main()
| {
"content_hash": "97f295f49d6177bf1e5a46af76d953f8",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 61,
"avg_line_length": 22.266666666666666,
"alnum_prop": 0.688622754491018,
"repo_name": "andrew-blake/evohome-monitor",
"id": "09835a075650f9da2c17e00bbff32cc0912d3b24",
"size": "691",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "schedule_restore.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10481"
}
],
"symlink_target": ""
} |
from flask import render_template
from .. import lastuser_ui
@lastuser_ui.route('/')
def index():
    """Render the site landing page."""
    template_name = 'index.html'
    return render_template(template_name)
| {
"content_hash": "7301fb14c9ba1eed87b5b4fc8d8d64a2",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 40,
"avg_line_length": 17.75,
"alnum_prop": 0.7112676056338029,
"repo_name": "sindhus/lastuser",
"id": "4cc39f78e5edf10fd52effbbad77ac80ba250078",
"size": "167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lastuser_ui/views/index.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3623"
},
{
"name": "HTML",
"bytes": "35810"
},
{
"name": "JavaScript",
"bytes": "145"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "349287"
},
{
"name": "Ruby",
"bytes": "404"
},
{
"name": "Shell",
"bytes": "40"
}
],
"symlink_target": ""
} |
"""Unit-test suite for `pptx.oxml.slide` module."""
from pptx.oxml.slide import CT_NotesMaster, CT_NotesSlide
from ..unitutil.file import snippet_text
class DescribeCT_NotesMaster(object):
    """Unit-test suite for `pptx.oxml.slide.CT_NotesMaster` objects."""

    def it_can_create_a_default_notesMaster_element(self):
        expected_xml = snippet_text("default-notesMaster")
        assert CT_NotesMaster.new_default().xml == expected_xml
class DescribeCT_NotesSlide(object):
    """Unit-test suite for `pptx.oxml.slide.CT_NotesSlide` objects."""

    def it_can_create_a_new_notes_element(self):
        expected_xml = snippet_text("default-notes")
        assert CT_NotesSlide.new().xml == expected_xml
| {
"content_hash": "001fc8b3677a7322d307bcbb5df0f436",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 71,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.7057142857142857,
"repo_name": "scanny/python-pptx",
"id": "d1b48ebc427ec5868ce5131118328a60d2cca390",
"size": "719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/oxml/test_slide.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "124592"
},
{
"name": "Makefile",
"bytes": "2055"
},
{
"name": "PLpgSQL",
"bytes": "48599"
},
{
"name": "Python",
"bytes": "2152173"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import inspect
import warnings
from functools import wraps
import six
from pants.base.revision import Revision
from pants.version import VERSION
_PANTS_SEMVER = Revision.semver(VERSION)
class DeprecationApplicationError(Exception):
  """The base exception type thrown for any form of @deprecated/@deprecated_* application error."""
class MissingRemovalVersionError(DeprecationApplicationError):
  """Indicates the required removal_version was not supplied (i.e. was None)."""
class BadRemovalVersionError(DeprecationApplicationError):
  """Indicates the supplied removal_version was not a string or not a valid semver string."""
class PastRemovalVersionError(DeprecationApplicationError):
  """Indicates the supplied removal_version is not in the future.

  All deprecations must give at least until the next release for users to adapt.
  """
class BadDecoratorNestingError(DeprecationApplicationError):
  """Indicates the @deprecated decorator was not innermost in a sequence of layered decorators."""
def check_deprecated_semver(removal_version):
  """Check that the removal version is a valid semver string later than the current version.

  :param str removal_version: The pantsbuild.pants version which will remove the deprecated
                              function.
  :raises DeprecationApplicationError: if the removal_version parameter is invalid or is not a
                                       later version than the current release version.
  """
  if not isinstance(removal_version, six.string_types):
    raise BadRemovalVersionError('The removal_version must be a semver version string.')
  try:
    removal_semver = Revision.semver(removal_version)
  except Revision.BadRevision as e:
    raise BadRemovalVersionError('The given removal version {} is not a valid semver: '
                                 '{}'.format(removal_version, e))
  # Deprecations must give users at least one release cycle to adapt, so the
  # removal version has to lie strictly in the future.
  if removal_semver <= _PANTS_SEMVER:
    raise PastRemovalVersionError('The removal version must be greater than the current pants '
                                  'version of {} - given {}'.format(VERSION, removal_version))
def deprecated_conditional(predicate,
                           removal_version,
                           hint_message,
                           stacklevel=3):
  """Marks a certain configuration as deprecated.

  The predicate is used to determine if that configuration is deprecated. It is a function that
  will be called; if it returns True, the deprecation warning is issued.

  :param () -> bool predicate: A function that returns True if the deprecation warning should be on.
  :param unicode removal_version: The pantsbuild.pants version which will remove the deprecated
                                  function.
  :param unicode hint_message: A hint describing the deprecation and pointing to alternatives.
  :param int stacklevel: How far up in the stack do we go to find the calling fn to report
  :raises DeprecationApplicationError: if the deprecation is applied improperly.
  """
  if removal_version is None:
    raise MissingRemovalVersionError('A removal_version must be specified for this deprecation.')
  check_deprecated_semver(removal_version)

  if predicate():
    warning_message = ('\n{hint_message}'
                       '\nWill be removed in version {removal_version}.'.format(
                           hint_message=hint_message,
                           removal_version=removal_version))
    warnings.warn(warning_message, DeprecationWarning, stacklevel=stacklevel)
def deprecated(removal_version, hint_message=None):
  """Marks a function or method as deprecated.

  A removal version must be supplied and it must be greater than the current 'pantsbuild.pants'
  version.

  Pick the removal version with care: short cycles keep the code base clean, but users need
  enough time to adapt. If the hint message fully conveys the corrective action and the impact
  is small (custom tasks rather than BUILD files), the next release version is fine as the
  removal version; otherwise seek consensus on a longer horizon.

  :param str removal_version: The pantsbuild.pants version which will remove the deprecated
                              function.
  :param str hint_message: An optional hint pointing to alternatives to the deprecation.
  :raises DeprecationApplicationError: if the @deprecated decorator is applied improperly.
  """
  if removal_version is None:
    raise MissingRemovalVersionError('A removal_version must be specified for this deprecation.')
  check_deprecated_semver(removal_version)

  def decorator(func):
    # The decorator must see the real function so the reported module/name are accurate.
    if not inspect.isfunction(func):
      raise BadDecoratorNestingError('The @deprecated decorator must be applied innermost of all '
                                     'decorators.')

    message = '\n{}.{} is deprecated and will be removed in version {}'.format(
        func.__module__, func.__name__, removal_version)
    message += (':\n' + hint_message) if hint_message else '.'

    @wraps(func)
    def wrapper(*args, **kwargs):
      warnings.warn(message, DeprecationWarning, stacklevel=2)
      return func(*args, **kwargs)

    return wrapper
  return decorator
def deprecated_module(removal_version, hint_message=None):
  """Marks an entire module as deprecated.

  Call this at the top of the deprecated module and a warning is printed whenever the module
  is imported.

  Arguments are as for deprecated(), above.
  """
  if removal_version is None:
    raise MissingRemovalVersionError('A removal_version must be specified for this deprecation.')
  check_deprecated_semver(removal_version)

  message = ('\nModule is deprecated and will be removed in version '
             '{removal_version}').format(removal_version=removal_version)
  message += (': ' + hint_message) if hint_message else '.'
  warnings.warn(message, DeprecationWarning, stacklevel=2)
| {
"content_hash": "4c69b5b69dd531dec3f31c349f3ad174",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 100,
"avg_line_length": 41.139240506329116,
"alnum_prop": 0.7086153846153846,
"repo_name": "dturner-tw/pants",
"id": "c5d4fc4106f952fea8dae04f7ea53ff9918f3249",
"size": "6647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/base/deprecated.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "11538"
},
{
"name": "Cucumber",
"bytes": "919"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1849"
},
{
"name": "HTML",
"bytes": "70358"
},
{
"name": "Java",
"bytes": "293253"
},
{
"name": "JavaScript",
"bytes": "31042"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4404984"
},
{
"name": "Scala",
"bytes": "85217"
},
{
"name": "Shell",
"bytes": "50774"
},
{
"name": "Thrift",
"bytes": "2919"
}
],
"symlink_target": ""
} |
"""Test object in lib/assemblers/base."""
import re
from os.path import join
import subprocess
from unittest.mock import MagicMock, patch, call
import pytest
from lib.assemblers.base import BaseAssembler
def build_assembler():
    """Construct a BaseAssembler with canned args and iteration state."""
    args = {
        'bit_score': 44,
        'contig_length': 55,
        'temp_dir': 'my_temp_dir',
        'assembler': 'my_assembler'}
    assembler = BaseAssembler(args, 'my_db_conn')
    assembler.state = {
        'blast_db': 'my_blast_db',
        'query_file': 'my_query_file.seq',
        'query_target': 'my_query_name',
        'iter_dir': 'my_iter_dir',
        'iteration': 99,
        'cxn': 'my_cxn'}
    return assembler
def test_init_01():
    """It initializes an assembler object with empty default state."""
    asm = BaseAssembler('args', 'cxn')

    assert asm.args == 'args'
    assert asm.blast_only is False
    assert asm.steps == []
    assert asm.file == {}
    assert asm.state == {
        'iteration': 0,
        'query_target': '',
        'query_file': '',
        'blast_db': '',
        'iter_dir': '',
        'cxn': 'cxn'}
def test_init_iteration_01():
    """It stores the per-iteration variables on the assembler state."""
    asm = build_assembler()

    asm.init_iteration('another_blast_db', 'another_query_file', 99)

    assert asm.state['blast_db'] == 'another_blast_db'
    assert asm.state['query_file'] == 'another_query_file'
    assert asm.state['iteration'] == 99
def test_setup_files_01():
    """It sets up assembler file names and counters for an iteration."""
    asm = build_assembler()
    iter_dir = 'my_iter_dir'

    asm.setup_files(iter_dir)

    assert asm.state['iter_dir'] == iter_dir
    assert asm.file['long_reads'] == ''
    # Each fasta path lives under the iteration directory.
    for stem in ('output', 'paired_1', 'paired_2',
                 'single_1', 'single_2', 'single_any'):
        assert asm.file[stem].endswith(join(iter_dir, stem + '.fasta'))
    # All read counters start at zero.
    for counter in ('paired_count', 'single_1_count',
                    'single_2_count', 'single_any_count'):
        assert asm.file[counter] == 0
def test_file_prefix_01():
    """It builds the file-name prefix from blast db, target, and iteration."""
    asm = build_assembler()

    # build_assembler() sets blast_db='my_blast_db',
    # query_target='my_query_name', iteration=99.
    assert asm.file_prefix() == 'my_blast_db_my_query_name_99_'
def test_iter_file_01():
    """It joins a file name onto the iteration directory."""
    asm = build_assembler()

    result = asm.iter_file('my_file.txt')

    assert result.endswith(join(asm.state['iter_dir'], 'my_file.txt'))
def test_work_path_01():
    """It reports the iteration directory as the work path."""
    assembler = build_assembler()
    assert assembler.work_path() == assembler.state['iter_dir']
@patch('lib.log.info')
def test_run_01(info):
    """It performs a normal flow and logs the iteration banner."""
    asm = build_assembler()
    asm.assemble = MagicMock()

    asm.run()

    # build_assembler() sets assembler='my_assembler' and iteration=99.
    info.assert_called_once_with(
        'Assembling shards with my_assembler: iteration 99')
@patch('lib.log.info')
@patch('lib.log.error')
def test_run_02(error, info):
    """run() converts an assembler timeout into a TimeoutError."""
    args = {'assembler': 'my_assembler', 'timeout': 10}
    asm = BaseAssembler(args, 'cxn')
    asm.assemble = MagicMock(side_effect=TimeoutError())
    with pytest.raises(TimeoutError) as timeout_error:
        asm.run()
    # Normalize the trailing period; exact text differs by Python version.
    msg = str(timeout_error.value)
    if msg[-1] != '.':
        msg += '.'
    assert msg == 'Time ran out for the assembler after 0:00:10 (HH:MM:SS).'
    info.assert_called_once_with(
        'Assembling shards with {}: iteration {}'.format(
            args['assembler'], asm.state['iteration']))
    # Python 3.6 formats exceptions differently so match with a regex.
    assert error.call_count == 1
    pattern = re.compile(
        r'Time ran out for the assembler after 0:00:10 \(HH:MM:SS\)')
    assert pattern.match(error.call_args[0][0])
@patch('lib.log.info')
@patch('lib.log.error')
def test_run_03(error, info):
    """run() wraps a subprocess failure in a RuntimeError.

    The original bound its CalledProcessError to a local named ``error``,
    shadowing the patched ``lib.log.error`` mock parameter and making the
    mock impossible to assert on; the local is renamed to avoid that.
    """
    process_error = subprocess.CalledProcessError(88, 'my command')
    asm = build_assembler()
    asm.assemble = MagicMock(side_effect=process_error)
    with pytest.raises(RuntimeError) as runtime_error:
        asm.run()
    # Normalize the trailing period; exact text differs by Python version.
    error_msg = str(runtime_error.value)
    if error_msg[-1] != '.':
        error_msg += '.'
    assert error_msg == (
        "The assembler failed with error: Command 'my command' "
        "returned non-zero exit status 88.")
    expect = 'Assembling shards with {}: iteration {}'.format(
        asm.args['assembler'], asm.state['iteration'])
    info.assert_called_once_with(expect)
@patch('lib.log.info')
@patch('lib.db_atram.sra_blast_hits_count')
def test_count_blast_hits_01(sra_blast_hits_count, info):
    """count_blast_hits() reports zero hits."""
    asm = build_assembler()
    sra_blast_hits_count.return_value = 0
    assert asm.count_blast_hits() == 0
    expect = '0 blast hits in iteration {}'.format(asm.state['iteration'])
    info.assert_called_once_with(expect)


@patch('lib.log.info')
@patch('lib.db_atram.sra_blast_hits_count')
def test_count_blast_hits_02(sra_blast_hits_count, info):
    """count_blast_hits() reports a single hit."""
    asm = build_assembler()
    sra_blast_hits_count.return_value = 1
    assert asm.count_blast_hits() == 1
    expect = '1 blast hits in iteration {}'.format(asm.state['iteration'])
    info.assert_called_once_with(expect)
@patch('lib.log.info')
def test_nothing_assembled_01(info):
    """nothing_assembled() is true when the output file is missing."""
    asm = build_assembler()
    asm.file['output'] = 'tests/data/missing_file.txt'
    assert asm.nothing_assembled()
    expect = 'No new assemblies in iteration {}'.format(
        asm.state['iteration'])
    info.assert_called_once_with(expect)


@patch('lib.log.info')
def test_nothing_assembled_02(info):
    """nothing_assembled() is true when the output file is empty."""
    asm = build_assembler()
    asm.file['output'] = 'tests/data/empty_file.txt'
    assert asm.nothing_assembled()
    expect = 'No new assemblies in iteration {}'.format(
        asm.state['iteration'])
    info.assert_called_once_with(expect)


@patch('lib.log.info')
def test_nothing_assembled_03(info):
    """nothing_assembled() is false when there is assembly output."""
    asm = build_assembler()
    asm.file['output'] = 'tests/data/load_seq1.txt'
    assert not asm.nothing_assembled()
    info.assert_not_called()
@patch('lib.log.info')
@patch('lib.db_atram.assembled_contigs_count')
def test_assembled_contigs_count_01(assembled_contigs_count, info):
    """assembled_contigs_count() logs when no contig passes the filters."""
    high_score = 5
    asm = build_assembler()
    assembled_contigs_count.return_value = 0
    assert asm.assembled_contigs_count(high_score) == 0
    query_args = (
        asm.state['cxn'], asm.state['iteration'],
        asm.args['bit_score'], asm.args['contig_length'])
    assembled_contigs_count.assert_called_once_with(*query_args)
    expect = ('No contigs had a bit score greater than {} and are at '
              'least {} bp long in iteration {}. The highest score for '
              'this iteration is {}').format(
                  asm.args['bit_score'], asm.args['contig_length'],
                  asm.state['iteration'], high_score)
    info.assert_called_once_with(expect)


@patch('lib.log.info')
@patch('lib.db_atram.assembled_contigs_count')
def test_assembled_contigs_count_02(assembled_contigs_count, info):
    """assembled_contigs_count() is silent when contigs pass the filters."""
    high_score = 5
    asm = build_assembler()
    assembled_contigs_count.return_value = 1
    assert asm.assembled_contigs_count(high_score) == 1
    query_args = (
        asm.state['cxn'], asm.state['iteration'],
        asm.args['bit_score'], asm.args['contig_length'])
    assembled_contigs_count.assert_called_once_with(*query_args)
    info.assert_not_called()
@patch('lib.log.info')
@patch('lib.db_atram.iteration_overlap_count')
def test_no_new_contigs_01(iteration_overlap_count, info):
    """no_new_contigs() is false when the contig count changed."""
    count = 1
    asm = build_assembler()
    iteration_overlap_count.return_value = count + 1
    assert not asm.no_new_contigs(count)
    query_args = (
        asm.state['cxn'], asm.state['iteration'],
        asm.args['bit_score'], asm.args['contig_length'])
    iteration_overlap_count.assert_called_once_with(*query_args)
    info.assert_not_called()


@patch('lib.log.info')
@patch('lib.db_atram.iteration_overlap_count')
def test_no_new_contigs_02(iteration_overlap_count, info):
    """no_new_contigs() is true, and logs, when the count is unchanged."""
    count = 1
    asm = build_assembler()
    iteration_overlap_count.return_value = count
    assert asm.no_new_contigs(count)
    query_args = (
        asm.state['cxn'], asm.state['iteration'],
        asm.args['bit_score'], asm.args['contig_length'])
    iteration_overlap_count.assert_called_once_with(*query_args)
    info.assert_called_once_with(
        'No new contigs were found in iteration {}'.format(
            asm.state['iteration']))
@patch('lib.log.subcommand')
def test_assemble(subcommand):
    """assemble() runs every step under the configured timeout."""
    asm = build_assembler()
    asm.args['timeout'] = 333
    asm.post_assembly = MagicMock()
    asm.step1 = MagicMock(return_value='step1')
    asm.step2 = MagicMock(return_value='step2')
    asm.steps = [asm.step1, asm.step2]
    asm.assemble()
    expected = [
        call(name, asm.args['temp_dir'], asm.args['timeout'])
        for name in ('step1', 'step2')]
    subcommand.assert_has_calls(expected)
| {
"content_hash": "48c8668ed095d0c953b75adf5968fefc",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 78,
"avg_line_length": 28.324858757062145,
"alnum_prop": 0.6223197367108806,
"repo_name": "AntonelliLab/seqcap_processor",
"id": "ac2b99ebf0d3eb7c0f41004e416cff74f3a90ecc",
"size": "10027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/aTRAM-master/tests/lib/assemblers/test_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4198"
},
{
"name": "Python",
"bytes": "584979"
},
{
"name": "R",
"bytes": "6478"
},
{
"name": "Shell",
"bytes": "708"
}
],
"symlink_target": ""
} |
import sys
from numba.core.utils import _RedirectSubpackage
# Replace this module object in sys.modules with a redirect shim so that
# imports of this legacy location resolve to "numba.core.types".
sys.modules[__name__] = _RedirectSubpackage(locals(), "numba.core.types")
| {
"content_hash": "263cf3d4749210677eacbb67513b7a4f",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 73,
"avg_line_length": 33.75,
"alnum_prop": 0.762962962962963,
"repo_name": "gmarkall/numba",
"id": "3bb9f21980380f18f25e3a859cc90f1449c483bf",
"size": "135",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "numba/types/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6761"
},
{
"name": "C",
"bytes": "625527"
},
{
"name": "C++",
"bytes": "85627"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "8467098"
},
{
"name": "Shell",
"bytes": "8286"
}
],
"symlink_target": ""
} |
"""Common code for converting proto to other formats, such as JSON."""
import base64
import collections
import datetime
import json
import six
from apitools.base.protorpclite import message_types
from apitools.base.protorpclite import messages
from apitools.base.protorpclite import protojson
from apitools.base.py import exceptions
# Encoder/decoder pair stored in the codec registries below.
_Codec = collections.namedtuple('_Codec', ['encoder', 'decoder'])
# Result of one codec invocation: the (possibly transformed) value plus a
# flag saying whether further processing should stop.
CodecResult = collections.namedtuple('CodecResult', ['value', 'complete'])
class EdgeType(object):
    """The type of transition made by an edge."""
    SCALAR = 1    # a plain (non-repeated, non-map) field
    REPEATED = 2  # an indexed element of a repeated field
    MAP = 3       # a keyed entry of a map-like field
class ProtoEdge(collections.namedtuple('ProtoEdge',
                                       ['type_', 'field', 'index'])):
    """One unambiguous step from a protobuf message down to a nested value.

    Message-typed fields allow arbitrary nesting, so a single instance
    often contains many levels of sub-messages.  A ProtoEdge pins down a
    single hop of that descent.

    Properties:
      type_: EdgeType, The kind of transition this edge represents.
      field: str, The name of the message-typed field being traversed.
      index: Any, Extra datum needed to complete the hop.  Its meaning
          depends on "type_": ignored for SCALAR, a numeric position into
          "field"'s list for REPEATED, a key into "field"'s mapping for
          MAP.
    """
    __slots__ = ()

    def __str__(self):
        if self.type_ == EdgeType.SCALAR:
            return self.field
        return '{}[{}]'.format(self.field, self.index)
# TODO(craigcitro): Make these non-global.
# Registries populated by the decorators below.
_UNRECOGNIZED_FIELD_MAPPINGS = {}  # message class -> catch-all field name
_CUSTOM_MESSAGE_CODECS = {}        # message class -> _Codec
_CUSTOM_FIELD_CODECS = {}          # field instance -> _Codec
_FIELD_TYPE_CODECS = {}            # field type -> _Codec
def MapUnrecognizedFields(field_name):
    """Register field_name as a container for unrecognized fields."""
    def _decorate(cls):
        _UNRECOGNIZED_FIELD_MAPPINGS[cls] = field_name
        return cls
    return _decorate


def RegisterCustomMessageCodec(encoder, decoder):
    """Register a custom encoder/decoder for this message class."""
    codec = _Codec(encoder=encoder, decoder=decoder)

    def _decorate(cls):
        _CUSTOM_MESSAGE_CODECS[cls] = codec
        return cls
    return _decorate


def RegisterCustomFieldCodec(encoder, decoder):
    """Register a custom encoder/decoder for this field."""
    codec = _Codec(encoder=encoder, decoder=decoder)

    def _decorate(field):
        _CUSTOM_FIELD_CODECS[field] = codec
        return field
    return _decorate


def RegisterFieldTypeCodec(encoder, decoder):
    """Register a custom encoder/decoder for all fields of this type."""
    codec = _Codec(encoder=encoder, decoder=decoder)

    def _decorate(field_type):
        _FIELD_TYPE_CODECS[field_type] = codec
        return field_type
    return _decorate
def CopyProtoMessage(message):
    """Return a deep copy of message via a JSON round trip."""
    as_json = MessageToJson(message)
    return JsonToMessage(type(message), as_json)


def MessageToJson(message, include_fields=None):
    """Serialize message to a JSON string, optionally forcing fields."""
    encoded = _ProtoJsonApiTools.Get().encode_message(message)
    return _IncludeFields(encoded, message, include_fields)


def JsonToMessage(message_type, message):
    """Parse the JSON string message into a message_type instance."""
    return _ProtoJsonApiTools.Get().decode_message(message_type, message)


# TODO(craigcitro): Do this directly, instead of via JSON.
def DictToMessage(d, message_type):
    """Build a message_type instance from the dictionary d."""
    return JsonToMessage(message_type, json.dumps(d))


def MessageToDict(message):
    """Turn message into a plain Python dictionary."""
    return json.loads(MessageToJson(message))
def DictToAdditionalPropertyMessage(properties, additional_property_type,
                                    sort_items=False):
    """Wrap a dict's items in an AdditionalProperty-style message.

    Args:
      properties: dict, The key/value pairs to wrap.
      additional_property_type: The message type to build; it must expose
          an AdditionalProperty sub-message with key and value fields.
      sort_items: bool, If True emit the pairs in sorted key order.

    Returns:
      An additional_property_type instance with one AdditionalProperty
      per item of properties.
    """
    items = sorted(properties.items()) if sort_items else properties.items()
    pairs = [
        additional_property_type.AdditionalProperty(key=key, value=value)
        for key, value in items]
    return additional_property_type(additionalProperties=pairs)
def PyValueToMessage(message_type, value):
    """Build a message_type instance from a plain Python value."""
    return JsonToMessage(message_type, json.dumps(value))


def MessageToPyValue(message):
    """Turn message into the equivalent plain Python value."""
    return json.loads(MessageToJson(message))
def MessageToRepr(msg, multiline=False, **kwargs):
    """Return a repr-style string for a protorpc message.

    protorpc.Message.__repr__ does not return anything that could be
    considered python code. Adding this function lets us print a protorpc
    message in such a way that it could be pasted into code later, and used
    to compare against other things.

    Args:
      msg: protorpc.Message, the message to be repr'd.
      multiline: bool, True if the returned string should have each field
          assignment on its own line.
      **kwargs: {str:str}, Additional flags for how to format the string.

    Known **kwargs:
      shortstrings: bool, True if all string values should be
          truncated at 100 characters, since when mocking the contents
          typically don't matter except for IDs, and IDs are usually
          less than 100 characters.
      no_modules: bool, True if the long module name should not be printed
          with each type.

    Returns:
      str, A string of valid python (assuming the right imports have been
      made) that recreates the message passed into this function.
    """
    # TODO(jasmuth): craigcitro suggests a pretty-printer from apitools/gen.
    indent = kwargs.get('indent', 0)
    def IndentKwargs(kwargs):
        # Copy kwargs with the indent bumped one level for nested values.
        kwargs = dict(kwargs)
        kwargs['indent'] = kwargs.get('indent', 0) + 4
        return kwargs
    if isinstance(msg, list):
        # Repr each element, optionally one per line.
        s = '['
        for item in msg:
            if multiline:
                s += '\n' + ' ' * (indent + 4)
            s += MessageToRepr(
                item, multiline=multiline, **IndentKwargs(kwargs)) + ','
        if multiline:
            s += '\n' + ' ' * indent
        s += ']'
        return s
    if isinstance(msg, messages.Message):
        # Emit a constructor call with every field in sorted name order.
        s = type(msg).__name__ + '('
        if not kwargs.get('no_modules'):
            s = msg.__module__ + '.' + s
        names = sorted([field.name for field in msg.all_fields()])
        for name in names:
            field = msg.field_by_name(name)
            if multiline:
                s += '\n' + ' ' * (indent + 4)
            value = getattr(msg, field.name)
            s += field.name + '=' + MessageToRepr(
                value, multiline=multiline, **IndentKwargs(kwargs)) + ','
        if multiline:
            s += '\n' + ' ' * indent
        s += ')'
        return s
    if isinstance(msg, six.string_types):
        # Optional truncation; the repr() at the bottom handles the rest.
        if kwargs.get('shortstrings') and len(msg) > 100:
            msg = msg[:100]
    if isinstance(msg, datetime.datetime):
        # Rebuild the datetime with a tzinfo whose repr is valid code.
        class SpecialTZInfo(datetime.tzinfo):
            def __init__(self, offset):
                super(SpecialTZInfo, self).__init__()
                self.offset = offset
            def __repr__(self):
                s = 'TimeZoneOffset(' + repr(self.offset) + ')'
                if not kwargs.get('no_modules'):
                    s = 'apitools.base.protorpclite.util.' + s
                return s
        # NOTE(review): assumes msg.tzinfo is not None; a naive datetime
        # would fail on the utcoffset() call — confirm with callers.
        msg = datetime.datetime(
            msg.year, msg.month, msg.day, msg.hour, msg.minute, msg.second,
            msg.microsecond, SpecialTZInfo(msg.tzinfo.utcoffset(0)))
    return repr(msg)
def _GetField(message, field_path):
for field in field_path:
if field not in dir(message):
raise KeyError('no field "%s"' % field)
message = getattr(message, field)
return message
def _SetField(dictblob, field_path, value):
for field in field_path[:-1]:
dictblob = dictblob.setdefault(field, {})
dictblob[field_path[-1]] = value
def _IncludeFields(encoded_message, message, include_fields):
    """Force the requested fields to appear in the encoded JSON.

    Each entry of include_fields is a dotted path; the corresponding key
    is set to null (or [] for list-valued fields) in the returned JSON so
    that normally-omitted empty fields are serialized explicitly.

    Raises:
      exceptions.InvalidDataError: if a named field does not exist.
    """
    if include_fields is None:
        return encoded_message
    decoded = json.loads(encoded_message)
    for field_name in include_fields:
        path = field_name.split('.')
        try:
            value = _GetField(message, path)
        except KeyError:
            raise exceptions.InvalidDataError(
                'No field named %s in message of type %s' % (
                    field_name, type(message)))
        nullvalue = [] if isinstance(value, list) else None
        _SetField(decoded, path, nullvalue)
    return json.dumps(decoded)
def _GetFieldCodecs(field, attr):
    """Collect registered codec attrs ('encoder'/'decoder') for field.

    Per-field registrations take precedence over per-type ones.
    """
    candidates = (
        getattr(_CUSTOM_FIELD_CODECS.get(field), attr, None),
        getattr(_FIELD_TYPE_CODECS.get(type(field)), attr, None),
    )
    return [codec for codec in candidates if codec is not None]
class _ProtoJsonApiTools(protojson.ProtoJson):
    """JSON encoder used by apitools clients.

    Extends the stock protojson codec with this module's registries:
    custom message/field codecs, custom JSON field names, and handling
    for unknown fields and unknown enum values.
    """
    # Lazily-created singleton; access via Get().
    _INSTANCE = None
    @classmethod
    def Get(cls):
        # Return the process-wide singleton, creating it on first use.
        if cls._INSTANCE is None:
            cls._INSTANCE = cls()
        return cls._INSTANCE
    def decode_message(self, message_type, encoded_message):
        # A registered whole-message codec takes over completely.
        if message_type in _CUSTOM_MESSAGE_CODECS:
            return _CUSTOM_MESSAGE_CODECS[
                message_type].decoder(encoded_message)
        # Translate custom wire names back to Python names, then decode.
        result = _DecodeCustomFieldNames(message_type, encoded_message)
        result = super(_ProtoJsonApiTools, self).decode_message(
            message_type, result)
        # Preserve values the base decoder could not place.
        result = _ProcessUnknownEnums(result, encoded_message)
        result = _ProcessUnknownMessages(result, encoded_message)
        return _DecodeUnknownFields(result, encoded_message)
    def decode_field(self, field, value):
        """Decode the given JSON value.

        Args:
          field: a messages.Field for the field we're decoding.
          value: a python value we'd like to decode.

        Returns:
          A value suitable for assignment to field.
        """
        # Field-level codecs run first; each may finish the job.
        for decoder in _GetFieldCodecs(field, 'decoder'):
            result = decoder(field, value)
            value = result.value
            if result.complete:
                return value
        if isinstance(field, messages.MessageField):
            # Sub-messages re-enter the full decoder via a JSON round trip.
            field_value = self.decode_message(
                field.message_type, json.dumps(value))
        elif isinstance(field, messages.EnumField):
            # Custom JSON enum names map back to Python names first.
            value = GetCustomJsonEnumMapping(
                field.type, json_name=value) or value
            try:
                field_value = super(
                    _ProtoJsonApiTools, self).decode_field(field, value)
            except messages.DecodeError:
                # Unknown string enum symbols are tolerated here; they are
                # kept as unrecognized fields by _ProcessUnknownEnums.
                if not isinstance(value, six.string_types):
                    raise
                field_value = None
        else:
            field_value = super(
                _ProtoJsonApiTools, self).decode_field(field, value)
        return field_value
    def encode_message(self, message):
        # A FieldList encodes as a JSON array of its members.
        if isinstance(message, messages.FieldList):
            return '[%s]' % (', '.join(self.encode_message(x)
                                       for x in message))
        # pylint: disable=unidiomatic-typecheck
        if type(message) in _CUSTOM_MESSAGE_CODECS:
            return _CUSTOM_MESSAGE_CODECS[type(message)].encoder(message)
        # Move unknown fields back into their catch-all container first.
        message = _EncodeUnknownFields(message)
        result = super(_ProtoJsonApiTools, self).encode_message(message)
        result = _EncodeCustomFieldNames(message, result)
        # Re-serialize with sorted keys for deterministic output.
        return json.dumps(json.loads(result), sort_keys=True)
    def encode_field(self, field, value):
        """Encode the given value as JSON.

        Args:
          field: a messages.Field for the field we're encoding.
          value: a value for field.

        Returns:
          A python value suitable for json.dumps.
        """
        for encoder in _GetFieldCodecs(field, 'encoder'):
            result = encoder(field, value)
            value = result.value
            if result.complete:
                return value
        if isinstance(field, messages.EnumField):
            # Apply custom JSON names for enum values when registered.
            if field.repeated:
                remapped_value = [GetCustomJsonEnumMapping(
                    field.type, python_name=e.name) or e.name for e in value]
            else:
                remapped_value = GetCustomJsonEnumMapping(
                    field.type, python_name=value.name)
            if remapped_value:
                return remapped_value
        if (isinstance(field, messages.MessageField) and
                not isinstance(field, message_types.DateTimeField)):
            # Sub-messages go through the full encoder; DateTimeField is a
            # MessageField but is excluded here.
            value = json.loads(self.encode_message(value))
        return super(_ProtoJsonApiTools, self).encode_field(field, value)
# TODO(craigcitro): Fold this and _IncludeFields in as codecs.
def _DecodeUnknownFields(message, encoded_message):
    """Rewrite unknown fields in message into message.destination.

    Only applies to message classes registered via MapUnrecognizedFields;
    for others the message is returned unchanged.
    """
    destination = _UNRECOGNIZED_FIELD_MAPPINGS.get(type(message))
    if destination is None:
        return message
    pair_field = message.field_by_name(destination)
    if not isinstance(pair_field, messages.MessageField):
        raise exceptions.InvalidDataFromServerError(
            'Unrecognized fields must be mapped to a compound '
            'message type.')
    pair_type = pair_field.message_type
    # TODO(craigcitro): Add more error checking around the pair
    # type being exactly what we suspect (field names, etc).
    if isinstance(pair_type.value, messages.MessageField):
        # Message-valued pairs need a full re-decode from the raw JSON.
        new_values = _DecodeUnknownMessages(
            message, json.loads(encoded_message), pair_type)
    else:
        new_values = _DecodeUnrecognizedFields(message, pair_type)
    setattr(message, destination, new_values)
    # We could probably get away with not setting this, but
    # why not clear it?
    setattr(message, '_Message__unrecognized_fields', {})
    return message
def _DecodeUnknownMessages(message, encoded_message, pair_type):
    """Process unknown fields in encoded_message of a message type.

    Returns a list of pair_type instances, one per JSON key that did not
    match a declared field of message.
    """
    field_type = pair_type.value.type
    new_values = []
    all_field_names = [x.name for x in message.all_fields()]
    for name, value_dict in six.iteritems(encoded_message):
        # Only keys that did not match a declared field are "unknown".
        if name in all_field_names:
            continue
        value = PyValueToMessage(field_type, value_dict)
        if pair_type.value.repeated:
            value = _AsMessageList(value)
        new_pair = pair_type(key=name, value=value)
        new_values.append(new_pair)
    return new_values
def _DecodeUnrecognizedFields(message, pair_type):
    """Process unrecognized fields in message.

    Returns a list of pair_type instances, one per unrecognized field,
    with each value decoded through the registered codecs.
    """
    new_values = []
    codec = _ProtoJsonApiTools.Get()
    for unknown_field in message.all_unrecognized_fields():
        # TODO(craigcitro): Consider validating the variant if
        # the assignment below doesn't take care of it. It may
        # also be necessary to check it in the case that the
        # type has multiple encodings.
        value, _ = message.get_unrecognized_field_info(unknown_field)
        value_type = pair_type.field_by_name('value')
        if isinstance(value_type, messages.MessageField):
            decoded_value = DictToMessage(value, pair_type.value.message_type)
        else:
            decoded_value = codec.decode_field(
                pair_type.value, value)
        try:
            new_pair_key = str(unknown_field)
        except UnicodeEncodeError:
            # Non-ASCII keys (Python 2) need a real field decode instead.
            new_pair_key = protojson.ProtoJson().decode_field(
                pair_type.key, unknown_field)
        new_pair = pair_type(key=new_pair_key, value=decoded_value)
        new_values.append(new_pair)
    return new_values
def _CopyProtoMessageVanillaProtoJson(message):
    """Deep-copy message using the stock protojson codec.

    Used where CopyProtoMessage would recurse back into the caller.
    """
    codec = protojson.ProtoJson()
    encoded = codec.encode_message(message)
    return codec.decode_message(type(message), encoded)
def _EncodeUnknownFields(message):
    """Remap unknown fields in message out of message.source.

    Only applies to message classes registered via MapUnrecognizedFields;
    for others the message is returned unchanged.
    """
    source = _UNRECOGNIZED_FIELD_MAPPINGS.get(type(message))
    if source is None:
        return message
    # CopyProtoMessage uses _ProtoJsonApiTools, which uses this message. Use
    # the vanilla protojson-based copy function to avoid infinite recursion.
    result = _CopyProtoMessageVanillaProtoJson(message)
    pairs_field = message.field_by_name(source)
    if not isinstance(pairs_field, messages.MessageField):
        raise exceptions.InvalidUserInputError(
            'Invalid pairs field %s' % pairs_field)
    pairs_type = pairs_field.message_type
    value_field = pairs_type.field_by_name('value')
    value_variant = value_field.variant
    pairs = getattr(message, source)
    codec = _ProtoJsonApiTools.Get()
    # Each key/value pair becomes an unrecognized field on the copy, and
    # the catch-all container itself is emptied.
    for pair in pairs:
        encoded_value = codec.encode_field(value_field, pair.value)
        result.set_unrecognized_field(pair.key, encoded_value, value_variant)
    setattr(result, source, [])
    return result
def _SafeEncodeBytes(field, value):
    """Encode the bytes in value as urlsafe base64, pass through on failure."""
    try:
        if field.repeated:
            encoded = [base64.urlsafe_b64encode(byte) for byte in value]
        else:
            encoded = base64.urlsafe_b64encode(value)
        return CodecResult(value=encoded, complete=True)
    except TypeError:
        return CodecResult(value=value, complete=False)


def _SafeDecodeBytes(unused_field, value):
    """Decode the urlsafe base64 value into bytes, pass through on failure."""
    try:
        decoded = base64.urlsafe_b64decode(str(value))
        return CodecResult(value=decoded, complete=True)
    except TypeError:
        return CodecResult(value=value, complete=False)
def _ProcessUnknownEnums(message, encoded_message):
    """Add unknown enum values from encoded_message as unknown fields.

    ProtoRPC diverges from the usual protocol buffer behavior here and
    doesn't allow unknown fields. Throwing on unknown fields makes it
    impossible to let servers add new enum values and stay compatible
    with older clients, which isn't reasonable for us. We simply store
    unrecognized enum values as unknown fields, and all is well.

    Args:
      message: Proto message we've decoded thus far.
      encoded_message: JSON string we're decoding.

    Returns:
      message, with any unknown enums stored as unrecognized fields.
    """
    if not encoded_message:
        return message
    decoded_message = json.loads(six.ensure_str(encoded_message))
    for field in message.all_fields():
        if (isinstance(field, messages.EnumField) and
                field.name in decoded_message):
            # A None assigned value (or a repeated list shorter than the
            # encoded one) means some encoded enum symbols failed to
            # decode; keep the raw JSON value as an unrecognized field.
            value = message.get_assigned_value(field.name)
            if ((field.repeated and len(value) != len(decoded_message[field.name])) or
                    value is None):
                message.set_unrecognized_field(
                    field.name, decoded_message[field.name], messages.Variant.ENUM)
    return message
def _ProcessUnknownMessages(message, encoded_message):
    """Save leftover JSON keys as string-typed unknown fields.

    ProtoRPC drops unknown values whose type it cannot determine (logging
    a "No variant found" message), which loses information when the
    message is reserialized.  Keep them as unrecognized string fields so
    they survive the round trip.

    Args:
      message: Proto message we've decoded thus far.
      encoded_message: JSON string we're decoding.

    Returns:
      message, with any remaining unrecognized fields saved.
    """
    if not encoded_message:
        return message
    decoded = json.loads(six.ensure_str(encoded_message))
    known_names = [x.name for x in message.all_fields()] + list(
        message.all_unrecognized_fields())
    for name in decoded:
        if name in known_names:
            continue
        message.set_unrecognized_field(
            name, decoded[name], messages.Variant.STRING)
    return message
# Wire the base64 codec in for all bytes fields.
RegisterFieldTypeCodec(_SafeEncodeBytes, _SafeDecodeBytes)(messages.BytesField)
# Note that these could share a dictionary, since they're keyed by
# distinct types, but it's not really worth it.
_JSON_ENUM_MAPPINGS = {}   # enum type -> {python_name: json_name}
_JSON_FIELD_MAPPINGS = {}  # message type -> {python_name: json_name}
def AddCustomJsonEnumMapping(enum_type, python_name, json_name,
                             package=None):  # pylint: disable=unused-argument
    """Add a custom wire encoding for a given enum value.

    Primarily used in generated code, to handle enum values which happen
    to be Python keywords.

    Args:
      enum_type: (messages.Enum) An enum type
      python_name: (basestring) Python name for this value.
      json_name: (basestring) JSON name to be used on the wire.
      package: (NoneType, optional) No effect, exists for legacy
          compatibility.

    Raises:
      exceptions.TypecheckError: if enum_type is not an Enum subclass.
      exceptions.InvalidDataError: if python_name is not one of its
          values, or a conflicting mapping already exists.
    """
    if not issubclass(enum_type, messages.Enum):
        raise exceptions.TypecheckError(
            'Cannot set JSON enum mapping for non-enum "%s"' % enum_type)
    if python_name not in enum_type.names():
        raise exceptions.InvalidDataError(
            'Enum value %s not a value for type %s' % (python_name, enum_type))
    mapping = _JSON_ENUM_MAPPINGS.setdefault(enum_type, {})
    _CheckForExistingMappings('enum', enum_type, python_name, json_name)
    mapping[python_name] = json_name
def AddCustomJsonFieldMapping(message_type, python_name, json_name,
                              package=None):  # pylint: disable=unused-argument
    """Add a custom wire encoding for a given message field.

    Primarily used in generated code, to handle field names which happen
    to be Python keywords.

    Args:
      message_type: (messages.Message) A message type
      python_name: (basestring) Python name for this value.
      json_name: (basestring) JSON name to be used on the wire.
      package: (NoneType, optional) No effect, exists for legacy
          compatibility.

    Raises:
      exceptions.TypecheckError: if message_type is not a Message subclass.
      exceptions.InvalidDataError: if python_name is not one of its
          fields, or a conflicting mapping already exists.
    """
    if not issubclass(message_type, messages.Message):
        raise exceptions.TypecheckError(
            'Cannot set JSON field mapping for '
            'non-message "%s"' % message_type)
    try:
        _ = message_type.field_by_name(python_name)
    except KeyError:
        raise exceptions.InvalidDataError(
            'Field %s not recognized for type %s' % (
                python_name, message_type))
    mapping = _JSON_FIELD_MAPPINGS.setdefault(message_type, {})
    _CheckForExistingMappings('field', message_type, python_name, json_name)
    mapping[python_name] = json_name
def GetCustomJsonEnumMapping(enum_type, python_name=None, json_name=None):
    """Look up the custom name remapping for an enum value, or None."""
    return _FetchRemapping(enum_type, 'enum', python_name=python_name,
                           json_name=json_name, mappings=_JSON_ENUM_MAPPINGS)


def GetCustomJsonFieldMapping(message_type, python_name=None, json_name=None):
    """Look up the custom name remapping for a message field, or None."""
    return _FetchRemapping(message_type, 'field', python_name=python_name,
                           json_name=json_name, mappings=_JSON_FIELD_MAPPINGS)
def _FetchRemapping(type_name, mapping_type, python_name=None, json_name=None,
mappings=None):
"""Common code for fetching a key or value from a remapping dict."""
if python_name and json_name:
raise exceptions.InvalidDataError(
'Cannot specify both python_name and json_name '
'for %s remapping' % mapping_type)
if not (python_name or json_name):
raise exceptions.InvalidDataError(
'Must specify either python_name or json_name for %s remapping' % (
mapping_type,))
field_remappings = mappings.get(type_name, {})
if field_remappings:
if python_name:
return field_remappings.get(python_name)
elif json_name:
if json_name in list(field_remappings.values()):
return [k for k in field_remappings
if field_remappings[k] == json_name][0]
return None
def _CheckForExistingMappings(mapping_type, message_type,
                              python_name, json_name):
    """Raise if either side of the proposed mapping is already taken."""
    if mapping_type == 'field':
        getter = GetCustomJsonFieldMapping
    elif mapping_type == 'enum':
        getter = GetCustomJsonEnumMapping
    # Check both directions: python_name -> json_name and back.
    for queried, expected, kwargs in (
            (python_name, json_name, {'python_name': python_name}),
            (json_name, python_name, {'json_name': json_name})):
        existing = getter(message_type, **kwargs)
        if existing is not None and existing != expected:
            raise exceptions.InvalidDataError(
                'Cannot add mapping for %s "%s", already mapped to "%s"' % (
                    mapping_type, queried, existing))
def _EncodeCustomFieldNames(message, encoded_value):
    """Rewrite JSON keys of encoded_value per the custom field mappings.

    Args:
      message: The message instance that produced encoded_value; its type
          selects the remapping table.
      encoded_value: str, JSON-encoded form of message.

    Returns:
      str, JSON with every remapped python_name key renamed to json_name.
    """
    field_remappings = list(_JSON_FIELD_MAPPINGS.get(type(message), {})
                            .items())
    if field_remappings:
        decoded_value = json.loads(encoded_value)
        for python_name, json_name in field_remappings:
            # Test key membership on the decoded dict, not the raw JSON
            # string: a substring match could hit a field *value* and then
            # pop() would raise KeyError for the absent key.  This also
            # mirrors _DecodeCustomFieldNames.
            if python_name in decoded_value:
                decoded_value[json_name] = decoded_value.pop(python_name)
        encoded_value = json.dumps(decoded_value)
    return encoded_value
def _DecodeCustomFieldNames(message_type, encoded_message):
    """Rename custom JSON wire names back to their Python field names."""
    remappings = _JSON_FIELD_MAPPINGS.get(message_type, {})
    if not remappings:
        return encoded_message
    decoded = json.loads(encoded_message)
    for python_name, json_name in list(remappings.items()):
        if json_name in decoded:
            decoded[python_name] = decoded.pop(json_name)
    return json.dumps(decoded)
def _AsMessageList(msg):
    """Convert the provided list-as-JsonValue to a list."""
    # This really needs to live in extra_types, but extra_types needs
    # to import this file to be able to register codecs.
    # TODO(craigcitro): Split out a codecs module and fix this ugly
    # import.
    from apitools.base.py import extra_types

    def _IsRepeatedJsonValue(value):
        """True for a JsonArray or an array-valued JsonValue."""
        if isinstance(value, extra_types.JsonArray):
            return True
        return (isinstance(value, extra_types.JsonValue) and
                bool(value.array_value))

    if not _IsRepeatedJsonValue(msg):
        raise ValueError('invalid argument to _AsMessageList')
    result = msg.array_value if isinstance(msg, extra_types.JsonValue) else msg
    if isinstance(result, extra_types.JsonArray):
        result = result.entries
    return result
def _IsMap(message, field):
    """Returns whether the "field" is actually a map-type."""
    value = message.get_assigned_value(field.name)
    if not isinstance(value, messages.Message):
        return False
    try:
        extra = value.field_by_name('additionalProperties')
    except KeyError:
        return False
    return extra.repeated


def _MapItems(message, field):
    """Yields the (key, value) pair of the map values."""
    assert _IsMap(message, field)
    kv_pairs = message.get_assigned_value(
        field.name).get_assigned_value('additionalProperties')
    for pair in kv_pairs:
        yield pair.key, pair.value
def UnrecognizedFieldIter(message, _edges=()):  # pylint: disable=invalid-name
    """Yields the locations of unrecognized fields within "message".

    If a sub-message is found to have unrecognized fields, that sub-message
    will not be searched any further. We prune the search of the sub-message
    because we assume it is malformed and further checks will not yield
    productive errors.

    Args:
      message: The Message instance to search.
      _edges: Internal arg for passing state.

    Yields:
      (edges_to_message, field_names):
        edges_to_message: List[ProtoEdge], The edges (relative to "message")
            describing the path to the sub-message where the unrecognized
            fields were found.
        field_names: List[Str], The names of the field(s) that were
            unrecognized in the sub-message.
    """
    if not isinstance(message, messages.Message):
        # This is a primitive leaf, no errors found down this path.
        return
    field_names = message.all_unrecognized_fields()
    if field_names:
        # This message is malformed. Stop recursing and report it.
        yield _edges, field_names
        return
    # Recurse through all fields in the current message.
    for field in message.all_fields():
        value = message.get_assigned_value(field.name)
        if field.repeated:
            # One REPEATED edge per element, keyed by list index.
            for i, item in enumerate(value):
                repeated_edge = ProtoEdge(EdgeType.REPEATED, field.name, i)
                iter_ = UnrecognizedFieldIter(item, _edges + (repeated_edge,))
                for (e, y) in iter_:
                    yield e, y
        elif _IsMap(message, field):
            # One MAP edge per entry, keyed by the map key.
            for key, item in _MapItems(message, field):
                map_edge = ProtoEdge(EdgeType.MAP, field.name, key)
                iter_ = UnrecognizedFieldIter(item, _edges + (map_edge,))
                for (e, y) in iter_:
                    yield e, y
        else:
            # Scalar slot (possibly a nested singular message).
            scalar_edge = ProtoEdge(EdgeType.SCALAR, field.name, None)
            iter_ = UnrecognizedFieldIter(value, _edges + (scalar_edge,))
            for (e, y) in iter_:
                yield e, y
| {
"content_hash": "c0d68f7bf276f450b921b7f00fec6544",
"timestamp": "",
"source": "github",
"line_count": 792,
"max_line_length": 86,
"avg_line_length": 38.025252525252526,
"alnum_prop": 0.6423495816177447,
"repo_name": "google/apitools",
"id": "2d8a449339460809035be40b5a04f820911b9de3",
"size": "30716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apitools/base/py/encoding_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "832792"
}
],
"symlink_target": ""
} |
from json import *
class Track:
    """A Spotify track parsed from the Web API's track JSON object."""

    def __init__(self, json=None):
        # NOTE: the parameter name ``json`` is kept for backward
        # compatibility with existing callers, even though it shadows
        # the stdlib module name.
        if json is not None:
            self.fromJson(json)

    def fromJson(self, json):
        """Populate this track's attributes from a track JSON dict.

        Missing album or artist data degrades to empty strings instead
        of raising, so a partially-populated payload is still usable.
        """
        self.href = json['href']
        self.id = json['id']
        self.name = json['name']
        self.playbackURL = json['preview_url']
        self.popularity = json['popularity']
        album = json.get('album')
        if album is not None:
            self.albumName = album['name']
            images = album['images']
            self.albumImageURL = images[0]['url'] if images else ""
        else:
            self.albumName = ""
            # Bug fix: the original never set albumImageURL on this
            # branch, which broke toSQLInsert() with an AttributeError.
            self.albumImageURL = ""
        artists = json['artists']
        if artists:
            self.artistId = artists[0]['id']
            self.artistName = artists[0]['name']
        else:
            self.artistId = ""
            self.artistName = ""

    def toSQLInsert(self, extra_fields=None):
        """Return (columns, values) suitable for a parameterized INSERT.

        Args:
            extra_fields: optional dict of additional column -> value
                pairs, appended after the standard track columns.

        Returns:
            (columns, values): ``columns`` is the parenthesized column
            list string; ``values`` is a tuple of the matching values.
        """
        # Fixed mutable-default-argument anti-pattern (was ``={}``).
        if extra_fields is None:
            extra_fields = {}
        keys = ",".join(extra_fields.keys())
        columns = ("(ID,href,name,playbackURL,popularity,albumName,"
                   "albumImageURL,artistID,artistName," + keys + ")")
        values = (self.id, self.href, self.name, str(self.playbackURL),
                  str(self.popularity), self.albumName, self.albumImageURL,
                  self.artistId, self.artistName)
        for field in extra_fields:
            values = values + (extra_fields[field],)
        return columns, values
| {
"content_hash": "79c251c08e968633312b896ecb45413e",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 360,
"avg_line_length": 35.8,
"alnum_prop": 0.604050279329609,
"repo_name": "fbuitron/FBMusic_ML_be",
"id": "544658ad07f7dd4fcf5e4175bea9d8bacddf0dd2",
"size": "1432",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "BATCH/Model/Track.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "616"
},
{
"name": "HTML",
"bytes": "7204"
},
{
"name": "JavaScript",
"bytes": "5934"
},
{
"name": "Python",
"bytes": "48044"
}
],
"symlink_target": ""
} |
from __future__ import division, unicode_literals
"""
This module define the various drones used to assimilate data.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 18, 2012"
import abc
import os
import re
import glob
import logging
import fnmatch
import json
import six
from six.moves import zip
from monty.io import zopen
from pymatgen.io.vasp.inputs import Incar, Potcar, Poscar
from pymatgen.io.vasp.outputs import Vasprun, Oszicar, Dynmat
from pymatgen.io.gaussian import GaussianOutput
from pymatgen.entries.computed_entries import ComputedEntry, \
ComputedStructureEntry
from monty.json import MSONable
logger = logging.getLogger(__name__)
class AbstractDrone(six.with_metaclass(abc.ABCMeta, MSONable)):
    """Base class defining the drone interface.

    Because of the quirky nature of Python's multiprocessing, every
    intermediate data representation must be reducible to python
    primitives, so all objects a drone works with must be MSONable.
    Drones themselves implement the standard MSONable as_dict() /
    from_dict API.
    """

    @abc.abstractmethod
    def assimilate(self, path):
        """Assimilate data in a directory path into a pymatgen object.

        The returned object must support as_dict() so that it can be
        shuttled across process boundaries.

        Args:
            path: directory path

        Returns:
            An assimilated object.
        """
        return

    @abc.abstractmethod
    def get_valid_paths(self, path):
        """Filter one os.walk() tuple down to the paths worth assimilating.

        Depending on the drone, the returned entries may be directories
        (e.g. VASP runs containing vasprun.xml files) or individual file
        paths (e.g. POSCARs to convert to cifs).

        Args:
            path: input path as a tuple generated from os.walk, i.e.,
                (parent, subdirs, files).

        Returns:
            List of valid dir/file paths for assimilation.
        """
        return
class VaspToComputedEntryDrone(AbstractDrone):
    """
    VaspToEntryDrone assimilates directories containing vasp output to
    ComputedEntry/ComputedStructureEntry objects. There are some restrictions
    on the valid directory structures:

    1. There can be only one vasp run in each directory.
    2. Directories designated "relax1", "relax2" are considered to be 2 parts
       of an aflow style run, and only "relax2" is parsed.
    3. The drone parses only the vasprun.xml file.

    Args:
        inc_structure (bool): Set to True if you want
            ComputedStructureEntries to be returned instead of
            ComputedEntries.
        parameters (list): Input parameters to include. It has to be one of
            the properties supported by the Vasprun object. See
            :class:`pymatgen.io.vasp.Vasprun`. If parameters is None,
            a default set of parameters that are necessary for typical
            post-processing will be set.
        data (list): Output data to include. Has to be one of the properties
            supported by the Vasprun object.
    """

    def __init__(self, inc_structure=False, parameters=None, data=None):
        self._inc_structure = inc_structure
        # Parameters needed for typical post-processing are always parsed;
        # caller-supplied ones are added on top.
        self._parameters = {"is_hubbard", "hubbards", "potcar_spec",
                            "potcar_symbols", "run_type"}
        if parameters:
            self._parameters.update(parameters)
        self._data = data if data else []

    def assimilate(self, path):
        # Parse one VASP run directory into an entry; returns None on error.
        files = os.listdir(path)
        if "relax1" in files and "relax2" in files:
            # Aflow-style double relaxation: only the final run is parsed.
            filepath = glob.glob(os.path.join(path, "relax2",
                                              "vasprun.xml*"))[0]
        else:
            vasprun_files = glob.glob(os.path.join(path, "vasprun.xml*"))
            filepath = None
            if len(vasprun_files) == 1:
                filepath = vasprun_files[0]
            elif len(vasprun_files) > 1:
                """
                This is a bit confusing, since there maybe be multi-steps. By
                default, assimilate will try to find a file simply named
                vasprun.xml, vasprun.xml.bz2, or vasprun.xml.gz. Failing which
                it will try to get a relax2 from an aflow style run if
                possible. Or else, a randomly chosen file containing
                vasprun.xml is chosen.
                """
                for fname in vasprun_files:
                    # Exact canonical names win outright.
                    if os.path.basename(fname) in ["vasprun.xml",
                                                   "vasprun.xml.gz",
                                                   "vasprun.xml.bz2"]:
                        filepath = fname
                        break
                    # Next preference: the relax2 step of a double relaxation.
                    if re.search("relax2", fname):
                        filepath = fname
                        break
                    # Fallback: remember the last candidate seen.
                    filepath = fname
        try:
            vasprun = Vasprun(filepath)
        except Exception as ex:
            # Best-effort: log and skip unreadable/corrupt runs.
            logger.debug("error in {}: {}".format(filepath, ex))
            return None
        entry = vasprun.get_computed_entry(self._inc_structure,
                                           parameters=self._parameters,
                                           data=self._data)
        # Attach transformation provenance recorded alongside the run, if any.
        entry.parameters["history"] = _get_transformation_history(path)
        return entry

    def get_valid_paths(self, path):
        # path is one (parent, subdirs, files) tuple from os.walk.
        (parent, subdirs, files) = path
        if "relax1" in subdirs and "relax2" in subdirs:
            # Aflow-style run: the parent dir itself is the unit of work.
            return [parent]
        # Otherwise accept dirs with a vasprun.xml, or a POSCAR+OSZICAR pair,
        # but never descend into the relax1/relax2 subdirs themselves.
        if (not parent.endswith("/relax1")) and \
                (not parent.endswith("/relax2")) and (
                len(glob.glob(os.path.join(parent, "vasprun.xml*"))) > 0 or (
                    len(glob.glob(os.path.join(parent, "POSCAR*"))) > 0 and
                    len(glob.glob(os.path.join(parent, "OSZICAR*"))) > 0)
                ):
            return [parent]
        return []

    def __str__(self):
        return " VaspToComputedEntryDrone"

    def as_dict(self):
        # Serialize init args for MSONable round-tripping.
        return {"init_args": {"inc_structure": self._inc_structure,
                              "parameters": self._parameters,
                              "data": self._data},
                "version": __version__,
                "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}

    @classmethod
    def from_dict(cls, d):
        # Rebuild a drone from its as_dict() representation.
        return cls(**d["init_args"])
class SimpleVaspToComputedEntryDrone(VaspToComputedEntryDrone):
    """
    A simpler VaspToComputedEntryDrone. Instead of parsing vasprun.xml, it
    parses only the INCAR, POTCAR, OSZICAR and KPOINTS files, which are much
    smaller and faster to parse. However, much fewer properties are available
    compared to the standard VaspToComputedEntryDrone.

    Args:
        inc_structure (bool): Set to True if you want
            ComputedStructureEntries to be returned instead of
            ComputedEntries. Structure will be parsed from the CONTCAR.
    """

    def __init__(self, inc_structure=False):
        self._inc_structure = inc_structure
        self._parameters = {"is_hubbard", "hubbards", "potcar_spec",
                            "run_type"}

    def assimilate(self, path):
        # Build an entry from the small input/output files only; any parse
        # failure is logged and maps to a None return.
        files = os.listdir(path)
        try:
            files_to_parse = {}
            if "relax1" in files and "relax2" in files:
                # Aflow-style: inputs come from relax1, outputs from relax2.
                for filename in ("INCAR", "POTCAR", "POSCAR"):
                    search_str = os.path.join(path, "relax1", filename + "*")
                    files_to_parse[filename] = glob.glob(search_str)[0]
                for filename in ("CONTCAR", "OSZICAR"):
                    search_str = os.path.join(path, "relax2", filename + "*")
                    files_to_parse[filename] = glob.glob(search_str)[-1]
            else:
                for filename in (
                    "INCAR", "POTCAR", "CONTCAR", "OSZICAR", "POSCAR", "DYNMAT"
                ):
                    files = glob.glob(os.path.join(path, filename + "*"))
                    if len(files) < 1:
                        continue
                    if len(files) == 1 or filename == "INCAR" or \
                            filename == "POTCAR" or filename == "DYNMAT":
                        files_to_parse[filename] = files[-1]\
                            if filename == "POTCAR" else files[0]
                    elif len(files) > 1:
                        """
                        This is a bit confusing, since there maybe be
                        multiple steps. By default, assimilate will try to find
                        a file simply named filename, filename.bz2, or
                        filename.gz. Failing which it will try to get a relax2
                        from a custodian double relaxation style run if
                        possible. Or else, a random file is chosen.
                        """
                        for fname in files:
                            # NOTE(review): fnmatch uses shell-glob syntax, so
                            # "(", "|" and ")" are literal characters here —
                            # this regex-style pattern likely never matches
                            # e.g. "INCAR.gz"; probably intended
                            # ``filename + "*"``. Confirm before relying on it.
                            if fnmatch.fnmatch(os.path.basename(fname),
                                               "{}(\.gz|\.bz2)*"
                                               .format(filename)):
                                files_to_parse[filename] = fname
                                break
                            # NOTE(review): ``fname`` is a full glob path, so
                            # equality with "POSCAR" combined with a relax1
                            # search on the same string can never hold —
                            # probably meant ``filename``; confirm.
                            if fname == "POSCAR" and \
                                    re.search("relax1", fname):
                                files_to_parse[filename] = fname
                                break
                            # NOTE(review): same full-path-vs-bare-name
                            # mismatch as above — probably meant ``filename``.
                            if (fname in ("CONTCAR", "OSZICAR") and
                                    re.search("relax2", fname)):
                                files_to_parse[filename] = fname
                                break
                            # Fallback: remember the last candidate seen.
                            files_to_parse[filename] = fname

            # Parse whichever files were located; all are optional.
            poscar, contcar, incar, potcar, oszicar, dynmat = [None]*6
            if 'POSCAR' in files_to_parse:
                poscar = Poscar.from_file(files_to_parse["POSCAR"])
            if 'CONTCAR' in files_to_parse:
                contcar = Poscar.from_file(files_to_parse["CONTCAR"])
            if 'INCAR' in files_to_parse:
                incar = Incar.from_file(files_to_parse["INCAR"])
            if 'POTCAR' in files_to_parse:
                potcar = Potcar.from_file(files_to_parse["POTCAR"])
            if 'OSZICAR' in files_to_parse:
                oszicar = Oszicar(files_to_parse["OSZICAR"])
            if 'DYNMAT' in files_to_parse:
                dynmat = Dynmat(files_to_parse["DYNMAT"])

            # Derive Hubbard U parameters from INCAR's LDAUU, mapped onto the
            # POSCAR site symbols.
            param = {"hubbards":{}}
            if poscar is not None and incar is not None and "LDAUU" in incar:
                param["hubbards"] = dict(zip(poscar.site_symbols,
                                             incar["LDAUU"]))
            param["is_hubbard"] = (
                incar.get("LDAU", False) and sum(param["hubbards"].values()) > 0
            ) if incar is not None else False
            param["run_type"] = None
            if incar is not None:
                param["run_type"] = "GGA+U" if param["is_hubbard"] else "GGA"
            param["history"] = _get_transformation_history(path)
            param["potcar_spec"] = potcar.spec if potcar is not None else None
            # Sentinel energy when no OSZICAR was found.
            energy = oszicar.final_energy if oszicar is not None else 1e10
            # Prefer the relaxed structure (CONTCAR) over the input (POSCAR).
            structure = contcar.structure if contcar is not None\
                else poscar.structure
            initial_vol = poscar.structure.volume if poscar is not None else \
                None
            final_vol = contcar.structure.volume if contcar is not None else \
                None
            delta_volume = None
            if initial_vol is not None and final_vol is not None:
                # Fractional volume change over the relaxation.
                delta_volume = (final_vol / initial_vol - 1)
            data = {"filename": path, "delta_volume": delta_volume}
            if dynmat is not None:
                data['phonon_frequencies'] = dynmat.get_phonon_frequencies()
            if self._inc_structure:
                entry = ComputedStructureEntry(
                    structure, energy, parameters=param, data=data
                )
            else:
                entry = ComputedEntry(
                    structure.composition, energy, parameters=param, data=data
                )
            return entry

        except Exception as ex:
            # Best-effort: log and skip directories that fail to parse.
            logger.debug("error in {}: {}".format(path, ex))
            return None

    def __str__(self):
        return "SimpleVaspToComputedEntryDrone"

    def as_dict(self):
        # Serialize init args for MSONable round-tripping.
        return {"init_args": {"inc_structure": self._inc_structure},
                "version": __version__, "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}

    @classmethod
    def from_dict(cls, d):
        # Rebuild a drone from its as_dict() representation.
        return cls(**d["init_args"])
class GaussianToComputedEntryDrone(AbstractDrone):
    """Assimilates directories of Gaussian output into
    ComputedEntry/ComputedStructureEntry objects.

    Output files are recognized by extension (default: ".log").

    Args:
        inc_structure (bool): If True, ComputedStructureEntries are
            produced instead of ComputedEntries.
        parameters (list): Extra GaussianOutput attributes to record as
            entry parameters. Must be python primitives (lists, dicts of
            strings and integers). Defaults are used when None.
        data (list): Extra GaussianOutput attributes to record as entry
            data, same primitive-type restriction. Defaults are used
            when None.
        file_extensions (list): Extensions treated as Gaussian output
            files. Defaults to just the typical "log" extension.

    .. note::

        Like the GaussianOutput class, this is still in early beta.
    """

    def __init__(self, inc_structure=False, parameters=None, data=None,
                 file_extensions=(".log",)):
        self._inc_structure = inc_structure
        # Always-recorded parameters; caller-supplied ones are added on top.
        self._parameters = {"functional", "basis_set", "charge", "spin_mult",
                            "route"}
        self._parameters.update(parameters or [])
        # Always-recorded data keys, plus any requested extras.
        self._data = {"stationary_type", "properly_terminated"}
        self._data.update(data or [])
        self._file_extensions = file_extensions

    def assimilate(self, path):
        """Parse a single Gaussian output file into an entry, or None."""
        try:
            gau_run = GaussianOutput(path)
        except Exception as ex:
            logger.debug("error in {}: {}".format(path, ex))
            return None
        param = {name: getattr(gau_run, name) for name in self._parameters}
        data = {name: getattr(gau_run, name) for name in self._data}
        if self._inc_structure:
            return ComputedStructureEntry(gau_run.final_structure,
                                          gau_run.final_energy,
                                          parameters=param, data=data)
        return ComputedEntry(gau_run.final_structure.composition,
                             gau_run.final_energy, parameters=param,
                             data=data)

    def get_valid_paths(self, path):
        """From one os.walk tuple, return files with a matching extension."""
        parent, _, files = path
        return [os.path.join(parent, fname) for fname in files
                if os.path.splitext(fname)[1] in self._file_extensions]

    def __str__(self):
        return " GaussianToComputedEntryDrone"

    def as_dict(self):
        """Serialize init args for MSONable round-tripping."""
        return {"init_args": {"inc_structure": self._inc_structure,
                              "parameters": self._parameters,
                              "data": self._data,
                              "file_extensions": self._file_extensions},
                "version": __version__, "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}

    @classmethod
    def from_dict(cls, d):
        """Rebuild a drone from its as_dict() representation."""
        return cls(**d["init_args"])
def _get_transformation_history(path):
    """Return the transformation history recorded alongside a run.

    Looks for a ``transformations.json*`` file in ``path`` and returns
    its "history" entry. Returns None when the file is absent, unreadable
    or malformed — the lookup is deliberately best-effort.
    """
    trans_json = glob.glob(os.path.join(path, "transformations.json*"))
    if not trans_json:
        return None
    try:
        with zopen(trans_json[0]) as f:
            return json.load(f)["history"]
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; parse/IO problems still degrade to None
        # to preserve the original best-effort contract.
        return None
| {
"content_hash": "15dafd9b35b3dfd0e1530d101b9c6d9b",
"timestamp": "",
"source": "github",
"line_count": 417,
"max_line_length": 80,
"avg_line_length": 41.016786570743406,
"alnum_prop": 0.5481758652946679,
"repo_name": "ndardenne/pymatgen",
"id": "5b8f4ba742d33dc0f7edc0d38d319a24cfdd10a6",
"size": "17214",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pymatgen/apps/borg/hive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "5203893"
},
{
"name": "Roff",
"bytes": "868"
}
],
"symlink_target": ""
} |
from tornado.web import RequestHandler
from swampdragon.default_settings import SwampDragonSettings
from django.conf import settings as django_settings
def get_host():
    """Return settings.DRAGON_URL with any single trailing slash removed."""
    host = django_settings.DRAGON_URL
    return host[:-1] if host.endswith('/') else host
class SettingsHandler(RequestHandler):
    """Serves SwampDragon's client-side settings as a JavaScript snippet."""

    def set_default_headers(self):
        # The body is meant to be loaded via a <script> tag.
        self.set_header("Content-Type", "application/javascript")

    def get(self, *args, **kwargs):
        # Expose the settings dict and host URL as window globals.
        payload = '''window.swampdragon_settings = {settings};
window.swampdragon_host = "{host}";
'''.format(settings=SwampDragonSettings().to_dict(), host=get_host())
        self.write(payload)
| {
"content_hash": "a1c7137e43cddc563490c0031cdd5854",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 65,
"avg_line_length": 28.75,
"alnum_prop": 0.6565217391304348,
"repo_name": "sahlinet/swampdragon",
"id": "d1216efd2b4853db5f9be2410d02ea2af59567a3",
"size": "690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "swampdragon/settings_provider.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4233"
},
{
"name": "HTML",
"bytes": "8346"
},
{
"name": "JavaScript",
"bytes": "220961"
},
{
"name": "Python",
"bytes": "175197"
}
],
"symlink_target": ""
} |
"""remove_freshmen_project
Revision ID: 4ac8ff82410a
Revises: 704962eabf9c
Create Date: 2019-09-06 11:21:28.515040
"""
# revision identifiers, used by Alembic.
revision = '4ac8ff82410a'
down_revision = '704962eabf9c'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Relax freshman_eval_data.freshman_project to allow NULL."""
    # ### commands auto generated by Alembic - please adjust! ###
    project_enum = postgresql.ENUM('Pending', 'Passed', 'Failed',
                                   name='freshman_project_enum')
    op.alter_column('freshman_eval_data', 'freshman_project',
                    existing_type=project_enum,
                    nullable=True)
    # ### end Alembic commands ###
def downgrade():
    """Restore the NOT NULL constraint on freshman_project."""
    # ### commands auto generated by Alembic - please adjust! ###
    project_enum = postgresql.ENUM('Pending', 'Passed', 'Failed',
                                   name='freshman_project_enum')
    op.alter_column('freshman_eval_data', 'freshman_project',
                    existing_type=project_enum,
                    nullable=False)
    # ### end Alembic commands ###
| {
"content_hash": "1620d39e6224dfe58343cabe82e85164",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 106,
"avg_line_length": 31.566666666666666,
"alnum_prop": 0.6715945089757128,
"repo_name": "ComputerScienceHouse/conditional",
"id": "bc2352acbb8970225443501f20225f5bf59f666d",
"size": "947",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "migrations/versions/4ac8ff82410a_remove_freshmen_project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "820"
},
{
"name": "HTML",
"bytes": "133723"
},
{
"name": "JavaScript",
"bytes": "92318"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "151242"
},
{
"name": "SCSS",
"bytes": "40554"
}
],
"symlink_target": ""
} |
"""Nodes add console enabled
Revision ID: 3cb628139ea4
Revises: 21b331f883ef
Create Date: 2014-02-26 11:24:11.318023
"""
# revision identifiers, used by Alembic.
revision = '3cb628139ea4'
down_revision = '21b331f883ef'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the console_enabled boolean column to the nodes table."""
    console_column = sa.Column('console_enabled', sa.Boolean)
    op.add_column('nodes', console_column)
def downgrade():
    """Remove the console_enabled column from the nodes table."""
    op.drop_column('nodes', 'console_enabled')
| {
"content_hash": "cf0ccd13a407db6ed3a11c7269601cf6",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 48,
"avg_line_length": 18.708333333333332,
"alnum_prop": 0.6859688195991092,
"repo_name": "rdo-management/ironic",
"id": "7fe1ca47588011045e3878dc8c0ee342e479da82",
"size": "1022",
"binary": false,
"copies": "5",
"ref": "refs/heads/mgt-master",
"path": "ironic/db/sqlalchemy/alembic/versions/3cb628139ea4_nodes_add_console_enabled.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "3067721"
}
],
"symlink_target": ""
} |
import argparse
import glob
import os
import os.path
import shutil
from subprocess import call, Popen
import tempfile
CODESIGN_BIN = '/usr/bin/codesign'
PLIST_BUDDY_BIN = '/usr/libexec/PlistBuddy'
SECURITY_BIN = '/usr/bin/security'
ZIP_BIN = '/usr/bin/zip'
UNZIP_BIN = '/usr/bin/unzip'
class ReceivedApp(object):
    """An .app bundle received for re-signing."""

    def __init__(self, path):
        self.path = path

    def unpack_to_dir(self, unpack_dir):
        """Copy the bundle into unpack_dir and return it wrapped as an App."""
        bundle_name = os.path.basename(self.path)
        destination = os.path.join(unpack_dir, bundle_name)
        shutil.copytree(self.path, destination)
        return App(destination)
class ReceivedIpaApp(ReceivedApp):
    """A received .ipa archive; unpacking it means unzipping."""

    def unpack_to_dir(self, target_dir):
        """Unzip the archive into target_dir and return it as an IpaApp."""
        call([UNZIP_BIN, "-qu", self.path, "-d", target_dir])
        return IpaApp(target_dir)
class App(object):
    """A staged .app bundle being re-provisioned and re-signed."""

    def __init__(self, path):
        self.path = path
        self.entitlements_path = os.path.join(self.path,
                                              'Entitlements.plist')
        self.app_dir = self.get_app_dir()
        self.provision_path = os.path.join(self.app_dir,
                                           'embedded.mobileprovision')

    def get_app_dir(self):
        """Directory containing the app payload; for a bare .app it is path."""
        return self.path

    def provision(self, provision_path):
        """Replace the embedded provisioning profile with the given one."""
        print("provision_path: {0}".format(provision_path))
        shutil.copyfile(provision_path, self.provision_path)

    def create_entitlements(self):
        """Extract the Entitlements dict from the embedded profile.

        Decodes the CMS-wrapped profile to a temp file (PlistBuddy cannot
        read from a pipe), then writes the :Entitlements subtree to
        Entitlements.plist inside the bundle.
        """
        decoded_fd, decoded_provision_path = tempfile.mkstemp()
        # Bug fix: mkstemp returns an open OS-level fd; the original
        # immediately rebound the variable to open(...), leaking the fd.
        os.close(decoded_fd)
        try:
            with open(decoded_provision_path, 'w') as decoded_provision_fh:
                decode_args = [SECURITY_BIN, 'cms', '-D', '-i',
                               self.provision_path]
                process = Popen(decode_args, stdout=decoded_provision_fh)
                # if we don't wait for this to complete, it's likely
                # the next part will see a zero-length file
                process.wait()
            get_entitlements_cmd = [
                PLIST_BUDDY_BIN,
                '-x',
                '-c',
                'print :Entitlements ',
                decoded_provision_path]
            with open(self.entitlements_path, 'w') as entitlements_fh:
                process2 = Popen(get_entitlements_cmd,
                                 stdout=entitlements_fh)
                process2.wait()
        finally:
            # Bug fix: the original only closed the handle ("should destroy
            # the file") but mkstemp files are not auto-deleted — remove it.
            if os.path.exists(decoded_provision_path):
                os.remove(decoded_provision_path)

    def codesign(self, certificate, path, extra_args=None):
        """Force-sign `path` with `certificate`, passing any extra args."""
        # Fixed mutable-default-argument anti-pattern (was ``=[]``).
        extra = list(extra_args) if extra_args else []
        call([CODESIGN_BIN, '-f', '-s', certificate] + extra + [path])

    def sign(self, certificate):
        """Sign bundled dylibs first, then the app itself with entitlements."""
        frameworks_path = os.path.join(self.app_dir, 'Frameworks')
        if os.path.exists(frameworks_path):
            for dylib in glob.glob(os.path.join(frameworks_path, '*.dylib')):
                self.codesign(certificate, dylib)
        self.codesign(certificate,
                      self.app_dir,
                      ['--entitlements', self.entitlements_path])

    def package(self, output_path):
        """Move the signed bundle to output_path (forcing a .app suffix)."""
        if not output_path.endswith('.app'):
            output_path = output_path + '.app'
        os.rename(self.app_dir, output_path)
        return output_path
class IpaApp(App):
    """An App laid out as an ipa archive (Payload/<name>.app)."""

    def _get_payload_dir(self):
        return os.path.join(self.path, "Payload")

    def get_app_dir(self):
        """Locate the single .app under Payload/; raise unless exactly one."""
        glob_path = os.path.join(self._get_payload_dir(), '*.app')
        matches = glob.glob(glob_path)
        count = len(matches)
        if count != 1:
            raise Exception(
                "Expected 1 app in {0}, found {1}".format(glob_path, count))
        return matches[0]

    def package(self, output_path):
        """Zip the Payload dir into an .ipa at output_path."""
        if not output_path.endswith('.ipa'):
            output_path = output_path + '.ipa'
        temp = "out.ipa"
        # need to chdir and use relative paths, because zip is stupid
        old_cwd = os.getcwd()
        os.chdir(self.path)
        relative_payload = os.path.relpath(self._get_payload_dir(), self.path)
        call([ZIP_BIN, "-qr", temp, relative_payload])
        os.rename(temp, output_path)
        os.chdir(old_cwd)
        return output_path
def absolute_path_argument(path):
    """argparse type: normalize a path argument to an absolute path."""
    return os.path.abspath(path)
def exists_absolute_path_argument(path):
    """argparse type: require the path to exist, then absolutize it."""
    if not os.path.exists(path):
        raise argparse.ArgumentTypeError("%s does not exist!" % path)
    return os.path.abspath(path)
def app_argument(path):
    """argparse type: wrap an existing .app/.ipa path in a Received* object."""
    path = exists_absolute_path_argument(path)
    _, extension = os.path.splitext(path)
    if extension == '.app':
        return ReceivedApp(path)
    if extension == '.ipa':
        return ReceivedIpaApp(path)
    raise argparse.ArgumentTypeError(
        "{0} doesn't seem to be an .app or .ipa".format(path))
def parse_args():
    """Build and evaluate the command-line interface for re-signing."""
    arg_parser = argparse.ArgumentParser(
        description='Resign an iOS application with a new identity '
                    'and provisioning profile.')
    # Required inputs: the new profile and the signing identity.
    arg_parser.add_argument(
        '-p', '--provisioning-profile',
        dest='provisioning_profile',
        metavar='<your.mobileprovision>',
        required=True,
        type=exists_absolute_path_argument,
        help='Path to provisioning profile')
    arg_parser.add_argument(
        '-c', '--certificate',
        dest='certificate',
        metavar='<certificate>',
        required=True,
        help='Identifier for the certificate in your keychain. '
             'See `security find-identity` for a list, or '
             '`man codesign` for valid ways to specify it.')
    # Optional working/output locations, defaulting under the cwd.
    arg_parser.add_argument(
        '-s', '--staging',
        dest='stage_dir',
        metavar='<path>',
        required=False,
        type=absolute_path_argument,
        default=os.path.join(os.getcwd(), 'stage'),
        help='Path to stage directory.')
    arg_parser.add_argument(
        '-o', '--output',
        dest='output_path',
        metavar='<path>',
        required=False,
        type=absolute_path_argument,
        default=os.path.join(os.getcwd(), 'out'),
        help='Path to output file or directory')
    arg_parser.add_argument(
        'app',
        nargs=1,
        metavar='<path>',
        type=app_argument,
        help='Path to application to re-sign, typically a '
             'directory ending in .app or file ending in .ipa.')
    return arg_parser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    # argparse already wrapped the path in a ReceivedApp/ReceivedIpaApp.
    received_app = args.app[0]
    # Start from a clean staging area.
    if os.path.exists(args.stage_dir):
        shutil.rmtree(args.stage_dir)
    os.mkdir(args.stage_dir)
    app = received_app.unpack_to_dir(args.stage_dir)
    # Re-provision, regenerate entitlements, sign, then repackage.
    app.provision(args.provisioning_profile)
    app.create_entitlements()
    app.sign(args.certificate)
    output_path = app.package(args.output_path)
    # Clean up the staging area on success.
    if os.path.exists(args.stage_dir):
        shutil.rmtree(args.stage_dir)
    print("Re-signed package: {0}".format(output_path))
| {
"content_hash": "737104276c3705be9f4daa8903805c00",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 77,
"avg_line_length": 32.94930875576037,
"alnum_prop": 0.5825174825174825,
"repo_name": "CiNC0/Cartier",
"id": "6defe0e3c61cf351862966b6af97050aa63661a0",
"size": "7209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cartier-python-resign-linux/apple/provisions.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "891"
},
{
"name": "Java",
"bytes": "6775528"
},
{
"name": "Makefile",
"bytes": "13867"
},
{
"name": "Python",
"bytes": "251197"
},
{
"name": "Ruby",
"bytes": "226"
},
{
"name": "Shell",
"bytes": "117358"
},
{
"name": "Swift",
"bytes": "18348"
}
],
"symlink_target": ""
} |
from room import Room
# Assemble the "east brook street" room definition.
r = Room()
r.roomname = 'eastbrook'
# Exit label -> destination room name.
r.exits = {
    'intersection': 'intersection',
    'house 6': 'house6',
    'house 12': 'house12',
}
r.roomdesc = """
east brook street houses number 12 and 6 are targets
"""
# Things the player can `look` at, with their descriptions.
r.looktargets = {
    'house 12': 'its red\n\n',
    'house 6': 'its blue\n\n',
}
| {
"content_hash": "989bb0d2d5b8b862b93411e95ab375e0",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 85,
"avg_line_length": 30.8,
"alnum_prop": 0.6201298701298701,
"repo_name": "elstupido/rpg",
"id": "3173dfe07f7411e4cfd89b5bd7d14149058070ce",
"size": "308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rooms/first op/eastbrook.room.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56488"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, print_function
import frappe, os, json
from frappe.modules import get_module_path, scrub_dt_dn
from frappe.utils import get_datetime_str
def import_files(module, dt=None, dn=None, force=False, pre_process=None, reset_permissions=False):
    """Sync one doc, or a batch of docs, from their on-disk json files.

    Args:
        module: a module name, or a list of (module, dt, dn) triples for
            batch mode.
        dt, dn: doctype and docname (ignored in batch mode).
        force: re-import even if timestamps match.
        pre_process: optional callable applied to each doc before insert.
        reset_permissions: overwrite ignored child doctypes as well.

    Returns:
        The result of import_file, or a list of results in batch mode.
    """
    # isinstance (rather than ``type(module) is list``) so list
    # subclasses are also treated as batches.
    if isinstance(module, list):
        return [
            import_file(m[0], m[1], m[2], force=force,
                        pre_process=pre_process,
                        reset_permissions=reset_permissions)
            for m in module
        ]
    return import_file(module, dt, dn, force=force, pre_process=pre_process,
                       reset_permissions=reset_permissions)
def import_file(module, dt, dn, force=False, pre_process=None, reset_permissions=False):
    """Sync a file from txt if modifed, return false if not updated"""
    file_path = get_file_path(module, dt, dn)
    return import_file_by_path(file_path, force, pre_process=pre_process,
                               reset_permissions=reset_permissions)
def get_file_path(module, dt, dn):
    """Return the on-disk json path for a doctype record inside a module."""
    dt, dn = scrub_dt_dn(dt, dn)
    return os.path.join(get_module_path(module), dt, dn, dn + ".json")
def import_file_by_path(path, force=False, data_import=False, pre_process=None, ignore_version=None,
        reset_permissions=False, for_sync=False):
    """Import the doc (or list of docs) stored in the json file at `path`.

    Skips a doc whose file timestamp matches the database, unless `force`.
    Returns True if anything was imported, False on a timestamp match,
    and None when the file is missing.
    """
    try:
        docs = read_doc_from_file(path)
    except IOError:
        # Missing file is tolerated — just report and bail out.
        print (path + " missing")
        return
    if docs:
        if not isinstance(docs, list):
            docs = [docs]
        for doc in docs:
            if not force:
                # check if timestamps match
                db_modified = frappe.db.get_value(doc['doctype'], doc['name'], 'modified')
                if db_modified and doc.get('modified')==get_datetime_str(db_modified):
                    return False
            original_modified = doc.get("modified")
            frappe.flags.in_import = True
            import_doc(doc, force=force, data_import=data_import, pre_process=pre_process,
                ignore_version=ignore_version, reset_permissions=reset_permissions)
            frappe.flags.in_import = False
            if original_modified:
                # since there is a new timestamp on the file, update timestamp in
                # the database so the next sync doesn't re-import this doc.
                if doc["doctype"] == doc["name"] and doc["name"]!="DocType":
                    # Single doctypes store fields as rows in tabSingles.
                    frappe.db.sql("""update tabSingles set value=%s where field="modified" and doctype=%s""",
                        (original_modified, doc["name"]))
                else:
                    # NOTE(review): the table name is interpolated with %
                    # before parameterization — safe only while doctype
                    # comes from trusted files; confirm callers.
                    frappe.db.sql("update `tab%s` set modified=%s where name=%s" % \
                        (doc['doctype'], '%s', '%s'),
                        (original_modified, doc['name']))
        return True
def read_doc_from_file(path):
    """Load and return the JSON document stored at `path`.

    Raises IOError when the file does not exist, and re-raises
    ValueError (after printing the offending path) for invalid JSON.
    """
    if not os.path.exists(path):
        raise IOError('%s missing' % path)
    with open(path, 'r') as f:
        contents = f.read()
    try:
        return json.loads(contents)
    except ValueError:
        print("bad json: {0}".format(path))
        raise
# Per-doctype fields whose database value must survive a re-import:
# these flags are toggled by users at runtime, so on import the incoming
# file's value is discarded and the existing DB value is kept
# (see import_doc).
ignore_values = {
    "Report": ["disabled"],
    "Print Format": ["disabled"],
    "Email Alert": ["enabled"],
    "Print Style": ["disabled"]
}

# Child doctypes that are not overwritten on re-import unless
# reset_permissions is passed (see import_doc). Currently holds a single
# empty placeholder entry.
ignore_doctypes = [""]
def import_doc(docdict, force=False, data_import=False, pre_process=None,
		ignore_version=None, reset_permissions=False):
	"""Insert (or replace) a single document described by *docdict*.

	Any existing document with the same doctype/name is deleted first,
	after copying over field values listed in ignore_values and optionally
	preserving the child rows of doctypes listed in ignore_doctypes.

	Args:
		docdict: Dict representation of the document (as read from JSON).
		force: Not used in this function; kept for signature compatibility.
		data_import: When True, keep validate/permission/mandatory checks on.
		pre_process: Optional callable invoked with the doc before insert.
		ignore_version: Set on the doc's ignore_version flag.
		reset_permissions: When True, do NOT preserve ignored child doctypes.
	"""
	frappe.flags.in_import = True
	docdict["__islocal"] = 1
	doc = frappe.get_doc(docdict)
	doc.flags.ignore_version = ignore_version
	if pre_process:
		pre_process(doc)
	ignore = []
	if frappe.db.exists(doc.doctype, doc.name):
		old_doc = frappe.get_doc(doc.doctype, doc.name)
		if doc.doctype in ignore_values:
			# update ignore values: carry over DB values for protected fields
			for key in ignore_values.get(doc.doctype) or []:
				doc.set(key, old_doc.get(key))
		# update ignored docs into new doc: drop child rows from the file so
		# the existing database rows survive the delete below
		for df in doc.meta.get_table_fields():
			if df.options in ignore_doctypes and not reset_permissions:
				doc.set(df.fieldname, [])
				ignore.append(df.options)
		# delete old
		frappe.delete_doc(doc.doctype, doc.name, force=1, ignore_doctypes=ignore, for_reload=True)
	doc.flags.ignore_children_type = ignore
	doc.flags.ignore_links = True
	if not data_import:
		doc.flags.ignore_validate = True
		doc.flags.ignore_permissions = True
		doc.flags.ignore_mandatory = True
	doc.insert()
	frappe.flags.in_import = False
| {
"content_hash": "71e63baf71d76e0e3c67aab42eeedddf",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 101,
"avg_line_length": 30.07751937984496,
"alnum_prop": 0.6904639175257732,
"repo_name": "bohlian/frappe",
"id": "ade3614c8ed1b09646847cd2c2a0f27fc5c62dfc",
"size": "3981",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "frappe/modules/import_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "406369"
},
{
"name": "HTML",
"bytes": "213728"
},
{
"name": "JavaScript",
"bytes": "1741213"
},
{
"name": "Makefile",
"bytes": "29"
},
{
"name": "Python",
"bytes": "1965275"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
} |
"""Pluggable newsletter handling."""
from django import forms
from django.utils.translation import ugettext_lazy as _
from livesettings import config_value
from satchmo_store.accounts.signals import satchmo_registration
from satchmo_store.contact.signals import satchmo_contact_view
from satchmo_utils import load_module
from signals_ahoy.signals import form_initialdata
import logging
import signals
# Module-level logger for newsletter subscription handling.
log = logging.getLogger('newsletter')
def get_newsletter_module():
    """Load and return the configured newsletter backend module.

    Falls back to the no-op backend when the NEWSLETTER/MODULE setting
    cannot be read (raises AttributeError, e.g. before settings exist).
    """
    fallback = 'satchmo_ext.newsletter.ignore'
    try:
        modulename = config_value('NEWSLETTER', 'MODULE')
    except AttributeError:
        modulename = fallback
    return load_module(modulename)
def is_subscribed(contact):
    """Return whether *contact* currently has a newsletter subscription.

    A missing/falsy contact is never subscribed; otherwise the configured
    backend module decides.
    """
    if not contact:
        return False
    return get_newsletter_module().is_subscribed(contact)
def update_subscription(contact, subscribed, attributes=None):
    """Set the newsletter subscription state for *contact*.

    Args:
        contact: The contact whose subscription is being changed.
        subscribed: Desired subscription state (True/False).
        attributes: Optional dict of extra attributes for the backend.
            Defaults to a fresh empty dict per call (was previously a
            shared mutable default argument).

    Returns:
        The backend module's result from update_contact.
    """
    if attributes is None:
        attributes = {}
    current = is_subscribed(contact)
    log.debug("Updating subscription status from %s to %s for %s", current, subscribed, contact)
    result = get_newsletter_module().update_contact(contact, subscribed, attributes=attributes)
    signals.newsletter_subscription_updated.send(contact,
        old_state=current, new_state=subscribed, contact=contact, attributes=attributes)
    return result
def update_subscription_listener(contact=None, subscribed=False, **kwargs):
    """Signal listener: apply a subscription change for *contact*.

    Does nothing when no contact is supplied.
    """
    if not contact:
        return
    update_subscription(contact, subscribed)
def populate_form_initialdata_listener(contact=None, initial=None, **kwargs):
    """Signal listener: seed a form's initial data with newsletter state.

    Sets initial['newsletter'] to the contact's current subscription
    status, or False when there is no contact.

    Args:
        contact: Contact to look up, or None.
        initial: Dict of form initial data to update in place. Defaults to
            a fresh dict per call (previously a shared mutable default {}
            that this function mutated, leaking state across calls).
    """
    if initial is None:
        initial = {}
    if contact:
        current_subscriber = is_subscribed(contact)
    else:
        current_subscriber = False
    initial['newsletter'] = current_subscriber
def view_user_data_listener(contact=None, contact_dict=None, **kwargs):
    """Signal listener: expose newsletter status on the contact view.

    Only shows the newsletter section when a real backend module (not the
    no-op 'ignore' module) is configured.
    """
    module = config_value('NEWSLETTER', 'MODULE')
    show = module not in ('', 'satchmo_ext.newsletter.ignore')
    contact_dict['show_newsletter'] = show
    if show:
        contact_dict['newsletter'] = is_subscribed(contact)
# Wire the listeners into satchmo's signal framework at import time.
satchmo_contact_view.connect(view_user_data_listener, sender=None)
satchmo_registration.connect(update_subscription_listener, sender=None)
# Only the registration form gets its newsletter checkbox pre-populated.
form_initialdata.connect(populate_form_initialdata_listener, sender='RegistrationForm')
| {
"content_hash": "5bdc99c5b0541a552738725c28d22219",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 96,
"avg_line_length": 38.03448275862069,
"alnum_prop": 0.7411604714415231,
"repo_name": "grengojbo/satchmo",
"id": "d0f3281be2391e079e3236e6577d40287d0b4203",
"size": "2206",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "satchmo/apps/satchmo_ext/newsletter/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "73898"
},
{
"name": "Python",
"bytes": "1752948"
}
],
"symlink_target": ""
} |
# Field-type discriminators carried in the "field_type" slot of a query
# clause (see q_match below).
PROCESS_FIELD = 1
SAMPLE_FIELD = 2
PROCESS_ATTR_FIELD = 3
SAMPLE_ATTR_FIELD = 4
PROCESS_FUNC = 5
SAMPLE_FUNC = 6
# Comparison operators accepted in the "operation" slot of a query clause.
OP_EQ = "="
OP_NEQ = "<>"
OP_LT = "<"
OP_LTEQ = "<="
OP_GT = ">"
OP_GTEQ = ">="
def q_and(left, right):
    """Combine two query clauses with a logical AND."""
    clause = {"and": 1}
    clause["left"] = left
    clause["right"] = right
    return clause
def q_or(left, right):
    """Combine two query clauses with a logical OR."""
    clause = {"or": 1}
    clause["left"] = left
    clause["right"] = right
    return clause
def q_sample_has_process(process):
    """Build a clause matching samples produced by *process*."""
    return q_sample_proc('has-process', process)
def q_sample_proc(proc, value):
    """Build a sample function clause; *proc* is used as the operation."""
    return q_match('', SAMPLE_FUNC, value, proc)
def q_sample_match(field, value, operation):
    """Build a clause comparing a sample field against *value*."""
    return q_match(field, SAMPLE_FIELD, value, operation)
def q_sample_attr_match(field, value, operation):
    """Build a clause comparing a sample attribute against *value*."""
    return q_match(field, SAMPLE_ATTR_FIELD, value, operation)
def q_process_match(field, value, operation):
    """Build a clause comparing a process field against *value*."""
    return q_match(field, PROCESS_FIELD, value, operation)
def q_process_attr_match(field, value, operation):
    """Build a clause comparing a process attribute against *value*."""
    return q_match(field, PROCESS_ATTR_FIELD, value, operation)
def q_match(field, field_type, value, operation):
    """Build the basic comparison clause all other q_* helpers wrap."""
    return dict(
        field_name=field,
        field_type=field_type,
        value=value,
        operation=operation,
    )
| {
"content_hash": "139268305a81b57f6b4250a31bba6e2f",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 63,
"avg_line_length": 19.516129032258064,
"alnum_prop": 0.6099173553719008,
"repo_name": "materials-commons/mcapi",
"id": "3571f419ac1f42eeb7ce6f897b2a2e7f90b69afb",
"size": "1210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "materials_commons/api/query.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "96124"
},
{
"name": "Shell",
"bytes": "1656"
}
],
"symlink_target": ""
} |
from indico.modules.events.abstracts.models.persons import AbstractPersonLink
from indico.modules.events.contributions.models.persons import (AuthorType, ContributionPersonLink,
SubContributionPersonLink)
from indico.modules.events.features.util import set_feature_enabled
from indico.modules.events.models.persons import EventPerson, EventPersonLink
def test_unused_event_person(db, dummy_user, dummy_event, create_contribution, create_subcontribution, create_abstract):
    """EventPerson.has_links must reflect every kind of link to the event.

    Exercises event person links, abstract person links, contribution and
    subcontribution person links, including soft-deletion of the linked
    objects.
    """
    person = EventPerson.create_from_user(dummy_user, event=dummy_event)
    assert not person.has_links

    # Linking the person directly to the event counts as a link.
    dummy_event.person_links.append(EventPersonLink(person=person))
    db.session.flush()
    assert person.has_links
    dummy_event.person_links.clear()
    db.session.flush()
    assert not person.has_links

    # Abstract person links count only while the abstract is not deleted.
    set_feature_enabled(dummy_event, 'abstracts', True)
    abstract = create_abstract(dummy_event, 'Dummy abstract', submitter=dummy_user, person_links=[
        AbstractPersonLink(person=person, is_speaker=True, author_type=AuthorType.primary)
    ])
    assert person.has_links
    abstract.is_deleted = True
    assert not person.has_links

    # Contribution person links behave the same way w.r.t. soft deletion.
    contrib = create_contribution(dummy_event, 'Dummy contribution', person_links=[
        ContributionPersonLink(person=person, is_speaker=True)
    ])
    assert person.has_links
    contrib.is_deleted = True
    assert not person.has_links
    db.session.delete(contrib)

    # Subcontribution links are reached through their (non-deleted) parent.
    contrib = create_contribution(dummy_event, 'Dummy contribution', person_links=[])
    assert not person.has_links
    create_subcontribution(contrib, 'Dummy subcontribution', person_links=[
        SubContributionPersonLink(person=person, is_speaker=True)
    ])
    assert person.has_links
    contrib.is_deleted = True
    assert not person.has_links
    db.session.delete(contrib)
    assert not person.has_links
| {
"content_hash": "4363d6a1ff8071aa640c1026d32c9706",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 120,
"avg_line_length": 40.42553191489362,
"alnum_prop": 0.7263157894736842,
"repo_name": "indico/indico",
"id": "9677835d0a7c3355e595b625f042a5b97ce96c08",
"size": "2114",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/modules/events/models/persons_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33289"
},
{
"name": "HTML",
"bytes": "1420471"
},
{
"name": "JavaScript",
"bytes": "2362355"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5550085"
},
{
"name": "SCSS",
"bytes": "486043"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23435"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
import os
import numpy as np
from baseZhang import class_encoder_to_number
from sklearn.ensemble import VotingClassifier
from sklearn.externals import joblib
from preprocessData import getDataXY
# NOTE: Python 2 script (see the `print` statement at the bottom).
# Load the train/test/validation splits and encode string labels as ints,
# caching the fitted encoder on disk so repeated runs reuse it.
trainX, trainY, testX, testY, validX, validY = getDataXY()
encoder_path = 'encoder.pkl'
if not os.path.isfile(encoder_path):
    encoder = class_encoder_to_number(trainY)
    joblib.dump(encoder, 'encoder.pkl')
else:
    encoder = joblib.load(encoder_path)
trainY = encoder.transform(trainY)
testY = encoder.transform(testY)
validY = encoder.transform(validY)
# Load five previously trained classifiers; file names carry each model's
# standalone accuracy.
dt_model_path = 'Models/dt0.58.pkl'
dt_model = joblib.load(dt_model_path)
nb_model_path = 'Models/NB0.59.pkl'
nb_model = joblib.load(nb_model_path)
nc_model_path = 'Models/NC0.57.pkl'
nc_model = joblib.load(nc_model_path)
nnp_model_path = 'Models/NNP0.61.pkl'
nnp_model = joblib.load(nnp_model_path)
sgd_model_path = 'Models/sgd0.54.pkl'
sgd_model = joblib.load(sgd_model_path)
# Combine them with hard (majority) voting.
voting_clf = VotingClassifier(estimators=[
    ('dt', dt_model), ('nb', nb_model),
    ('nc', nc_model), ('nnp', nnp_model), ('sgd', sgd_model)], voting='hard')
#
# encoder_path = 'voting.pkl'
# if not os.path.isfile(encoder_path):
voting_clf = voting_clf.fit(trainX, trainY)
# joblib.dump(encoder, 'voting.pkl')
# else:
#     voting_clf = joblib.load(encoder_path)
# Evaluate the ensemble on the held-out test and validation splits.
voting_test_result = voting_clf.predict(testX)
voting_valid_result = voting_clf.predict(validX)
test_accuracy = np.mean(voting_test_result.ravel() == testY.ravel()) * 100
valid_accuracy = np.mean(voting_valid_result.ravel() == validY.ravel()) * 100
print test_accuracy, valid_accuracy
| {
"content_hash": "ba34cfc3983e97284ccd9f7bc90f85e2",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 77,
"avg_line_length": 29.12727272727273,
"alnum_prop": 0.7209737827715356,
"repo_name": "TheaGao/SklearnModel",
"id": "3b9ada0b9fb4212f3234a94951e2f0d2613e6276",
"size": "1602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Vote_Results.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38251"
}
],
"symlink_target": ""
} |
import unittest
from jnius.reflect import autoclass
class SimpleEnum(unittest.TestCase):
    """Exercise autoclass() access to the org.jnius.SimpleEnum Java enum."""

    def test_enum(self):
        """The enum class itself can be reflected and is truthy."""
        enum_cls = autoclass('org.jnius.SimpleEnum')
        self.assertTrue(enum_cls)

    def test_value(self):
        """Every declared enum constant is reachable and truthy."""
        enum_cls = autoclass('org.jnius.SimpleEnum')
        self.assertTrue(enum_cls.GOOD)
        self.assertTrue(enum_cls.BAD)
        self.assertTrue(enum_cls.UGLY)
| {
"content_hash": "f857401c51d599550ab4c27593128046",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 54,
"avg_line_length": 25.8125,
"alnum_prop": 0.6973365617433414,
"repo_name": "jelford/pyjnius",
"id": "8c1039247be549c822d9d08fc8fcdee6676e51a9",
"size": "413",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/test_enum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "8164"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "139698"
}
],
"symlink_target": ""
} |
import struct
from .message import Message
@Message.register
class Termination(Message):
    """BMP Termination message: the sender is closing the BMP session.

    The payload is a sequence of information TLVs (2-byte type, 2-byte
    length, variable-length value). Type 0 carries a free-form string,
    type 1 a 2-byte reason code (see reason_codict).
    """
    TYPE = Message.TERMINATION
    TYPE_STR = 'termination'
    # Reason code (TLV type 1 payload) -> human-readable explanation.
    reason_codict = {
        0: "Session administratively closed. The session might be re-initiated.",
        1: "Unspecified reason.",
        2: "Out of resources. The router has exhausted resources available for the BMP session.",
        3: "Redundant connection. The router has determined\
            that this connection is redundant with another one.",
        4: "Session permanently administratively closed,\
            will not be re-initiated. Monitoring station should reduce\
            (potentially to 0) the rate at which it attempts\
            reconnection to the monitored router."
    }
    @classmethod
    def unpack(cls, data):
        """Parse the TLV stream in *data* into a Termination message.

        The returned message's value dict may contain:
          'string': the type-0 information string (ASCII-decoded);
          'reason': the decoded type-1 reason text.

        Raises KeyError for an unknown reason code; unknown TLV types are
        silently skipped.
        """
        infor_tlv = dict()
        while data:
            # Each TLV: 2-byte type, 2-byte length, then `length` value bytes.
            info_type, info_len = struct.unpack('!HH', data[0:4])
            info_value = data[4: 4 + info_len]
            if info_type == 0:
                infor_tlv['string'] = info_value.decode('ascii')
            elif info_type == 1:
                infor_tlv['reason'] = cls.reason_codict[struct.unpack('!H', info_value)[0]]
            data = data[4 + info_len:]
        return cls(value=infor_tlv)
| {
"content_hash": "e45727701429082f9cb591f9f6599b99",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 98,
"avg_line_length": 34.4054054054054,
"alnum_prop": 0.6048703849175177,
"repo_name": "smartbgp/libbgp",
"id": "d4baf61544413573da910108cd2c2f45dbf1df74",
"size": "1910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libbgp/bmp/termination.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "135922"
}
],
"symlink_target": ""
} |
"""Representation of Avro schemas.
A schema may be one of:
- A record, mapping field names to field value data;
- An error, equivalent to a record;
- An enum, containing one of a small set of symbols;
- An array of values, all of the same schema;
- A map containing string/value pairs, each of a declared schema;
- A union of other schemas;
- A fixed sized binary object;
- A unicode string;
- A sequence of bytes;
- A 32-bit signed int;
- A 64-bit signed long;
- A 32-bit floating-point float;
- A 64-bit floating-point double;
- A boolean;
- Null.
"""
import abc
import json
import logging
import re
import sys
from six import with_metaclass
PY2 = sys.version_info[0] == 2
# Text type alias: unicode on Python 2, str on Python 3.
if PY2:
  _str = unicode  # pylint: disable=undefined-variable
else:
  _str = str

logger = logging.getLogger(__name__)

# ------------------------------------------------------------------------------
# Constants

# Log level more verbose than DEBUG=10, INFO=20, etc.
DEBUG_VERBOSE = 5

# Avro type names, as they appear in JSON schema descriptors.
NULL = 'null'
BOOLEAN = 'boolean'
STRING = 'string'
BYTES = 'bytes'
INT = 'int'
LONG = 'long'
FLOAT = 'float'
DOUBLE = 'double'
FIXED = 'fixed'
ENUM = 'enum'
RECORD = 'record'
ERROR = 'error'
ARRAY = 'array'
MAP = 'map'
UNION = 'union'

# Request and error unions are part of Avro protocols:
REQUEST = 'request'
ERROR_UNION = 'error_union'

PRIMITIVE_TYPES = frozenset([
  NULL,
  BOOLEAN,
  STRING,
  BYTES,
  INT,
  LONG,
  FLOAT,
  DOUBLE,
])

# Types that carry a (possibly namespaced) name of their own.
NAMED_TYPES = frozenset([
  FIXED,
  ENUM,
  RECORD,
  ERROR,
])

VALID_TYPES = frozenset.union(
  PRIMITIVE_TYPES,
  NAMED_TYPES,
  [
    ARRAY,
    MAP,
    UNION,
    REQUEST,
    ERROR_UNION,
  ],
)

# Properties with dedicated meaning on a schema; everything else is kept
# as "other props" (see Schema.other_props).
SCHEMA_RESERVED_PROPS = frozenset([
  'type',
  'name',
  'namespace',
  'fields',     # Record
  'items',      # Array
  'size',       # Fixed
  'symbols',    # Enum
  'values',     # Map
  'doc',
])

# Properties with dedicated meaning on a record field.
FIELD_RESERVED_PROPS = frozenset([
  'default',
  'name',
  'doc',
  'order',
  'type',
])

VALID_FIELD_SORT_ORDERS = frozenset([
  'ascending',
  'descending',
  'ignore',
])
# ------------------------------------------------------------------------------
# Exceptions
class Error(Exception):
  """Base class for errors in this module; all module exceptions derive from it."""
class AvroException(Error):
  """Generic Avro schema error (e.g. invalid schema construction)."""
class SchemaParseException(AvroException):
  """Error while parsing a JSON schema descriptor (malformed schema or name)."""
class Schema(with_metaclass(abc.ABCMeta, object)):
  """Abstract base class for all Schema classes.

  Concrete subclasses must implement to_json(). All schema state lives in
  the `props` dict; convenience properties expose the common entries.
  """

  def __init__(self, data_type, other_props=None):
    """Initializes a new schema object.

    Args:
      data_type: Type of the schema to initialize; must be in VALID_TYPES.
      other_props: Optional dictionary of additional properties.

    Raises:
      SchemaParseException: If data_type is not a valid Avro type.
    """
    if data_type not in VALID_TYPES:
      raise SchemaParseException('%r is not a valid Avro type.' % data_type)

    # All properties of this schema, as a map: property name -> property value
    self._props = {}

    self._props['type'] = data_type
    self._type = data_type

    if other_props:
      self._props.update(other_props)

  @property
  def namespace(self):
    """Returns: the namespace this schema belongs to, if any, or None."""
    return self._props.get('namespace', None)

  @property
  def type(self):
    """Returns: the type of this schema."""
    return self._type

  @property
  def doc(self):
    """Returns: the documentation associated to this schema, if any, or None."""
    return self._props.get('doc', None)

  @property
  def props(self):
    """Reports all the properties of this schema.

    Includes all properties, reserved and non reserved.
    JSON properties of this schema are directly generated from this dict.

    Returns:
      A dictionary of properties associated to this schema.
    """
    return self._props

  @property
  def other_props(self):
    """Returns: the dictionary of non-reserved properties."""
    # filter_keys_out is a helper defined elsewhere in this module.
    return dict(filter_keys_out(items=self._props, keys=SCHEMA_RESERVED_PROPS))

  def __str__(self):
    """Returns: the JSON representation of this schema."""
    return json.dumps(self.to_json(names=None))

  @abc.abstractmethod
  def to_json(self, names):
    """Converts the schema object into its AVRO specification representation.

    Schema types that have names (records, enums, and fixed) must
    be aware of not re-defining schemas that are already listed
    in the parameter names.
    """
    raise Exception('Cannot run abstract method.')
# ------------------------------------------------------------------------------
# NOTE(review): _RE_NAME is not referenced anywhere in this portion of the
# file; it may be used further down or be dead code - verify before removing.
_RE_NAME = re.compile(r'[A-Za-z_][A-Za-z0-9_]*')

# Matches an optionally namespaced Avro full name; group 1 captures the
# final (simple) name component.
_RE_FULL_NAME = re.compile(
    r'^'
    r'[.]?(?:[A-Za-z_][A-Za-z0-9_]*[.])*'  # optional namespace
    r'([A-Za-z_][A-Za-z0-9_]*)'            # name
    r'$'
)
class Name(object):
  """Representation of an Avro name: simple name, namespace, full name."""

  def __init__(self, name, namespace=None):
    """Parses an Avro name.

    Args:
      name: Avro name to parse (relative or absolute).
      namespace: Optional explicit namespace if the name is relative
          (ignored when *name* contains a dot, i.e. is absolute).

    Raises:
      SchemaParseException: If the resulting full name is invalid.
    """
    # Normalize: namespace is always defined as a string, possibly empty.
    if namespace is None:
      namespace = ''

    if '.' in name:
      # name is absolute, namespace is ignored:
      self._fullname = name

      match = _RE_FULL_NAME.match(self._fullname)
      if match is None:
        raise SchemaParseException(
            'Invalid absolute schema name: %r.' % self._fullname)

      self._name = match.group(1)
      # Strip "<simple name>" plus its leading dot to recover the namespace.
      self._namespace = self._fullname[:-(len(self._name) + 1)]

    else:
      # name is relative, combine with explicit namespace:
      self._name = name
      self._namespace = namespace
      self._fullname = (self._name
                        if (not self._namespace) else
                        '%s.%s' % (self._namespace, self._name))

      # Validate the fullname:
      if _RE_FULL_NAME.match(self._fullname) is None:
        raise SchemaParseException(
            'Invalid schema name %r inferred from name %r and namespace %r.'
            % (self._fullname, self._name, self._namespace))

  # NOTE(review): __eq__ is defined without __hash__, which makes Name
  # unhashable on Python 3 - confirm instances are never used as set/dict keys.
  def __eq__(self, other):
    if not isinstance(other, Name):
      return NotImplemented
    return self.fullname == other.fullname

  @property
  def simple_name(self):
    """Returns: the simple name part of this name."""
    return self._name

  @property
  def namespace(self):
    """Returns: this name's namespace, possible the empty string."""
    return self._namespace

  @property
  def fullname(self):
    """Returns: the full name."""
    return self._fullname
# ------------------------------------------------------------------------------
class Names(object):
  """Tracks Avro named schemas and default namespace during parsing."""

  def __init__(self, default_namespace=None, names=None):
    """Initializes a new name tracker.

    Args:
      default_namespace: Optional default namespace.
      names: Optional initial mapping of known named schemas
          (full name -> schema).
    """
    if names is None:
      names = {}
    self._names = names
    self._default_namespace = default_namespace

  @property
  def names(self):
    """Returns: the mapping of known named schemas."""
    return self._names

  @property
  def default_namespace(self):
    """Returns: the default namespace, if any, or None."""
    return self._default_namespace

  def new_with_default_namespace(self, namespace):
    """Creates a new name tracker from this tracker, but with a new default ns.

    Note: the new tracker SHARES this tracker's name mapping, so schemas
    registered through either tracker are visible to both.

    Args:
      namespace: New default namespace to use.

    Returns:
      New name tracker with the specified default namespace.
    """
    return Names(names=self._names, default_namespace=namespace)

  def get_name(self, name, namespace=None):
    """Resolves the Avro name according to this name tracker's state.

    Args:
      name: Name to resolve (absolute or relative).
      namespace: Optional explicit namespace; defaults to the tracker's
          default namespace.

    Returns:
      The specified name, resolved according to this tracker.
    """
    if namespace is None:
      namespace = self._default_namespace
    return Name(name=name, namespace=namespace)

  def get_schema(self, name, namespace=None):
    """Resolves an Avro schema by name.

    Args:
      name: Name (relative or absolute) of the Avro schema to look up.
      namespace: Optional explicit namespace.

    Returns:
      The schema with the specified name, if any, or None.
    """
    avro_name = self.get_name(name=name, namespace=namespace)
    return self._names.get(avro_name.fullname, None)

  def prune_namespace(self, properties):
    """given a properties, return properties with namespace removed if
    it matches the own default namespace
    """
    if self.default_namespace is None:
      # I have no default -- no change
      return properties
    if 'namespace' not in properties:
      # he has no namespace - no change
      return properties
    if properties['namespace'] != self.default_namespace:
      # we're different - leave his stuff alone
      return properties
    # we each have a namespace and it's redundant. delete his.
    prunable = properties.copy()
    del prunable['namespace']
    return prunable

  def register(self, schema):
    """Registers a new named schema in this tracker.

    Args:
      schema: Named Avro schema to register in this tracker.

    Raises:
      SchemaParseException: If the schema name is a reserved type name or
          has already been registered.
    """
    if schema.fullname in VALID_TYPES:
      raise SchemaParseException(
          '%s is a reserved type name.' % schema.fullname)
    if schema.fullname in self.names:
      raise SchemaParseException(
          'Avro name %r already exists.' % schema.fullname)

    logger.log(DEBUG_VERBOSE, 'Register new name for %r', schema.fullname)
    self._names[schema.fullname] = schema
# ------------------------------------------------------------------------------
class NamedSchema(Schema):
  """Abstract base class for named schemas.

  Named schemas are enumerated in NAMED_TYPES.
  """

  def __init__(
      self,
      data_type,
      name=None,
      namespace=None,
      names=None,
      other_props=None,
  ):
    """Initializes a new named schema object.

    Side effect: registers this schema in *names*, so constructing a
    NamedSchema with a duplicate or reserved name raises.

    Args:
      data_type: Type of the named schema.
      name: Name (absolute or relative) of the schema.
      namespace: Optional explicit namespace if name is relative.
      names: Tracker to resolve and register Avro names.
      other_props: Optional map of additional properties of the schema.

    Raises:
      SchemaParseException: If the name is invalid, reserved or duplicate.
    """
    assert (data_type in NAMED_TYPES), ('Invalid named type: %r' % data_type)
    self._avro_name = names.get_name(name=name, namespace=namespace)

    super(NamedSchema, self).__init__(data_type, other_props)

    names.register(self)

    self._props['name'] = self.name
    if self.namespace:
      self._props['namespace'] = self.namespace

  @property
  def avro_name(self):
    """Returns: the Name object describing this schema's name."""
    return self._avro_name

  @property
  def name(self):
    # Simple (unqualified) name of this schema.
    return self._avro_name.simple_name

  @property
  def namespace(self):
    # Namespace of this schema (possibly the empty string).
    return self._avro_name.namespace

  @property
  def fullname(self):
    # Fully qualified "namespace.name" of this schema.
    return self._avro_name.fullname

  def name_ref(self, names):
    """Reports this schema name relative to the specified name tracker.

    Args:
      names: Avro name tracker to relativize this schema name against.

    Returns:
      This schema name, relativized against the specified name tracker.
    """
    if self.namespace == names.default_namespace:
      return self.name
    return self.fullname

  @abc.abstractmethod
  def to_json(self, names):
    """Converts the schema object into its AVRO specification representation.

    Schema types that have names (records, enums, and fixed) must
    be aware of not re-defining schemas that are already listed
    in the parameter names.
    """
    raise Exception('Cannot run abstract method.')
# ------------------------------------------------------------------------------
# Sentinel marking "no default value" on a record field; distinguishes an
# explicit default of None/null from a missing default.
_NO_DEFAULT = object()
class Field(object):
  """Representation of the schema of a field in a record."""

  def __init__(
      self,
      data_type,
      name,
      index,
      has_default,
      default=_NO_DEFAULT,
      order=None,
      doc=None,
      other_props=None
  ):
    """Initializes a new Field object.

    Args:
      data_type: Avro schema of the field.
      name: Name of the field (non-empty string).
      index: 0-based position of the field.
      has_default: Whether an explicit default value was declared.
      default: The declared default value; only stored when has_default.
      order: Optional sort order; one of VALID_FIELD_SORT_ORDERS.
      doc: Optional documentation string for the field.
      other_props: Optional map of additional (non-reserved) properties.

    Raises:
      SchemaParseException: If the name is empty/non-string or the order
          is not a valid sort order.
    """
    if (not isinstance(name, _str)) or (not name):
      raise SchemaParseException('Invalid record field name: %r.' % name)
    if (order is not None) and (order not in VALID_FIELD_SORT_ORDERS):
      raise SchemaParseException('Invalid record field order: %r.' % order)

    # All properties of this record field:
    self._props = {}

    self._has_default = has_default
    if other_props:
      self._props.update(other_props)

    self._index = index
    self._type = self._props['type'] = data_type
    self._name = self._props['name'] = name

    if has_default:
      self._props['default'] = default

    if order is not None:
      self._props['order'] = order

    if doc is not None:
      self._props['doc'] = doc

  @property
  def type(self):
    """Returns: the schema of this field."""
    return self._type

  @property
  def name(self):
    """Returns: this field name."""
    return self._name

  @property
  def index(self):
    """Returns: the 0-based index of this field in the record."""
    return self._index

  @property
  def default(self):
    # Raises KeyError when the field has no default; check has_default first.
    return self._props['default']

  @property
  def has_default(self):
    # True when an explicit default value was declared for this field.
    return self._has_default

  @property
  def order(self):
    # Optional sort order, or None.
    return self._props.get('order', None)

  @property
  def doc(self):
    # Optional documentation string, or None.
    return self._props.get('doc', None)

  @property
  def props(self):
    # All properties of this field (reserved and non-reserved).
    return self._props

  @property
  def other_props(self):
    # Non-reserved properties only.
    return filter_keys_out(items=self._props, keys=FIELD_RESERVED_PROPS)

  def __str__(self):
    # JSON representation of this field.
    return json.dumps(self.to_json())

  def to_json(self, names=None):
    """Converts this field into its JSON descriptor (type serialized via names)."""
    if names is None:
      names = Names()
    to_dump = self.props.copy()
    to_dump['type'] = self.type.to_json(names)
    return to_dump

  def __eq__(self, that):
    # Equality via canonical JSON representation.
    to_cmp = json.loads(_str(self))
    return to_cmp == json.loads(_str(that))
# ------------------------------------------------------------------------------
# Primitive Types
class PrimitiveSchema(Schema):
  """Schema of a primitive Avro type.

  Valid primitive types are defined in PRIMITIVE_TYPES.
  """

  def __init__(self, data_type, other_props=None):
    """Initializes a new schema object for the specified primitive type.

    Args:
      data_type: Type of the schema to construct. Must be primitive.
      other_props: Optional dictionary of additional properties.

    Raises:
      AvroException: If data_type is not one of PRIMITIVE_TYPES.
    """
    if data_type not in PRIMITIVE_TYPES:
      raise AvroException('%r is not a valid primitive type.' % data_type)
    super(PrimitiveSchema, self).__init__(data_type, other_props=other_props)

  @property
  def name(self):
    """Returns: the simple name of this schema (the type name itself)."""
    return self.type

  @property
  def fullname(self):
    """Returns: the fully qualified name (same as the simple name here)."""
    return self.name

  def to_json(self, names=None):
    """Serialize: a bare type-name string when there are no extra
    properties, otherwise the full property dict."""
    return self.fullname if len(self.props) == 1 else self.props

  def __eq__(self, that):
    """Equality is property-dict equality."""
    return self.props == that.props
# ------------------------------------------------------------------------------
# Complex Types (non-recursive)
class FixedSchema(NamedSchema):
  """Named schema for Avro 'fixed': a binary blob of a fixed byte size."""

  def __init__(
      self,
      name,
      namespace,
      size,
      names=None,
      other_props=None,
  ):
    """Initializes a new fixed schema.

    Args:
      name: Simple or absolute name of the schema.
      namespace: Optional namespace for a relative name.
      size: Size of the fixed value, in bytes (must be an int).
      names: Tracker used to resolve and register the schema name.
      other_props: Optional map of additional properties.

    Raises:
      AvroException: If size is not an integer.
    """
    if not isinstance(size, int):
      raise AvroException(
          'Fixed Schema requires a valid integer for size property.')
    super(FixedSchema, self).__init__(
        data_type=FIXED,
        name=name,
        namespace=namespace,
        names=names,
        other_props=other_props,
    )
    self._props['size'] = size

  @property
  def size(self):
    """Returns: the size of this fixed schema, in bytes."""
    return self._props['size']

  def to_json(self, names=None):
    """Serialize; emits a name reference if this schema was already dumped."""
    if names is None:
      names = Names()
    if self.fullname in names.names:
      return self.name_ref(names)
    names.names[self.fullname] = self
    return names.prune_namespace(self.props)

  def __eq__(self, that):
    """Equality is property-dict equality."""
    return self.props == that.props
# ------------------------------------------------------------------------------
class EnumSchema(NamedSchema):
  """Named schema for an Avro enum: a closed, ordered set of string symbols."""

  def __init__(
      self,
      name,
      namespace,
      symbols,
      names=None,
      doc=None,
      other_props=None,
  ):
    """Initializes a new enumeration schema object.

    Args:
      name: Simple name of this enumeration.
      namespace: Optional namespace.
      symbols: Ordered list of symbols defined in this enumeration.
      names: Tracker used to resolve and register the schema name.
      doc: Optional documentation string.
      other_props: Optional map of additional properties.

    Raises:
      AvroException: If symbols are not unique or not all strings.
    """
    symbols = tuple(symbols)
    symbol_set = frozenset(symbols)
    # Reject duplicate symbols and non-string symbols in one check.
    if (len(symbol_set) != len(symbols)
        or not all(map(lambda symbol: isinstance(symbol, _str), symbols))):
      raise AvroException(
          'Invalid symbols for enum schema: %r.' % (symbols,))

    super(EnumSchema, self).__init__(
        data_type=ENUM,
        name=name,
        namespace=namespace,
        names=names,
        other_props=other_props,
    )

    self._props['symbols'] = symbols
    if doc is not None:
      self._props['doc'] = doc

  @property
  def symbols(self):
    """Returns: the symbols defined in this enum."""
    return self._props['symbols']

  def to_json(self, names=None):
    """Serialize; emits a name reference if this schema was already dumped."""
    if names is None:
      names = Names()
    if self.fullname in names.names:
      return self.name_ref(names)
    names.names[self.fullname] = self
    return names.prune_namespace(self.props)

  def __eq__(self, that):
    # Equality is property-dict equality.
    return self.props == that.props
# ------------------------------------------------------------------------------
# Complex Types (recursive)
class ArraySchema(Schema):
  """Schema of an array; every item conforms to a single item schema."""

  def __init__(self, items, other_props=None):
    """Initializes a new array schema object.

    Args:
      items: Avro schema of the array items.
      other_props: Optional dictionary of additional properties.
    """
    super(ArraySchema, self).__init__(
        data_type=ARRAY,
        other_props=other_props,
    )
    self._items_schema = items
    self._props['items'] = items

  @property
  def items(self):
    """Returns: the schema of the items in this array."""
    return self._items_schema

  def to_json(self, names=None):
    """Serialize, recursively serializing the item schema."""
    if names is None:
      names = Names()
    json_repr = self.props.copy()
    json_repr['items'] = self.items.to_json(names)
    return json_repr

  def __eq__(self, that):
    """Equality via canonical JSON representation."""
    return json.loads(_str(self)) == json.loads(_str(that))
# ------------------------------------------------------------------------------
class MapSchema(Schema):
  """Schema of a map: string keys, values of a single declared schema."""

  def __init__(self, values, other_props=None):
    """Initializes a new map schema object.

    Args:
      values: Avro schema of the map values.
      other_props: Optional dictionary of additional properties.
    """
    super(MapSchema, self).__init__(
        data_type=MAP,
        other_props=other_props,
    )
    self._values_schema = values
    self._props['values'] = values

  @property
  def values(self):
    """Returns: the schema of the values in this map."""
    return self._values_schema

  def to_json(self, names=None):
    """Serialize, recursively serializing the value schema."""
    if names is None:
      names = Names()
    json_repr = self.props.copy()
    json_repr['values'] = self.values.to_json(names)
    return json_repr

  def __eq__(self, that):
    """Equality via canonical JSON representation."""
    return json.loads(_str(self)) == json.loads(_str(that))
# ------------------------------------------------------------------------------
class UnionSchema(Schema):
  """Schema of a union: a value conforms to exactly one of several branches."""

  def __init__(self, schemas):
    """Initializes a new union schema object.

    Args:
      schemas: Ordered collection of schema branches in the union.

    Raises:
      AvroException: On duplicate named-schema names, duplicate unnamed
          branch types, or a nested union branch.
    """
    super(UnionSchema, self).__init__(data_type=UNION)
    self._schemas = tuple(schemas)

    # Validate the schema branches:

    # All named schema names are unique:
    named_branches = tuple(
        filter(lambda schema: schema.type in NAMED_TYPES, self._schemas))
    unique_names = frozenset(map(lambda schema: schema.fullname, named_branches))
    if len(unique_names) != len(named_branches):
      raise AvroException(
          'Invalid union branches with duplicate schema name:%s'
          % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas)))

    # Types are unique within unnamed schemas, and union is not allowed:
    unnamed_branches = tuple(
        filter(lambda schema: schema.type not in NAMED_TYPES, self._schemas))
    unique_types = frozenset(map(lambda schema: schema.type, unnamed_branches))
    if UNION in unique_types:
      raise AvroException(
          'Invalid union branches contain other unions:%s'
          % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas)))
    if len(unique_types) != len(unnamed_branches):
      raise AvroException(
          'Invalid union branches with duplicate type:%s'
          % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas)))

  @property
  def schemas(self):
    """Returns: the ordered list of schema branches in the union."""
    return self._schemas

  def to_json(self, names=None):
    """Serialize as a JSON list of the branch schemas, in order."""
    if names is None:
      names = Names()
    to_dump = []
    for schema in self.schemas:
      to_dump.append(schema.to_json(names))
    return to_dump

  def __eq__(self, that):
    # Equality via canonical JSON representation.
    to_cmp = json.loads(_str(self))
    return to_cmp == json.loads(_str(that))
# ------------------------------------------------------------------------------
class ErrorUnionSchema(UnionSchema):
    """Schema representing the declared errors of a protocol message."""

    def __init__(self, schemas):
        """Initializes an error-union schema.

        Args:
            schemas: Collection of declared error schemas.
        """
        # Every error union implicitly carries a leading "string" branch
        # used for system errors.
        branches = [PrimitiveSchema(data_type=STRING)]
        branches.extend(schemas)
        super(ErrorUnionSchema, self).__init__(schemas=branches)

    def to_json(self, names=None):
        """Serializes the union, omitting the implicit system-error branch."""
        if names is None:
            names = Names()
        return [schema.to_json(names)
                for schema in self.schemas
                if schema.type != STRING]
# ------------------------------------------------------------------------------
class RecordSchema(NamedSchema):
    """Schema of a record.

    A record is an ordered collection of named, typed fields.  Depending on
    record_type, this class also models protocol errors (ERROR) and the
    anonymous parameter lists of protocol requests (REQUEST).
    """

    @staticmethod
    def _make_field(index, field_desc, names):
        """Builds field schemas from a list of field JSON descriptors.

        Args:
            index: 0-based index of the field in the record.
            field_desc: JSON descriptors of a record field.
            names: Tracker of named Avro schemas, used to resolve the
                field's type descriptor.
        Return:
            The field schema.
        """
        field_schema = schema_from_json_data(
            json_data=field_desc['type'],
            names=names,
        )
        # Non-reserved descriptor keys are preserved as extra field metadata:
        other_props = (
            dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS)))
        return Field(
            data_type=field_schema,
            name=field_desc['name'],
            index=index,
            has_default=('default' in field_desc),
            default=field_desc.get('default', _NO_DEFAULT),
            order=field_desc.get('order', None),
            doc=field_desc.get('doc', None),
            other_props=other_props,
        )

    @staticmethod
    def make_field_list(field_desc_list, names):
        """Builds field schemas from a list of field JSON descriptors.

        Guarantees field name unicity.

        Args:
            field_desc_list: collection of field JSON descriptors.
            names: Avro schema tracker.
        Yields:
            Field schemas.
        """
        for index, field_desc in enumerate(field_desc_list):
            yield RecordSchema._make_field(index, field_desc, names)

    @staticmethod
    def _make_field_map(fields):
        """Builds the field map.

        Guarantees field name unicity.

        Args:
            fields: iterable of field schema.
        Returns:
            A map of field schemas, indexed by name.
        Raises:
            SchemaParseException: on duplicate field names.
        """
        field_map = {}
        for field in fields:
            if field.name in field_map:
                raise SchemaParseException(
                    'Duplicate record field name %r.' % field.name)
            field_map[field.name] = field
        return field_map

    def __init__(
            self,
            name,
            namespace,
            fields=None,
            make_fields=None,
            names=None,
            record_type=RECORD,
            doc=None,
            other_props=None
    ):
        """Initializes a new record schema object.

        Args:
            name: Name of the record (absolute or relative).
            namespace: Optional namespace the record belongs to, if name is
                relative.
            fields: collection of fields to add to this record.
                Exactly one of fields or make_fields must be specified.
            make_fields: function creating the fields that belong to the record.
                The function signature is: make_fields(names) -> ordered field list.
                Exactly one of fields or make_fields must be specified.
            names: Tracker of named Avro schemas (presumably; confirm with
                callers outside this excerpt).
            record_type: Type of the record: one of RECORD, ERROR or REQUEST.
                Protocol requests are not named.
            doc: Optional documentation string for the record.
            other_props: Optional dict of additional schema properties.
        Raises:
            SchemaParseException: if record_type is not one of the above.
        """
        if record_type == REQUEST:
            # Protocol requests are not named:
            super(RecordSchema, self).__init__(
                data_type=REQUEST,
                other_props=other_props,
            )
        elif record_type in [RECORD, ERROR]:
            # Register this record name in the tracker:
            super(RecordSchema, self).__init__(
                data_type=record_type,
                name=name,
                namespace=namespace,
                names=names,
                other_props=other_props,
            )
        else:
            raise SchemaParseException(
                'Invalid record type: %r.' % record_type)
        # Named records establish a new default namespace for their nested
        # (field) schemas; anonymous request records reuse the tracker as-is.
        if record_type in [RECORD, ERROR]:
            avro_name = names.get_name(name=name, namespace=namespace)
            nested_names = names.new_with_default_namespace(namespace=avro_name.namespace)
        elif record_type == REQUEST:
            # Protocol request has no name: no need to change default namespace:
            nested_names = names
        if fields is None:
            # Deferred field construction, using the nested namespace tracker:
            fields = make_fields(names=nested_names)
        else:
            assert make_fields is None
        self._fields = tuple(fields)
        self._field_map = RecordSchema._make_field_map(self._fields)
        self._props['fields'] = fields
        if doc is not None:
            self._props['doc'] = doc

    @property
    def fields(self):
        """Returns: the field schemas, as an ordered tuple."""
        return self._fields

    @property
    def field_map(self):
        """Returns: a read-only map of the field schemas index by field names."""
        return self._field_map

    def to_json(self, names=None):
        """Serializes the record schema into its JSON descriptor."""
        if names is None:
            names = Names()
        # Request records don't have names
        if self.type == REQUEST:
            return [f.to_json(names) for f in self.fields]
        # Already serialized once: emit a reference by name only.
        if self.fullname in names.names:
            return self.name_ref(names)
        names.names[self.fullname] = self
        to_dump = names.prune_namespace(self.props.copy())
        to_dump['fields'] = [f.to_json(names) for f in self.fields]
        return to_dump

    def __eq__(self, that):
        # Schemas compare equal when their JSON descriptors are equivalent.
        to_cmp = json.loads(_str(self))
        return to_cmp == json.loads(_str(that))
# ------------------------------------------------------------------------------
# Module functions
def filter_keys_out(items, keys):
    """Filters a collection of (key, value) items.

    Excludes any item whose key belongs to keys.

    Args:
        items: Dictionary of items to filter the keys out of.
        keys: Keys to filter out.
    Yields:
        The (key, value) pairs whose key is not in keys.
    """
    for key, value in items.items():
        if key not in keys:
            yield key, value
# ------------------------------------------------------------------------------
def _schema_from_json_string(json_string, names):
    """Parses a JSON string descriptor: a primitive type or a named schema.

    Args:
        json_string: Type name (primitive, or a previously defined name).
        names: Tracker of named Avro schemas.
    Returns:
        The schema the string refers to.
    Raises:
        SchemaParseException: if the name is unknown.
    """
    if json_string in PRIMITIVE_TYPES:
        return PrimitiveSchema(data_type=json_string)

    # Otherwise the string must reference a known named schema:
    schema = names.get_schema(name=json_string)
    if schema is not None:
        return schema
    raise SchemaParseException(
        'Unknown named schema %r, known names: %r.'
        % (json_string, sorted(names.names)))
def _schema_from_json_array(json_array, names):
    """Parses a JSON array descriptor into a union schema."""
    branches = [schema_from_json_data(json_data=branch_desc, names=names)
                for branch_desc in json_array]
    return UnionSchema(branches)
def _schema_from_json_object(json_object, names):
    """Parses a JSON object (dict) descriptor into an Avro schema.

    Args:
        json_object: JSON object describing the schema; must carry 'type'.
        names: Tracker of named Avro schemas.
    Returns:
        The parsed schema.
    Raises:
        SchemaParseException: if the descriptor is invalid.
    """
    data_type = json_object.get('type')
    if data_type is None:
        raise SchemaParseException(
            'Avro schema JSON descriptor has no "type" property: %r' % json_object)
    # Non-reserved descriptor keys are carried along as schema metadata:
    other_props = dict(
        filter_keys_out(items=json_object, keys=SCHEMA_RESERVED_PROPS))
    if data_type in PRIMITIVE_TYPES:
        # FIXME should not ignore other properties
        result = PrimitiveSchema(data_type, other_props=other_props)
    elif data_type in NAMED_TYPES:
        name = json_object.get('name')
        namespace = json_object.get('namespace', names.default_namespace)
        if data_type == FIXED:
            size = json_object.get('size')
            result = FixedSchema(name, namespace, size, names, other_props)
        elif data_type == ENUM:
            symbols = json_object.get('symbols')
            doc = json_object.get('doc')
            result = EnumSchema(name, namespace, symbols, names, doc, other_props)
        elif data_type in [RECORD, ERROR]:
            field_desc_list = json_object.get('fields', ())
            def MakeFields(names):
                # Deferred field construction: invoked by RecordSchema with
                # the record's nested namespace tracker.
                return tuple(RecordSchema.make_field_list(field_desc_list, names))
            result = RecordSchema(
                name=name,
                namespace=namespace,
                make_fields=MakeFields,
                names=names,
                record_type=data_type,
                doc=json_object.get('doc'),
                other_props=other_props,
            )
        else:
            raise Exception('Internal error: unknown type %r.' % data_type)
    elif data_type in VALID_TYPES:
        # Unnamed, non-primitive Avro type:
        if data_type == ARRAY:
            items_desc = json_object.get('items')
            if items_desc is None:
                raise SchemaParseException(
                    'Invalid array schema descriptor with no "items" : %r.'
                    % json_object)
            result = ArraySchema(
                items=schema_from_json_data(items_desc, names),
                other_props=other_props,
            )
        elif data_type == MAP:
            values_desc = json_object.get('values')
            if values_desc is None:
                raise SchemaParseException(
                    'Invalid map schema descriptor with no "values" : %r.'
                    % json_object)
            result = MapSchema(
                values=schema_from_json_data(values_desc, names=names),
                other_props=other_props,
            )
        elif data_type == ERROR_UNION:
            # 'declared_errors' is produced internally (protocol parsing,
            # presumably) rather than by user schemas — hence the assert.
            error_desc_list = json_object.get('declared_errors')
            assert error_desc_list is not None
            error_schemas = map(
                lambda desc: schema_from_json_data(desc, names=names),
                error_desc_list)
            result = ErrorUnionSchema(schemas=error_schemas)
        else:
            raise Exception('Internal error: unknown type %r.' % data_type)
    else:
        raise SchemaParseException(
            'Invalid JSON descriptor for an Avro schema: %r' % json_object)
    return result
# Parsers for the JSON data types:
# Maps the Python type of a JSON descriptor to the function that parses it:
#  - strings name a primitive or a previously defined named schema,
#  - arrays describe unions,
#  - objects (dicts) describe all other schemas.
_JSONDataParserTypeMap = {
    _str: _schema_from_json_string,
    list: _schema_from_json_array,
    dict: _schema_from_json_object,
}
def schema_from_json_data(json_data, names=None):
    """Builds an Avro Schema from its JSON descriptor.

    Args:
        json_data: JSON data representing the descriptor of the Avro schema.
        names: Optional tracker for Avro named schemas.
    Returns:
        The Avro schema parsed from the JSON descriptor.
    Raises:
        SchemaParseException: if the descriptor is invalid.
    """
    if names is None:
        names = Names()

    # Dispatch on the Python type of the descriptor:
    try:
        parser = _JSONDataParserTypeMap[type(json_data)]
    except KeyError:
        raise SchemaParseException(
            'Invalid JSON descriptor for an Avro schema: %r.' % json_data)
    return parser(json_data, names=names)
# ------------------------------------------------------------------------------
def parse(json_string):
    """Constructs a Schema from its JSON descriptor in text form.

    Args:
        json_string: String representation of the JSON descriptor of the schema.
    Returns:
        The parsed schema.
    Raises:
        SchemaParseException: on JSON parsing error,
            or if the JSON descriptor is invalid.
    """
    try:
        json_data = json.loads(json_string)
    except Exception as exn:
        raise SchemaParseException(
            'Error parsing schema from JSON: %r. '
            'Error message: %r.'
            % (json_string, exn))

    # Parse the descriptor with a fresh tracker of named schemas:
    return schema_from_json_data(json_data, Names())
| {
"content_hash": "44a4722f2b6bd22685497460b8834b69",
"timestamp": "",
"source": "github",
"line_count": 1214,
"max_line_length": 90,
"avg_line_length": 29.678747940691927,
"alnum_prop": 0.5582292533999444,
"repo_name": "Azure/azure-sdk-for-python",
"id": "34fa5980a5f5e68aefb168e613c387140cde3e74",
"size": "36374",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "sdk/storage/azure-storage-blob/azure/storage/blob/_shared/avro/schema.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import numpy
from pandas import DataFrame, Series
def numpy_dot():
    '''
    Award 4 points per gold medal, 2 per silver and 1 per bronze, and build
    a DataFrame 'olympic_points_df' with:
      a) a column 'country_name' holding the country name,
      b) a column 'points' holding the country's total points at the
         Sochi olympics.

    The grader calls this function itself; nothing needs to run here.
    '''
    countries = ['Russian Fed.', 'Norway', 'Canada', 'United States',
                 'Netherlands', 'Germany', 'Switzerland', 'Belarus',
                 'Austria', 'France', 'Poland', 'China', 'Korea',
                 'Sweden', 'Czech Republic', 'Slovenia', 'Japan',
                 'Finland', 'Great Britain', 'Ukraine', 'Slovakia',
                 'Italy', 'Latvia', 'Australia', 'Croatia', 'Kazakhstan']

    gold = [13, 11, 10, 9, 8, 8, 6, 5, 4, 4, 4, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
    silver = [11, 5, 10, 7, 7, 6, 3, 0, 8, 4, 1, 4, 3, 7, 4, 2, 4, 3, 1, 0, 0, 2, 2, 2, 1, 0]
    bronze = [9, 10, 5, 12, 9, 5, 2, 1, 5, 7, 1, 2, 2, 6, 2, 4, 3, 1, 2, 1, 0, 6, 2, 1, 0, 1]

    # One dot product does all the weighting:
    # (26 x 3) medal matrix . (3,) point weights -> (26,) point totals.
    medal_matrix = numpy.column_stack([gold, silver, bronze])
    totals = numpy.dot(medal_matrix, [4, 2, 1])

    olympic_points_df = DataFrame({'country_name': Series(countries),
                                   'points': Series(totals)})
    return olympic_points_df
'''
Official Answer:
olympic_medal_counts = {'country_name':countries,
'gold': Series(gold),
'silver': Series(silver),
'bronze': Series(bronze)}
olympic_medal_counts_df = DataFrame(olympic_medal_counts)
    medal_counts = olympic_medal_counts_df[['gold','silver','bronze']]
points = numpy.dot(medal_counts,[4,2,1])
olympic_points = {'country_name': Series(countries), 'points': Series(point_totals)}
olympic_points_df = DataFrame(olympic_points)
return olympic_points_df
Alternate Approach (unfinished):
gold_points = numpy.dot(gold, 4)
silver_points = numpy.dot(silver, 2)
bronze_points = numpy.dot(bronze, 1)
''' | {
"content_hash": "e19184e2646b5b414b378c8f6e293075",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 102,
"avg_line_length": 36.161764705882355,
"alnum_prop": 0.5839772265148434,
"repo_name": "davidbroadwater/nyc-subway-datascience-project",
"id": "ab420166674504fe58961006d215036ae5b6c51d",
"size": "2459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lesson_1/olympic_points_quiz.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "287289"
},
{
"name": "Python",
"bytes": "103099"
},
{
"name": "Shell",
"bytes": "681"
}
],
"symlink_target": ""
} |
import json
import requests
import mock
import gevent
from gevent import wsgi
from locust import web, runners, stats
from locust.runners import LocustRunner
from locust.main import parse_options
from .testcases import LocustTestCase
class TestWebUI(LocustTestCase):
    """Exercises the Locust web UI endpoints against a live WSGI server."""

    def setUp(self):
        super(TestWebUI, self).setUp()
        stats.global_stats.clear_all()
        parser = parse_options()[0]
        options = parser.parse_args([])[0]
        runners.locust_runner = LocustRunner([], options)
        # Port 0 = ephemeral port, so concurrent test runs don't collide.
        self._web_ui_server = wsgi.WSGIServer(('127.0.0.1', 0), web.app, log=None)
        gevent.spawn(lambda: self._web_ui_server.serve_forever())
        gevent.sleep(0.01)
        self.web_port = self._web_ui_server.server_port

    def tearDown(self):
        super(TestWebUI, self).tearDown()
        self._web_ui_server.stop()

    def _get(self, path):
        # Helper: GET a path on the web UI under test.
        return requests.get("http://127.0.0.1:%i%s" % (self.web_port, path))

    def test_index(self):
        self.assertEqual(200, self._get("/").status_code)

    def test_stats_no_data(self):
        self.assertEqual(200, self._get("/stats/requests").status_code)

    def test_stats(self):
        stats.global_stats.get("/test", "GET").log(120, 5612)
        response = self._get("/stats/requests")
        self.assertEqual(200, response.status_code)
        data = json.loads(response.content)
        self.assertEqual(2, len(data["stats"]))  # one entry plus Total
        entry = data["stats"][0]
        self.assertEqual("/test", entry["name"])
        self.assertEqual("GET", entry["method"])
        self.assertEqual(120, entry["avg_response_time"])

    def test_stats_cache(self):
        stats.global_stats.get("/test", "GET").log(120, 5612)
        response = self._get("/stats/requests")
        self.assertEqual(200, response.status_code)
        self.assertEqual(2, len(json.loads(response.content)["stats"]))  # one entry plus Total
        # A newly logged entry is masked by the response cache...
        stats.global_stats.get("/test2", "GET").log(120, 5612)
        self.assertEqual(
            2, len(json.loads(self._get("/stats/requests").content)["stats"]))
        # ...until the cache is explicitly cleared.
        web.request_stats.clear_cache()
        self.assertEqual(
            3, len(json.loads(self._get("/stats/requests").content)["stats"]))

    def test_request_stats_csv(self):
        stats.global_stats.get("/test", "GET").log(120, 5612)
        self.assertEqual(200, self._get("/stats/requests/csv").status_code)

    def test_distribution_stats_csv(self):
        stats.global_stats.get("/test", "GET").log(120, 5612)
        self.assertEqual(200, self._get("/stats/distribution/csv").status_code)
| {
"content_hash": "ba67aa424cf5cd0a0e5642b2854b12bc",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 109,
"avg_line_length": 41.986301369863014,
"alnum_prop": 0.6264274061990212,
"repo_name": "pglass/locust",
"id": "37814b0fe52f31d834f2560bcef18e82fcfe0e3d",
"size": "3065",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "locust/test/test_web.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5270"
},
{
"name": "JavaScript",
"bytes": "4145"
},
{
"name": "Python",
"bytes": "157213"
},
{
"name": "Shell",
"bytes": "487"
}
],
"symlink_target": ""
} |
"""Define the menu contents, hotkeys, and event bindings.
There is additional configuration information in the EditorWindow class (and
subclasses): the menus are created there based on the menu_specs (class)
variable, and menus not created are silently skipped in the code here. This
makes it possible, for example, to define a Debug menu which is only present in
the PythonShell window, and a Format menu which is only present in the Editor
windows.
"""
from idlelib.configHandler import idleConf
# Warning: menudefs is altered in macosxSupport.overrideRootMenu()
# after it is determined that an OS X Aqua Tk is in use,
# which cannot be done until after Tk() is first called.
# Do not alter the 'file', 'options', or 'help' cascades here
# without altering overrideRootMenu() as well.
# TODO: Make this more robust
# Each cascade is (menu-name, item-list); each item is
# (label, '<<virtual-event>>'), where '_' precedes the underlined hotkey
# character and None inserts a separator.  A leading '!' on a label appears
# to mark a toggle/check item — confirm in the menu-building code.
menudefs = [
 # underscore prefixes character to underscore
 ('file', [
   ('_New File', '<<open-new-window>>'),
   ('_Open...', '<<open-window-from-file>>'),
   ('Open _Module...', '<<open-module>>'),
   ('Class _Browser', '<<open-class-browser>>'),
   ('_Path Browser', '<<open-path-browser>>'),
   None,
   ('_Save', '<<save-window>>'),
   ('Save _As...', '<<save-window-as-file>>'),
   ('Save Cop_y As...', '<<save-copy-of-window-as-file>>'),
   None,
   ('Prin_t Window', '<<print-window>>'),
   None,
   ('_Close', '<<close-window>>'),
   ('E_xit', '<<close-all-windows>>'),
  ]),
 ('edit', [
   ('_Undo', '<<undo>>'),
   ('_Redo', '<<redo>>'),
   None,
   ('Cu_t', '<<cut>>'),
   ('_Copy', '<<copy>>'),
   ('_Paste', '<<paste>>'),
   ('Select _All', '<<select-all>>'),
   None,
   ('_Find...', '<<find>>'),
   ('Find A_gain', '<<find-again>>'),
   ('Find _Selection', '<<find-selection>>'),
   ('Find in Files...', '<<find-in-files>>'),
   ('R_eplace...', '<<replace>>'),
   ('Go to _Line', '<<goto-line>>'),
  ]),
 # Only present in Editor windows (see module docstring):
 ('format', [
   ('_Indent Region', '<<indent-region>>'),
   ('_Dedent Region', '<<dedent-region>>'),
   ('Comment _Out Region', '<<comment-region>>'),
   ('U_ncomment Region', '<<uncomment-region>>'),
   ('Tabify Region', '<<tabify-region>>'),
   ('Untabify Region', '<<untabify-region>>'),
   ('Toggle Tabs', '<<toggle-tabs>>'),
   ('New Indent Width', '<<change-indentwidth>>'),
  ]),
 ('run', [
   ('Python Shell', '<<open-python-shell>>'),
  ]),
 ('shell', [
   ('_View Last Restart', '<<view-restart>>'),
   ('_Restart Shell', '<<restart-shell>>'),
  ]),
 # Only present in the PythonShell window (see module docstring):
 ('debug', [
   ('_Go to File/Line', '<<goto-file-line>>'),
   ('!_Debugger', '<<toggle-debugger>>'),
   ('_Stack Viewer', '<<open-stack-viewer>>'),
   ('!_Auto-open Stack Viewer', '<<toggle-jit-stack-viewer>>'),
  ]),
 ('options', [
   ('_Configure IDLE...', '<<open-config-dialog>>'),
   None,
  ]),
 ('help', [
   ('_About IDLE', '<<about-idle>>'),
   None,
   ('_IDLE Help', '<<help>>'),
   ('Python _Docs', '<<python-docs>>'),
  ]),
]

# Hotkey bindings come from the active key set in the IDLE configuration.
default_keydefs = idleConf.GetCurrentKeySet()
| {
"content_hash": "6df1f39332463ea23c8736613b82c512",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 79,
"avg_line_length": 32.741573033707866,
"alnum_prop": 0.5724090597117364,
"repo_name": "MonicaHsu/truvaluation",
"id": "df2b251426a91f5c298e3352c80a79510558a194",
"size": "2914",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/idlelib/Bindings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2785282"
},
{
"name": "C++",
"bytes": "472284"
},
{
"name": "CSS",
"bytes": "9019"
},
{
"name": "JavaScript",
"bytes": "107089"
},
{
"name": "Perl",
"bytes": "58637"
},
{
"name": "Python",
"bytes": "14350366"
},
{
"name": "Shell",
"bytes": "32063"
},
{
"name": "Tcl",
"bytes": "1349119"
}
],
"symlink_target": ""
} |
from typing import List, TypedDict
from backend.common.sitevars.sitevar import Sitevar
class ContentType(TypedDict):
    # Payload shape of this sitevar.
    # websites: the website URLs to suppress on team pages.
    websites: List[str]
class WebsiteBlacklist(Sitevar[ContentType]):
    """Sitevar tracking team websites that should be suppressed."""

    @staticmethod
    def key() -> str:
        return "website_blacklist"

    @staticmethod
    def description() -> str:
        return "For blacklisting sketchy websites from team pages"

    @staticmethod
    def default_value() -> ContentType:
        return ContentType(websites=[])

    @classmethod
    def is_blacklisted(cls, website: str) -> bool:
        """True when the given website is on the blacklist."""
        return website in cls.get()["websites"]

    @classmethod
    def blacklist(cls, website: str) -> None:
        """Adds the website to the blacklist, skipping duplicates."""

        def _append_site(content: ContentType):
            content["websites"].append(website)
            return content

        cls.update(
            should_update=lambda content: website not in content["websites"],
            update_f=_append_site,
        )
| {
"content_hash": "ebbc1c7331db77b7d4052a5b81363943",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 66,
"avg_line_length": 25.025641025641026,
"alnum_prop": 0.6260245901639344,
"repo_name": "the-blue-alliance/the-blue-alliance",
"id": "aac8b5ffafd696a62dcea5f04c1ef3e725135808",
"size": "976",
"binary": false,
"copies": "1",
"ref": "refs/heads/py3",
"path": "src/backend/common/sitevars/website_blacklist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "359032"
},
{
"name": "Dockerfile",
"bytes": "2503"
},
{
"name": "HTML",
"bytes": "5877313"
},
{
"name": "JavaScript",
"bytes": "755910"
},
{
"name": "Less",
"bytes": "244218"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Pug",
"bytes": "1857"
},
{
"name": "Python",
"bytes": "4321885"
},
{
"name": "Ruby",
"bytes": "4677"
},
{
"name": "Shell",
"bytes": "27698"
}
],
"symlink_target": ""
} |
""""Argo Workflow for building notebook-server-rstudio's OCI image using Kaniko"""
from kubeflow.kubeflow.cd import config, kaniko_builder
def create_workflow(name=None, namespace=None, bucket=None, **kwargs):
    """Builds the Argo workflow that produces the rstudio notebook-server image.

    Args:
        name: Name to give to the workflow. This can also be used to name
            things associated with the workflow.
        namespace: Namespace for the workflow (forwarded to the Kaniko builder).
        bucket: Bucket forwarded to the Kaniko builder.
        **kwargs: Extra options passed verbatim to the Kaniko builder.

    Returns:
        The workflow produced by the Kaniko builder.
    """
    image_builder = kaniko_builder.Builder(
        name=name,
        namespace=namespace,
        bucket=bucket,
        **kwargs)
    return image_builder.build(
        dockerfile="components/example-notebook-servers/rstudio/Dockerfile",
        context="components/example-notebook-servers/rstudio/",
        destination=config.NOTEBOOK_SERVER_RSTUDIO)
| {
"content_hash": "14d337fc768a610fd4e72e0fb29dd75a",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 93,
"avg_line_length": 46.8,
"alnum_prop": 0.6866096866096866,
"repo_name": "kubeflow/kubeflow",
"id": "18b72f4abdf20029ed71c960fbf9807486a8ac54",
"size": "702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/kubeflow/kubeflow/cd/notebook_servers/notebook_server_rstudio.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "44370"
},
{
"name": "Dockerfile",
"bytes": "32340"
},
{
"name": "Go",
"bytes": "266067"
},
{
"name": "HTML",
"bytes": "65169"
},
{
"name": "JavaScript",
"bytes": "200781"
},
{
"name": "Jsonnet",
"bytes": "7366280"
},
{
"name": "Makefile",
"bytes": "43885"
},
{
"name": "PowerShell",
"bytes": "7883"
},
{
"name": "Pug",
"bytes": "15633"
},
{
"name": "Python",
"bytes": "319086"
},
{
"name": "SCSS",
"bytes": "21397"
},
{
"name": "Shell",
"bytes": "7933"
},
{
"name": "TypeScript",
"bytes": "870299"
}
],
"symlink_target": ""
} |
"""
Common classes for local filesystem certificate handling
"""
import os
from oslo_config import cfg
from octavia.certificates.common import cert
# Defaults taken from the environment, with "snakeoil" fallbacks so a dev
# deployment works without explicit configuration.
TLS_CERT_DEFAULT = os.environ.get(
    'OS_OCTAVIA_TLS_CA_CERT', '/etc/ssl/certs/ssl-cert-snakeoil.pem'
)
TLS_KEY_DEFAULT = os.environ.get(
    'OS_OCTAVIA_TLS_CA_KEY', '/etc/ssl/private/ssl-cert-snakeoil.key'
)
# No fallback here: None means the CA key is assumed unencrypted.
TLS_PKP_DEFAULT = os.environ.get('OS_OCTAVIA_CA_KEY_PASS')
TLS_PASS_AMPS_DEFAULT = os.environ.get('TLS_PASS_AMPS_DEFAULT',
                                       'insecure-key-do-not-use-this-key')
TLS_DIGEST_DEFAULT = os.environ.get('OS_OCTAVIA_CA_SIGNING_DIGEST', 'sha256')
TLS_STORAGE_DEFAULT = os.environ.get(
    'OS_OCTAVIA_TLS_STORAGE', '/var/lib/octavia/certificates/'
)

# oslo.config options for the local certificate generator.
certgen_opts = [
    cfg.StrOpt('ca_certificate',
               default=TLS_CERT_DEFAULT,
               help='Absolute path to the CA Certificate for signing. Defaults'
                    ' to env[OS_OCTAVIA_TLS_CA_CERT].'),
    cfg.StrOpt('ca_private_key',
               default=TLS_KEY_DEFAULT,
               help='Absolute path to the Private Key for signing. Defaults'
                    ' to env[OS_OCTAVIA_TLS_CA_KEY].'),
    cfg.StrOpt('ca_private_key_passphrase',
               default=TLS_PKP_DEFAULT,
               help='Passphrase for the Private Key. Defaults'
                    ' to env[OS_OCTAVIA_CA_KEY_PASS] or None.',
               secret=True),
    cfg.StrOpt('server_certs_key_passphrase',
               default=TLS_PASS_AMPS_DEFAULT,
               help='Passphrase for encrypting Amphora Certificates and '
                    'Private Keys. Must be 32, base64(url) compatible, '
                    'characters long. Defaults to env[TLS_PASS_AMPS_DEFAULT] '
                    'or insecure-key-do-not-use-this-key',
               regex=r'^[A-Za-z0-9\-_=]{32}$',
               required=True,
               secret=True),
    cfg.StrOpt('signing_digest',
               default=TLS_DIGEST_DEFAULT,
               help='Certificate signing digest. Defaults'
                    ' to env[OS_OCTAVIA_CA_SIGNING_DIGEST] or "sha256".'),
    cfg.IntOpt('cert_validity_time',
               default=30 * 24 * 60 * 60,
               help="The validity time for the Amphora Certificates "
                    "(in seconds)."),
]

# oslo.config options for the local certificate manager.
certmgr_opts = [
    cfg.StrOpt('storage_path',
               default=TLS_STORAGE_DEFAULT,
               help='Absolute path to the certificate storage directory. '
                    'Defaults to env[OS_OCTAVIA_TLS_STORAGE].')
]
class LocalCert(cert.Cert):
    """Representation of a Cert for local storage."""

    def __init__(self, certificate, private_key, intermediates=None,
                 private_key_passphrase=None):
        """Stores the certificate material as plain attributes.

        :param certificate: the certificate data
        :param private_key: the private key data
        :param intermediates: optional intermediate certificate data
        :param private_key_passphrase: optional passphrase protecting the
            private key
        """
        self.certificate = certificate
        self.private_key = private_key
        self.private_key_passphrase = private_key_passphrase
        self.intermediates = intermediates

    def get_certificate(self):
        """Returns: the certificate data."""
        return self.certificate

    def get_intermediates(self):
        """Returns: the intermediate certificate data, or None."""
        return self.intermediates

    def get_private_key(self):
        """Returns: the private key data."""
        return self.private_key

    def get_private_key_passphrase(self):
        """Returns: the private key passphrase, or None."""
        return self.private_key_passphrase
| {
"content_hash": "c7995f13595b2cf823b5b3a84d7b15c9",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 79,
"avg_line_length": 37.62352941176471,
"alnum_prop": 0.6022514071294559,
"repo_name": "openstack/octavia",
"id": "4db1af26968adba5710c0792c2c61b1a2a9b8417",
"size": "3835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "octavia/certificates/common/local.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "60600"
},
{
"name": "Mako",
"bytes": "922"
},
{
"name": "Python",
"bytes": "6651664"
},
{
"name": "Ruby",
"bytes": "531"
},
{
"name": "Shell",
"bytes": "117966"
}
],
"symlink_target": ""
} |
import netaddr
from oslo.utils import excutils
from oslo.utils import importutils
from sqlalchemy.orm import exc
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.rpc.handlers import l3_rpc
from neutron.api.v2 import attributes
from neutron.common import constants as l3_constants
from neutron.common import exceptions as q_exc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.db import common_db_mixin
from neutron.db import extraroute_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_db
from neutron.db import l3_dvr_db
from neutron.db import l3_gwmode_db
from neutron.db import models_v2
from neutron.extensions import l3
from neutron.i18n import _LE
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from vyatta.common import config
from vyatta.common import exceptions as v_exc
from vyatta.common import utils as vyatta_utils
from vyatta.vrouter import driver as vrouter_driver
LOG = logging.getLogger(__name__)
class VyattaVRouterMixin(common_db_mixin.CommonDbMixin,
                         extraroute_db.ExtraRoute_db_mixin,
                         l3_dvr_db.L3_NAT_with_dvr_db_mixin,
                         l3_gwmode_db.L3_NAT_db_mixin,
                         l3_agentschedulers_db.L3AgentSchedulerDbMixin):
    """Brocade Neutron L3 Plugin for Vyatta vRouter.

    Supports CRUD operations on vRouter, add/remove interfaces from vRouter
    and floating IPs for VMs. It performs vRouter VM lifecycle management by
    calling Nova APIs during the Create and Delete Router calls.
    Once the vRouter VM is up, L3 plugin uses REST API to perform the
    configurations. L3 plugin supports add/remove router interfaces by
    attaching the neutron ports to vRouter VM using Nova API.

    RPC notifications will be used by the firewall agent that is coupled
    with l3-agent. This is needed for our firewall plugin.
    """

    # Retry budget when attaching a Neutron port to the vRouter VM
    # (consumers are outside this excerpt — confirm usage there).
    ATTACH_PORT_RETRY_LIMIT = 5
    ATTACH_PORT_RETRY_DELAY = 5

    # Neutron API extensions this plugin advertises.
    supported_extension_aliases = [
        "router", "ext-gw-mode", "extraroute",
        l3_constants.L3_AGENT_SCHEDULER_EXT_ALIAS]
    def __init__(self):
        """Wires up RPC, the vRouter driver and the L3 agent scheduler."""
        self.setup_rpc()
        self.driver = vrouter_driver.VyattaVRouterDriver()
        # The scheduler implementation is configurable via the
        # router_scheduler_driver option.
        self.router_scheduler = importutils.import_object(
            config.CONF.router_scheduler_driver)
        self.start_periodic_l3_agent_status_check()
    def setup_rpc(self):
        """Creates the RPC consumer and registers the L3 agent notifier."""
        # RPC support
        self.topic = topics.L3PLUGIN
        self.conn = n_rpc.create_connection(new=True)
        self.agent_notifiers.update(
            {l3_constants.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()})
        self.endpoints = [_VyattaL3RPCEndpoint()]
        # Non-fanout: requests on the L3 plugin topic are load-balanced
        # across consumers rather than broadcast.
        self.conn.create_consumer(self.topic, self.endpoints,
                                  fanout=False)
        self.conn.consume_in_threads()
    def get_plugin_type(self):
        """Returns: the service plugin type constant (L3 router/NAT)."""
        return constants.L3_ROUTER_NAT
    def get_plugin_description(self):
        """Returns string description of the plugin, shown in API listings."""
        return ("Brocade Vyatta Router Service Plugin for basic L3 forwarding "
                "between (L2) Neutron networks and access to external "
                "networks via a NAT gateway.")
    def create_router(self, context, router):
        """Creates the vRouter VM using vrouter_driver.

        If we encounter vRouter VM creation failure or connectivity failure
        vrouter_driver will handle the appropriate exceptions and delete
        the vRouter VM.
        """
        LOG.debug("Vyatta vRouter Plugin::Create router: %s", router)
        r = router['router']
        # Boot the vRouter VM first; its id doubles as the Neutron router id.
        router_id = self.driver.create_router(context)
        if router_id is None:
            raise q_exc.BadRequest(
                resource='router',
                msg=_('Vyatta vRouter creation failed'))
        # Gateway info is applied only after the DB row exists (see below).
        gw_info = r.pop(l3.EXTERNAL_GW_INFO, attributes.ATTR_NOT_SPECIFIED)
        tenant_id = self._get_tenant_id_for_create(context, r)
        with context.session.begin(subtransactions=True):
            # noinspection PyArgumentList
            router_db = l3_db.Router(id=router_id,
                                     tenant_id=tenant_id,
                                     name=r['name'],
                                     admin_state_up=r['admin_state_up'],
                                     status="ACTIVE")
            context.session.add(router_db)
            self._process_extra_attr_router_create(context, router_db, router)
        router_dict = self._make_router_dict(router_db)
        try:
            self.driver.init_router(context, router_dict)
        except (v_exc.InvalidVRouterInstance,
                v_exc.InvalidInstanceConfiguration,
                v_exc.VRouterConnectFailure,
                v_exc.VRouterOperationError,
                Exception):
            # If the VM can't be initialized, remove the DB row; the
            # original exception is re-raised for the caller.
            with excutils.save_and_reraise_exception():
                with context.session.begin(subtransactions=True):
                    context.session.delete(router_db)
        if gw_info != attributes.ATTR_NOT_SPECIFIED:
            self._update_router_gw_info(context, router_db['id'], gw_info)
            router_dict[l3.EXTERNAL_GW_INFO] = gw_info
        # Rebuilt from router_db so the result reflects the gateway update.
        return self._make_router_dict(router_db)
def update_router(self, context, router_id, router):
LOG.debug("Vyatta vRouter Plugin::Update router: %s", router)
r = router['router']
gw_info = r.pop(l3.EXTERNAL_GW_INFO, attributes.ATTR_NOT_SPECIFIED)
if gw_info != attributes.ATTR_NOT_SPECIFIED:
self._update_router_gw_info(context, router_id, gw_info)
return super(VyattaVRouterMixin, self).update_router(
context, router_id, router)
    def delete_router(self, context, router_id):
        """Deletes the router DB row, its gateway port, and the vRouter VM."""
        LOG.debug("Vyatta vRouter Plugin::Delete router: %s", router_id)
        with context.session.begin(subtransactions=True):
            router = self._get_router(context, router_id)
            self._ensure_router_not_in_use(context, router_id)
            # delete any gw port
            device_filter = {
                'device_id': [router_id],
                'device_owner': [l3_constants.DEVICE_OWNER_ROUTER_GW]
            }
            ports = self._core_plugin.get_ports(context.elevated(),
                                                filters=device_filter)
            if ports:
                port = ports[0]
                # Detach the gateway port from the router row before
                # removing the port itself.
                router.gw_port = None
                context.session.add(router)
                self._delete_router_port(context, router_id, port)
            context.session.delete(router)
        # VM teardown and agent notification happen outside the transaction.
        self.driver.delete_router(context, router_id)
        self.l3_rpc_notifier.router_deleted(context, router_id)
    def add_router_interface(self, context, router_id, interface_info):
        """Attach an internal interface (by port or by subnet) to a router.

        Mirrors the upstream l3_db logic, but re-owns the port to the
        vRouter service tenant (``config.VROUTER.tenant_id``) while the
        interface is attached.

        :param interface_info: dict with either ``port_id`` or ``subnet_id``
        :returns: router-interface info dict
        """
        LOG.debug("Vyatta vRouter Plugin::Add Router Interface. "
                  "router: %s; interface: %s", router_id, interface_info)
        router = self._get_router(context, router_id)
        self._validate_interface_info(interface_info)
        port_tenant_id = None
        if 'port_id' in interface_info:
            # make sure port update is committed
            with context.session.begin(subtransactions=True):
                if 'subnet_id' in interface_info:
                    msg = _("Cannot specify both subnet-id and port-id")
                    raise q_exc.BadRequest(resource='router', msg=msg)
                port = self._core_plugin._get_port(context.elevated(),
                                                   interface_info['port_id'])
                if port['device_id']:
                    raise q_exc.PortInUse(net_id=port['network_id'],
                                          port_id=port['id'],
                                          device_id=port['device_id'])
                fixed_ips = [ip for ip in port['fixed_ips']]
                if len(fixed_ips) != 1:
                    msg = _('Router port must have exactly one fixed IP')
                    raise q_exc.BadRequest(resource='router', msg=msg)
                subnet_id = fixed_ips[0]['subnet_id']
                subnet = self._core_plugin._get_subnet(context.elevated(),
                                                       subnet_id)
                self._check_for_dup_router_subnet(context, router,
                                                  port['network_id'],
                                                  subnet['id'],
                                                  subnet['cidr'])
                # Remember the caller's tenant so it can be restored if the
                # attach fails, then re-own the port to the service tenant.
                port_tenant_id = port['tenant_id']
                port = self._core_plugin.update_port(
                    context.elevated(), port['id'], {'port': {
                        'tenant_id': config.VROUTER.tenant_id,
                    }})
            port_created = False
        elif 'subnet_id' in interface_info:
            subnet_id = interface_info['subnet_id']
            subnet = self._core_plugin._get_subnet(context.elevated(),
                                                   subnet_id)
            # Ensure the subnet has a gateway
            if not subnet['gateway_ip']:
                msg = _('Subnet for router interface must have a gateway IP')
                raise q_exc.BadRequest(resource='router', msg=msg)
            self._check_for_dup_router_subnet(context, router,
                                              subnet['network_id'],
                                              subnet_id,
                                              subnet['cidr'])
            # The router interface takes the subnet's gateway address.
            fixed_ip = {'ip_address': subnet['gateway_ip'],
                        'subnet_id': subnet['id']}
            port_tenant_id = subnet['tenant_id']
            port = self._core_plugin.create_port(context.elevated(), {
                'port': {
                    'tenant_id': config.VROUTER.tenant_id,
                    'network_id': subnet['network_id'],
                    'fixed_ips': [fixed_ip],
                    'mac_address': attributes.ATTR_NOT_SPECIFIED,
                    'admin_state_up': True,
                    'device_id': '',
                    'device_owner': '',
                    'name': '',
                }
            })
            port_created = True
        try:
            self._attach_port(context, router_id, port)
        except Exception:
            with excutils.save_and_reraise_exception():
                if port_created:
                    try:
                        self._core_plugin.delete_port(context.elevated(),
                                                      port['id'])
                    except Exception:
                        LOG.exception(_LE(
                            'Failed to delete previously created '
                            'port for Vyatta vRouter.'))
                # NOTE(review): this restores the original tenant for a
                # caller-supplied port; when port_created is True the port
                # was just deleted above, so this update presumably raises
                # inside the cleanup path -- confirm intended.
                port = self._core_plugin.update_port(
                    context.elevated(), port['id'], {'port': {
                        'tenant_id': port_tenant_id,
                    }})
        with context.session.begin(subtransactions=True):
            router_port = l3_db.RouterPort(
                port_id=port['id'],
                router_id=router.id,
                port_type=port['device_owner']
            )
            context.session.add(router_port)
        router_interface_info = self._make_router_interface_info(
            router_id, port['tenant_id'], port['id'],
            port['fixed_ips'][0]['subnet_id'])
        self.notify_router_interface_action(
            context, router_interface_info, 'add')
        return router_interface_info
    def remove_router_interface(self, context, router_id, interface_info):
        """Detach an internal interface (by port or by subnet) from a router.

        :param interface_info: dict with ``port_id`` and/or ``subnet_id``
        :returns: router-interface info dict
        """
        LOG.debug("Vyatta vRouter Plugin::Remove Router Interface. "
                  "router: %s; interface_info: %s", router_id, interface_info)
        if not interface_info:
            msg = _("Either subnet_id or port_id must be specified")
            raise q_exc.BadRequest(resource='router', msg=msg)
        if 'port_id' in interface_info:
            port_id = interface_info['port_id']
            port_db = self._core_plugin._get_port(context.elevated(), port_id)
            # The port must actually be a router interface of this router.
            if not (port_db['device_owner'] ==
                    l3_constants.DEVICE_OWNER_ROUTER_INTF and
                    port_db['device_id'] == router_id):
                raise l3.RouterInterfaceNotFound(router_id=router_id,
                                                 port_id=port_id)
            if 'subnet_id' in interface_info:
                # When both keys are supplied they must agree.
                port_subnet_id = port_db['fixed_ips'][0]['subnet_id']
                if port_subnet_id != interface_info['subnet_id']:
                    raise q_exc.SubnetMismatchForPort(
                        port_id=port_id,
                        subnet_id=interface_info['subnet_id'])
            subnet_id = port_db['fixed_ips'][0]['subnet_id']
            subnet = self._core_plugin._get_subnet(context.elevated(),
                                                   subnet_id)
            self._confirm_router_interface_not_in_use(
                context, router_id, subnet_id)
            port = port_db
        elif 'subnet_id' in interface_info:
            subnet_id = interface_info['subnet_id']
            self._confirm_router_interface_not_in_use(context, router_id,
                                                      subnet_id)
            subnet = self._core_plugin._get_subnet(context.elevated(),
                                                   subnet_id)
            found = False
            # Locate the router port carrying an address on this subnet.
            try:
                rport_qry = context.session.query(models_v2.Port)
                ports = rport_qry.filter_by(
                    device_id=router_id,
                    device_owner=l3_constants.DEVICE_OWNER_ROUTER_INTF,
                    network_id=subnet['network_id'])
                for p in ports:
                    if p['fixed_ips'][0]['subnet_id'] == subnet_id:
                        port = p
                        found = True
                        break
            except exc.NoResultFound:
                pass
            if not found:
                raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id,
                                                          subnet_id=subnet_id)
        # Hand the port back to the vRouter service tenant before deletion.
        port = self._core_plugin.update_port(
            context.elevated(), port['id'], {'port': {
                'tenant_id': config.VROUTER.tenant_id,
            }})
        self._delete_router_port(context, router_id, port)
        router_interface_info = self._make_router_interface_info(
            router_id, subnet['tenant_id'], port['id'], subnet['id'])
        self.notify_router_interface_action(
            context, router_interface_info, 'remove')
        return router_interface_info
def _get_interface_infos(self, context, port):
LOG.debug("Vyatta vRouter Plugin::Get interface infos")
mac_address = port['mac_address']
interface_infos = []
for fip in port['fixed_ips']:
try:
subnet = self._core_plugin._get_subnet(context.elevated(),
fip['subnet_id'])
ipnet = netaddr.IPNetwork(subnet.cidr)
interface_infos.append({
'mac_address': mac_address,
'ip_address': '{0}/{1}'.format(fip['ip_address'],
ipnet.prefixlen),
'gateway_ip': subnet.gateway_ip
})
except q_exc.SubnetNotFound:
pass
return interface_infos
    def _delete_router_port(self, context, router_id, port, external_gw=False):
        """Deconfigure *port* on the vRouter, detach it, and delete it.

        ``external_gw`` is accepted for signature parity with
        ``_attach_port`` but is not used in this body.
        """
        # Get instance, deconfigure interface and detach port from it. To do
        # this need to change port owner back to that instance.
        LOG.debug("Vyatta vRouter Plugin::Delete router port. "
                  "router: %s; port: %s", router_id, port)
        self.driver.deconfigure_interface(
            context, router_id, self._get_interface_infos(context.elevated(),
                                                          port))
        # Clear device_owner so the core plugin allows the delete below.
        self._core_plugin.update_port(context.elevated(), port['id'],
                                      {'port': {'device_owner': '',
                                                'device_id': router_id}})
        self.driver.detach_interface(context, router_id, port['id'])
        self._core_plugin.delete_port(context.elevated(), port['id'])
    def _attach_port(self, context, router_id, port, external_gw=False):
        """Attach *port* to the vRouter and configure it on the instance.

        The configure step is retried (``vyatta_utils.retry``) on
        VRouterOperationError; only after it succeeds is the Neutron port
        marked as owned by this router.
        """
        LOG.debug("Vyatta vRouter Plugin::Attach port. "
                  "router: %s; port: %s", router_id, port)
        # Attach interface
        self.driver.attach_interface(context, router_id, port['id'])

        def configure_gateway_wrapper():
            # Gateway ports get gateway configuration; everything else is
            # configured as a plain router interface.
            if external_gw:
                self.driver.configure_gateway(
                    context, router_id,
                    self._get_interface_infos(context, port))
            else:
                self.driver.configure_interface(
                    context, router_id,
                    self._get_interface_infos(context, port))

        vyatta_utils.retry(
            configure_gateway_wrapper,
            exceptions=(v_exc.VRouterOperationError,),
            limit=self.ATTACH_PORT_RETRY_LIMIT,
            delay=self.ATTACH_PORT_RETRY_DELAY)

        if external_gw:
            device_owner = l3_constants.DEVICE_OWNER_ROUTER_GW
        else:
            device_owner = l3_constants.DEVICE_OWNER_ROUTER_INTF
        # Finally record ownership of the port on the Neutron side.
        self._core_plugin.update_port(context.elevated(), port['id'],
                                      {'port': {'device_owner': device_owner,
                                                'device_id': router_id}})
    def _update_router_gw_info(self, context, router_id, info, router=None):
        """Replace the router's external gateway with the one in *info*.

        Deletes the current gateway port when the target network or the
        requested external IPs changed, then creates the new gateway port.
        """
        LOG.debug("Vyatta vRouter Plugin::Update router gateway info")
        router = router or self._get_router(context, router_id)
        gw_port = router.gw_port
        ext_ips = info.get('external_fixed_ips') if info else []
        network_id = self._validate_gw_info(context, gw_port, info, ext_ips)
        ext_ip_change = self._check_for_external_ip_change(
            context, gw_port, ext_ips)
        self._delete_current_gw_port(context, router_id, router, network_id,
                                     ext_ip_change)
        self._create_gw_port(context, router_id, router, network_id, ext_ips,
                             ext_ip_change)
    def _delete_current_gw_port(self, context, router_id, router, new_network,
                                ext_ip_change):
        """Delete gw port if attached to an old network or IPs changed.

        Refuses to remove a gateway that still backs floating IPs; clears
        the gateway on the vRouter before touching the database.
        """
        port_requires_deletion = (
            router.gw_port and
            (router.gw_port['network_id'] != new_network or ext_ip_change)
        )
        if not port_requires_deletion:
            return
        admin_ctx = context.elevated()
        # A gateway still referenced by floating IPs must not be removed.
        if self.get_floatingips_count(
                admin_ctx, {'router_id': [router_id]}):
            raise l3.RouterExternalGatewayInUseByFloatingIp(
                router_id=router_id, net_id=router.gw_port['network_id'])
        gw_port = router.gw_port
        self.driver.clear_gateway(
            context, router_id,
            self._get_interface_infos(context.elevated(),
                                      gw_port))
        with context.session.begin(subtransactions=True):
            # Drop the router->port reference first so the port delete is
            # not rejected, and expire the cached gw_port row.
            router.gw_port = None
            context.session.add(router)
            context.session.expire(gw_port)
            self._delete_router_port(
                context, router_id, gw_port, external_gw=True)
    def _create_router_gw_port(self, context, router, network_id, ext_ips):
        """Create and attach the external gateway port for *router*.

        Creates the port under the vRouter service tenant, records it as
        the router's gw_port, then attaches it to the vRouter instance.
        On attach failure the DB reference and port are rolled back
        (best-effort) and the original exception is re-raised.
        """
        if ext_ips and len(ext_ips) > 1:
            msg = _("Routers support only 1 external IP")
            raise q_exc.BadRequest(resource='router', msg=msg)
        gw_port = self._core_plugin.create_port(context.elevated(), {
            'port': {
                'tenant_id': config.VROUTER.tenant_id,
                'network_id': network_id,
                'mac_address': attributes.ATTR_NOT_SPECIFIED,
                'fixed_ips': ext_ips or attributes.ATTR_NOT_SPECIFIED,
                'device_owner': '',
                'device_id': '',
                'admin_state_up': True,
                'name': ''
            }})
        if not gw_port['fixed_ips']:
            # No allocatable address on the external network: undo and fail.
            self._core_plugin.delete_port(context.elevated(), gw_port['id'],
                                          l3_port_check=False)
            msg = (_('No IPs available for external network %s') %
                   network_id)
            raise q_exc.BadRequest(resource='router', msg=msg)
        with context.session.begin(subtransactions=True):
            router.gw_port = self._core_plugin._get_port(context.elevated(),
                                                         gw_port['id'])
            router_port = l3_db.RouterPort(
                router_id=router.id,
                port_id=gw_port['id'],
                port_type=l3_constants.DEVICE_OWNER_ROUTER_GW
            )
            context.session.add(router)
            context.session.add(router_port)
        try:
            self._attach_port(context, router['id'], gw_port,
                              external_gw=True)
        except Exception as ex:
            LOG.exception(_LE("Exception while attaching port : %s"), ex)
            with excutils.save_and_reraise_exception():
                # Best-effort rollback of the DB reference and the port.
                try:
                    with context.session.begin(subtransactions=True):
                        router.gw_port = None
                        context.session.add(router)
                    self._core_plugin.delete_port(context.elevated(),
                                                  gw_port['id'])
                except Exception:
                    LOG.exception(_LE('Failed to roll back changes to '
                                      'Vyatta vRouter after external '
                                      'gateway assignment.'))
def _update_extra_routes(self, context, router, routes):
LOG.debug(
'Vyatta vRouter Plugin::update static routes. '
'router_id={0}'.format(router['id']))
routes_old = self._get_extra_routes_by_router_id(
context, router['id'])
super(VyattaVRouterMixin, self)._update_extra_routes(
context, router, routes)
routes_new = self._get_extra_routes_by_router_id(
context, router['id'])
routes_old = self._route_rules_to_set(routes_old)
routes_new = self._route_rules_to_set(routes_new)
self.driver.update_static_routes(
context, router['id'],
tuple(routes_new - routes_old),
tuple(routes_old - routes_new))
@staticmethod
def _route_rules_to_set(rules):
result = set()
for r in rules:
result.add(vyatta_utils.RouteRule(
dest_cidr=r['destination'], next_hop=r['nexthop']))
return result
def create_floatingip(
self, context, floatingip,
initial_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
LOG.debug("Vyatta vRouter Plugin::Create floating ip")
floatingip_dict = super(VyattaVRouterMixin, self).create_floatingip(
context, floatingip,
initial_status=initial_status)
router_id = floatingip_dict['router_id']
if router_id:
self.associate_floatingip(context, router_id, floatingip_dict)
return floatingip_dict
    def associate_floatingip(self, context, router_id, floatingip):
        """Program the floating<->fixed NAT rule on the vRouter and mark
        the floating IP ACTIVE in the database."""
        LOG.debug("Vyatta vRouter Plugin::Associate floating ip")
        fixed_ip = floatingip['fixed_ip_address']
        floating_ip = floatingip['floating_ip_address']
        if router_id:
            self.driver.assign_floating_ip(
                context, router_id, floating_ip, fixed_ip)
        with context.session.begin(subtransactions=True):
            floatingip_db = self._get_floatingip(context, floatingip['id'])
            floatingip_db['status'] = l3_constants.FLOATINGIP_STATUS_ACTIVE
    def update_floatingip(self, context, floatingip_id, floatingip):
        """Update a floating IP's association and re-program vRouter NAT.

        The old association (if any) is torn down after the DB transaction
        commits, then the new router (if any) is programmed.
        """
        LOG.debug("Vyatta vRouter Plugin::Update floating ip")
        fip = floatingip['floatingip']
        with context.session.begin(subtransactions=True):
            floatingip_db = self._get_floatingip(context, floatingip_id)
            # Snapshot the pre-update state so the old association can be
            # removed from its vRouter afterwards.
            old_floatingip = self._make_floatingip_dict(floatingip_db)
            fip['tenant_id'] = floatingip_db['tenant_id']
            fip['id'] = floatingip_id
            fip_port_id = floatingip_db['floating_port_id']
            before_router_id = floatingip_db['router_id']
            self._update_fip_assoc(context, fip, floatingip_db,
                                   self._core_plugin.get_port(
                                       context.elevated(), fip_port_id))
        if before_router_id:
            self.disassociate_floatingip(
                context, before_router_id, old_floatingip)
        router_id = floatingip_db['router_id']
        if router_id:
            self.associate_floatingip(context, router_id, floatingip_db)
        return self._make_floatingip_dict(floatingip_db)
def delete_floatingip(self, context, floatingip_id):
LOG.debug("Vyatta vRouter Plugin::Delete floating ip: %s",
floatingip_id)
floatingip_dict = self._get_floatingip(context, floatingip_id)
router_id = floatingip_dict['router_id']
if router_id:
self.disassociate_floatingip(context, router_id, floatingip_dict)
super(VyattaVRouterMixin, self).delete_floatingip(
context, floatingip_id)
    def disassociate_floatingip(self, context, router_id, floatingip):
        """Remove the floating<->fixed NAT rule from the vRouter and mark
        the floating IP DOWN in the database."""
        LOG.debug("Vyatta vRouter Plugin::Disassociate floating ip."
                  "router: %s; floating_ip: %s", router_id, floatingip)
        fixed_ip = floatingip['fixed_ip_address']
        floating_ip = floatingip['floating_ip_address']
        if router_id:
            self.driver.unassign_floating_ip(
                context, router_id, floating_ip, fixed_ip)
        with context.session.begin(subtransactions=True):
            floatingip_db = self._get_floatingip(context, floatingip['id'])
            floatingip_db['status'] = l3_constants.FLOATINGIP_STATUS_DOWN
    def disassociate_floatingips(self, context, port_id, do_notify=True):
        """Detach every floating IP bound to *port_id*, removing NAT rules
        from each affected vRouter before delegating to the base class."""
        LOG.debug("Vyatta vRouter Plugin::Disassociate floating ips."
                  "port_id: %s", port_id)
        with context.session.begin(subtransactions=True):
            fip_qry = context.session.query(l3_db.FloatingIP)
            floating_ips = fip_qry.filter_by(fixed_port_id=port_id)
            for floating_ip in floating_ips:
                self.disassociate_floatingip(
                    context, floating_ip['router_id'], floating_ip)
        return super(VyattaVRouterMixin, self).disassociate_floatingips(
            context, port_id, do_notify)
class _VyattaL3RPCEndpoint(l3_rpc.L3RpcCallback):
    """L3 RPC endpoint that annotates synced routers with the management
    IP of their backing vRouter instance."""

    def sync_routers(self, context, **kwargs):
        """Return synced routers, each extended with a ``_vyatta`` key
        holding the vRouter's management-network IP address.

        :raises CorruptedSystemError: when a router has zero or multiple
            ports on the management network, or a management port without
            an IP address.
        """
        routers_list = super(_VyattaL3RPCEndpoint, self).sync_routers(
            context, **kwargs)
        if not routers_list:
            return routers_list
        routers_by_id = dict((x['id'], x) for x in routers_list)
        # All management-network ports belonging to the synced routers.
        query = context.session.query(models_v2.Port)
        query = query.filter(models_v2.Port.network_id
                             == config.VROUTER.management_network_id)
        query = query.filter(models_v2.Port.device_id.in_(routers_by_id))
        need_processed = set(routers_by_id)
        for port in query:
            router_id = port['device_id']
            try:
                need_processed.remove(router_id)
            except KeyError:
                # Seen twice: router has two management-network ports.
                raise v_exc.CorruptedSystemError(
                    description=(
                        'router {0} contain multiple interface joined to '
                        'management network').format(router_id))
            # this statement can't raise KeyError because query condition
            router = routers_by_id[router_id]
            try:
                ip = port['fixed_ips']
                ip = ip[0]
                ip = ip['ip_address']
            except (IndexError, KeyError):
                raise v_exc.CorruptedSystemError(
                    description=(
                        'vyatta vrouter id={0} management interface have no '
                        'ip address').format(router_id))
            router['_vyatta'] = {
                'management_ip_address': ip}
        if need_processed:
            # Whatever is left had no management port at all.
            raise v_exc.CorruptedSystemError(
                description=(
                    'vyatta vrouters not linked to management network: '
                    '{0}').format(', '.join(sorted(need_processed))))
        return routers_list
| {
"content_hash": "cb61202df0d25e5d9e7b6bdb92c110b5",
"timestamp": "",
"source": "github",
"line_count": 674,
"max_line_length": 79,
"avg_line_length": 42.954005934718104,
"alnum_prop": 0.5472004421263514,
"repo_name": "Brocade-OpenSource/vrouter-plugins",
"id": "38f5f44f9b061ddd88a53124f40a38897197e5a2",
"size": "29602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vyatta/vrouter/neutron_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "129957"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.cosmosdb import CosmosDBManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-cosmosdb
# USAGE
python cosmos_db_managed_cassandra_data_center_list.py
    Before running the sample, please set the values of the client ID, tenant ID
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """List all Managed Cassandra data centers in the given cluster."""
    client = CosmosDBManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="subid",
    )

    data_centers = client.cassandra_data_centers.list(
        resource_group_name="cassandra-prod-rg",
        cluster_name="cassandra-prod",
    )
    for data_center in data_centers:
        print(data_center)


# x-ms-original-file: specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2022-08-15-preview/examples/CosmosDBManagedCassandraDataCenterList.json
if __name__ == "__main__":
    main()
| {
"content_hash": "85a093d0cc4ae0778257d1350d440698",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 163,
"avg_line_length": 34.14705882352941,
"alnum_prop": 0.7338501291989664,
"repo_name": "Azure/azure-sdk-for-python",
"id": "cc2be45795f4672b69832616b4ed5d5b7b11ebbc",
"size": "1629",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/cosmos/azure-mgmt-cosmosdb/generated_samples/cosmos_db_managed_cassandra_data_center_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from dbapi import Tweet
import json
import twitter
import argparse
def tweet(twit):
    """Post the top queued tweet via *twit*.

    Returns a ``(response, tweet)`` pair, or ``None`` when the queue is
    empty.
    """
    queued = Tweet.top()
    if not queued:
        return
    response = twit.statuses.update(status=queued.content)
    return (response, queued)
def url_with_endpoint(end_point):
    """Return the full API URL for the given endpoint path."""
    return "{0}{1}".format(base_url(), end_point)
def base_url():
    """Root URL of the Twitter REST API (version 1.1)."""
    return "https://api.twitter.com/1.1/"
def oauth_credentials():
    """Load OAuth credentials from credentials.json (first line is JSON)."""
    with open("credentials.json") as credentials_file:
        return json.loads(credentials_file.readline())
def authenticate():
    """Build an authenticated Twitter client from stored credentials."""
    creds = oauth_credentials()
    auth = twitter.OAuth(creds["access_key"], creds["access_secret"],
                         creds["consumer_key"], creds["consumer_secret"])
    return twitter.Twitter(auth=auth)
if __name__ == "__main__":
    # CLI entry point: post the top queued tweet, or with --test just
    # print it without posting.
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--test",
                        help="Don't tweet and print to standard out.",
                        action="store_true")
    args = parser.parse_args()
    if Tweet.count() == 0:
        print("No tweets found.")
        exit(0)
    if args.test:
        print(Tweet.top().content)
        exit(0)
    twitter_client = authenticate()
    # NOTE(review): tweet() returns None when the queue is empty; the
    # count() guard above makes that unlikely, but a concurrent pop would
    # make this unpacking raise TypeError -- confirm acceptable.
    response, tweeted = tweet(twitter_client)
    if response:
        # Only remove the tweet from the queue once it was sent.
        Tweet.pop()
        print("Tweeted: \"", tweeted.content, "\"", sep="")
    else:
        print("Could not send tweet.")
| {
"content_hash": "3323fe2c138f256ef404af95097ff46c",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 70,
"avg_line_length": 26.864406779661017,
"alnum_prop": 0.6182965299684543,
"repo_name": "harlanhaskins/Twitter-Queue-Bot",
"id": "538e3b7db4e0c612439738f8b508b035560433a1",
"size": "1607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/tweet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "557"
},
{
"name": "JavaScript",
"bytes": "2978"
},
{
"name": "Python",
"bytes": "8079"
}
],
"symlink_target": ""
} |
import os.path
import shutil
import tempfile
import yaml
from synapse.config.homeserver import HomeServerConfig
from tests import unittest
class ConfigLoadingTestCase(unittest.TestCase):
    """Tests for loading and generating the Synapse homeserver config.

    Fix: removed a leftover debug ``print self.dir`` from setUp (it
    spammed test output and was the only Python-2-only statement here).
    """

    def setUp(self):
        # Fresh scratch directory per test; holds the generated config file.
        self.dir = tempfile.mkdtemp()
        self.file = os.path.join(self.dir, "homeserver.yaml")

    def tearDown(self):
        shutil.rmtree(self.dir)

    def test_load_fails_if_server_name_missing(self):
        """Both load paths must reject a config without server_name."""
        self.generate_config_and_remove_lines_containing("server_name")
        with self.assertRaises(Exception):
            HomeServerConfig.load_config("", ["-c", self.file])
        with self.assertRaises(Exception):
            HomeServerConfig.load_or_generate_config("", ["-c", self.file])

    def test_generates_and_loads_macaroon_secret_key(self):
        """--generate-config must emit a usable macaroon secret key."""
        self.generate_config()

        with open(self.file, "r") as f:
            raw = yaml.load(f)
        self.assertIn("macaroon_secret_key", raw)

        config = HomeServerConfig.load_config("", ["-c", self.file])
        self.assertTrue(
            hasattr(config, "macaroon_secret_key"),
            "Want config to have attr macaroon_secret_key"
        )
        if len(config.macaroon_secret_key) < 5:
            self.fail(
                "Want macaroon secret key to be string of at least length 5,"
                "was: %r" % (config.macaroon_secret_key,)
            )

        config = HomeServerConfig.load_or_generate_config("", ["-c", self.file])
        self.assertTrue(
            hasattr(config, "macaroon_secret_key"),
            "Want config to have attr macaroon_secret_key"
        )
        if len(config.macaroon_secret_key) < 5:
            self.fail(
                "Want macaroon secret key to be string of at least length 5,"
                "was: %r" % (config.macaroon_secret_key,)
            )

    def test_load_succeeds_if_macaroon_secret_key_missing(self):
        """A missing macaroon key is back-filled deterministically."""
        self.generate_config_and_remove_lines_containing("macaroon")
        config1 = HomeServerConfig.load_config("", ["-c", self.file])
        config2 = HomeServerConfig.load_config("", ["-c", self.file])
        config3 = HomeServerConfig.load_or_generate_config("", ["-c", self.file])
        self.assertEqual(config1.macaroon_secret_key, config2.macaroon_secret_key)
        self.assertEqual(config1.macaroon_secret_key, config3.macaroon_secret_key)

    def test_disable_registration(self):
        """disable_registration overrides enable_registration; the CLI
        flag overrides both."""
        self.generate_config()
        self.add_lines_to_config([
            "enable_registration: true",
            "disable_registration: true",
        ])
        # Check that disable_registration clobbers enable_registration.
        config = HomeServerConfig.load_config("", ["-c", self.file])
        self.assertFalse(config.enable_registration)

        config = HomeServerConfig.load_or_generate_config("", ["-c", self.file])
        self.assertFalse(config.enable_registration)

        # Check that either config value is clobbered by the command line.
        config = HomeServerConfig.load_or_generate_config("", [
            "-c", self.file, "--enable-registration"
        ])
        self.assertTrue(config.enable_registration)

    def generate_config(self):
        """Write a default homeserver.yaml into the scratch directory."""
        HomeServerConfig.load_or_generate_config("", [
            "--generate-config",
            "-c", self.file,
            "--report-stats=yes",
            "-H", "lemurs.win"
        ])

    def generate_config_and_remove_lines_containing(self, needle):
        """Generate a config, then strip every line mentioning *needle*."""
        self.generate_config()
        with open(self.file, "r") as f:
            contents = f.readlines()
        contents = [l for l in contents if needle not in l]
        with open(self.file, "w") as f:
            f.write("".join(contents))

    def add_lines_to_config(self, lines):
        """Append raw YAML *lines* to the generated config file."""
        with open(self.file, "a") as f:
            for line in lines:
                f.write(line + "\n")
| {
"content_hash": "468ccbb41cf50be39807f5f6cbff6b0c",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 82,
"avg_line_length": 37.51456310679612,
"alnum_prop": 0.6030020703933747,
"repo_name": "TribeMedia/synapse",
"id": "161a87d7e33f5990241f03044793ceb2d75a61d4",
"size": "4466",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/config/test_load.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4376"
},
{
"name": "HTML",
"bytes": "9046"
},
{
"name": "JavaScript",
"bytes": "176441"
},
{
"name": "Perl",
"bytes": "31852"
},
{
"name": "Python",
"bytes": "2748398"
},
{
"name": "Shell",
"bytes": "7827"
}
],
"symlink_target": ""
} |
from local import *

# Test-only overrides layered on top of the local settings.

# In-memory SQLite keeps the test database fast and throwaway.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
        'USER': '',
    },
}

# SHA1 hashing is far cheaper than the default; acceptable for tests only.
PASSWORD_HASHERS = (
    'django.contrib.auth.hashers.SHA1PasswordHasher',
)

# Plain storage avoids requiring collectstatic/manifest during tests.
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
| {
"content_hash": "dda150657682fc6f42b6111979cdb983",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 77,
"avg_line_length": 20.733333333333334,
"alnum_prop": 0.6366559485530546,
"repo_name": "lamby/musicdb",
"id": "087add2fd8c02029f225335338c0c72d0c003326",
"size": "311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "musicdb/settings/roles/test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1015"
},
{
"name": "HTML",
"bytes": "41635"
},
{
"name": "JavaScript",
"bytes": "21387"
},
{
"name": "Makefile",
"bytes": "420"
},
{
"name": "Python",
"bytes": "109033"
},
{
"name": "Shell",
"bytes": "1902"
}
],
"symlink_target": ""
} |
import pandas as pd
import pytest
# noinspection PyUnresolvedReferences
from contrib.experimental.great_expectations_experimental.expectations.expect_queried_column_value_frequency_to_meet_threshold import (
ExpectQueriedColumnValueFrequencyToMeetThreshold,
)
from great_expectations.core.batch import BatchRequest, RuntimeBatchRequest
from great_expectations.data_context import DataContext
from great_expectations.self_check.util import build_spark_validator_with_data
from great_expectations.validator.validator import (
ExpectationValidationResult,
Validator,
)
# Runtime batch: the query itself defines the batch (first 100 Titanic rows).
sqlite_runtime_batch_request: RuntimeBatchRequest = RuntimeBatchRequest(
    datasource_name="my_sqlite_db_datasource",
    data_connector_name="default_runtime_data_connector_name",
    data_asset_name="titanic",
    runtime_parameters={"query": "SELECT * FROM titanic LIMIT 100"},
    batch_identifiers={"default_identifier_name": "test_identifier"},
    batch_spec_passthrough={"create_temp_table": False},
)

# Inferred batch: the whole ``titanic`` table.
sqlite_batch_request: BatchRequest = BatchRequest(
    datasource_name="my_sqlite_db_datasource",
    data_connector_name="default_inferred_data_connector_name",
    data_asset_name="titanic",
    batch_spec_passthrough={"create_temp_table": False},
)
@pytest.mark.parametrize(
    "batch_request,success,observed,row_condition,warns",
    [
        (sqlite_runtime_batch_request, True, 0.54, None, False),
        (sqlite_batch_request, True, 0.6481340441736482, None, False),
        (sqlite_batch_request, False, 0.4791666666666667, 'col("Age")<18', True),
        (sqlite_runtime_batch_request, True, 0.5, 'col("Age")>17', True),
    ],
)
@pytest.mark.slow  # 3.02s
def test_expect_queried_column_value_frequency_to_meet_threshold_sqlite(
    batch_request,
    success,
    observed,
    row_condition,
    warns,
    titanic_v013_multi_datasource_pandas_and_sqlalchemy_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """Validate success flag and observed frequency against SQLite batches.

    Refactor: the identical expectation call was duplicated across the
    ``warns`` branches; a nullcontext collapses them into one call site.
    """
    import contextlib

    context: DataContext = titanic_v013_multi_datasource_pandas_and_sqlalchemy_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    validator: Validator = context.get_validator(batch_request=batch_request)

    # A row_condition triggers a UserWarning from the experimental parser.
    warning_ctx = pytest.warns(UserWarning) if warns else contextlib.nullcontext()
    with warning_ctx:
        result: ExpectationValidationResult = (
            validator.expect_queried_column_value_frequency_to_meet_threshold(
                column="Sex",
                value="male",
                threshold=0.5,
                row_condition=row_condition,
                condition_parser="great_expectations__experimental__",
            )
        )

    assert (
        result["success"] == success and result["result"]["observed_value"] == observed
    )
@pytest.mark.parametrize(
    "batch_request,query,success,observed,row_condition,warns",
    [
        (
            sqlite_batch_request,
            "SELECT {col}, CAST(COUNT({col}) AS float) / (SELECT COUNT({col}) FROM titanic) FROM titanic GROUP BY {col}",
            True,
            0.6481340441736482,
            None,
            True,
        ),
        (
            sqlite_runtime_batch_request,
            "SELECT {col}, CAST(COUNT({col}) AS float) / (SELECT COUNT({col}) FROM titanic) FROM {active_batch} GROUP BY {col}",
            False,
            0.04112718964204113,
            None,
            True,
        ),
        (
            sqlite_batch_request,
            "SELECT {col}, CAST(COUNT({col}) AS float) / (SELECT COUNT(y) FROM wrong) FROM {active_batch} GROUP BY {col}",
            True,
            7.091666666666667,
            None,
            True,
        ),
        (
            sqlite_batch_request,
            "SELECT {col}, CAST(COUNT({col}) AS float) / (SELECT COUNT({col}) FROM titanic) FROM {active_batch} GROUP BY {col}",
            False,
            0.2338156892612338,
            'col("Age")<35',
            True,
        ),
        (
            sqlite_batch_request,
            "SELECT {col}, CAST(COUNT({col}) AS float) / (SELECT COUNT({col}) FROM {active_batch}) / 2 FROM {active_batch} GROUP BY {col}",
            False,
            0.3240670220868241,
            None,
            False,
        ),
    ],
)
@pytest.mark.slow  # 3.92s
def test_expect_queried_column_value_frequency_to_meet_threshold_override_query_sqlite(
    batch_request,
    query,
    success,
    observed,
    row_condition,
    warns,
    titanic_v013_multi_datasource_pandas_and_sqlalchemy_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """Validate custom-query overrides against SQLite batches.

    Refactor: the identical expectation call was duplicated across the
    ``warns`` branches; a nullcontext collapses them into one call site.
    """
    import contextlib

    context: DataContext = titanic_v013_multi_datasource_pandas_and_sqlalchemy_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    validator: Validator = context.get_validator(batch_request=batch_request)

    # Overriding the query (or passing a row_condition) emits a UserWarning.
    warning_ctx = pytest.warns(UserWarning) if warns else contextlib.nullcontext()
    with warning_ctx:
        result: ExpectationValidationResult = (
            validator.expect_queried_column_value_frequency_to_meet_threshold(
                column="Sex",
                value="male",
                threshold=0.5,
                query=query,
                row_condition=row_condition,
                condition_parser="great_expectations__experimental__",
            )
        )

    assert (
        result["success"] == success and result["result"]["observed_value"] == observed
    )
@pytest.mark.parametrize(
    "success,observed,row_condition,warns",
    [
        (True, 0.6481340441736482, None, False),
        (False, 0.4791666666666667, 'col("Age")<18', True),
        (True, 0.6393939393939394, 'col("Age")>17', True),
    ],
)
def test_expect_queried_column_value_frequency_to_meet_threshold_spark(
    success,
    observed,
    row_condition,
    warns,
    spark_session,
    basic_spark_df_execution_engine,
    titanic_df,
):
    """Validate success flag and observed frequency on a Spark validator.

    Refactor: the identical expectation call was duplicated across the
    ``warns`` branches; a nullcontext collapses them into one call site.
    """
    import contextlib

    df: pd.DataFrame = titanic_df
    validator: Validator = build_spark_validator_with_data(df, spark_session)

    # A row_condition triggers a UserWarning from the experimental parser.
    warning_ctx = pytest.warns(UserWarning) if warns else contextlib.nullcontext()
    with warning_ctx:
        result: ExpectationValidationResult = (
            validator.expect_queried_column_value_frequency_to_meet_threshold(
                column="Sex",
                value="male",
                threshold=0.5,
                row_condition=row_condition,
                condition_parser="great_expectations__experimental__",
            )
        )

    assert (
        result["success"] == success and result["result"]["observed_value"] == observed
    )
@pytest.mark.parametrize(
    "query,success,observed,row_condition,warns",
    [
        (
            "SELECT {col}, CAST(COUNT({col}) AS float) / (SELECT COUNT({col}) FROM {active_batch}) / 2 FROM {active_batch} GROUP BY {col}",
            False,
            0.3240670220868241,
            None,
            False,
        ),
        (
            "SELECT {col}, CAST(COUNT({col}) AS float) / (SELECT COUNT({col}) FROM {active_batch}) / 2 FROM {active_batch} GROUP BY {col}",
            False,
            0.3107287449392713,
            'col("Age")<35',
            True,
        ),
    ],
)
def test_expect_queried_column_value_frequency_to_meet_threshold_override_query_spark(
    query,
    success,
    observed,
    row_condition,
    warns,
    spark_session,
    basic_spark_df_execution_engine,
    titanic_df,
):
    """Validate custom-query overrides on a Spark validator.

    Refactor: the identical expectation call was duplicated across the
    ``warns`` branches; a nullcontext collapses them into one call site.
    """
    import contextlib

    df: pd.DataFrame = titanic_df
    validator: Validator = build_spark_validator_with_data(df, spark_session)

    # A row_condition triggers a UserWarning from the experimental parser.
    warning_ctx = pytest.warns(UserWarning) if warns else contextlib.nullcontext()
    with warning_ctx:
        result: ExpectationValidationResult = (
            validator.expect_queried_column_value_frequency_to_meet_threshold(
                column="Sex",
                value="male",
                threshold=0.5,
                query=query,
                row_condition=row_condition,
                condition_parser="great_expectations__experimental__",
            )
        )

    assert (
        result["success"] == success and result["result"]["observed_value"] == observed
    )
def test_expect_queried_column_value_frequency_to_meet_threshold_sqlite_multi_value(
    titanic_v013_multi_datasource_pandas_and_sqlalchemy_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """Multi-value variant on SQLite: a list of values is checked against a
    matching list of per-value thresholds in one expectation call."""
    context: DataContext = titanic_v013_multi_datasource_pandas_and_sqlalchemy_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    validator: Validator = context.get_validator(batch_request=sqlite_batch_request)
    # Using a row_condition here is expected to raise a UserWarning.
    with pytest.warns(UserWarning):
        result: ExpectationValidationResult = (
            validator.expect_queried_column_value_frequency_to_meet_threshold(
                column="Sex",
                value=["male", "female"],
                threshold=[0.6, 0.3],
                row_condition='col("Age")>17',
                condition_parser="great_expectations__experimental__",
            )
        )
    # Observed frequencies come back in the same order as `value`.
    assert result["success"] == True and result["result"]["observed_value"] == [
        0.6393939393939394,
        0.3606060606060606,
    ]
| {
"content_hash": "74665d6f99c144b190b6f379ea4655dd",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 159,
"avg_line_length": 35.888513513513516,
"alnum_prop": 0.5959710063070696,
"repo_name": "great-expectations/great_expectations",
"id": "02fa1fbf3b164ad6d503d3c0af94962fd7e3258e",
"size": "10623",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/expectations/core/test_expect_queried_column_value_frequency_to_meet_threshold.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "23771"
},
{
"name": "Dockerfile",
"bytes": "2388"
},
{
"name": "HTML",
"bytes": "27311"
},
{
"name": "JavaScript",
"bytes": "45960"
},
{
"name": "Jinja",
"bytes": "66650"
},
{
"name": "Jupyter Notebook",
"bytes": "816323"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "Makefile",
"bytes": "657"
},
{
"name": "Python",
"bytes": "15728777"
},
{
"name": "Shell",
"bytes": "2930"
}
],
"symlink_target": ""
} |
'''
Some helper functions to assist in the plotting and wrangling of data
'''
import numpy as np
import matplotlib.pylab as plt
from matplotlib.ticker import MaxNLocator,AutoLocator
almost_black = '#262626'
def is_homogeneous(l):
    ''' Returns True when every entry of l equals its neighbor, i.e. the
    list contains at most one distinct value (trivially True for empty
    or single-element lists).
    '''
    return all(a == b for a, b in zip(l, l[1:]))
def min_member_class_ok(l):
    ''' Checks that the labels in l are usable: every class occurs at
    least twice AND more than one distinct class is present.
    (The previous docstring described get_min_class, not this check.)
    '''
    return (get_min_class(l)>1 and not is_homogeneous(l))
def get_min_class(l):
    ''' Returns the occurrence count of the rarest label in l.

    For an empty list this returns len(l) (i.e. 0), matching the
    original behaviour.  The previous implementation called
    dict.iteritems(), which only exists on Python 2 and raises
    AttributeError on Python 3; it also shadowed the builtin `min`.
    '''
    counts = {}
    for label in l:
        counts[label] = counts.get(label, 0) + 1
    if not counts:
        return len(l)
    return min(counts.values())
def setfont():
    ''' Configure Matplotlib to render all figure text in a large,
    normal-weight serif font.
    '''
    plt.matplotlib.rc(
        'font',
        family='serif',
        weight='normal',
        size=40,
    )
def setup_plots():
    ''' Create and return the empty Precision-Recall figure.

    Applies the shared font settings, closes any figures left over from
    a previous run, and prepares axes limited to [0, 1.05] on both
    dimensions.

    Returns:
        The matplotlib Figure named 'MAP'.
    '''
    setfont()
    # Close any currently open MatPlotLib figures
    plt.close('all')
    PR_fig = plt.figure('MAP', figsize=(15, 8), dpi=80)
    plt.title('Precision-Recall Performance')
    # NOTE: plt.hold(True) was removed in Matplotlib 3.0 and holding is
    # the default behaviour, so the old call is simply dropped.
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim((0, 1.05))
    plt.xlim((0, 1.05))
    return PR_fig
def fix_legend(ax=None,**kwargs):
    '''Applies nice coloring to legend

    Draws the legend on `ax` (current axes if None) with a light grey,
    borderless frame and off-black label text.  Extra keyword arguments
    are forwarded to `ax.legend`.
    '''
    if not ax:
        ax = plt.gca()
    # 248/255 on all three RGB channels -> very light grey.
    light_grey = np.array([float(248)/float(255)]*3)
    legend = ax.legend(frameon=True,fontsize=16,**kwargs)
    ltext = ax.get_legend().get_texts()
    for lt in ltext:
        plt.setp(lt, color = almost_black)
    rect = legend.get_frame()
    rect.set_facecolor(light_grey)
    rect.set_linewidth(0.0)
    # Change the legend label colors to almost black, too
    # NOTE(review): this loop appears to recolor the same text objects as
    # the ltext loop above — looks redundant; confirm before removing.
    texts = legend.texts
    for t in texts:
        t.set_color(almost_black)
def fix_axes(ax=None):
    '''
    Removes the top and right spines and tick marks, caps the y-axis at
    five major ticks (dropping the lowest), and softens the axis label
    colors to off-black.
    '''
    if not ax:
        ax = plt.gca()
    # Hide the top and right axis lines ("spines").
    for side in ('top', 'right'):
        ax.spines[side].set_visible(False)
    # Drop all tick marks on both axes.
    ax.xaxis.set_ticks_position('none')
    ax.yaxis.set_ticks_position('none')
    ax.yaxis.set_major_locator(MaxNLocator(nbins=5, prune='lower'))
    # Soften the label colors.
    for axis in (ax.xaxis, ax.yaxis):
        axis.label.set_color(almost_black)
"content_hash": "b8bbda415cec8cf81f82ca02c6ab268d",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 74,
"avg_line_length": 27.927083333333332,
"alnum_prop": 0.6020141738157404,
"repo_name": "IDEALLab/design_method_recommendation_JMD_2014",
"id": "686d2d5270132f338f0a28dff827aaec42b3b2c2",
"size": "2681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rec_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35751"
}
],
"symlink_target": ""
} |
'''
Created on 13/01/2014
@author: Dani
'''
class FileWriter(object):
    '''
    Static helpers for dumping text or binary data to a file.

    Files are opened via `with` so the handle is always closed, even if
    the write raises (the previous implementation leaked the handle on
    error).
    '''
    def __init__(self):
        '''
        Constructor; the class holds no instance state.
        '''
        pass
    @staticmethod
    def write_text_to_file(text, file_name):
        '''Write `text` to `file_name` in text mode, replacing any
        existing content.'''
        with open(str(file_name), "w") as file_stream:
            file_stream.write(text)
    @staticmethod
    def write_binary_to_file(text, file_name):
        '''Write `text` (a bytes object) to `file_name` in binary mode,
        replacing any existing content.'''
        with open(str(file_name), "wb") as file_stream:
            file_stream.write(text)
"content_hash": "956a1c3c191a19bac610e683cc568f29",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 47,
"avg_line_length": 18.379310344827587,
"alnum_prop": 0.5440900562851783,
"repo_name": "landportal/landbook-importers",
"id": "0ee824011da6276136330954e9df708d034f454e",
"size": "533",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "old-importers/UNDPExtractor/es/weso/util/file_writer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "518503"
},
{
"name": "Shell",
"bytes": "15185"
}
],
"symlink_target": ""
} |
# WSGI entry point: runs Girder's CherryPy application embedded inside an
# external WSGI server instead of CherryPy's own HTTP server.
import cherrypy
from girder.utility import config, server
config.loadConfig() # Read the config files first, so we can override some values
# Disable autoreload and select CherryPy's 'embedded' environment preset,
# which is intended for running under an external container.
cherrypy.config.update({'engine.autoreload.on': False,
                        'environment': 'embedded'})
cherrypy.config['server'].update({'cherrypy_server': False,
                                  'disable_event_daemon': True})
# 'application' is the default callable object for WSGI implementations, see PEP 3333 for more.
server.setup()
application = cherrypy.tree
# Don't bind CherryPy's own HTTP server; only start the engine so plugins
# and background machinery run under the host WSGI server.
cherrypy.server.unsubscribe()
cherrypy.engine.start()
| {
"content_hash": "06b7effc018447bef271f7ae9cc653d5",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 95,
"avg_line_length": 37.93333333333333,
"alnum_prop": 0.6959578207381371,
"repo_name": "data-exp-lab/girder",
"id": "f27b9cf3b48ab0c1236bdc368f41d63a7b20e277",
"size": "1363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "girder/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "42365"
},
{
"name": "CSS",
"bytes": "61237"
},
{
"name": "Dockerfile",
"bytes": "2416"
},
{
"name": "HCL",
"bytes": "1424"
},
{
"name": "HTML",
"bytes": "170299"
},
{
"name": "JavaScript",
"bytes": "1399182"
},
{
"name": "Mako",
"bytes": "8756"
},
{
"name": "Python",
"bytes": "2388013"
},
{
"name": "Roff",
"bytes": "17"
},
{
"name": "Ruby",
"bytes": "10593"
},
{
"name": "Shell",
"bytes": "7661"
}
],
"symlink_target": ""
} |
"""Unit test cases for list.py."""
from typing import Dict, Tuple
from unittest import mock
from click.testing import CliRunner
from feeds.commands.list import list_command
from feeds.tests.fixtures import * # pylint: disable=wildcard-import
from feeds.tests.fixtures import TEMP_EXPORT_CSV_FILE
from feeds.tests.fixtures import TEMP_EXPORT_JSON_FILE
from feeds.tests.fixtures import TEMP_EXPORT_TXT_FILE
from mock_test_utility import MockResponse
runner = CliRunner()
@mock.patch(
    "feeds.feed_schema_utility.chronicle_auth.initialize_http_session"
)
def test_list_no_feeds_found(mock_client: mock.MagicMock,
                             list_empty_feeds_data: Dict[str, str]) -> None:
  """Check for empty list of feeds.
  Args:
    mock_client (mock.MagicMock): Mock object
    list_empty_feeds_data (Dict[str, str]): Test input data
  """
  mock_client.return_value = mock.Mock()
  # side_effect order matters: first response is the (empty) schema fetch,
  # second is the feed list itself.
  mock_client.return_value.request.side_effect = [
      MockResponse(status_code=200, text="""{"feedSourceTypeSchemas": []}"""),
      list_empty_feeds_data
  ]
  # Method Call
  result = runner.invoke(list_command)
  assert result.output == "No feeds found.\n"
@mock.patch(
    "feeds.feed_schema_utility.chronicle_auth.initialize_http_session"
)
def test_list_200(mock_client: mock.MagicMock, get_feed_schema: Dict[str, str],
                  list_feeds_data: Dict[str, str]) -> None:
  """Test case to check response for 200 response code.
  Args:
    mock_client (mock.MagicMock): Mock object
    get_feed_schema (Dict[str, str]): Test input data
    list_feeds_data (Dict[str, str]): Test input data
  """
  mock_client.return_value = mock.Mock()
  # side_effect order: schema fetch first, then the feed list.
  mock_client.return_value.request.side_effect = [
      get_feed_schema, list_feeds_data
  ]
  # Method Call
  result = runner.invoke(list_command)
  # The expected console rendering of one feed; kept verbatim.
  assert """
Feed Details:
  ID: 123
  Display Name: Dummy feed display name
  Source type: Dummy Source Type
  Log type: Dummy LogType
  State: INACTIVE
  Feed Settings:
    Field 1: abc.dummy.com
    Field 2: ID
    Namespace: sample_namespace
    Labels:
      k: v
============================================================
""" in result.output
@mock.patch(
    "feeds.feed_schema_utility.chronicle_auth.initialize_http_session"
)
def test_list_empty_schema(mock_client: mock.MagicMock,
                           list_feeds_data: Dict[str, str]) -> None:
  """Test case to check response for empty schema.
  Args:
    mock_client (mock.MagicMock): Mock object
    list_feeds_data (Dict[str, str]): Test input data
  """
  mock_client.return_value = mock.Mock()
  # Empty schema response but a non-empty feed list.
  mock_client.return_value.request.side_effect = [
      MockResponse(status_code=200, text="""{"feedSourceTypeSchemas": []}"""),
      list_feeds_data
  ]
  # Method Call
  result = runner.invoke(list_command)
  assert "Schema Not Found." in result.output
@mock.patch(
    "feeds.feed_schema_utility.chronicle_auth.initialize_http_session"
)
def test_credential_file_invalid(mock_client: mock.MagicMock) -> None:
  """Test case for checking invalid credential path.
  Args:
    mock_client (mock.MagicMock): Mock object
  """
  mock_client.return_value = mock.Mock()
  # Simulate a missing credential file by raising OSError on first use.
  mock_client.return_value.request.side_effect = OSError(
      "Credential Path not found.")
  # Method Call
  result = runner.invoke(
      list_command,
      ["--credential_file", "dummy.json", "--region", "us"])
  assert "Failed with exception: Credential Path not found." in result.output
@mock.patch(
    "feeds.feed_schema_utility.chronicle_auth.initialize_http_session"
)
def test_list_export_csv(mock_client: mock.MagicMock,
                         get_feed_schema: Dict[str, str],
                         list_feeds_data: Dict[str, str]) -> None:
  """Test case to check feed list details exported in csv format.
  Args:
    mock_client (mock.MagicMock): Mock object
    get_feed_schema (Dict[str, str]): Test input data
    list_feeds_data (Dict[str, str]): Test input data
  """
  mock_client.return_value = mock.Mock()
  mock_client.return_value.request.side_effect = [
      get_feed_schema, list_feeds_data
  ]
  # Method Call
  # --export takes the path without its extension; --file-format adds it.
  result = runner.invoke(
      list_command,
      ["--export", TEMP_EXPORT_CSV_FILE[:-4], "--file-format", "csv"])
  assert "Feed list details exported successfully" in result.output
@mock.patch(
    "feeds.feed_schema_utility.chronicle_auth.initialize_http_session"
)
def test_list_export_txt(mock_client: mock.MagicMock,
                         get_feed_schema: Dict[str, str],
                         list_feeds_data: Dict[str, str]) -> None:
  """Test case to check feed list details exported in txt format.
  Args:
    mock_client (mock.MagicMock): Mock object
    get_feed_schema (Dict[str, str]): Test input data
    list_feeds_data (Dict[str, str]): Test input data
  """
  mock_client.return_value = mock.Mock()
  mock_client.return_value.request.side_effect = [
      get_feed_schema, list_feeds_data
  ]
  # Method Call
  # --export takes the path without its extension; --file-format adds it.
  result = runner.invoke(
      list_command,
      ["--export", TEMP_EXPORT_TXT_FILE[:-4], "--file-format", "txt"])
  assert "Feed list details exported successfully" in result.output
@mock.patch(
    "feeds.feed_schema_utility.chronicle_auth.initialize_http_session"
)
def test_list_error_code(
    mock_client: mock.MagicMock, list_error_feeds_data: Tuple[Dict[str, str],
                                                              str]) -> None:
  """Check that an error response from the feed list API is reported.
  Args:
    mock_client (mock.MagicMock): Mock object
    list_error_feeds_data (Tuple): Test input data
  """
  mock_client.return_value = mock.Mock()
  # Schema fetch succeeds (empty); the feed list call returns an error.
  mock_client.return_value.request.side_effect = [
      MockResponse(status_code=200, text="""{"feedSourceTypeSchemas": []}"""),
      list_error_feeds_data
  ]
  # Method Call
  result = runner.invoke(list_command)
  assert "Failed to find feeds." in result.output
@mock.patch(
    "feeds.feed_schema_utility.chronicle_auth.initialize_http_session"
)
def test_list_key_missing(mock_client: mock.MagicMock,
                          get_feed_schema: Dict[str, str],
                          list_missing_key_feeds_data: Dict[str, str]) -> None:
  """Check that a feed with a missing key still renders its other fields.
  Args:
    mock_client (mock.MagicMock): Mock object
    get_feed_schema (Dict[str, str]): Test input data
    list_missing_key_feeds_data (Dict[str, str]): Test input data
  """
  mock_client.return_value = mock.Mock()
  mock_client.return_value.request.side_effect = [
      get_feed_schema, list_missing_key_feeds_data
  ]
  # Method Call
  result = runner.invoke(list_command)
  assert "Field 1: abc.dummy.com" in result.output
@mock.patch(
    "feeds.feed_schema_utility.chronicle_auth.initialize_http_session"
)
def test_list_export_json(mock_client: mock.MagicMock,
                          get_feed_schema: Dict[str, str],
                          list_feeds_data: Dict[str, str]) -> None:
  """Test case to check feed list details exported in JSON format.
  Args:
    mock_client (mock.MagicMock): Mock object
    get_feed_schema (Dict[str, str]): Test input data
    list_feeds_data (Dict[str, str]): Test input data
  """
  mock_client.return_value = mock.Mock()
  mock_client.return_value.request.side_effect = [
      get_feed_schema, list_feeds_data
  ]
  # Method Call
  # --export takes the path without its ".json" extension.
  result = runner.invoke(
      list_command,
      ["--export", TEMP_EXPORT_JSON_FILE[:-5], "--file-format", "json"])
  assert "Feed list details exported successfully" in result.output
| {
"content_hash": "bc7003f1cabb546251833758c4d587b4",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 79,
"avg_line_length": 31.485106382978724,
"alnum_prop": 0.6609001216380592,
"repo_name": "chronicle/cli",
"id": "a7a2030cc5089f4d713ddce62ae60ee6c68573bf",
"size": "7975",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "feeds/commands/list_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "303761"
}
],
"symlink_target": ""
} |
import argparse
import math
import os.path
import pickle
import re
import sys
import time
from nltk.translate import bleu_score
import numpy
import six
import chainer
from chainer import cuda
import chainer.functions as F
import chainer.links as L
from chainer import reporter
from chainer import training
from chainer.training import extensions
import chainermn
import europal
def cached_call(fname, func, *args):
    """Call ``func(*args)``, memoising the result in the pickle file ``fname``.

    On a cache hit the pickled value is returned and ``func`` is not
    called at all.
    """
    if not os.path.exists(fname):
        # Cache miss: compute, persist, and return the fresh value.
        value = func(*args)
        with open(fname, 'wb') as fh:
            pickle.dump(value, fh)
        return value
    with open(fname, 'rb') as fh:
        return pickle.load(fh)
def read_source(in_dir, cache=None):
    """Build the English-side vocabulary and dataset from ``in_dir``.

    ``cache`` is accepted but unused here; caching is performed by the
    caller via ``cached_call``.
    """
    en_path = os.path.join(in_dir, 'giga-fren.release2.fixed.en')
    # Reserve ids 0 and 1 for <eos> and <unk>.
    source_vocab = ['<eos>', '<unk>'] + europal.count_words(en_path)
    source_data = europal.make_dataset(en_path, source_vocab)
    return source_vocab, source_data
def read_target(in_dir, cache=None):
    """Build the French-side vocabulary and dataset from ``in_dir``.

    Mirrors ``read_source``.  The unused second parameter was previously
    misspelled ``cahce``; renamed for consistency (in this file it is
    only ever passed positionally).  Caching is done by the caller via
    ``cached_call``.
    """
    fr_path = os.path.join(in_dir, 'giga-fren.release2.fixed.fr')
    # Reserve ids 0 and 1 for <eos> and <unk>.
    target_vocab = ['<eos>', '<unk>'] + europal.count_words(fr_path)
    target_data = europal.make_dataset(fr_path, target_vocab)
    return target_vocab, target_data
def sequence_embed(embed, xs):
    """Embed a list of variable-length sequences with one batched call.

    The sequences are concatenated, embedded in a single ``embed`` call,
    and then split back into per-sequence chunks.
    """
    lengths = [len(seq) for seq in xs]
    # Boundaries between consecutive sequences in the flat array.
    boundaries = numpy.cumsum(lengths[:-1])
    flat = embed(F.concat(xs, axis=0))
    return F.split_axis(flat, boundaries, 0, force_tuple=True)
class Seq2seq(chainer.Chain):
    """Encoder-decoder translation model built from NStepLSTM layers.

    ``embed_x``/``encoder`` handle the source side, ``embed_y``/``decoder``
    the target side; ``W`` projects decoder states to target-vocab logits.
    """
    def __init__(self, n_layers, n_source_vocab, n_target_vocab, n_units):
        super(Seq2seq, self).__init__(
            embed_x=L.EmbedID(n_source_vocab, n_units),
            embed_y=L.EmbedID(n_target_vocab, n_units),
            encoder=L.NStepLSTM(n_layers, n_units, n_units, 0.1),
            decoder=L.NStepLSTM(n_layers, n_units, n_units, 0.1),
            W=L.Linear(n_units, n_target_vocab),
        )
        self.n_layers = n_layers
        self.n_units = n_units
    def __call__(self, *inputs):
        """Compute softmax cross-entropy loss for a minibatch.

        The first half of ``inputs`` are source sequences, the second
        half the corresponding target sequences.  Reports 'loss' and
        'perp' via the reporter and returns the loss variable.
        """
        xs = inputs[:len(inputs) // 2]
        ys = inputs[len(inputs) // 2:]
        # Sources are fed to the encoder in reversed order.
        xs = [x[::-1] for x in xs]
        eos = self.xp.zeros(1, self.xp.int32)
        # Decoder input starts with <eos>; decoder target ends with <eos>.
        ys_in = [F.concat([eos, y], axis=0) for y in ys]
        ys_out = [F.concat([y, eos], axis=0) for y in ys]
        # Both xs and ys_in are lists of arrays.
        exs = sequence_embed(self.embed_x, xs)
        eys = sequence_embed(self.embed_y, ys_in)
        batch = len(xs)
        # None represents a zero vector in an encoder.
        hx, cx, _ = self.encoder(None, None, exs)
        _, _, os = self.decoder(hx, cx, eys)
        # It is faster to concatenate data before calculating loss
        # because only one matrix multiplication is called.
        concat_os = F.concat(os, axis=0)
        concat_ys_out = F.concat(ys_out, axis=0)
        loss = F.sum(F.softmax_cross_entropy(
            self.W(concat_os), concat_ys_out, reduce='no')) / batch
        reporter.report({'loss': loss.data}, self)
        n_words = concat_ys_out.shape[0]
        perp = self.xp.exp(loss.data * batch / n_words)
        reporter.report({'perp': perp}, self)
        return loss
    def translate(self, xs, max_length=100):
        """Greedily decode translations for the source sequences ``xs``.

        Returns a list of CPU int arrays, one per input, truncated at the
        first <eos> (id 0) if one is produced within ``max_length`` steps.
        """
        batch = len(xs)
        with chainer.no_backprop_mode():
            with chainer.using_config('train', False):
                xs = [x[::-1] for x in xs]
                exs = sequence_embed(self.embed_x, xs)
                # Initial hidden variable and cell variable
                # zero = self.xp.zeros((self.n_layers, batch, self.n_units), self.xp.float32) # NOQA
                # h, c, _ = self.encoder(zero, zero, exs, train=False) # NOQA
                h, c, _ = self.encoder(None, None, exs)
                # Start every sequence from token id 0 (<eos>).
                ys = self.xp.zeros(batch, self.xp.int32)
                result = []
                for i in range(max_length):
                    eys = self.embed_y(ys)
                    eys = chainer.functions.split_axis(
                        eys, batch, 0, force_tuple=True)
                    h, c, ys = self.decoder(h, c, eys)
                    cys = chainer.functions.concat(ys, axis=0)
                    wy = self.W(cys)
                    # Greedy choice: take the argmax token per sequence.
                    ys = self.xp.argmax(wy.data, axis=1).astype(self.xp.int32)
                    result.append(ys)
        result = cuda.to_cpu(self.xp.stack(result).T)
        # Remove EOS taggs
        outs = []
        for y in result:
            inds = numpy.argwhere(y == 0)
            if len(inds) > 0:
                y = y[:inds[0, 0]]
            outs.append(y)
        return outs
def convert(batch, device):
    """Converter for the updater: move a batch of (source, target) pairs
    to ``device``.

    Returns a tuple of all source arrays followed by all target arrays,
    matching the ``*inputs`` layout expected by ``Seq2seq.__call__``.
    """
    def to_device_batch(batch):
        if device is None:
            return batch
        elif device < 0:
            # CPU target: move each array individually.
            return [chainer.dataset.to_device(device, x) for x in batch]
        else:
            # GPU target: concatenate first so only one transfer happens,
            # then split back into per-sequence arrays on the device.
            xp = cuda.cupy.get_array_module(*batch)
            concat = xp.concatenate(batch, axis=0)
            sections = numpy.cumsum(
                [len(x) for x in batch[:-1]], dtype=numpy.int32)
            concat_dev = chainer.dataset.to_device(device, concat)
            batch_dev = cuda.cupy.split(concat_dev, sections)
            return batch_dev
    return tuple(
        to_device_batch([x for x, _ in batch]) +
        to_device_batch([y for _, y in batch]))
class CalculateBleu(chainer.training.Extension):
    """Trainer extension that reports corpus BLEU on ``test_data``
    under the report key ``key``."""
    # priority = chainer.training.PRIORITY_WRITER
    def __init__(
            self, model, test_data, key, batch=100, device=-1, max_length=100):
        self.model = model
        self.test_data = test_data
        self.key = key
        self.batch = batch
        self.device = device
        self.max_length = max_length
    def __call__(self, trainer):
        # `trainer` is required by the Extension interface but unused here.
        with chainer.no_backprop_mode():
            references = []
            hypotheses = []
            for i in range(0, len(self.test_data), self.batch):
                sources, targets = zip(*self.test_data[i:i + self.batch])
                references.extend([[t.tolist()] for t in targets])
                sources = [
                    chainer.dataset.to_device(self.device, x) for x in sources]
                ys = [y.tolist()
                      for y in self.model.translate(sources, self.max_length)]
                hypotheses.extend(ys)
        # method1 smoothing avoids zero scores for short hypotheses.
        bleu = bleu_score.corpus_bleu(
            references, hypotheses,
            smoothing_function=bleu_score.SmoothingFunction().method1)
        reporter.report({self.key: bleu})
class BleuEvaluator(extensions.Evaluator):
    """Evaluator that computes corpus BLEU for ``model`` on ``test_data``
    and reports it as 'bleu'; optionally synchronises across an MPI
    communicator."""
    def __init__(self, model, test_data, device=-1, batch=100,
                 max_length=100, comm=None):
        super(BleuEvaluator, self).__init__({'main': None}, model)
        self.model = model
        self.test_data = test_data
        self.batch = batch
        self.device = device
        self.max_length = max_length
        self.comm = comm
    def evaluate(self):
        """Translate ``test_data`` in batches, compute BLEU, and return
        the reported observation dict."""
        bt = time.time()
        with chainer.no_backprop_mode():
            references = []
            hypotheses = []
            observation = {}
            with reporter.report_scope(observation):
                for i in range(0, len(self.test_data), self.batch):
                    src, trg = zip(*self.test_data[i:i + self.batch])
                    references.extend([[t.tolist()] for t in trg])
                    src = [chainer.dataset.to_device(self.device, x)
                           for x in src]
                    ys = [y.tolist()
                          for y in self.model.translate(src, self.max_length)]
                    hypotheses.extend(ys)
                bleu = bleu_score.corpus_bleu(
                    references, hypotheses,
                    smoothing_function=bleu_score.SmoothingFunction().method1)
                reporter.report({'bleu': bleu}, self.model)
        et = time.time()
        if self.comm is not None:
            # This evaluator is called via chainermn.MultiNodeEvaluator
            for i in range(0, self.comm.size):
                print('BleuEvaluator::evaluate(): '
                      'took {:.3f} [s]'.format(et - bt))
                sys.stdout.flush()
                self.comm.mpi_comm.Barrier()
        else:
            # This evaluator is called from a conventional
            # Chainer extension
            print('BleuEvaluator(single)::evaluate(): '
                  'took {:.3f} [s]'.format(et - bt))
            sys.stdout.flush()
        return observation
def create_optimizer(opt_arg):
    """Parse a string and get an optimizer.

    The syntax is:
        opt(params...)
    where
        opt := adadelta | adagrad | adam | momentumsgd | nesterovag |
               rmsprop | rmspropgraves | sgd | smorms3
        param := [float | key=val]...

    Raises:
        RuntimeError: if ``opt_arg`` is malformed or names an unknown
            optimizer.
    """
    # Accept any identifier here and let the dictionary lookup decide
    # whether the name is known.  The previous pattern only matched
    # 'adam' and 'sgd', so every other optimizer in names_dict crashed
    # with AttributeError on m.group() before reaching the lookup.
    m = re.match(r'(\w+)\(([^)]*)\)', opt_arg)
    if m is None:
        raise RuntimeError(
            'Cannot parse optimizer argument: \'{}\''.format(opt_arg))
    name = m.group(1).lower()
    args = m.group(2)
    names_dict = {
        'adadelta': chainer.optimizers.AdaDelta,
        'adagrad': chainer.optimizers.AdaGrad,
        'adam': chainer.optimizers.Adam,
        'momentumsgd': chainer.optimizers.MomentumSGD,
        'nesterovag': chainer.optimizers.NesterovAG,
        'rmsprop': chainer.optimizers.RMSprop,
        'rmspropgraves': chainer.optimizers.RMSpropGraves,
        'sgd': chainer.optimizers.SGD,
        'smorms3': chainer.optimizers.SMORMS3,
    }
    try:
        opt = names_dict[name]
    except KeyError:
        raise RuntimeError('Unknown optimizer: \'{}\' in \'{}\''.format(
            name, opt_arg))
    # positional arguments
    pos = []
    # keyword arguments
    kw = {}
    args = args.strip()
    if args:
        for a in re.split(r',\s*', args):
            if a.find('=') >= 0:
                key, val = a.split('=')
                kw[key] = float(val)
            else:
                pos.append(float(a))
    return opt(*pos, **kw)
def _get_num_split(excp):
"""Get the preferrable number of split from a DataSizeError error"""
ps = excp.pickled_size
mx = excp.max_size
return (ps + mx - 1) // mx
def _slices(excp):
"""Get a list of slices that are expected to fit in a single send/recv."""
ds = excp.dataset_size
nsplit = _get_num_split(excp)
size = math.ceil(ds / nsplit)
return [(b, min(e, ds)) for b, e in
((i * size, (i + 1) * size) for i in range(0, nsplit))]
def main():
    """Entry point: train the seq2seq model with ChainerMN.

    Rank 0 reads and filters the WMT data, vocabularies are broadcast to
    all ranks, the dataset is scattered, and a multi-node optimizer and
    BLEU evaluator drive the training loop.
    """
    parser = argparse.ArgumentParser(description='Chainer example: seq2seq')
    parser.add_argument('--batchsize', '-b', type=int, default=64,
                        help='Number of images in each mini-batch')
    parser.add_argument('--bleu', action='store_true', default=False,
                        help='Report BLEU score')
    parser.add_argument('--gpu', '-g', action='store_true',
                        help='Use GPU')
    parser.add_argument('--cache', '-c', default=None,
                        help='Directory to cache pre-processed dataset')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--unit', '-u', type=int, default=1024,
                        help='Number of units')
    parser.add_argument('--communicator', default='hierarchical',
                        help='Type of communicator')
    parser.add_argument('--stop', '-s', type=str, default='15e',
                        help='Stop trigger (ex. "500i", "15e")')
    parser.add_argument('--input', '-i', type=str, default='wmt',
                        help='Input directory')
    parser.add_argument('--optimizer', type=str, default='adam()',
                        help='Optimizer and its argument')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    args = parser.parse_args()
    # Prepare ChainerMN communicator
    if args.gpu:
        comm = chainermn.create_communicator('hierarchical')
        dev = comm.intra_rank
    else:
        comm = chainermn.create_communicator('naive')
        dev = -1
    if comm.rank == 0:
        print('==========================================')
        print('Num process (COMM_WORLD): {}'.format(comm.size))
        if args.gpu:
            print('Using GPUs')
        print('Using {} communicator'.format(args.communicator))
        print('Num unit: {}'.format(args.unit))
        print('Num Minibatch-size: {}'.format(args.batchsize))
        print('==========================================')
    # Rank 0 prepares all data
    if comm.rank == 0:
        if args.cache and not os.path.exists(args.cache):
            os.mkdir(args.cache)
        # Read source data
        bt = time.time()
        if args.cache:
            cache_file = os.path.join(args.cache, 'source.pickle')
            source_vocab, source_data = cached_call(cache_file,
                                                    read_source,
                                                    args.input, args.cache)
        else:
            source_vocab, source_data = read_source(args.input, args.cache)
        et = time.time()
        print('RD source done. {:.3f} [s]'.format(et - bt))
        sys.stdout.flush()
        # Read target data
        bt = time.time()
        if args.cache:
            cache_file = os.path.join(args.cache, 'target.pickle')
            target_vocab, target_data = cached_call(cache_file,
                                                    read_target,
                                                    args.input, args.cache)
        else:
            target_vocab, target_data = read_target(args.input, args.cache)
        et = time.time()
        print('RD target done. {:.3f} [s]'.format(et - bt))
        sys.stdout.flush()
        print('Original training data size: %d' % len(source_data))
        # Keep only pairs where both sides are non-empty and < 50 tokens.
        train_data = [(s, t)
                      for s, t in six.moves.zip(source_data, target_data)
                      if 0 < len(s) < 50 and 0 < len(t) < 50]
        print('Filtered training data size: %d' % len(train_data))
        en_path = os.path.join(args.input, 'dev', 'newstest2013.en')
        source_data = europal.make_dataset(en_path, source_vocab)
        fr_path = os.path.join(args.input, 'dev', 'newstest2013.fr')
        target_data = europal.make_dataset(fr_path, target_vocab)
        assert(len(source_data) == len(target_data))
        test_data = [(s, t) for s, t
                     in six.moves.zip(source_data, target_data)
                     if 0 < len(s) and 0 < len(t)]
        source_ids = {word: index
                      for index, word in enumerate(source_vocab)}
        target_ids = {word: index
                      for index, word in enumerate(target_vocab)}
    else:
        # target_data, source_data = None, None
        train_data, test_data = None, None
        target_ids, source_ids = None, None
    # Print GPU id
    for i in range(0, comm.size):
        if comm.rank == i:
            print('Rank {} GPU: {}'.format(comm.rank, dev))
        sys.stdout.flush()
        comm.mpi_comm.Barrier()
    # broadcast id- > word dictionary
    source_ids = comm.bcast_obj(source_ids, root=0)
    target_ids = comm.bcast_obj(target_ids, root=0)
    target_words = {i: w for w, i in target_ids.items()}
    source_words = {i: w for w, i in source_ids.items()}
    if comm.rank == 0:
        print('target_words : {}'.format(len(target_words)))
        print('source_words : {}'.format(len(source_words)))
    model = Seq2seq(3, len(source_ids), len(target_ids), args.unit)
    if dev >= 0:
        chainer.cuda.get_device_from_id(dev).use()
        model.to_gpu(dev)
    # determine the stop trigger
    m = re.match(r'^(\d+)e$', args.stop)
    if m:
        trigger = (int(m.group(1)), 'epoch')
    else:
        m = re.match(r'^(\d+)i$', args.stop)
        if m:
            trigger = (int(m.group(1)), 'iteration')
        else:
            if comm.rank == 0:
                sys.stderr.write('Error: unknown stop trigger: {}'.format(
                    args.stop))
            exit(-1)
    if comm.rank == 0:
        print('Trigger: {}'.format(trigger))
    optimizer = chainermn.create_multi_node_optimizer(
        create_optimizer(args.optimizer), comm)
    optimizer.setup(model)
    # Broadcast dataset
    # Sanity check of train_data
    train_data = chainermn.scatter_dataset(train_data, comm)
    test_data = chainermn.scatter_dataset(test_data, comm)
    train_iter = chainer.iterators.SerialIterator(train_data,
                                                  args.batchsize,
                                                  shuffle=False)
    updater = training.StandardUpdater(
        train_iter, optimizer, converter=convert, device=dev)
    trainer = training.Trainer(updater,
                               trigger,
                               out=args.out)
    trainer.extend(chainermn.create_multi_node_evaluator(
        BleuEvaluator(model, test_data, device=dev, comm=comm),
        comm))
    def translate_one(source, target):
        # Translate a single sentence and print source/result/expected.
        words = europal.split_sentence(source)
        print('# source : ' + ' '.join(words))
        x = model.xp.array(
            [source_ids.get(w, 1) for w in words], numpy.int32)
        ys = model.translate([x])[0]
        words = [target_words[y] for y in ys]
        print('# result : ' + ' '.join(words))
        print('# expect : ' + target)
    # @chainer.training.make_extension(trigger=(200, 'iteration'))
    def translate(trainer):
        # Demo translations plus one random test-set sample; currently
        # defined but not registered as an extension (see the
        # commented-out decorator above).
        translate_one(
            'Who are we ?',
            'Qui sommes-nous?')
        translate_one(
            'And it often costs over a hundred dollars ' +
            'to obtain the required identity card .',
            'Or, il en coûte souvent plus de cent dollars ' +
            'pour obtenir la carte d\'identité requise.')
        source, target = test_data[numpy.random.choice(len(test_data))]
        source = ' '.join([source_words.get(i, '') for i in source])
        target = ' '.join([target_words.get(i, '') for i in target])
        translate_one(source, target)
    if comm.rank == 0:
        trainer.extend(extensions.LogReport(trigger=(1, 'epoch')),
                       trigger=(1, 'epoch'))
        report = extensions.PrintReport(['epoch',
                                         'iteration',
                                         'main/loss',
                                         'main/perp',
                                         'validation/main/bleu',
                                         'elapsed_time'])
        trainer.extend(report, trigger=(1, 'epoch'))
    comm.mpi_comm.Barrier()
    if comm.rank == 0:
        print('start training')
        sys.stdout.flush()
    trainer.run()
# Standard script entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "a3ae9d88a135b7ee29da7597e06e0e78",
"timestamp": "",
"source": "github",
"line_count": 519,
"max_line_length": 101,
"avg_line_length": 35.703275529865124,
"alnum_prop": 0.5366432811656773,
"repo_name": "tkerola/chainer",
"id": "8b74aea8236ff8f380889400e6f3ddefb618f6d7",
"size": "18551",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/chainermn/seq2seq/seq2seq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3471733"
}
],
"symlink_target": ""
} |
from random import randint, getrandbits, choice
import pytest
from netaddr import IPAddress, IPNetwork
from pyASA.rule import RuleTCPUDP, ServiceComparator
from test.online import settings
@pytest.mark.skipif(not settings.online, reason="ASA not available for online tests")
class Test_ACL(object):
    """Online ACL tests against a live ASA configured in settings."""
    @pytest.fixture(scope="class")
    def asa(self):
        # Class-scoped fixture: clear the test ACL once before the tests.
        asa = settings.asa
        if asa.acl.exists(settings.test_acl):
            asa.acl.delete_rules(settings.test_acl)
        yield settings.asa
        # NOTE(review): teardown is intentionally(?) disabled — rules
        # appended by the tests remain on the device. Confirm this is wanted.
        # if asa.acl.exists(settings.test_acl):
        # settings.asa.acl.delete_rules(settings.test_acl)
    def test_test_connection(self, asa):
        # Smoke test: the ASA must be reachable.
        asa.test_connection()
    def test_append_rule(self, asa):
        # Append a single random TCP/UDP rule to the test ACL.
        rule = RuleTCPUDP()
        rule.src = IPAddress(randint(0, 4294967295))
        rule.dst = IPAddress(randint(0, 4294967295))
        rule.src_port = randint(1, 65535)
        rule.dst_port = randint(1, 65535)
        asa.acl.append_rule(settings.test_acl, rule)
    @pytest.mark.slow
    def test_append_rules(self, asa):
        # Bulk-append 350 randomized rules; src_port doubles as a unique id.
        rules = []
        for i in range(1, 351):
            protocol = choice(["tcp", "udp"])
            # Randomly pick a host address or a CIDR network for each side.
            if bool(getrandbits(1)):
                src = IPAddress(randint(0, 4294967295))
            else:
                src = IPNetwork(f"{IPAddress(randint(0, 4294967295))}/{randint(0, 31)}").cidr
            if bool(getrandbits(1)):
                dst = IPAddress(randint(0, 4294967295))
            else:
                dst = IPNetwork(f"{IPAddress(randint(0, 4294967295))}/{randint(0, 31)}").cidr
            dst_port = randint(1, 65535)
            src_comp = choice([comp for comp in ServiceComparator])
            dst_comp = choice([comp for comp in ServiceComparator])
            rule = RuleTCPUDP(protocol=protocol, src=src, dst=dst, src_port=i, dst_port=dst_port,
                              src_comp=src_comp, dst_comp=dst_comp)
            rules.append(rule)
        asa.acl.append_rules(settings.test_acl, rules)
| {
"content_hash": "447e67b259d8de0837580c926bd2cb15",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 97,
"avg_line_length": 39.1764705882353,
"alnum_prop": 0.6081081081081081,
"repo_name": "xpac1985/pyASA",
"id": "48b6f889a7ee4b3c06412830b10fbecf0be4f00a",
"size": "1998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/online/ACL_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "129153"
}
],
"symlink_target": ""
} |
from connector import channel
from google3.cloud.graphite.mmv2.services.google.compute import vpn_tunnel_pb2
from google3.cloud.graphite.mmv2.services.google.compute import vpn_tunnel_pb2_grpc
from typing import List
class VpnTunnel(object):
    def __init__(
        self,
        id: int = None,
        name: str = None,
        description: str = None,
        location: str = None,
        target_vpn_gateway: str = None,
        vpn_gateway: str = None,
        vpn_gateway_interface: int = None,
        peer_external_gateway: str = None,
        peer_external_gateway_interface: int = None,
        peer_gcp_gateway: str = None,
        router: str = None,
        peer_ip: str = None,
        shared_secret: str = None,
        shared_secret_hash: str = None,
        status: str = None,
        self_link: str = None,
        ike_version: int = None,
        detailed_status: str = None,
        local_traffic_selector: list = None,
        remote_traffic_selector: list = None,
        project: str = None,
        service_account_file: str = "",
    ):
        """Initialize the gRPC channel and store the tunnel's desired state.

        NOTE(review): `id`, `shared_secret_hash`, `status`, `self_link`,
        and `detailed_status` are accepted but never stored — presumably
        server-assigned, output-only fields; confirm against the service
        definition.
        """
        # Ensure the connector channel is up before any attribute use.
        channel.initialize()
        self.name = name
        self.description = description
        self.location = location
        self.target_vpn_gateway = target_vpn_gateway
        self.vpn_gateway = vpn_gateway
        self.vpn_gateway_interface = vpn_gateway_interface
        self.peer_external_gateway = peer_external_gateway
        self.peer_external_gateway_interface = peer_external_gateway_interface
        self.peer_gcp_gateway = peer_gcp_gateway
        self.router = router
        self.peer_ip = peer_ip
        self.shared_secret = shared_secret
        self.ike_version = ike_version
        self.local_traffic_selector = local_traffic_selector
        self.remote_traffic_selector = remote_traffic_selector
        self.project = project
        self.service_account_file = service_account_file
def apply(self):
stub = vpn_tunnel_pb2_grpc.ComputeVpnTunnelServiceStub(channel.Channel())
request = vpn_tunnel_pb2.ApplyComputeVpnTunnelRequest()
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.location):
request.resource.location = Primitive.to_proto(self.location)
if Primitive.to_proto(self.target_vpn_gateway):
request.resource.target_vpn_gateway = Primitive.to_proto(
self.target_vpn_gateway
)
if Primitive.to_proto(self.vpn_gateway):
request.resource.vpn_gateway = Primitive.to_proto(self.vpn_gateway)
if Primitive.to_proto(self.vpn_gateway_interface):
request.resource.vpn_gateway_interface = Primitive.to_proto(
self.vpn_gateway_interface
)
if Primitive.to_proto(self.peer_external_gateway):
request.resource.peer_external_gateway = Primitive.to_proto(
self.peer_external_gateway
)
if Primitive.to_proto(self.peer_external_gateway_interface):
request.resource.peer_external_gateway_interface = Primitive.to_proto(
self.peer_external_gateway_interface
)
if Primitive.to_proto(self.peer_gcp_gateway):
request.resource.peer_gcp_gateway = Primitive.to_proto(
self.peer_gcp_gateway
)
if Primitive.to_proto(self.router):
request.resource.router = Primitive.to_proto(self.router)
if Primitive.to_proto(self.peer_ip):
request.resource.peer_ip = Primitive.to_proto(self.peer_ip)
if Primitive.to_proto(self.shared_secret):
request.resource.shared_secret = Primitive.to_proto(self.shared_secret)
if Primitive.to_proto(self.ike_version):
request.resource.ike_version = Primitive.to_proto(self.ike_version)
if Primitive.to_proto(self.local_traffic_selector):
request.resource.local_traffic_selector.extend(
Primitive.to_proto(self.local_traffic_selector)
)
if Primitive.to_proto(self.remote_traffic_selector):
request.resource.remote_traffic_selector.extend(
Primitive.to_proto(self.remote_traffic_selector)
)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
request.service_account_file = self.service_account_file
response = stub.ApplyComputeVpnTunnel(request)
self.id = Primitive.from_proto(response.id)
self.name = Primitive.from_proto(response.name)
self.description = Primitive.from_proto(response.description)
self.location = Primitive.from_proto(response.location)
self.target_vpn_gateway = Primitive.from_proto(response.target_vpn_gateway)
self.vpn_gateway = Primitive.from_proto(response.vpn_gateway)
self.vpn_gateway_interface = Primitive.from_proto(
response.vpn_gateway_interface
)
self.peer_external_gateway = Primitive.from_proto(
response.peer_external_gateway
)
self.peer_external_gateway_interface = Primitive.from_proto(
response.peer_external_gateway_interface
)
self.peer_gcp_gateway = Primitive.from_proto(response.peer_gcp_gateway)
self.router = Primitive.from_proto(response.router)
self.peer_ip = Primitive.from_proto(response.peer_ip)
self.shared_secret = Primitive.from_proto(response.shared_secret)
self.shared_secret_hash = Primitive.from_proto(response.shared_secret_hash)
self.status = VpnTunnelStatusEnum.from_proto(response.status)
self.self_link = Primitive.from_proto(response.self_link)
self.ike_version = Primitive.from_proto(response.ike_version)
self.detailed_status = Primitive.from_proto(response.detailed_status)
self.local_traffic_selector = Primitive.from_proto(
response.local_traffic_selector
)
self.remote_traffic_selector = Primitive.from_proto(
response.remote_traffic_selector
)
self.project = Primitive.from_proto(response.project)
def delete(self):
stub = vpn_tunnel_pb2_grpc.ComputeVpnTunnelServiceStub(channel.Channel())
request = vpn_tunnel_pb2.DeleteComputeVpnTunnelRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.location):
request.resource.location = Primitive.to_proto(self.location)
if Primitive.to_proto(self.target_vpn_gateway):
request.resource.target_vpn_gateway = Primitive.to_proto(
self.target_vpn_gateway
)
if Primitive.to_proto(self.vpn_gateway):
request.resource.vpn_gateway = Primitive.to_proto(self.vpn_gateway)
if Primitive.to_proto(self.vpn_gateway_interface):
request.resource.vpn_gateway_interface = Primitive.to_proto(
self.vpn_gateway_interface
)
if Primitive.to_proto(self.peer_external_gateway):
request.resource.peer_external_gateway = Primitive.to_proto(
self.peer_external_gateway
)
if Primitive.to_proto(self.peer_external_gateway_interface):
request.resource.peer_external_gateway_interface = Primitive.to_proto(
self.peer_external_gateway_interface
)
if Primitive.to_proto(self.peer_gcp_gateway):
request.resource.peer_gcp_gateway = Primitive.to_proto(
self.peer_gcp_gateway
)
if Primitive.to_proto(self.router):
request.resource.router = Primitive.to_proto(self.router)
if Primitive.to_proto(self.peer_ip):
request.resource.peer_ip = Primitive.to_proto(self.peer_ip)
if Primitive.to_proto(self.shared_secret):
request.resource.shared_secret = Primitive.to_proto(self.shared_secret)
if Primitive.to_proto(self.ike_version):
request.resource.ike_version = Primitive.to_proto(self.ike_version)
if Primitive.to_proto(self.local_traffic_selector):
request.resource.local_traffic_selector.extend(
Primitive.to_proto(self.local_traffic_selector)
)
if Primitive.to_proto(self.remote_traffic_selector):
request.resource.remote_traffic_selector.extend(
Primitive.to_proto(self.remote_traffic_selector)
)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
response = stub.DeleteComputeVpnTunnel(request)
@classmethod
def list(self, project, location, service_account_file=""):
stub = vpn_tunnel_pb2_grpc.ComputeVpnTunnelServiceStub(channel.Channel())
request = vpn_tunnel_pb2.ListComputeVpnTunnelRequest()
request.service_account_file = service_account_file
request.Project = project
request.Location = location
return stub.ListComputeVpnTunnel(request).items
def to_proto(self):
resource = vpn_tunnel_pb2.ComputeVpnTunnel()
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.location):
resource.location = Primitive.to_proto(self.location)
if Primitive.to_proto(self.target_vpn_gateway):
resource.target_vpn_gateway = Primitive.to_proto(self.target_vpn_gateway)
if Primitive.to_proto(self.vpn_gateway):
resource.vpn_gateway = Primitive.to_proto(self.vpn_gateway)
if Primitive.to_proto(self.vpn_gateway_interface):
resource.vpn_gateway_interface = Primitive.to_proto(
self.vpn_gateway_interface
)
if Primitive.to_proto(self.peer_external_gateway):
resource.peer_external_gateway = Primitive.to_proto(
self.peer_external_gateway
)
if Primitive.to_proto(self.peer_external_gateway_interface):
resource.peer_external_gateway_interface = Primitive.to_proto(
self.peer_external_gateway_interface
)
if Primitive.to_proto(self.peer_gcp_gateway):
resource.peer_gcp_gateway = Primitive.to_proto(self.peer_gcp_gateway)
if Primitive.to_proto(self.router):
resource.router = Primitive.to_proto(self.router)
if Primitive.to_proto(self.peer_ip):
resource.peer_ip = Primitive.to_proto(self.peer_ip)
if Primitive.to_proto(self.shared_secret):
resource.shared_secret = Primitive.to_proto(self.shared_secret)
if Primitive.to_proto(self.ike_version):
resource.ike_version = Primitive.to_proto(self.ike_version)
if Primitive.to_proto(self.local_traffic_selector):
resource.local_traffic_selector.extend(
Primitive.to_proto(self.local_traffic_selector)
)
if Primitive.to_proto(self.remote_traffic_selector):
resource.remote_traffic_selector.extend(
Primitive.to_proto(self.remote_traffic_selector)
)
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
return resource
class VpnTunnelStatusEnum(object):
    """Maps bare status strings to/from ComputeVpnTunnelStatusEnum values."""

    @classmethod
    def to_proto(cls, resource):
        """Convert a bare status string to its proto enum number."""
        # Falsy (unset) statuses pass through unchanged.
        if not resource:
            return resource
        return vpn_tunnel_pb2.ComputeVpnTunnelStatusEnum.Value(
            "ComputeVpnTunnelStatusEnum" + str(resource)
        )

    @classmethod
    def from_proto(cls, resource):
        """Convert a proto enum number back to the bare status string."""
        if not resource:
            return resource
        full_name = vpn_tunnel_pb2.ComputeVpnTunnelStatusEnum.Name(resource)
        # Strip the generated enum-name prefix to recover the bare status.
        return full_name[len("ComputeVpnTunnelStatusEnum"):]
class Primitive(object):
    """Conversion helpers for primitive proto fields."""

    @classmethod
    def to_proto(cls, s):
        """Return *s* unchanged, or the proto default "" for falsy values."""
        return s if s else ""

    @classmethod
    def from_proto(cls, s):
        """Primitive values come back from protos as-is."""
        return s
| {
"content_hash": "d84bf6ff99f192865fc06d6bde68f338",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 85,
"avg_line_length": 41.43564356435643,
"alnum_prop": 0.6444444444444445,
"repo_name": "GoogleCloudPlatform/declarative-resource-client-library",
"id": "40bf5c89785caa9897659692b65405aeecdeaf4f",
"size": "13154",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/services/compute/vpn_tunnel.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2560"
},
{
"name": "C++",
"bytes": "3947"
},
{
"name": "Go",
"bytes": "116489733"
},
{
"name": "Python",
"bytes": "17240408"
},
{
"name": "Starlark",
"bytes": "319733"
}
],
"symlink_target": ""
} |
"""Perform DOI activation task."""
class HSTaskRouter(object):
    """Task router: maps known task names to exchange/routing-key options."""

    def route_for_task(self, task, args=None, kwargs=None):
        """Return routing options for *task*, or None for default routing."""
        if task != 'hs_core.tasks.manage_task_nightly':
            return None
        return {
            'exchange': 'default',
            'exchange_type': 'topic',
            'routing_key': 'task.default',
        }
| {
"content_hash": "8357f0ffee2aea3962a8605e5f961b0f",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 62,
"avg_line_length": 29.25,
"alnum_prop": 0.5427350427350427,
"repo_name": "hydroshare/hydroshare",
"id": "d43a4e74f4301177e627f376d476cb8eaaa50d5f",
"size": "468",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "hs_core/router.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "183727"
},
{
"name": "Dockerfile",
"bytes": "1433"
},
{
"name": "HTML",
"bytes": "950010"
},
{
"name": "JavaScript",
"bytes": "1450537"
},
{
"name": "Python",
"bytes": "5786593"
},
{
"name": "R",
"bytes": "4904"
},
{
"name": "Shell",
"bytes": "94173"
},
{
"name": "Vue",
"bytes": "32043"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: drop the ActionClusterVersion model."""

    def forwards(self, orm):
        """Apply: remove the actionclusters_actionclusterversion table."""
        # Deleting model 'ActionClusterVersion'
        db.delete_table(u'actionclusters_actionclusterversion')

    def backwards(self, orm):
        """Revert: recreate the table with its pre-deletion columns."""
        # Adding model 'ActionClusterVersion'
        db.create_table(u'actionclusters_actionclusterversion', (
            ('website', self.gf('django.db.models.fields.URLField')(max_length=500, blank=True)),
            ('slug', self.gf('us_ignite.common.fields.AutoUUIDField')(max_length=50, unique=True, blank=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
            ('assistance', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('impact_statement', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=500, blank=True)),
            ('actioncluster', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['actionclusters.ActionCluster'])),
            ('team_name', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('summary', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('team_description', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('acknowledgments', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('stage', self.gf('django.db.models.fields.IntegerField')(default=1)),
        ))
        db.send_create_signal(u'actionclusters', ['ActionClusterVersion'])

    # Frozen ORM state captured by south at migration time; do not edit.
    models = {
        u'actionclusters.actioncluster': {
            'Meta': {'ordering': "('-is_featured', '-created')", 'object_name': 'ActionCluster'},
            'acknowledgments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'assistance': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'awards': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['actionclusters.Domain']", 'null': 'True', 'blank': 'True'}),
            'features': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['actionclusters.Feature']", 'symmetrical': 'False', 'blank': 'True'}),
            'features_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '500', 'blank': 'True'}),
            'impact_statement': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'membership_set_for_actioncluster'", 'symmetrical': 'False', 'through': u"orm['actionclusters.ActionClusterMembership']", 'to': u"orm['auth.User']"}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ownership_set_for_actioncluster'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
            'position': ('geoposition.fields.GeopositionField', [], {'default': "'0,0'", 'max_length': '42', 'blank': 'True'}),
            'slug': ('us_ignite.common.fields.AutoUUIDField', [], {'unique': 'True', 'max_length': '50', 'blank': 'True'}),
            'stage': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'team_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'team_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '500', 'blank': 'True'})
        },
        u'actionclusters.actionclustermedia': {
            'Meta': {'ordering': "('created',)", 'object_name': 'ActionClusterMedia'},
            'actioncluster': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['actionclusters.ActionCluster']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '500', 'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        u'actionclusters.actionclustermembership': {
            'Meta': {'unique_together': "(('user', 'actioncluster'),)", 'object_name': 'ActionClusterMembership'},
            'actioncluster': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['actionclusters.ActionCluster']"}),
            'can_edit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'actionclusters.actionclusterurl': {
            'Meta': {'object_name': 'ActionClusterURL'},
            'actioncluster': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['actionclusters.ActionCluster']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '500'})
        },
        u'actionclusters.domain': {
            'Meta': {'object_name': 'Domain'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
        },
        u'actionclusters.feature': {
            'Meta': {'object_name': 'Feature'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
        },
        u'actionclusters.page': {
            'Meta': {'object_name': 'Page'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'})
        },
        u'actionclusters.pageactioncluster': {
            'Meta': {'ordering': "('order',)", 'object_name': 'PageActionCluster'},
            'actioncluster': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['actionclusters.ActionCluster']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['actionclusters.Page']"})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    # Apps whose frozen state is complete in the `models` dict above.
    complete_apps = ['actionclusters']
"content_hash": "e6b80d05e6f2261d7bf8178ffe585531",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 247,
"avg_line_length": 81.98101265822785,
"alnum_prop": 0.5761599629429476,
"repo_name": "us-ignite/us_ignite",
"id": "fd3933dbf1456d4964825226f61f7d8c9fc0311d",
"size": "12977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "us_ignite/actionclusters/migrations/0003_auto__del_actionclusterversion.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "590320"
},
{
"name": "HTML",
"bytes": "920235"
},
{
"name": "JavaScript",
"bytes": "109759"
},
{
"name": "Nginx",
"bytes": "3047"
},
{
"name": "Pascal",
"bytes": "48"
},
{
"name": "Puppet",
"bytes": "53455"
},
{
"name": "Python",
"bytes": "1321882"
},
{
"name": "Ruby",
"bytes": "370509"
},
{
"name": "Shell",
"bytes": "63"
}
],
"symlink_target": ""
} |
"""
Description
"""
__author__ = 'awang'
from tornado.web import RequestHandler
from models import Dao
from models import Session
from models import User
class BaseController(RequestHandler):
    """Base Tornado request handler providing session and user helpers.

    Removes the debug ``print`` calls and commented-out experiments that
    were left in the session handling.
    """

    @property
    def rds(self):
        """Redis connection from the DAO layer."""
        return Dao.redis()

    @property
    def current_user(self):
        """User for the current session (uid may be None when anonymous)."""
        return User(self.current_uid)

    @property
    def current_uid(self):
        """User id stored in the session, or None when not logged in."""
        return self.session.data.get('uid', None)

    @property
    def session(self):
        """Session for this request; sets the sid cookie when absent.

        NOTE(review): a new Session object is constructed on every access;
        callers relying on a single Session per request should cache it.
        """
        new_session = Session(self.current_sid)
        if not self.current_sid:
            # First visit: persist the freshly generated sid in a signed
            # cookie scoped to the configured domain.
            self.set_secure_cookie('sid', new_session.sid, domain=self.settings['cookie_domain'])
        return new_session

    @property
    def current_sid(self):
        """Session id from the signed 'sid' cookie, or None when missing."""
        return self.get_secure_cookie('sid')
| {
"content_hash": "6b4217efc653e6e2cbdefa1cd8a30a6f",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 97,
"avg_line_length": 26.32,
"alnum_prop": 0.5851063829787234,
"repo_name": "ryanduan/duanyongwang",
"id": "d40001b568b911a81397c984fb9e6828f7b1632e",
"size": "1341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controller/base_controller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2358"
},
{
"name": "Python",
"bytes": "13013"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url, patterns

from polls import views

# URL routes for the polls app; `name` values are used for reverse() lookups.
urlpatterns = [
    url(r'^home/$', views.home, name='home'),
    url(r'^about/$', views.about, name='about'),
]
| {
"content_hash": "e1b8572a18e141621e37d0c86860f610",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 51,
"avg_line_length": 27.142857142857142,
"alnum_prop": 0.6631578947368421,
"repo_name": "Predator01/potential-adventure",
"id": "51a4351845807efe2f7230d1a996564e1c9bcb5c",
"size": "190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/polls/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "402779"
},
{
"name": "HTML",
"bytes": "231691"
},
{
"name": "JavaScript",
"bytes": "1078394"
},
{
"name": "Python",
"bytes": "12359"
},
{
"name": "Shell",
"bytes": "3749"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter Item.invoice FK options (CASCADE delete, related_name)."""

    dependencies = [
        ('checkout', '0004_auto_20171226_0910'),
    ]

    operations = [
        # Only the ForeignKey options change; the underlying column stays.
        migrations.AlterField(
            model_name='item',
            name='invoice',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='itens_nota', to='checkout.Invoice'),
        ),
    ]
| {
"content_hash": "9d59a7cee930b9166df8c4fc14ae15ed",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 131,
"avg_line_length": 25.894736842105264,
"alnum_prop": 0.6402439024390244,
"repo_name": "CoutinhoElias/danibraz",
"id": "81cebf86749958ed80dd48b9ebfe9839cb973f3b",
"size": "565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "danibraz/checkout/migrations/0005_auto_20171226_0924.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "321689"
},
{
"name": "HTML",
"bytes": "151507"
},
{
"name": "JavaScript",
"bytes": "1107690"
},
{
"name": "Python",
"bytes": "209840"
},
{
"name": "Shell",
"bytes": "4240"
}
],
"symlink_target": ""
} |
"""Story graph for the interactive storytelling system DoppioGioco."""
from collections import defaultdict
import csv
import os
from interactive_story.tension_evaluation import get_tension_value
from interactive_story.story_graph import StoryGraph
# Bundled data files, resolved relative to this module's directory.
# Graph structure (edges between story units).
STORY_GRAPH_CSV = os.path.join(os.path.dirname(__file__),
                               'data/story_graph.csv')
# Emotion label per story unit.
UNIT_EMOTIONS_CSV = os.path.join(os.path.dirname(__file__),
                                 'data/unit_emotions.csv')
# Clip URI and initial/final flags per story unit.
UNIT_DETAILS_CSV = os.path.join(os.path.dirname(__file__),
                                'data/unit_details.csv')
# Narrative text per story unit (kept separate; see loader docstring).
UNIT_TEXTS_CSV = os.path.join(os.path.dirname(__file__),
                              'data/unit_texts.csv')
class DoppioGiocoStoryGraph(StoryGraph):
"""Extends the StoryGraph class to fit DoppioGioco."""
def __init__(self):
"""Initialize the story graph for DoppioGioco."""
super().__init__()
self.load_from_csv(STORY_GRAPH_CSV)
self._emotions = defaultdict()
self.load_emotions_from_csv(UNIT_EMOTIONS_CSV)
self.tension_function = get_tension_value
self._clip_uris = defaultdict()
self._initials = set()
self._finals = set()
self.load_units_details_from_csv(UNIT_DETAILS_CSV)
self._texts = defaultdict()
self.load_unit_texts_from_csv(UNIT_TEXTS_CSV)
def load_emotions_from_csv(self, emotions_csv):
"""Extract the emotions associated with units from a CSV file."""
with open(emotions_csv, 'r') as csv_file:
emotions_csv_reader = csv.reader(csv_file, delimiter=',')
csv_it = iter(emotions_csv_reader)
next(csv_it)
for pair in csv_it:
title, emotion = pair[0], pair[1]
if self.belongs_to_graph(title):
# annotate the emotion only if the unit actually belongs
# to the story graph, otherwise it is useless
self.annotate_emotion(title, emotion)
def load_units_details_from_csv(self, details_csv):
"""Load all unit details from a CSV file."""
with open(details_csv, 'r') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
csv_it = iter(csv_reader)
next(csv_it)
for detail in csv_it:
title, clip_uri, initial, final = detail
if self.belongs_to_graph(title):
# add unit details only if the unit actually belongs to the
# story graph
if clip_uri != "NULL":
self._clip_uris[title] = clip_uri
if int(initial) == 1:
self._initials.add(title)
if int(final) == 1:
self._finals.add(title)
def load_unit_texts_from_csv(self, texts_csv):
"""Load all unit texts from a CSV file.
There is a separate CSV for texts for two reasons:
* texts are very long, hence the details CSV is much smaller without
them;
* texts may be problematic for encoding, so it is better to handle them
separately.
"""
with open(texts_csv, 'r', encoding='utf8') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
csv_it = iter(csv_reader)
next(csv_it)
for row in csv_it:
# skip empty lines
if not row: # empty sequences are false
continue
title, text = row
if self.belongs_to_graph(title):
# add unit details only if the unit actually belongs to the
# story graph
if text != "NULL":
self._texts[title] = text
def has_emotion(self, unit):
"""Whether or not the unit has an annotated emotion."""
return unit in self._emotions.keys()
def annotate_emotion(self, unit, emotion):
"""Annotate a unit with an emotion."""
self._emotions[unit] = emotion
def get_unit_emotion(self, unit):
"""Get the emotion associated with a unit."""
return self._emotions[unit]
def get_unit_tension(self, unit):
"""Get the tension value for a single unit."""
if self.has_emotion(unit):
tension_value = self.tension_function(self.get_unit_emotion(unit))
else:
tension_value = 0
return tension_value
def has_clip(self, unit):
"""Whether or not the unit has an associated clip."""
return unit in self._clip_uris.keys()
def get_unit_clip(self, unit):
"""Get the clip URI associated to the given unit."""
return self._clip_uris[unit]
def has_text(self, unit):
"""Whether or not the unit has an associated text."""
return unit in self._texts.keys()
def get_unit_text(self, unit):
"""Get the text associated to the given unit."""
return self._texts[unit]
def get_html_linear_story(self, story):
"""Create an HTML page to display a linear story."""
import html
html_story = """<html>
<head>
<meta charset="UTF-8">
</head>
<body>
<table>"""
for unit in story:
html_story += '<tr>'
# add unit title
html_story += '<td>{}</td>\n'.format(html.escape(unit))
if self.has_text(unit):
text = self.get_unit_text(unit)
text = html.escape(text).replace('\n', '<br />')
else:
text = "missing"
# add unit text
html_story += '<td>{}</td>\n'.format(text)
html_story += '</tr>\n'
html_story += """</body>
</html>"""
return html_story
def get_graphviz_graph(self):
"""Display the graph in a graphical way, using graphviz."""
from graphviz import Digraph
graph = Digraph(name=self.__class__.__name__, format='pdf')
for unit in sorted(self.get_nodes()):
if unit == '000':
color, fontcolor = '#000000', '#ffffff'
else:
color, fontcolor = self._get_emotion_color(
self.get_unit_emotion(unit))
graph.node(unit, style='filled', color='black',
fillcolor=color, fontcolor=fontcolor)
graph.edges(self._get_ordered_edge_list())
# set orientation to be left to right (LR)
graph.graph_attr.update(rankdir='LR')
# node displayed as boxes and not as ellipses
graph.node_attr.update(shape='circle')
# group together similar units
graph.body.append(self._get_unit_ranks())
return graph
@staticmethod
def _get_emotion_color(emotion):
# helper for drawing the graph, associate to each emotion a background
# color and a text color
positive_high = ('#0000ff', '#ffffff')
positive_low = ('#ffc0bf', '#000000')
negative_low = ('#c0c0ff', '#000000')
negative_high = ('#ff0000', '#ffffff')
emotions_to_color = {
"joy": positive_high,
"amusement": positive_high,
"pride": positive_high,
"pleasure": positive_low,
"relief": positive_low,
"interest": positive_low,
"hot anger": negative_high,
"panic fear": negative_high,
"despair": negative_high,
"irritation": negative_low,
"anxiety": negative_low,
"sadness": negative_low
}
return emotions_to_color[emotion]
    @staticmethod
    def _get_unit_ranks():
        # helper for drawing the graph: raw graphviz source pinning groups
        # of related units to the same rank (column) in the left-to-right
        # layout; the string is appended verbatim to graph.body.
        return """{rank = same; 001 002 003 004}
{rank = same; 005 006 007 010}
{rank = same; 009 011 012 013}
{rank = same; 014 015 016}
{rank = same; 017 018 019 020 021}
{rank = same; 022 023 024 025}
{rank = same; 061 026 027 028}
{rank = same; 029 030 031 032}
{rank = same; 033 034 035 036}
{rank = same; 037 038 039 040}
{rank = same; 041 042 047 048}
{rank = same; 049 050 051 052}
{rank = same; 053 054 055 056}
{rank = same; 057 058 059 060 062}
{rank = same; 063 064 065 066}
{rank = same; 067 068 069 070}
{rank = same; 071 072 073 074}
{rank = same; 075 076 077 078}
{rank = same; 079 080 081 082}
{rank = same; 083 084 085 086}
{rank = same; 087 088 089 090}
{rank = same; 107 108 109 110}"""
| {
"content_hash": "d8a483bbf7af769b6192bff80fd8f6fa",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 79,
"avg_line_length": 37.44690265486726,
"alnum_prop": 0.5602032376225925,
"repo_name": "msilvestro/dupin",
"id": "3571fe0e4543a780b045655697733ff7d16a8dde",
"size": "8463",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "interactive_story/dg_story_graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "86149"
}
],
"symlink_target": ""
} |
from .models import ModelFactory
from .utils import import_simplejson
from .error import TweepError
class Parser(object):
    """Abstract base for payload parsers."""

    def parse(self, method, payload):
        """Parse the response payload.

        Subclasses must return the result data together with the
        pagination cursors as a tuple (or None when cursors are absent).
        """
        raise NotImplementedError

    def parse_error(self, payload):
        """Extract an error message from the payload.

        Subclasses may raise when the message cannot be parsed, in which
        case the caller falls back to a default error message.
        """
        raise NotImplementedError
class RawParser(Parser):
    """Parser that hands the raw payload back untouched."""

    def __init__(self):
        pass

    def parse(self, method, payload):
        # no decoding: the caller receives the payload verbatim
        return payload

    def parse_error(self, payload):
        # errors are returned raw as well
        return payload
class JSONParser(Parser):
    """Parser that decodes JSON payloads.

    Uses the best available JSON implementation (see import_simplejson)
    and extracts pagination cursors when the API method supports them.
    """

    payload_format = 'json'

    def __init__(self):
        self.json_lib = import_simplejson()

    def parse(self, method, payload):
        """Decode *payload*; return the object, plus cursors if paginated.

        Raises TweepError if the payload is not valid JSON.
        """
        try:
            json = self.json_lib.loads(payload)
        except Exception as e:  # 'as' syntax works on Python 2.6+ and 3.x
            raise TweepError('Failed to parse JSON payload: %s' % e)

        # 'in' replaces dict.has_key(), which was removed in Python 3
        needs_cursors = 'cursor' in method.parameters
        if needs_cursors and isinstance(json, dict) \
                and 'previous_cursor' in json and 'next_cursor' in json:
            cursors = json['previous_cursor'], json['next_cursor']
            return json, cursors
        else:
            return json

    def parse_error(self, payload):
        """Return the error message(s) found in a JSON error payload."""
        error = self.json_lib.loads(payload)
        if 'error' in error:
            return error['error']
        else:
            return error['errors']
class ModelParser(JSONParser):
    """Parser that turns decoded JSON into model instances."""

    def __init__(self, model_factory=None):
        JSONParser.__init__(self)
        self.model_factory = model_factory or ModelFactory

    def parse(self, method, payload):
        """Parse *payload* into model object(s) for the method's type."""
        try:
            if method.payload_type is None:
                return
            model = getattr(self.model_factory, method.payload_type)
        except AttributeError:
            raise TweepError('No model for this payload type: %s' % method.payload_type)

        parsed = JSONParser.parse(self, method, payload)
        if isinstance(parsed, tuple):
            parsed, cursors = parsed
        else:
            cursors = None

        if method.payload_list:
            result = model.parse_list(method.api, parsed)
        else:
            result = model.parse(method.api, parsed)

        return (result, cursors) if cursors else result
| {
"content_hash": "7b264aa5e3047394f71baccd9a2a552b",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 107,
"avg_line_length": 27.182795698924732,
"alnum_prop": 0.5992879746835443,
"repo_name": "balanced/status.balancedpayments.com",
"id": "aa3fe491474186ad260c84b0b2759fae79b5ddd5",
"size": "2604",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "situation/tweepy/parsers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "270082"
},
{
"name": "JavaScript",
"bytes": "18462"
},
{
"name": "Python",
"bytes": "1533112"
},
{
"name": "Shell",
"bytes": "3717"
}
],
"symlink_target": ""
} |
"""
The pyro method-of-lines advection solver. This uses a piecewise linear
reconstruction in space together with a Runge-Kutta integration for time.
"""
__all__ = ['simulation']
from .simulation import Simulation
| {
"content_hash": "dd0c1f38a5210ead5c33e985df4316a5",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 73,
"avg_line_length": 30.857142857142858,
"alnum_prop": 0.7592592592592593,
"repo_name": "zingale/pyro2",
"id": "3e1c9e24f262a00cca0bfd9a9415936feba5bec6",
"size": "216",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pyro/advection_rk/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "273"
},
{
"name": "Jupyter Notebook",
"bytes": "1959967"
},
{
"name": "Logos",
"bytes": "1124"
},
{
"name": "Makefile",
"bytes": "2101"
},
{
"name": "Python",
"bytes": "702493"
},
{
"name": "Shell",
"bytes": "217"
},
{
"name": "TeX",
"bytes": "16580"
},
{
"name": "Yacc",
"bytes": "962"
}
],
"symlink_target": ""
} |
from tempest_lib.common.utils import data_utils
from tempest.api.compute import base
from tempest import test
class KeyPairsV2TestJSON(base.BaseComputeTest):
    """Tests for the Nova v2 keypairs API (create/list/get/delete)."""

    _api_version = 2

    @classmethod
    def setup_clients(cls):
        super(KeyPairsV2TestJSON, cls).setup_clients()
        cls.client = cls.keypairs_client

    def _delete_keypair(self, keypair_name):
        # cleanup helper registered via addCleanup in _create_keypair
        self.client.delete_keypair(keypair_name)

    def _create_keypair(self, keypair_name, pub_key=None):
        # create a keypair and schedule its deletion at test teardown
        body = self.client.create_keypair(keypair_name, pub_key)
        self.addCleanup(self._delete_keypair, keypair_name)
        return body

    @test.idempotent_id('1d1dbedb-d7a0-432a-9d09-83f543c3c19b')
    def test_keypairs_create_list_delete(self):
        # Keypairs created should be available in the response list
        # Create 3 keypairs
        key_list = list()
        for i in range(3):
            k_name = data_utils.rand_name('keypair')
            keypair = self._create_keypair(k_name)
            # Need to pop these keys so that our compare doesn't fail later,
            # as the keypair dicts from list API doesn't have them.
            keypair.pop('private_key')
            keypair.pop('user_id')
            key_list.append(keypair)
        # Fetch all keypairs and verify the list
        # has all created keypairs
        fetched_list = self.client.list_keypairs()
        # We need to remove the extra 'keypair' element in the
        # returned dict. See comment in keypairs_client.list_keypairs()
        new_list = list()
        for keypair in fetched_list:
            new_list.append(keypair['keypair'])
        fetched_list = new_list
        # Now check if all the created keypairs are in the fetched list
        missing_kps = [kp for kp in key_list if kp not in fetched_list]
        self.assertFalse(missing_kps,
                         "Failed to find keypairs %s in fetched list"
                         % ', '.join(m_key['name'] for m_key in missing_kps))

    @test.idempotent_id('6c1d3123-4519-4742-9194-622cb1714b7d')
    def test_keypair_create_delete(self):
        # Keypair should be created, verified and deleted
        k_name = data_utils.rand_name('keypair')
        keypair = self._create_keypair(k_name)
        private_key = keypair['private_key']
        key_name = keypair['name']
        self.assertEqual(key_name, k_name,
                         "The created keypair name is not equal "
                         "to the requested name")
        # assertIsNotNone gives a clearer failure than assertTrue(x is not None)
        self.assertIsNotNone(private_key,
                             "Field private_key is empty or not found.")

    @test.idempotent_id('a4233d5d-52d8-47cc-9a25-e1864527e3df')
    def test_get_keypair_detail(self):
        # Keypair should be created, Got details by name and deleted
        k_name = data_utils.rand_name('keypair')
        self._create_keypair(k_name)
        keypair_detail = self.client.get_keypair(k_name)
        self.assertIn('name', keypair_detail)
        self.assertIn('public_key', keypair_detail)
        self.assertEqual(keypair_detail['name'], k_name,
                         "The created keypair name is not equal "
                         "to requested name")
        public_key = keypair_detail['public_key']
        self.assertIsNotNone(public_key,
                             "Field public_key is empty or not found.")

    @test.idempotent_id('39c90c6a-304a-49dd-95ec-2366129def05')
    def test_keypair_create_with_pub_key(self):
        # Keypair should be created with a given public key
        k_name = data_utils.rand_name('keypair')
        pub_key = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCs"
                   "Ne3/1ILNCqFyfYWDeTKLD6jEXC2OQHLmietMWW+/vd"
                   "aZq7KZEwO0jhglaFjU1mpqq4Gz5RX156sCTNM9vRbw"
                   "KAxfsdF9laBYVsex3m3Wmui3uYrKyumsoJn2g9GNnG1P"
                   "I1mrVjZ61i0GY3khna+wzlTpCCmy5HNlrmbj3XLqBUpip"
                   "TOXmsnr4sChzC53KCd8LXuwc1i/CZPvF+3XipvAgFSE53pCt"
                   "LOeB1kYMOBaiUPLQTWXR3JpckqFIQwhIH0zoHlJvZE8hh90"
                   "XcPojYN56tI0OlrGqojbediJYD0rUsJu4weZpbn8vilb3JuDY+jws"
                   "snSA8wzBx3A/8y9Pp1B nova@ubuntu")
        keypair = self._create_keypair(k_name, pub_key)
        # assertNotIn is the idiomatic check for a missing dict key
        self.assertNotIn('private_key', keypair,
                         "Field private_key is not empty!")
        key_name = keypair['name']
        self.assertEqual(key_name, k_name,
                         "The created keypair name is not equal "
                         "to the requested name!")
| {
"content_hash": "963dfc5de57a6d3ace67ee7e835dbe68",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 77,
"avg_line_length": 45.75757575757576,
"alnum_prop": 0.617439293598234,
"repo_name": "yamt/tempest",
"id": "01e3c869ed4602376e98555c23c9920c1afe9b8a",
"size": "5166",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tempest/api/compute/keypairs/test_keypairs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2739641"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
} |
"""
Post-processes the force coefficients from a cuIBM simulation.
This script reads the forces, computes the mean forces within a given range,
computes the Strouhal number within a range, plots the force coefficients,
saves the figure, and prints a data-frame that contains the mean values.
"""
from snake.cuibm.simulation import CuIBMSimulation
simulation = CuIBMSimulation()
simulation.read_forces()
time_limits = (60.0, 80.0)
simulation.get_mean_forces(limits=time_limits)
simulation.get_strouhal(limits=time_limits, order=200)
simulation.plot_forces(display_coefficients=True,
coefficient=2.0,
limits=(0.0, 80.0, -0.5, 1.5),
style='seaborn-dark',
save_name='forceCoefficients')
dataframe = simulation.create_dataframe_forces(display_strouhal=True,
display_coefficients=True,
coefficient=2.0)
print(dataframe)
| {
"content_hash": "13546d040dbb1baab35cb2b0c49401d2",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 76,
"avg_line_length": 37.25925925925926,
"alnum_prop": 0.6421471172962226,
"repo_name": "barbagroup/cuIBM",
"id": "b933f36657c63fc9a88121b34b0e53c2289f53ed",
"size": "1006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/cylinder/Re100/scripts/plotForceCoefficients.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "281"
},
{
"name": "C++",
"bytes": "115124"
},
{
"name": "Cuda",
"bytes": "185373"
},
{
"name": "Makefile",
"bytes": "5426"
},
{
"name": "Python",
"bytes": "619"
},
{
"name": "TeX",
"bytes": "1453"
}
],
"symlink_target": ""
} |
import requests
import bs4
import csv
def find_between( text, first, last ):
    """Return the substring of *text* between the first occurrence of
    *first* and the following occurrence of *last*; "" when either
    delimiter is missing."""
    start = text.find(first)
    if start == -1:
        return ""
    start += len(first)
    end = text.find(last, start)
    if end == -1:
        return ""
    return text[start:end]
def extract_name(str_val):
    # the display name sits between a tag's '>' and the next '<'
    return find_between(str_val, '>', '<')
def extract_id(url_text):
    """Return the numeric id that follows two characters after the first
    'y' in *url_text* (e.g. the value of a 'y=' query parameter)."""
    start = url_text.index('y') + 2
    return int(url_text[start:])
def calc_num_pages(whole_soup):
    """Return the total page count parsed from a 'Page 1 of N,' breadcrumb
    cell, or None when no such breadcrumb is found."""
    crumbs = whole_soup.findAll('td', attrs={'width': '*'})
    for crumb in crumbs:
        if 'Page 1' not in crumb.text:
            continue
        return int(find_between(crumb.text, 'Page 1 of ', ','))
def write_sub_dict_csv(sub_dict, id0):
    """Write *sub_dict* as key,value rows to storage/zoo_<id0>.csv.

    The file handle is closed deterministically via a context manager
    (the original leaked the handle opened inline in csv.writer()).
    """
    with open("storage/zoo_" + str(id0) + ".csv", "w") as csv_file:
        word = csv.writer(csv_file)
        for key, val in sub_dict.items():
            word.writerow([key, val])
def process_webpage(url_text):
    # Fetch *url_text* and return the body parsed as a BeautifulSoup tree.
    # Keeps re-requesting until the server answers with HTTP 200.
    response = requests.get(url_text)
    while(response.status_code != 200):
        response = requests.get(url_text)
        # NOTE(review): the messages print after the retry request, and there
        # is no backoff or retry limit -- a permanently failing URL loops
        # forever; confirm this is acceptable for this scraper.
        print ' ******* Problem getting webpage ', url_text
        print ' ******* Tried again '
    return bs4.BeautifulSoup(response.text)
def extract_table_links(table_dict, sub_soup):
    """Collect the anchors whose href contains 'ntaxa' from the first
    cellpadding=0 table in *sub_soup*, keyed by href, into *table_dict*
    (mutated in place)."""
    subtext = sub_soup.find('table', attrs={'cellpadding': '0'})
    for link in subtext.find_all('a'):
        href = link.get('href')
        # 'is not None' is the idiomatic null check (was: not(href == None))
        if href is not None and 'ntaxa' in href:
            table_dict[href] = link
"content_hash": "98362a407431601c9d3c82cfe8ee5109",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 67,
"avg_line_length": 28.5625,
"alnum_prop": 0.5988329686360321,
"repo_name": "sa501428/playground",
"id": "75b71072964135750369a57f26bb3842882fb85d",
"size": "1394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Web Scraping/python_scraper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "165079"
},
{
"name": "Matlab",
"bytes": "16138"
},
{
"name": "Python",
"bytes": "11198"
}
],
"symlink_target": ""
} |
from ctypes import *
# Let's map the Microsoft types to ctypes for clarity
BYTE = c_ubyte
WORD = c_ushort
DWORD = c_ulong
LPBYTE = POINTER(c_ubyte)
LPTSTR = POINTER(c_char)
HANDLE = c_void_p
PVOID = c_void_p
LPVOID = c_void_p
UINT_PTR = c_ulong
SIZE_T = c_ulong
# Constants
# Process creation / access / wait / debugger-continue flags
DEBUG_PROCESS = 0x00000001
CREATE_NEW_CONSOLE = 0x00000010
PROCESS_ALL_ACCESS = 0x001F0FFF
INFINITE = 0xFFFFFFFF
DBG_CONTINUE = 0x00010002
# Debug event constants
EXCEPTION_DEBUG_EVENT = 0x1
CREATE_THREAD_DEBUG_EVENT = 0x2
CREATE_PROCESS_DEBUG_EVENT = 0x3
EXIT_THREAD_DEBUG_EVENT = 0x4
EXIT_PROCESS_DEBUG_EVENT = 0x5
LOAD_DLL_DEBUG_EVENT = 0x6
UNLOAD_DLL_DEBUG_EVENT = 0x7
OUTPUT_DEBUG_STRING_EVENT = 0x8
RIP_EVENT = 0x9
# debug exception codes.
EXCEPTION_ACCESS_VIOLATION = 0xC0000005
EXCEPTION_BREAKPOINT = 0x80000003
EXCEPTION_GUARD_PAGE = 0x80000001
EXCEPTION_SINGLE_STEP = 0x80000004
# Thread constants for CreateToolhelp32Snapshot()
TH32CS_SNAPHEAPLIST = 0x00000001
TH32CS_SNAPPROCESS = 0x00000002
TH32CS_SNAPTHREAD = 0x00000004
TH32CS_SNAPMODULE = 0x00000008
TH32CS_INHERIT = 0x80000000
TH32CS_SNAPALL = (TH32CS_SNAPHEAPLIST | TH32CS_SNAPPROCESS | TH32CS_SNAPTHREAD | TH32CS_SNAPMODULE)
THREAD_ALL_ACCESS = 0x001F03FF
# Context flags for GetThreadContext()
CONTEXT_FULL = 0x00010007
CONTEXT_DEBUG_REGISTERS = 0x00010010
# Memory permissions
PAGE_EXECUTE_READWRITE = 0x00000040
# Hardware breakpoint conditions
HW_ACCESS = 0x00000003
HW_EXECUTE = 0x00000000
HW_WRITE = 0x00000001
# Memory page permissions, used by VirtualProtect()
PAGE_NOACCESS = 0x00000001
PAGE_READONLY = 0x00000002
PAGE_READWRITE = 0x00000004
PAGE_WRITECOPY = 0x00000008
PAGE_EXECUTE = 0x00000010
PAGE_EXECUTE_READ = 0x00000020
# NOTE(review): re-defines PAGE_EXECUTE_READWRITE with the same value as the
# "Memory permissions" entry above -- harmless but redundant.
PAGE_EXECUTE_READWRITE = 0x00000040
PAGE_EXECUTE_WRITECOPY = 0x00000080
PAGE_GUARD = 0x00000100
PAGE_NOCACHE = 0x00000200
PAGE_WRITECOMBINE = 0x00000400
# Structures for CreateProcessA() function
# STARTUPINFO describes how to spawn the process
class STARTUPINFO(Structure):
    # Field layout mirrors the Win32 STARTUPINFO struct (ANSI variant);
    # cb must be set to sizeof(STARTUPINFO) before use.
    _fields_ = [
        ("cb", DWORD),
        ("lpReserved", LPTSTR),
        ("lpDesktop", LPTSTR),
        ("lpTitle", LPTSTR),
        ("dwX", DWORD),
        ("dwY", DWORD),
        ("dwXSize", DWORD),
        ("dwYSize", DWORD),
        ("dwXCountChars", DWORD),
        ("dwYCountChars", DWORD),
        ("dwFillAttribute",DWORD),
        ("dwFlags", DWORD),
        ("wShowWindow", WORD),
        ("cbReserved2", WORD),
        ("lpReserved2", LPBYTE),
        ("hStdInput", HANDLE),
        ("hStdOutput", HANDLE),
        ("hStdError", HANDLE),
    ]
# PROCESS_INFORMATION receives its information
# after the target process has been successfully
# started.
class PROCESS_INFORMATION(Structure):
    # Filled in by CreateProcessA(); holds handles and ids of the new
    # process and its primary thread.
    _fields_ = [
        ("hProcess", HANDLE),
        ("hThread", HANDLE),
        ("dwProcessId", DWORD),
        ("dwThreadId", DWORD),
    ]
# When the dwDebugEventCode is evaluated
class EXCEPTION_RECORD(Structure):
    # Forward declaration: _fields_ is assigned after the class statement
    # because the struct contains a pointer to its own type.
    pass
EXCEPTION_RECORD._fields_ = [
    ("ExceptionCode", DWORD),
    ("ExceptionFlags", DWORD),
    ("ExceptionRecord", POINTER(EXCEPTION_RECORD)),
    ("ExceptionAddress", PVOID),
    ("NumberParameters", DWORD),
    ("ExceptionInformation", UINT_PTR * 15),
    ]
class _EXCEPTION_RECORD(Structure):
    # Same layout defined in one step; its self-pointer refers to the
    # public EXCEPTION_RECORD type above.
    _fields_ = [
        ("ExceptionCode", DWORD),
        ("ExceptionFlags", DWORD),
        ("ExceptionRecord", POINTER(EXCEPTION_RECORD)),
        ("ExceptionAddress", PVOID),
        ("NumberParameters", DWORD),
        ("ExceptionInformation", UINT_PTR * 15),
        ]
# Exceptions
class EXCEPTION_DEBUG_INFO(Structure):
    # Payload of a DEBUG_EVENT when dwDebugEventCode == EXCEPTION_DEBUG_EVENT.
    _fields_ = [
        ("ExceptionRecord", EXCEPTION_RECORD),
        ("dwFirstChance", DWORD),
        ]
# it populates this union appropriately
class DEBUG_EVENT_UNION(Union):
    # Only the Exception member is mapped so far; the remaining Win32
    # union members are left commented out until they are needed.
    _fields_ = [
        ("Exception", EXCEPTION_DEBUG_INFO),
        # ("CreateThread", CREATE_THREAD_DEBUG_INFO),
        # ("CreateProcessInfo", CREATE_PROCESS_DEBUG_INFO),
        # ("ExitThread", EXIT_THREAD_DEBUG_INFO),
        # ("ExitProcess", EXIT_PROCESS_DEBUG_INFO),
        # ("LoadDll", LOAD_DLL_DEBUG_INFO),
        # ("UnloadDll", UNLOAD_DLL_DEBUG_INFO),
        # ("DebugString", OUTPUT_DEBUG_STRING_INFO),
        # ("RipInfo", RIP_INFO),
        ]
# DEBUG_EVENT describes a debugging event
# that the debugger has trapped
class DEBUG_EVENT(Structure):
    _fields_ = [
        ("dwDebugEventCode", DWORD),
        ("dwProcessId", DWORD),
        ("dwThreadId", DWORD),
        ("u", DEBUG_EVENT_UNION),
        ]
# Used by the CONTEXT structure
class FLOATING_SAVE_AREA(Structure):
    # x87 FPU state saved as part of a thread context.
    _fields_ = [
        ("ControlWord", DWORD),
        ("StatusWord", DWORD),
        ("TagWord", DWORD),
        ("ErrorOffset", DWORD),
        ("ErrorSelector", DWORD),
        ("DataOffset", DWORD),
        ("DataSelector", DWORD),
        ("RegisterArea", BYTE * 80),
        ("Cr0NpxState", DWORD),
    ]
# The CONTEXT structure which holds all of the
# register values after a GetThreadContext() call
class CONTEXT(Structure):
    # 32-bit x86 register set (Eax/Eip/... field names), including the
    # Dr0-Dr7 debug registers used for hardware breakpoints.
    _fields_ = [
        ("ContextFlags", DWORD),
        ("Dr0", DWORD),
        ("Dr1", DWORD),
        ("Dr2", DWORD),
        ("Dr3", DWORD),
        ("Dr6", DWORD),
        ("Dr7", DWORD),
        ("FloatSave", FLOATING_SAVE_AREA),
        ("SegGs", DWORD),
        ("SegFs", DWORD),
        ("SegEs", DWORD),
        ("SegDs", DWORD),
        ("Edi", DWORD),
        ("Esi", DWORD),
        ("Ebx", DWORD),
        ("Edx", DWORD),
        ("Ecx", DWORD),
        ("Eax", DWORD),
        ("Ebp", DWORD),
        ("Eip", DWORD),
        ("SegCs", DWORD),
        ("EFlags", DWORD),
        ("Esp", DWORD),
        ("SegSs", DWORD),
        ("ExtendedRegisters", BYTE * 512),
    ]
# THREADENTRY32 contains information about a thread
# we use this for enumerating all of the system threads
class THREADENTRY32(Structure):
    # dwSize must be set to sizeof(THREADENTRY32) before Thread32First().
    _fields_ = [
        ("dwSize", DWORD),
        ("cntUsage", DWORD),
        ("th32ThreadID", DWORD),
        ("th32OwnerProcessID", DWORD),
        ("tpBasePri", DWORD),
        ("tpDeltaPri", DWORD),
        ("dwFlags", DWORD),
    ]
# Supporting struct for the SYSTEM_INFO_UNION union
class PROC_STRUCT(Structure):
    _fields_ = [
        ("wProcessorArchitecture", WORD),
        ("wReserved", WORD),
    ]
# Supporting union for the SYSTEM_INFO struct
class SYSTEM_INFO_UNION(Union):
    _fields_ = [
        ("dwOemId", DWORD),
        ("sProcStruc", PROC_STRUCT),
    ]
# SYSTEM_INFO structure is populated when a call to
# kernel32.GetSystemInfo() is made. We use the dwPageSize
# member for size calculations when setting memory breakpoints
class SYSTEM_INFO(Structure):
    _fields_ = [
        ("uSysInfo", SYSTEM_INFO_UNION),
        ("dwPageSize", DWORD),
        ("lpMinimumApplicationAddress", LPVOID),
        ("lpMaximumApplicationAddress", LPVOID),
        ("dwActiveProcessorMask", DWORD),
        ("dwNumberOfProcessors", DWORD),
        ("dwProcessorType", DWORD),
        ("dwAllocationGranularity", DWORD),
        ("wProcessorLevel", WORD),
        ("wProcessorRevision", WORD),
    ]
# MEMORY_BASIC_INFORMATION contains information about a
# particular region of memory. A call to kernel32.VirtualQuery()
# populates this structure.
class MEMORY_BASIC_INFORMATION(Structure):
    _fields_ = [
        ("BaseAddress", PVOID),
        ("AllocationBase", PVOID),
        ("AllocationProtect", DWORD),
        ("RegionSize", SIZE_T),
        ("State", DWORD),
        ("Protect", DWORD),
        ("Type", DWORD),
    ]
| {
"content_hash": "4de94f644bb5890c93fa462c73f2137b",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 104,
"avg_line_length": 30.455555555555556,
"alnum_prop": 0.5669463699379789,
"repo_name": "after1990s/little_utils",
"id": "d89e8d861a7ad9a7fe6447e03d1f13145507e65a",
"size": "8249",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "PE_to_shellcode/_debugger_defines.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "30"
},
{
"name": "C",
"bytes": "2065"
},
{
"name": "C++",
"bytes": "17076"
},
{
"name": "HTML",
"bytes": "4518"
},
{
"name": "JavaScript",
"bytes": "7510"
},
{
"name": "PHP",
"bytes": "1877"
},
{
"name": "PowerShell",
"bytes": "12251"
},
{
"name": "Python",
"bytes": "815233"
}
],
"symlink_target": ""
} |
"""This code example gets all first party audience segments.
To create first party audience segments, run create_audience_segments.py.
"""
__author__ = 'Nicholas Chen'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
SUGGESTED_PAGE_LIMIT = 500
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
# Initialize appropriate service.
audience_segment_service = client.GetService(
'AudienceSegmentService', version='v201311')
# Specify bind value to filter on first party audience segments.
values = [{
'key': 'type',
'value': {
'xsi_type': 'TextValue',
'value': 'FIRST_PARTY'
}
}]
offset, result_set_size = 0, 0
while True:
# Create a statement to select first party audience segments.
filter_statement = {'query': 'WHERE Type = :type LIMIT %s OFFSET %s' % (
SUGGESTED_PAGE_LIMIT, offset),
'values': values}
response = audience_segment_service.getAudienceSegmentsByStatement(
filter_statement)[0]
if 'results' in response:
segments = response['results']
result_set_size = len(segments)
for segment in segments:
print ('Audience segment with id \'%s\' and name \'%s\' of size '
'%s was found. ' %
(segment['id'], segment['name'], segment['size']))
offset += result_set_size
if result_set_size != SUGGESTED_PAGE_LIMIT:
break
elif offset == 0:
print 'No Results Found'
break
print 'Number of results found: %d' % offset
| {
"content_hash": "60079de79d1ab0bfc796d3e9279fc708",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 80,
"avg_line_length": 27.952380952380953,
"alnum_prop": 0.6473594548551959,
"repo_name": "caioserra/apiAdwords",
"id": "59ba74e2a55e66b8affb963145cfee6e643a17fb",
"size": "2379",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/dfp/v201311/audience_segment_service/get_first_party_audience_segments.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "47375"
},
{
"name": "Python",
"bytes": "3481410"
},
{
"name": "Shell",
"bytes": "14782"
}
],
"symlink_target": ""
} |
'''
Created on Mar 1, 2017
@author: debanjan
'''
"""
Borrowed from textacy (https://github.com/chartbeat-labs/textacy)
"""
import sys
# True when running under CPython 2.x; selects the branch below.
PY2 = int(sys.version[0]) == 2
if PY2:
    from backports import csv
    from itertools import izip as zip
    bytes_ = str
    unicode_ = unicode
    string_types = (str, unicode)
    def unicode_to_bytes(s, encoding='utf8', errors='strict'):
        # Encode a unicode string into bytes (a Python 2 'str').
        return s.encode(encoding=encoding, errors=errors)
    def bytes_to_unicode(b, encoding='utf8', errors='strict'):
        # Decode bytes into a unicode string.
        return unicode_(b, encoding=encoding, errors=errors)
else:
    import csv
    zip = zip
    bytes_ = bytes
    unicode_ = str
    string_types = (bytes, str)
    def unicode_to_bytes(s, encoding='utf8', errors='strict'):
        # Encode a str into bytes.
        return s.encode(encoding=encoding, errors=errors)
    def bytes_to_unicode(b, encoding='utf8', errors='strict'):
        # Decode bytes into a str.
        return b.decode(encoding=encoding, errors=errors)
"content_hash": "664c7a7661e870b1b94d431fc3dad4c5",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 65,
"avg_line_length": 23.487179487179485,
"alnum_prop": 0.6550218340611353,
"repo_name": "debanjanmahata/textmining-kit",
"id": "863e158ba681e2971723f4c48f23a930c9dc8753",
"size": "916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/compat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "51008"
}
],
"symlink_target": ""
} |
import os
import abc
import glob
from datetime import datetime
import warnings
import numpy as np
class StaticBase(object):
    """
    Abstract template for the i/o objects used by GriddedStaticBase.

    Concrete subclasses implement read/write/flush/close for one file.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, filename, mode='r', **kwargs):
        """
        Store file name, opening mode and extra keyword arguments.

        Parameters
        ----------
        filename : str
            File name.
        mode : str, optional
            Opening mode. Default: r
        """
        self.filename = filename
        self.mode = mode
        self.kwargs = kwargs

    @abc.abstractmethod
    def read(self, gpi):
        """
        Read the data stored for the given grid point.

        Parameters
        ----------
        gpi : int
            Grid point index.

        Returns
        -------
        data : numpy.ndarray
            Data set.
        """
        return

    @abc.abstractmethod
    def write(self, data):
        """
        Write the given data records.

        Parameters
        ----------
        data : numpy.ndarray
            Data records.
        """
        return

    @abc.abstractmethod
    def flush(self):
        """
        Flush pending data to disk.
        """
        return

    @abc.abstractmethod
    def close(self):
        """
        Close the underlying file.
        """
        return
class TsBase(object):
    """
    Abstract template for the time series i/o objects used by
    GriddedTsBase.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, filename, mode='r', **kwargs):
        """
        Store file name, opening mode and extra keyword arguments.

        Parameters
        ----------
        filename : str
            File name.
        mode : str, optional
            Opening mode. Default: r
        """
        self.filename = filename
        self.mode = mode
        self.kwargs = kwargs

    @abc.abstractmethod
    def read_ts(self, gpi, **kwargs):
        """
        Read the time series stored for the given grid point.

        Parameters
        ----------
        gpi : int
            Grid point index.

        Returns
        -------
        data : object
            pygeobase.object_base.TS object.
        """
        return

    @abc.abstractmethod
    def write_ts(self, gpi, data, **kwargs):
        """
        Write a time series for the given grid point.

        Parameters
        ----------
        gpi : int
            Grid point index.
        data : object
            pygeobase.object_base.TS object.
        """
        return

    def flush(self):
        """
        Flush pending data to disk. Default implementation is a no-op.
        """
        return

    def close(self):
        """
        Close the underlying file. Default implementation is a no-op.
        """
        return
class ImageBase(object):
    """
    Abstract template for i/o objects that read and write image data.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, filename, mode='r', **kwargs):
        """
        Store file name, opening mode and extra keyword arguments.

        Parameters
        ----------
        filename : str
            Filename path.
        mode : str, optional
            Opening mode. Default: r
        """
        self.filename = filename
        self.mode = mode
        self.kwargs = kwargs

    @abc.abstractmethod
    def read(self, **kwargs):
        """
        Read the data of an image file.

        Returns
        -------
        image : object
            pygeobase.object_base.Image object
        """
        return

    def read_masked_data(self, **kwargs):
        """
        Read the data of an image file, masked according to
        specifications. Optional hook; raises until implemented by a
        subclass.

        Returns
        -------
        image : object
            pygeobase.object_base.Image object
        """
        raise NotImplementedError('Please implement to enable.')

    def resample_data(self, image, index, distance, windowRadius, **kwargs):
        """
        Resample (interpolate) image data to arbitrary target locations
        given by index and distance. Optional hook; raises until
        implemented by a subclass.

        Parameters
        ----------
        image : object
            pygeobase.object_base.Image object
        index : np.array
            Look-up table into the image data selecting the elements used
            to interpolate each target location.
        distance : np.array
            Distances of the image data to the target locations.

        Returns
        -------
        image : object
            pygeobase.object_base.Image object
        """
        raise NotImplementedError('Please implement to enable spatial '
                                  'resampling.')

    @abc.abstractmethod
    def write(self, image, **kwargs):
        """
        Write data to an image file.

        Parameters
        ----------
        image : object
            pygeobase.object_base.Image object
        """
        return

    @abc.abstractmethod
    def flush(self):
        """
        Flush pending data to disk.
        """
        return

    @abc.abstractmethod
    def close(self):
        """
        Close the underlying file.
        """
        return
class GriddedBase(object):
    """
    The GriddedBase class uses another IO class together with a grid
    object to read/write a dataset under the given path.

    Data is stored in one file per grid cell; the matching cell file is
    opened transparently before every read or write.

    Parameters
    ----------
    path : string
        Path to dataset.
    grid : pygeogrids.BasicGrid of CellGrid instance
        Grid on which the time series data is stored.
    ioclass : class
        IO class.
    mode : str, optional
        File mode and can be read 'r', write 'w' or append 'a'. Default: 'r'
    fn_format : str, optional
        The string format of the cell files. Default: '{:04d}'
    ioclass_kws : dict, optional
        Additional keyword arguments for the ioclass. Default: None
    """

    __metaclass__ = abc.ABCMeta

    def __init__(self, path, grid, ioclass, mode='r', fn_format='{:04d}',
                 ioclass_kws=None):
        self.path = path
        self.grid = grid
        self.ioclass = ioclass
        self.mode = mode
        self.fn_format = fn_format
        # currently opened cell and file handle (lazy, see _open)
        self.previous_cell = None
        self.fid = None

        if ioclass_kws is None:
            self.ioclass_kws = {}
        else:
            self.ioclass_kws = ioclass_kws

    def __enter__(self):
        """
        Context manager initialization.

        Returns
        -------
        self : GriddedBase object
            self
        """
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """
        Exit the runtime context related to this object. The file will be
        closed. The parameters describe the exception that caused the
        context to be exited.
        """
        self.close()

    def _open(self, gp):
        """
        Open the cell file holding grid point *gp*, closing (and, in
        write/append mode, flushing) the previously opened cell first.

        Parameters
        ----------
        gp : int
            Grid point.
        """
        cell = self.grid.gpi2cell(gp)
        filename = os.path.join(self.path, self.fn_format.format(cell))

        if self.mode == 'r':
            if self.previous_cell != cell:
                self.close()
                self.previous_cell = cell
                self.fid = self.ioclass(filename, mode=self.mode,
                                        **self.ioclass_kws)

        if self.mode in ['w', 'a']:
            if self.previous_cell != cell:
                # flush before switching cells so no buffered data is lost
                self.flush()
                self.close()
                self.previous_cell = cell
                self.fid = self.ioclass(filename, mode=self.mode,
                                        **self.ioclass_kws)

    def _read_lonlat(self, lon, lat, **kwargs):
        """
        Read data for the grid point nearest to a lon/lat coordinate.

        Parameters
        ----------
        lon : float
            Longitude coordinate.
        lat : float
            Latitude coordinate.

        Returns
        -------
        data : dict of values
            data record.
        """
        gp, _ = self.grid.find_nearest_gpi(lon, lat)
        return self._read_gp(gp, **kwargs)

    def _read_gp(self, gp, **kwargs):
        """
        Read data for the given grid point.

        Parameters
        ----------
        gp : int
            Grid point.

        Returns
        -------
        data : numpy.ndarray
            Data set.

        Raises
        ------
        IOError
            If the dataset is open in write/append mode.
        """
        if self.mode in ['w', 'a']:
            raise IOError("File is not open in read mode")

        self._open(gp)
        return self.fid.read(gp, **kwargs)

    def read(self, *args, **kwargs):
        """
        Read data either by grid point index -- read(gp) -- or by
        coordinates -- read(lon, lat), which resolves the nearest gpi.

        Raises
        ------
        ValueError
            If not called with exactly 1 or 2 positional arguments.
        """
        if len(args) == 1:
            return self._read_gp(args[0], **kwargs)
        elif len(args) == 2:
            return self._read_lonlat(args[0], args[1], **kwargs)
        else:
            raise ValueError("Wrong number of arguments")

    def _write_lonlat(self, lon, lat, data, **kwargs):
        """
        Write data for the grid point nearest to a lon/lat coordinate.

        Parameters
        ----------
        lon : float
            Longitude coordinate.
        lat : float
            Latitude coordinate.
        data : numpy.ndarray
            Data records.
        """
        gp, _ = self.grid.find_nearest_gpi(lon, lat)
        return self._write_gp(gp, data, **kwargs)

    def write(self, *args, **kwargs):
        """
        Write data in one of three call forms: write(data) with
        data['gpi'] set, write(gp, data), or write(lon, lat, data).

        Raises
        ------
        ValueError
            If not called with 1, 2 or 3 positional arguments.
        """
        if len(args) == 1:
            # args: data (grid point taken from the record itself)
            self._write_gp(args[0]['gpi'], args[0], **kwargs)
        elif len(args) == 2:
            # args: gp, data
            self._write_gp(args[0], args[1], **kwargs)
        elif len(args) == 3:
            # args: lon, lat, data
            self._write_lonlat(args[0], args[1], args[2], **kwargs)
        else:
            raise ValueError("Wrong number of arguments")

    def _write_gp(self, gp, data, **kwargs):
        """
        Write data for the given grid point.

        Parameters
        ----------
        gp : int
            Grid point.
        data : numpy.ndarray
            Data

        Raises
        ------
        IOError
            If the dataset is open in read mode.
        """
        if self.mode == 'r':
            raise IOError("File is not open in write/append mode")

        self._open(gp)
        self.fid.write(gp, data, **kwargs)

    def iter_gp(self):
        """
        Yield (data, gp) for every grid point of the grid.

        Yields
        ------
        data : pandas.DataFrame
            Data set.
        gp : int
            Grid point.
        """
        gp_info = list(self.grid.grid_points())
        # builtin int: the np.int alias was deprecated in NumPy 1.20 and
        # removed in NumPy 1.24
        gps = np.array(gp_info, dtype=int)[:, 0]

        for gp in gps:
            yield self._read_gp(gp), gp

    def flush(self):
        """
        Flush data of the currently opened cell file, if any.
        """
        if self.fid is not None:
            self.fid.flush()

    def close(self):
        """
        Close the currently opened cell file, if any.
        """
        if self.fid is not None:
            self.fid.close()
            self.fid = None
class GriddedStaticBase(GriddedBase):
    """
    The GriddedStaticBase class uses another IO class together with a grid
    object to read/write a dataset under the given path.

    .. deprecated::
        Use :class:`GriddedBase` instead.
    """
    # NOTE(review): this warning is emitted once, when the class body is
    # executed at import time -- not when the class is instantiated;
    # confirm that is the intended deprecation behavior.
    warnings.warn("GriddedStaticBase is deprecated,"
                  " please use GriddedBase instead.", DeprecationWarning)
class GriddedTsBase(GriddedBase):
    """
    Gridded time series dataset: combines an IO class with a grid object
    to read/write time series data stored under the given path.
    """

    def _read_gp(self, gp, **kwargs):
        """
        Read the time series stored for grid point *gp*.

        Parameters
        ----------
        gp : int
            Grid point.

        Returns
        -------
        data : object
            pygeobase.object_base.TS object
        """
        if self.mode in ('w', 'a'):
            raise IOError("File is not open in read mode")
        self._open(gp)
        return self.fid.read_ts(gp, **kwargs)

    def _write_gp(self, gp, data, **kwargs):
        """
        Write *data* for grid point *gp*, passing the point's lon/lat
        through to the IO class.

        Parameters
        ----------
        gp : int
            Grid point.
        data : object
            pygeobase.object_base.TS object
        """
        if self.mode == 'r':
            raise IOError("File is not open in write/append mode")
        self._open(gp)
        lon, lat = self.grid.gpi2lonlat(gp)
        self.fid.write_ts(gp, data, lon=lon, lat=lat, **kwargs)

    def read_ts(self, *args, **kwargs):
        """
        Deprecated alias for :meth:`read`.
        """
        warnings.warn("read_ts is deprecated, please use read "
                      "instead.", DeprecationWarning)
        return self.read(*args, **kwargs)

    def write_ts(self, *args, **kwargs):
        """
        Deprecated alias for :meth:`write`.
        """
        warnings.warn("write_ts is deprecated, please use write "
                      "instead.", DeprecationWarning)
        return self.write(*args, **kwargs)

    def iter_ts(self):
        """
        Deprecated alias for :meth:`iter_gp`.
        """
        warnings.warn("iter_ts is deprecated, please use iter_gp "
                      "instead.", DeprecationWarning)
        return self.iter_gp()
class MultiTemporalImageBase(object):
    """
    The MultiTemporalImageBase class make use of an ImageBase object to
    read/write a sequence of multi temporal images under a given path.

    Parameters
    ----------
    path : string
        Path to dataset.
    ioclass : class
        IO class.
    mode : str, optional
        File mode and can be read 'r', write 'w' or append 'a'. Default: 'r'
    fname_templ : str
        Filename template of the data to read. Default placeholder for
        parsing datetime information into the fname_templ is "{datetime}".
        e.g. "ASCAT_{datetime}_image.nc" will be translated into the filename
        ASCAT_20070101_image.nc for the date 2007-01-01.
    datetime_format : str
        String specifying the format of the datetime object to be parsed
        into the fname_template.
        e.g. "%Y/%m" will result in 2007/01 for datetime 2007-01-01 12:15:00
    subpath_templ : list, optional
        If given it is used to generate a sub-paths from the given timestamp.
        Each item in the list represents one folder level. This can be used
        if the files for May 2007 are e.g. in folders 2007/05/ then the
        files can be accessed via the list ['%Y', '%m'].
    ioclass_kws : dict
        Additional keyword arguments for the ioclass.
    exact_templ : boolean, optional
        If True then the fname_templ matches the filename exactly.
        If False then the fname_templ will be used in glob to find the file.
    dtime_placeholder : str
        String used in fname_templ as placeholder for datetime.
        Default value is "datetime".
    """

    # Python 2 style abstract-base-class declaration.  Concrete subclasses
    # are expected to implement tstamps_for_daterange() (see the
    # NotImplementedError raised there) to enable date-range iteration.
    __metaclass__ = abc.ABCMeta
def __init__(self, path, ioclass, mode='r', fname_templ="",
             datetime_format="", subpath_templ=None, ioclass_kws=None,
             exact_templ=True, dtime_placeholder="datetime"):
    """
    Store the configuration; no file is opened until first access.
    See the class docstring for the meaning of each parameter.
    """
    self.path = path
    self.ioclass = ioclass
    self.mode = mode
    self.fname_templ = fname_templ
    self.datetime_format = datetime_format
    self.subpath_templ = subpath_templ
    self.exact_templ = exact_templ
    self.dtime_placeholder = dtime_placeholder
    # Lazily opened by _open(); None means "nothing open yet".
    self.fid = None
    self.ioclass_kws = {} if ioclass_kws is None else ioclass_kws
def __enter__(self):
    """
    Enter the runtime context.

    Returns
    -------
    self : MultiTemporalImageBase
        This instance, unchanged.
    """
    return self
def __exit__(self, exc_type, exc_value, traceback):
    """
    Exit the runtime context related to this object and close the file.

    The three parameters describe the exception (if any) that caused the
    context to be exited; they are not inspected here.
    """
    self.close()
def flush(self):
    """
    Flush data.
    """
    fid = self.fid
    if fid is not None:
        fid.flush()
def close(self):
    """
    Close file.
    """
    # Detach first, then close, so self.fid never refers to a closed object.
    fid, self.fid = self.fid, None
    if fid is not None:
        fid.close()
def _open(self, filepath):
    """
    Open file.

    Any previously opened file is closed first, so at most one IO
    object is live at a time.

    Parameters
    ----------
    filepath : str
        Path to file.
    """
    self.close()
    self.fid = self.ioclass(filepath, mode=self.mode, **self.ioclass_kws)
def _search_files(self, timestamp, custom_templ=None, str_param=None,
                  custom_datetime_format=None):
    """
    Search for filenames with the given timestamp. This function is
    used by _build_filename which then checks if a unique filename was
    found.

    Parameters
    ----------
    timestamp: datetime
        Datetime for given filename
    custom_templ : string, optional
        If given the custom_templ is used instead of the fname_templ. This
        is convenient for some datasets where not all file names follow
        the same convention and where the read_image function can choose
        between templates based on some condition.
    custom_datetime_format: string, optional
        If given the custom_datetime_format will be used instead of the
        datetime_format. This adds support to search for multiple files
        for example for a given day, a given month or a specific year.
    str_param : dict, optional
        If given then this dict will be applied to the fname_templ using
        the fname_templ.format(**str_param) notation before the resulting
        string is put into datetime.strftime.

        - example from python documentation
          coord = {'latitude': '37.24N', 'longitude': '-115.81W'}
          'Coordinates: {latitude}, {longitude}'.format(**coord)
          'Coordinates: 37.24N, -115.81W'

    Returns
    -------
    filename : list of str
        Candidate file names. Exactly one element if exact_templ is
        True; otherwise whatever glob matched (possibly empty).
    """
    if custom_templ is not None:
        fname_templ = custom_templ
    else:
        fname_templ = self.fname_templ

    # Substitute the datetime placeholder with the strftime pattern first;
    # the actual timestamp is formatted in below via strftime.
    if custom_datetime_format is not None:
        dformat = {self.dtime_placeholder: custom_datetime_format}
    else:
        dformat = {self.dtime_placeholder: self.datetime_format}

    fname_templ = fname_templ.format(**dformat)

    if str_param is not None:
        fname_templ = fname_templ.format(**str_param)

    sub_path = ''
    if self.subpath_templ is not None:
        for s in self.subpath_templ:
            sub_path = os.path.join(sub_path, timestamp.strftime(s))

    search_file = os.path.join(self.path, sub_path,
                               timestamp.strftime(fname_templ))

    if self.exact_templ:
        return [search_file]

    # glob already returns an empty list when nothing matches, so the old
    # "if not filename: filename = []" fallback was dead code.
    return glob.glob(search_file)
def _build_filename(self, timestamp, custom_templ=None,
                    str_param=None):
    """
    This function uses _search_files to find the correct
    filename and checks if the search was unambiguous.

    Parameters
    ----------
    timestamp: datetime
        datetime for given filename
    custom_templ : string, optional
        If given the fname_templ is not used but the custom_templ. This
        is convenient for some datasets where not all file names follow
        the same convention and where the read_image function can choose
        between templates based on some condition.
    str_param : dict, optional
        If given then this dict will be applied to the fname_templ using
        the fname_templ.format(**str_param) notation before the resulting
        string is put into datetime.strftime.

    Raises
    ------
    IOError
        If no file or more than one file was found.
    """
    candidates = self._search_files(timestamp, custom_templ=custom_templ,
                                    str_param=str_param)
    if not candidates:
        raise IOError("No file found for {:}".format(timestamp.ctime()))
    if len(candidates) > 1:
        raise IOError(
            "File search is ambiguous {:}".format(candidates))

    return candidates[0]
def _assemble_img(self, timestamp, mask=False, **kwargs):
    """
    Function between read_img and _build_filename that can
    be used to read a different file for each parameter in an image
    dataset. In the standard implementation it is assumed
    that all necessary information of an image is stored in the
    one file whose filename is built by the _build_filename function.

    Parameters
    ----------
    timestamp : datetime
        timestamp of the image to assemble
    mask : optional, boolean
        Switch to read already masked data which requires the
        implementation of an read_mask_data() in the ioclass

    Returns
    -------
    img: object
        pygeobase.object_base.Image object
    """
    filepath = self._build_filename(timestamp, **kwargs)
    # Bug fix: _open() only accepts the file path.  Forwarding **kwargs
    # here raised TypeError whenever keyword arguments (e.g. custom_templ)
    # were supplied to read().
    self._open(filepath)

    # The timestamp is passed on to the ioclass read call.
    kwargs['timestamp'] = timestamp
    if mask is False:
        img = self.fid.read(**kwargs)
    else:
        img = self.fid.read_masked_data(**kwargs)

    return img
def read(self, timestamp, **kwargs):
    """
    Return an image for a specific timestamp.

    Parameters
    ----------
    timestamp : datetime.datetime
        Time stamp.

    Returns
    -------
    image : object
        pygeobase.object_base.Image object
    """
    # kwargs are forwarded to _assemble_img (and from there to
    # _build_filename and the ioclass read method).
    return self._assemble_img(timestamp, **kwargs)
def write(self, timestamp, data, **kwargs):
    """
    Write image data for a given timestamp.

    Parameters
    ----------
    timestamp : datetime.datetime
        exact timestamp of the image
    data : object
        pygeobase.object_base.Image object
    """
    if self.mode in ['r']:
        raise IOError("File is not open in write/append mode")

    filename = self._build_filename(timestamp)
    # NOTE(review): unlike read(), this never calls _open(), so self.fid
    # may still be None here unless a file was opened earlier -- confirm
    # whether callers are expected to open the ioclass beforehand.
    self.fid.write(filename, data, **kwargs)
def get_tstamp_from_filename(self, filename):
    """
    Return the timestamp contained in a given file name in accordance to
    the defined fname_templ.

    Parameters
    ----------
    filename : string
        File name.

    Returns
    -------
    tstamp : datetime.dateime
        Time stamp according to fname_templ as datetime object.
    """
    # The '{' of the "{datetime}" placeholder sits one character before the
    # placeholder name in the template, and marks the column at which the
    # formatted datetime starts in the file name.
    start = self.fname_templ.find(self.dtime_placeholder) - 1
    # A formatted datetime always has the same length, so format "now"
    # once to learn how many characters to consume.
    end = start + len(datetime.now().strftime(self.datetime_format))
    return datetime.strptime(filename[start:end], self.datetime_format)
def tstamps_for_daterange(self, start_date, end_date):
    """
    Return all valid timestamps in a given date range.

    This method must be implemented if iteration over
    images should be possible.

    Parameters
    ----------
    start_date : datetime.date or datetime.datetime
        start date
    end_date : datetime.date or datetime.datetime
        end date

    Returns
    -------
    dates : list
        list of datetimes
    """
    # Template-method hook: only a concrete dataset class knows its own
    # temporal sampling, so the base class deliberately refuses.
    raise NotImplementedError(
        "Please implement to enable iteration over date ranges.")
def iter_images(self, start_date, end_date, **kwargs):
    """
    Yield all images for a given date range.

    Parameters
    ----------
    start_date : datetime.date or datetime.datetime
        start date
    end_date : datetime.date or datetime.datetime
        end date

    Yields
    ------
    image : object
        pygeobase.object_base.Image object

    Raises
    ------
    IOError
        If no timestamps exist in the given range.
    """
    timestamps = self.tstamps_for_daterange(start_date, end_date)
    if not timestamps:
        raise IOError("no files found for given date range")
    for timestamp in timestamps:
        yield self.read(timestamp, **kwargs)
def daily_images(self, day, **kwargs):
    """
    Yield all images for a single day.

    Parameters
    ----------
    day : datetime.date
        Day to iterate over.

    Yields
    ------
    img : object
        pygeobase.object_base.Image object
    """
    # A day is just a degenerate date range with start == end.
    for image in self.iter_images(day, day, **kwargs):
        yield image
def resample_image(self, *args, **kwargs):
return self.fid.resample_data(*args, **kwargs) | {
"content_hash": "0477ee9d9fbfd5afda7eeb2051a4ce4a",
"timestamp": "",
"source": "github",
"line_count": 929,
"max_line_length": 78,
"avg_line_length": 27.980624327233585,
"alnum_prop": 0.5421635762098946,
"repo_name": "christophreimer/pygeobase",
"id": "586f693737654c3515e967cf703f299f6b80e171",
"size": "27706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygeobase/io_base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "33610"
}
],
"symlink_target": ""
} |
from backend.site.serializers import ContactSubmissionSerializer
def test_contact_submission_serializer():
    """Exercise HTML handling and validation of ContactSubmissionSerializer."""
    serializer = ContactSubmissionSerializer()

    # check it escapes html tags, and converts paragraphs to html
    payload = {'message': '<h1>hello</h1>\nworld'}
    rendered = serializer.message_to_html(payload)['message']
    assert '<h1>' not in rendered, 'it should escape html from user-submitted messages'
    assert rendered.count('<p>') == 2, 'it should wrap paragraphs in <p> tags'
    assert rendered.count('</p>') == 2, 'it should wrap paragraphs in <p> tags'

    # check required fields
    empty_submission = {'name': None,
                        'email': None,
                        'message': None}
    _, errors = serializer.load(empty_submission)
    assert 'Name is required.' in errors['name']
    assert 'Email is required.' in errors['email']
    assert 'Message is required.' in errors['message']

    # check email must be valid
    _, errors = serializer.load({'email': 'invalid'})
    assert 'Not a valid email address.' in errors['email']
| {
"content_hash": "49bb3ebc34192630800af67923497353",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 82,
"avg_line_length": 43.041666666666664,
"alnum_prop": 0.6340755082284608,
"repo_name": "briancappello/flask-react-spa",
"id": "055435fadba67dd09ad46dd49a072e0949a51c4c",
"size": "1033",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/site/serializers/test_contact_submission_serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8579"
},
{
"name": "Dockerfile",
"bytes": "1009"
},
{
"name": "HTML",
"bytes": "18126"
},
{
"name": "JavaScript",
"bytes": "169637"
},
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Mako",
"bytes": "509"
},
{
"name": "Python",
"bytes": "282850"
},
{
"name": "Ruby",
"bytes": "5065"
},
{
"name": "Shell",
"bytes": "5231"
}
],
"symlink_target": ""
} |
"""Miscellaneous utility functions."""
from __future__ import absolute_import, division, with_statement
try:
# You can get the monotime module from:
# http://pypi.python.org/pypi/Monotime/
import monotime
except ImportError:
pass
import time
# python 3.3 has time.monotonic(), or the monotime module might have
# added it.  Fall back to wall-clock time when neither is available.
monotime_impl = getattr(time, 'monotonic', None)
if monotime_impl is None:
    import logging
    logging.warning("time.monotonic() not available; using time.time()")
    monotime_impl = time.time


# wrap monotime_impl so that monotime_impl can be reassigned in unit tests
def monotime():
    """Return the current time from the preferred (monotonic) clock."""
    return monotime_impl()
class ObjectDict(dict):
    """Makes a dictionary behave like an object, with attribute-style access."""

    def __getattr__(self, name):
        # Only called for attributes not found the normal way; map missing
        # keys onto AttributeError as attribute access demands.
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value
import os.path


def import_object(name):
    """Imports an object by name.

    import_object('x.y.z') is equivalent to 'from x.y import z'.

    >>> import tornado.escape
    >>> import_object('tornado.escape') is tornado.escape
    True
    >>> import_object('tornado.escape.utf8') is tornado.escape.utf8
    True
    """
    module_name, _, attr = name.rpartition('.')
    # fromlist forces __import__ to return the leaf module, not the package.
    module = __import__(module_name, None, None, [attr], 0)
    return getattr(module, attr)
# Fake byte literal support: In python 2.6+, you can say b"foo" to get
# a byte literal (str in 2.x, bytes in 3.x). There's no way to do this
# in a way that supports 2.5, though, so we need a function wrapper
# to convert our string literals. b() should only be applied to literal
# latin1 strings. Once we drop support for 2.5, we can remove this function
# and just use byte literals.
if str is unicode:
    # True only after 2to3 conversion (where `unicode` becomes `str`):
    # encode string literals down to latin1 bytes.
    def b(s):
        return s.encode('latin1')
    bytes_type = bytes
else:
    # Plain Python 2: str literals are already byte strings.
    def b(s):
        return s
    bytes_type = str
def raise_exc_info(exc_info):
    """Re-raise an exception (with original traceback) from an exc_info tuple.

    The argument is a ``(type, value, traceback)`` tuple as returned by
    `sys.exc_info`.
    """
    # 2to3 isn't smart enough to convert three-argument raise
    # statements correctly in some cases.
    # NOTE: the three-argument raise below is Python 2-only syntax.
    if isinstance(exc_info[1], exc_info[0]):
        raise exc_info[1], None, exc_info[2]
        # After 2to3: raise exc_info[1].with_traceback(exc_info[2])
    else:
        # I think this branch is only taken for string exceptions,
        # which were removed in Python 2.6.
        raise exc_info[0], exc_info[1], exc_info[2]
        # After 2to3: raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
def doctests():
    """Return a ``unittest`` suite running this module's doctest examples."""
    import doctest
    suite = doctest.DocTestSuite()
    return suite
| {
"content_hash": "11e26e0817438dfd0be669dba4799efd",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 80,
"avg_line_length": 30.573033707865168,
"alnum_prop": 0.6515986769570011,
"repo_name": "shineyear/catawampus",
"id": "6f60911416365551bda162d7e2ecd432e67e8534",
"size": "2721",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tr/vendor/tornado/tornado/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3855"
},
{
"name": "Python",
"bytes": "664568"
},
{
"name": "Shell",
"bytes": "85447"
}
],
"symlink_target": ""
} |
"""Real NVP bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import core as layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import template as template_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"RealNVP",
"real_nvp_default_template"
]
class RealNVP(bijector.Bijector):
  """RealNVP "affine coupling layer" for vector-valued events.

  Real NVP models a normalizing flow on a `D`-dimensional distribution via a
  single `D-d`-dimensional conditional distribution [(Dinh et al., 2017)][1]:

  `y[d:D] = x[d:D] * math_ops.exp(log_scale_fn(x[0:d])) + shift_fn(x[0:d])`
  `y[0:d] = x[0:d]`

  The last `D-d` units are scaled and shifted based on the first `d` units
  only, while the first `d` units are 'masked' and left unchanged. Real NVP's
  `shift_and_log_scale_fn` computes vector-valued quantities. For
  scale-and-shift transforms that do not depend on any masked units, i.e.
  `d=0`, use the `tfb.Affine` bijector with learned parameters instead.

  Masking is currently only supported for base distributions with
  `event_ndims=1`. For more sophisticated masking schemes like checkerboard or
  channel-wise masking [(Papamakarios et al., 2016)][4], use the `tfb.Permute`
  bijector to re-order desired masked units into the first `d` units. For base
  distributions with `event_ndims > 1`, use the `tfb.Reshape` bijector to
  flatten the event shape.

  Recall that the MAF bijector [(Papamakarios et al., 2016)][4] implements a
  normalizing flow via an autoregressive transformation. MAF and IAF have
  opposite computational tradeoffs - MAF can train all units in parallel but
  must sample units sequentially, while IAF must train units sequentially but
  can sample in parallel. In contrast, Real NVP can compute both forward and
  inverse computations in parallel. However, the lack of an autoregressive
  transformations makes it less expressive on a per-bijector basis.

  A "valid" `shift_and_log_scale_fn` must compute each `shift` (aka `loc` or
  "mu" in [Papamakarios et al. (2016)][4]) and `log(scale)` (aka "alpha" in
  [Papamakarios et al. (2016)][4]) such that each are broadcastable with the
  arguments to `forward` and `inverse`, i.e., such that the calculations in
  `forward`, `inverse` [below] are possible. For convenience,
  `real_nvp_default_nvp` is offered as a possible `shift_and_log_scale_fn`
  function.

  NICE [(Dinh et al., 2014)][2] is a special case of the Real NVP bijector
  which discards the scale transformation, resulting in a constant-time
  inverse-log-determinant-Jacobian. To use a NICE bijector instead of Real
  NVP, `shift_and_log_scale_fn` should return `(shift, None)`, and
  `is_constant_jacobian` should be set to `True` in the `RealNVP` constructor.
  Calling `real_nvp_default_template` with `shift_only=True` returns one such
  NICE-compatible `shift_and_log_scale_fn`.

  Caching: the scalar input depth `D` of the base distribution is not known at
  construction time. The first call to any of `forward(x)`, `inverse(x)`,
  `inverse_log_det_jacobian(x)`, or `forward_log_det_jacobian(x)` memoizes
  `D`, which is re-used in subsequent calls. This shape must be known prior to
  graph execution (which is the case if using tf.layers).

  #### Example Use

  ```python
  import tensorflow_probability as tfp
  tfd = tfp.distributions
  tfb = tfp.bijectors

  # A common choice for a normalizing flow is to use a Gaussian for the base
  # distribution. (However, any continuous distribution would work.) E.g.,
  num_dims = 3
  num_samples = 1
  nvp = tfd.TransformedDistribution(
      distribution=tfd.MultivariateNormalDiag(loc=np.zeros(num_dims)),
      bijector=tfb.RealNVP(
          num_masked=2,
          shift_and_log_scale_fn=tfb.real_nvp_default_template(
              hidden_layers=[512, 512])))

  x = nvp.sample(num_samples)
  nvp.log_prob(x)
  nvp.log_prob(np.zeros([num_samples, num_dims]))
  ```

  For more examples, see [Jang (2018)][3].

  #### References

  [1]: Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density Estimation
       using Real NVP. In _International Conference on Learning
       Representations_, 2017. https://arxiv.org/abs/1605.08803

  [2]: Laurent Dinh, David Krueger, and Yoshua Bengio. NICE: Non-linear
       Independent Components Estimation. _arXiv preprint arXiv:1410.8516_,
       2014. https://arxiv.org/abs/1410.8516

  [3]: Eric Jang. Normalizing Flows Tutorial, Part 2: Modern Normalizing Flows.
       _Technical Report_, 2018. http://blog.evjang.com/2018/01/nf2.html

  [4]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
       Autoregressive Flow for Density Estimation. In _Neural Information
       Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
  """

  @deprecation.deprecated(
      "2018-10-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.contrib.distributions`.",
      warn_once=True)
  def __init__(self,
               num_masked,
               shift_and_log_scale_fn,
               is_constant_jacobian=False,
               validate_args=False,
               name=None):
    """Creates the Real NVP or NICE bijector.

    Args:
      num_masked: Python `int` indicating that the first `d` units of the event
        should be masked. Must be in the closed interval `[1, D-1]`, where `D`
        is the event size of the base distribution.
      shift_and_log_scale_fn: Python `callable` which computes `shift` and
        `log_scale` from both the forward domain (`x`) and the inverse domain
        (`y`). Calculation must respect the "autoregressive property" (see
        class docstring). Suggested default
        `masked_autoregressive_default_template(hidden_layers=...)`.
        Typically the function contains `tf.Variables` and is wrapped using
        `tf.make_template`. Returning `None` for either (both) `shift`,
        `log_scale` is equivalent to (but more efficient than) returning zero.
      is_constant_jacobian: Python `bool`. Default: `False`. When `True` the
        implementation assumes `log_scale` does not depend on the forward
        domain (`x`) or inverse domain (`y`) values. (No validation is made;
        `is_constant_jacobian=False` is always safe but possibly
        computationally inefficient.)
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str`, name given to ops managed by this object.

    Raises:
      ValueError: If num_masked < 1.
    """
    name = name or "real_nvp"
    if num_masked <= 0:
      raise ValueError("num_masked must be a positive integer.")
    self._num_masked = num_masked
    # At construction time, we don't know input_depth.
    self._input_depth = None
    self._shift_and_log_scale_fn = shift_and_log_scale_fn
    super(RealNVP, self).__init__(
        forward_min_event_ndims=1,
        is_constant_jacobian=is_constant_jacobian,
        validate_args=validate_args,
        name=name)

  def _cache_input_depth(self, x):
    """Memoizes the rightmost (event) dimension of `x` on first use."""
    if self._input_depth is None:
      self._input_depth = tensor_shape.dimension_value(
          x.shape.with_rank_at_least(1)[-1])
      if self._input_depth is None:
        raise NotImplementedError(
            "Rightmost dimension must be known prior to graph execution.")
      if self._num_masked >= self._input_depth:
        raise ValueError(
            "Number of masked units must be smaller than the event size.")

  def _forward(self, x):
    self._cache_input_depth(x)
    # Performs scale and shift of the last D-d units, conditioned on the
    # first d (masked) units, which pass through unchanged.
    x0, x1 = x[:, :self._num_masked], x[:, self._num_masked:]
    shift, log_scale = self._shift_and_log_scale_fn(
        x0, self._input_depth - self._num_masked)
    y1 = x1
    if log_scale is not None:
      y1 *= math_ops.exp(log_scale)
    if shift is not None:
      y1 += shift
    y = array_ops.concat([x0, y1], axis=-1)
    return y

  def _inverse(self, y):
    self._cache_input_depth(y)
    # Performs un-shift and un-scale (exact inverse of _forward).
    y0, y1 = y[:, :self._num_masked], y[:, self._num_masked:]
    shift, log_scale = self._shift_and_log_scale_fn(
        y0, self._input_depth - self._num_masked)
    x1 = y1
    if shift is not None:
      x1 -= shift
    if log_scale is not None:
      x1 *= math_ops.exp(-log_scale)
    x = array_ops.concat([y0, x1], axis=-1)
    return x

  def _inverse_log_det_jacobian(self, y):
    self._cache_input_depth(y)
    y0 = y[:, :self._num_masked]
    _, log_scale = self._shift_and_log_scale_fn(
        y0, self._input_depth - self._num_masked)
    if log_scale is None:
      # NICE case: pure shift, so the Jacobian determinant is 1.
      return constant_op.constant(0., dtype=y.dtype, name="ildj")
    return -math_ops.reduce_sum(log_scale, axis=-1)

  def _forward_log_det_jacobian(self, x):
    self._cache_input_depth(x)
    x0 = x[:, :self._num_masked]
    _, log_scale = self._shift_and_log_scale_fn(
        x0, self._input_depth - self._num_masked)
    if log_scale is None:
      return constant_op.constant(0., dtype=x.dtype, name="fldj")
    return math_ops.reduce_sum(log_scale, axis=-1)
@deprecation.deprecated(
    "2018-10-01",
    "The TensorFlow Distributions library has moved to "
    "TensorFlow Probability "
    "(https://github.com/tensorflow/probability). You "
    "should update all references to use `tfp.distributions` "
    "instead of `tf.contrib.distributions`.",
    warn_once=True)
def real_nvp_default_template(
    hidden_layers,
    shift_only=False,
    activation=nn_ops.relu,
    name=None,
    *args,
    **kwargs):
  """Build a scale-and-shift function using a multi-layer neural network.

  This will be wrapped in a make_template to ensure the variables are only
  created once. It takes the `d`-dimensional input x[0:d] and returns the
  `D-d` dimensional outputs `loc` ("mu") and `log_scale` ("alpha").

  Arguments:
    hidden_layers: Python `list`-like of non-negative integer, scalars
      indicating the number of units in each hidden layer. Default:
      `[512, 512]`.
    shift_only: Python `bool` indicating if only the `shift` term shall be
      computed (i.e. NICE bijector). Default: `False`.
    activation: Activation function (callable). Explicitly setting to `None`
      implies a linear activation.
    name: A name for ops managed by this function. Default:
      "real_nvp_default_template".
    *args: `tf.layers.dense` arguments.
    **kwargs: `tf.layers.dense` keyword arguments.

  Returns:
    shift: `Float`-like `Tensor` of shift terms ("mu" in
      [Papamakarios et al. (2016)][1]).
    log_scale: `Float`-like `Tensor` of log(scale) terms ("alpha" in
      [Papamakarios et al. (2016)][1]).

  Raises:
    NotImplementedError: if rightmost dimension of `inputs` is unknown prior
      to graph execution.

  #### References

  [1]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
       Autoregressive Flow for Density Estimation. In _Neural Information
       Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
  """

  with ops.name_scope(name, "real_nvp_default_template"):

    def _fn(x, output_units):
      """Fully connected MLP parameterized via `real_nvp_template`."""
      for units in hidden_layers:
        x = layers.dense(
            inputs=x,
            units=units,
            activation=activation,
            *args,
            **kwargs)
      # Final linear layer emits shift only, or shift and log_scale stacked.
      x = layers.dense(
          inputs=x,
          units=(1 if shift_only else 2) * output_units,
          activation=None,
          *args,
          **kwargs)
      if shift_only:
        return x, None
      shift, log_scale = array_ops.split(x, 2, axis=-1)
      return shift, log_scale

    return template_ops.make_template(
        "real_nvp_default_template", _fn)
| {
"content_hash": "971364b2118f32cd29b57b06e529a45d",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 80,
"avg_line_length": 40.450331125827816,
"alnum_prop": 0.6742796332678455,
"repo_name": "hfp/tensorflow-xsmm",
"id": "ab585a7de670b96c4e350684122c6401cf5f74e9",
"size": "12905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distributions/python/ops/bijectors/real_nvp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4882"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "523814"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "53558932"
},
{
"name": "CMake",
"bytes": "207176"
},
{
"name": "Dockerfile",
"bytes": "39024"
},
{
"name": "Go",
"bytes": "1303624"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "896901"
},
{
"name": "Jupyter Notebook",
"bytes": "2618412"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "75333"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102889"
},
{
"name": "PHP",
"bytes": "12166"
},
{
"name": "Pascal",
"bytes": "221"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "43811576"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "502374"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
from quantum.openstack.common import cfg
# Defaults used when the corresponding options are absent from the
# configuration file.
DEFAULT_VLAN_RANGES = []
DEFAULT_INTERFACE_MAPPINGS = []

# Tenant network / VLAN options, registered under the [VLANS] section.
vlan_opts = [
    cfg.StrOpt('tenant_network_type', default='local',
               help="Network type for tenant networks "
                    "(local, vlan, or none)"),
    cfg.ListOpt('network_vlan_ranges',
                default=DEFAULT_VLAN_RANGES,
                help="List of <physical_network>:<vlan_min>:<vlan_max> "
                     "or <physical_network>"),
]

# Database connection options, registered under the [DATABASE] section.
database_opts = [
    cfg.StrOpt('sql_connection', default='sqlite://'),
    # -1 means retry forever.
    cfg.IntOpt('sql_max_retries', default=-1),
    cfg.IntOpt('reconnect_interval', default=2),
]

# Linux bridge options, registered under the [LINUX_BRIDGE] section.
bridge_opts = [
    cfg.ListOpt('physical_interface_mappings',
                default=DEFAULT_INTERFACE_MAPPINGS,
                help="List of <physical_network>:<physical_interface>"),
]

# Agent behaviour options, registered under the [AGENT] section.
agent_opts = [
    # Seconds between agent polling cycles.
    cfg.IntOpt('polling_interval', default=2),
    cfg.StrOpt('root_helper', default='sudo'),
]

# Register each option group with the global config object so the values
# become available as cfg.CONF.<SECTION>.<option>.
cfg.CONF.register_opts(vlan_opts, "VLANS")
cfg.CONF.register_opts(database_opts, "DATABASE")
cfg.CONF.register_opts(bridge_opts, "LINUX_BRIDGE")
cfg.CONF.register_opts(agent_opts, "AGENT")
| {
"content_hash": "ca8aff8f54ca07c9aec681b6e5733d03",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 72,
"avg_line_length": 29.86842105263158,
"alnum_prop": 0.6343612334801763,
"repo_name": "aristanetworks/arista-ovs-quantum",
"id": "45e86fa224054a06e4288221cb847f82a4760263",
"size": "1913",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "quantum/plugins/linuxbridge/common/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Perl",
"bytes": "235"
},
{
"name": "Python",
"bytes": "2568389"
},
{
"name": "Scala",
"bytes": "4525"
},
{
"name": "Shell",
"bytes": "7843"
},
{
"name": "XML",
"bytes": "50907"
}
],
"symlink_target": ""
} |
"""Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS
# environment
import ftplib
import asyncore
import asynchat
import socket
import StringIO
import errno
import os
try:
import ssl
except ImportError:
ssl = None
from unittest import TestCase, SkipTest, skipUnless
from test import test_support
from test.test_support import HOST, HOSTv6
threading = test_support.import_module('threading')
# the dummy data returned by server over the data channel when
# RETR, LIST and NLST commands are issued
# (lines are CRLF-terminated, as they would be on the wire)
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
class DummyDTPHandler(asynchat.async_chat):
    """Server-side data channel (DTP) handler used by DummyFTPHandler.

    Everything received on the data connection is accumulated into
    ``baseclass.last_received_data`` so the tests can inspect it.
    """

    dtp_conn_closed = False

    def __init__(self, conn, baseclass):
        asynchat.async_chat.__init__(self, conn)
        # baseclass is the control-channel handler that spawned us.
        self.baseclass = baseclass
        self.baseclass.last_received_data = ''

    def handle_read(self):
        self.baseclass.last_received_data += self.recv(1024)

    def handle_close(self):
        # XXX: this method can be called many times in a row for a single
        # connection, including in clear-text (non-TLS) mode.
        # (behaviour witnessed with test_data_connection)
        if not self.dtp_conn_closed:
            self.baseclass.push('226 transfer complete')
            self.close()
            # Setting the instance attribute shadows the class-level default,
            # so the guard above fires only once per connection.
            self.dtp_conn_closed = True

    def handle_error(self):
        # Re-raise instead of asyncore's default logging so tests fail loudly.
        raise
class DummyFTPHandler(asynchat.async_chat):
    """In-process FTP control-channel server used by the test suite."""

    # Data-channel handler class; looked up through self in the cmd_*
    # methods so subclasses can substitute their own handler.
    dtp_handler = DummyDTPHandler
def __init__(self, conn):
    asynchat.async_chat.__init__(self, conn)
    # FTP control lines are CRLF-terminated.
    self.set_terminator("\r\n")
    self.in_buffer = []
    # Data-channel handler instance, created by cmd_port/cmd_pasv/etc.
    self.dtp = None
    self.last_received_cmd = None
    self.last_received_data = ''
    # Canned reply that, when set, is pushed before handling the next command.
    self.next_response = ''
    # presumably the REST restart offset; set by a handler outside this
    # view -- TODO confirm
    self.rest = None
    self.next_retr_data = RETR_DATA
    # Greeting sent as soon as the client connects.
    self.push('220 welcome')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = ''.join(self.in_buffer)
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data + '\r\n')
def cmd_port(self, arg):
addr = map(int, arg.split(','))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=10)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
sock = socket.socket()
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(10)
ip, port = sock.getsockname()[:2]
ip = ip.replace('.', ',')
p1, p2 = divmod(port, 256)
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=10)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
sock = socket.socket(socket.AF_INET6)
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(10)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_rest(self, arg):
self.rest = arg
self.push('350 rest ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
if self.rest is not None:
offset = int(self.rest)
else:
offset = 0
self.dtp.push(self.next_retr_data[offset:])
self.dtp.close_when_done()
self.rest = None
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
def cmd_setlongretr(self, arg):
# For testing. Next RETR will return long line.
self.next_retr_data = 'x' * int(arg)
self.push('125 setlongretr ok')
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
    """Asyncore-based FTP server running in its own thread.

    Accepts a single control connection, hands it to ``handler`` and
    then stops listening.
    """
    handler = DummyFTPHandler
    def __init__(self, address, af=socket.AF_INET):
        threading.Thread.__init__(self)
        asyncore.dispatcher.__init__(self)
        self.create_socket(af, socket.SOCK_STREAM)
        self.bind(address)
        self.listen(5)
        self.active = False
        self.active_lock = threading.Lock()
        self.host, self.port = self.socket.getsockname()[:2]
    def start(self):
        assert not self.active
        self.__flag = threading.Event()
        threading.Thread.start(self)
        # Block until run() has actually entered its polling loop.
        self.__flag.wait()
    def run(self):
        self.active = True
        self.__flag.set()
        # Poll in short steps so stop() can flip self.active promptly.
        while self.active and asyncore.socket_map:
            self.active_lock.acquire()
            asyncore.loop(timeout=0.1, count=1)
            self.active_lock.release()
        asyncore.close_all(ignore_all=True)
    def stop(self):
        assert self.active
        self.active = False
        # Wait for run() to exit and close every dispatcher.
        self.join()
    def handle_accept(self):
        conn, addr = self.accept()
        # Rebinds the *instance* attribute, so tests can reach the live
        # handler via server.handler; only one connection is served.
        self.handler = self.handler(conn)
        self.close()
    def handle_connect(self):
        self.close()
    handle_read = handle_connect
    def writable(self):
        return 0
    def handle_error(self):
        raise
if ssl is not None:
    CERTFILE = os.path.join(os.path.dirname(__file__), "keycert.pem")
    class SSLConnection(object, asyncore.dispatcher):
        """An asyncore.dispatcher subclass supporting TLS/SSL."""
        # Handshake / shutdown state flags, driven by the event loop.
        _ssl_accepting = False
        _ssl_closing = False
        def secure_connection(self):
            # Wrap the plain socket; the handshake itself is performed
            # incrementally via handle_read_event/handle_write_event.
            self.socket = ssl.wrap_socket(self.socket, suppress_ragged_eofs=False,
                                          certfile=CERTFILE, server_side=True,
                                          do_handshake_on_connect=False,
                                          ssl_version=ssl.PROTOCOL_SSLv23)
            self._ssl_accepting = True
        def _do_ssl_handshake(self):
            try:
                self.socket.do_handshake()
            except ssl.SSLError, err:
                if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    # Non-blocking handshake not finished yet; retry on
                    # the next read/write event.
                    return
                elif err.args[0] == ssl.SSL_ERROR_EOF:
                    return self.handle_close()
                raise
            except socket.error, err:
                if err.args[0] == errno.ECONNABORTED:
                    return self.handle_close()
            else:
                # Handshake completed without error.
                self._ssl_accepting = False
        def _do_ssl_shutdown(self):
            self._ssl_closing = True
            try:
                # Revert the SSL socket back to a plain one.
                self.socket = self.socket.unwrap()
            except ssl.SSLError, err:
                if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    return
            except socket.error, err:
                # Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
                # from OpenSSL's SSL_shutdown(), corresponding to a
                # closed socket condition. See also:
                # http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html
                pass
            self._ssl_closing = False
            super(SSLConnection, self).close()
        def handle_read_event(self):
            if self._ssl_accepting:
                self._do_ssl_handshake()
            elif self._ssl_closing:
                self._do_ssl_shutdown()
            else:
                super(SSLConnection, self).handle_read_event()
        def handle_write_event(self):
            if self._ssl_accepting:
                self._do_ssl_handshake()
            elif self._ssl_closing:
                self._do_ssl_shutdown()
            else:
                super(SSLConnection, self).handle_write_event()
        def send(self, data):
            try:
                return super(SSLConnection, self).send(data)
            except ssl.SSLError, err:
                # Report "nothing sent" for retryable/terminal SSL states.
                if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
                                   ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    return 0
                raise
        def recv(self, buffer_size):
            try:
                return super(SSLConnection, self).recv(buffer_size)
            except ssl.SSLError, err:
                if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    return ''
                if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
                    self.handle_close()
                    return ''
                raise
        def handle_error(self):
            raise
        def close(self):
            # If a live TLS session exists, shut it down cleanly first.
            if (isinstance(self.socket, ssl.SSLSocket) and
                self.socket._sslobj is not None):
                self._do_ssl_shutdown()
    class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
        """A DummyDTPHandler subclass supporting TLS/SSL."""
        def __init__(self, conn, baseclass):
            DummyDTPHandler.__init__(self, conn, baseclass)
            # Wrap the data channel only after the client issued PROT P.
            if self.baseclass.secure_data_channel:
                self.secure_connection()
    class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
        """A DummyFTPHandler subclass supporting TLS/SSL."""
        dtp_handler = DummyTLS_DTPHandler
        def __init__(self, conn):
            DummyFTPHandler.__init__(self, conn)
            self.secure_data_channel = False
        def cmd_auth(self, line):
            """Set up secure control channel."""
            self.push('234 AUTH TLS successful')
            self.secure_connection()
        def cmd_pbsz(self, line):
            """Negotiate size of buffer for secure data transfer.
            For TLS/SSL the only valid value for the parameter is '0'.
            Any other value is accepted but ignored.
            """
            self.push('200 PBSZ=0 successful.')
        def cmd_prot(self, line):
            """Setup un/secure data channel."""
            arg = line.upper()
            if arg == 'C':
                self.push('200 Protection set to Clear')
                self.secure_data_channel = False
            elif arg == 'P':
                self.push('200 Protection set to Private')
                self.secure_data_channel = True
            else:
                self.push("502 Unrecognized PROT type (use C or P).")
    class DummyTLS_FTPServer(DummyFTPServer):
        handler = DummyTLS_FTPHandler
class TestFTPClass(TestCase):
    """Exercise ftplib.FTP against the in-process DummyFTPServer.

    Each test gets a fresh server/client pair.  The dummy server replies
    with canned responses, so the assertions below verify only the
    client-side protocol handling.

    Fix: ``test_mkd`` was defined twice with identical bodies; the later
    definition silently shadowed the earlier one.  The duplicate has
    been removed.
    """
    def setUp(self):
        # Port 0 lets the OS pick a free ephemeral port.
        self.server = DummyFTPServer((HOST, 0))
        self.server.start()
        self.client = ftplib.FTP(timeout=10)
        self.client.connect(self.server.host, self.server.port)
    def tearDown(self):
        self.client.close()
        self.server.stop()
    def test_getwelcome(self):
        self.assertEqual(self.client.getwelcome(), '220 welcome')
    def test_sanitize(self):
        # Passwords must be masked in debug output.
        self.assertEqual(self.client.sanitize('foo'), repr('foo'))
        self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
        self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
    def test_exceptions(self):
        # Reply-code ranges map to specific exception classes:
        # 4xx -> error_temp, 5xx -> error_perm, other -> error_proto.
        self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
        self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
        self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
    def test_all_errors(self):
        # Every public exception must be caught by ftplib.all_errors.
        exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
                      ftplib.error_proto, ftplib.Error, IOError, EOFError)
        for x in exceptions:
            try:
                raise x('exception not included in all_errors set')
            except ftplib.all_errors:
                pass
    def test_set_pasv(self):
        # passive mode is supposed to be enabled by default
        self.assertTrue(self.client.passiveserver)
        self.client.set_pasv(True)
        self.assertTrue(self.client.passiveserver)
        self.client.set_pasv(False)
        self.assertFalse(self.client.passiveserver)
    def test_voidcmd(self):
        # voidcmd() accepts only 2xx replies.
        self.client.voidcmd('echo 200')
        self.client.voidcmd('echo 299')
        self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
        self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
    def test_login(self):
        self.client.login()
    def test_acct(self):
        self.client.acct('passwd')
    def test_rename(self):
        self.client.rename('a', 'b')
        # A non-2xx reply to RNFR/RNTO must raise error_reply.
        self.server.handler.next_response = '200'
        self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
    def test_delete(self):
        self.client.delete('foo')
        self.server.handler.next_response = '199'
        self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
    def test_size(self):
        self.client.size('foo')
    def test_mkd(self):
        # mkd() must return the directory name parsed from the 257 reply.
        dir = self.client.mkd('/foo')
        self.assertEqual(dir, '/foo')
    def test_rmd(self):
        self.client.rmd('foo')
    def test_cwd(self):
        dir = self.client.cwd('/foo')
        self.assertEqual(dir, '250 cwd ok')
    def test_pwd(self):
        dir = self.client.pwd()
        self.assertEqual(dir, 'pwd ok')
    def test_quit(self):
        self.assertEqual(self.client.quit(), '221 quit ok')
        # Ensure the connection gets closed; sock attribute should be None
        self.assertEqual(self.client.sock, None)
    def test_retrbinary(self):
        received = []
        self.client.retrbinary('retr', received.append)
        self.assertEqual(''.join(received), RETR_DATA)
    def test_retrbinary_rest(self):
        # REST must make the transfer start at the given byte offset.
        for rest in (0, 10, 20):
            received = []
            self.client.retrbinary('retr', received.append, rest=rest)
            self.assertEqual(''.join(received), RETR_DATA[rest:],
                             msg='rest test case %d %d %d' % (rest,
                                                              len(''.join(received)),
                                                              len(RETR_DATA[rest:])))
    def test_retrlines(self):
        received = []
        self.client.retrlines('retr', received.append)
        # retrlines() strips the CRLF line terminators.
        self.assertEqual(''.join(received), RETR_DATA.replace('\r\n', ''))
    def test_storbinary(self):
        f = StringIO.StringIO(RETR_DATA)
        self.client.storbinary('stor', f)
        self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
        # test new callback arg
        flag = []
        f.seek(0)
        self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
        self.assertTrue(flag)
    def test_storbinary_rest(self):
        # rest may be passed as either int or str; the server sees a str.
        f = StringIO.StringIO(RETR_DATA)
        for r in (30, '30'):
            f.seek(0)
            self.client.storbinary('stor', f, rest=r)
            self.assertEqual(self.server.handler.rest, str(r))
    def test_storlines(self):
        # storlines() must convert bare LF line endings to CRLF on the wire.
        f = StringIO.StringIO(RETR_DATA.replace('\r\n', '\n'))
        self.client.storlines('stor', f)
        self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
        # test new callback arg
        flag = []
        f.seek(0)
        self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
        self.assertTrue(flag)
    def test_nlst(self):
        self.client.nlst()
        self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
    def test_dir(self):
        l = []
        self.client.dir(lambda x: l.append(x))
        self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
    def test_makeport(self):
        self.client.makeport()
        # IPv4 is in use, just make sure send_eprt has not been used
        self.assertEqual(self.server.handler.last_received_cmd, 'port')
    def test_makepasv(self):
        host, port = self.client.makepasv()
        conn = socket.create_connection((host, port), 10)
        conn.close()
        # IPv4 is in use, just make sure send_epsv has not been used
        self.assertEqual(self.server.handler.last_received_cmd, 'pasv')
    def test_line_too_long(self):
        # Over-long command lines must be rejected, not silently truncated.
        self.assertRaises(ftplib.Error, self.client.sendcmd,
                          'x' * self.client.maxline * 2)
    def test_retrlines_too_long(self):
        self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2))
        received = []
        self.assertRaises(ftplib.Error,
                          self.client.retrlines, 'retr', received.append)
    def test_storlines_too_long(self):
        f = StringIO.StringIO('x' * self.client.maxline * 2)
        self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f)
@skipUnless(socket.has_ipv6, "IPv6 not enabled")
class TestIPv6Environment(TestCase):
    """Verify the client uses the RFC 2428 commands (EPRT/EPSV) over IPv6."""
    @classmethod
    def setUpClass(cls):
        # has_ipv6 may be True even when the loopback lacks IPv6; probe by
        # actually binding a server and skip the whole class on failure.
        try:
            DummyFTPServer((HOST, 0), af=socket.AF_INET6)
        except socket.error:
            raise SkipTest("IPv6 not enabled")
    def setUp(self):
        self.server = DummyFTPServer((HOSTv6, 0), af=socket.AF_INET6)
        self.server.start()
        self.client = ftplib.FTP()
        self.client.connect(self.server.host, self.server.port)
    def tearDown(self):
        self.client.close()
        self.server.stop()
    def test_af(self):
        self.assertEqual(self.client.af, socket.AF_INET6)
    def test_makeport(self):
        self.client.makeport()
        # Over IPv6 the extended EPRT command must be used, not PORT.
        self.assertEqual(self.server.handler.last_received_cmd, 'eprt')
    def test_makepasv(self):
        host, port = self.client.makepasv()
        conn = socket.create_connection((host, port), 10)
        conn.close()
        # Over IPv6 the extended EPSV command must be used, not PASV.
        self.assertEqual(self.server.handler.last_received_cmd, 'epsv')
    def test_transfer(self):
        # Retrieve the canned data in both passive and active mode.
        def retr():
            received = []
            self.client.retrbinary('retr', received.append)
            self.assertEqual(''.join(received), RETR_DATA)
        self.client.set_pasv(True)
        retr()
        self.client.set_pasv(False)
        retr()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClassMixin(TestFTPClass):
    """Repeat TestFTPClass tests starting the TLS layer for both control
    and data connections first.
    """
    def setUp(self):
        self.server = DummyTLS_FTPServer((HOST, 0))
        self.server.start()
        self.client = ftplib.FTP_TLS(timeout=10)
        self.client.connect(self.server.host, self.server.port)
        # enable TLS: AUTH secures the control channel, PROT P the data
        # channel; every inherited test then runs fully encrypted.
        self.client.auth()
        self.client.prot_p()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClass(TestCase):
    """Specific TLS_FTP class tests."""
    def setUp(self):
        self.server = DummyTLS_FTPServer((HOST, 0))
        self.server.start()
        self.client = ftplib.FTP_TLS(timeout=10)
        self.client.connect(self.server.host, self.server.port)
    def tearDown(self):
        self.client.close()
        self.server.stop()
    def test_control_connection(self):
        # auth() must upgrade the control socket to an SSLSocket.
        self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.auth()
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)
    def test_data_connection(self):
        # clear text
        sock = self.client.transfercmd('list')
        self.assertNotIsInstance(sock, ssl.SSLSocket)
        sock.close()
        self.assertEqual(self.client.voidresp(), "226 transfer complete")
        # secured, after PROT P
        self.client.prot_p()
        sock = self.client.transfercmd('list')
        self.assertIsInstance(sock, ssl.SSLSocket)
        sock.close()
        self.assertEqual(self.client.voidresp(), "226 transfer complete")
        # PROT C is issued, the connection must be in cleartext again
        self.client.prot_c()
        sock = self.client.transfercmd('list')
        self.assertNotIsInstance(sock, ssl.SSLSocket)
        sock.close()
        self.assertEqual(self.client.voidresp(), "226 transfer complete")
    def test_login(self):
        # login() is supposed to implicitly secure the control connection
        self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.login()
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)
        # make sure that AUTH TLS doesn't get issued again
        self.client.login()
    def test_auth_issued_twice(self):
        self.client.auth()
        self.assertRaises(ValueError, self.client.auth)
    def test_auth_ssl(self):
        # Same double-AUTH check with an explicit (SSLv3) protocol version.
        try:
            self.client.ssl_version = ssl.PROTOCOL_SSLv3
            self.client.auth()
            self.assertRaises(ValueError, self.client.auth)
        finally:
            # Restore the default so later tests are unaffected.
            self.client.ssl_version = ssl.PROTOCOL_TLSv1
class TestTimeouts(TestCase):
    """Check how FTP() propagates the timeout argument to its socket."""
    def setUp(self):
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(10)
        self.port = test_support.bind_port(self.sock)
        threading.Thread(target=self.server, args=(self.evt,self.sock)).start()
        # Wait for the server to be ready.
        self.evt.wait()
        self.evt.clear()
        # Point clients created as ftplib.FTP(HOST) at our throwaway port.
        ftplib.FTP.port = self.port
    def tearDown(self):
        self.evt.wait()
    def server(self, evt, serv):
        # This method sets the evt 3 times:
        #  1) when the connection is ready to be accepted.
        #  2) when it is safe for the caller to close the connection
        #  3) when we have closed the socket
        serv.listen(5)
        # (1) Signal the caller that we are ready to accept the connection.
        evt.set()
        try:
            conn, addr = serv.accept()
        except socket.timeout:
            pass
        else:
            # Minimal FTP greeting so the client's connect() succeeds.
            conn.send("1 Hola mundo\n")
            # (2) Signal the caller that it is safe to close the socket.
            evt.set()
            conn.close()
        finally:
            serv.close()
            # (3) Signal the caller that we are done.
            evt.set()
    def testTimeoutDefault(self):
        # default -- use global socket timeout
        self.assertIsNone(socket.getdefaulttimeout())
        socket.setdefaulttimeout(30)
        try:
            ftp = ftplib.FTP(HOST)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
    def testTimeoutNone(self):
        # no timeout -- do not use global socket timeout
        self.assertIsNone(socket.getdefaulttimeout())
        socket.setdefaulttimeout(30)
        try:
            ftp = ftplib.FTP(HOST, timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertIsNone(ftp.sock.gettimeout())
        self.evt.wait()
        ftp.close()
    def testTimeoutValue(self):
        # a value
        ftp = ftplib.FTP(HOST, timeout=30)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
    def testTimeoutConnect(self):
        # timeout passed to connect() instead of the constructor
        ftp = ftplib.FTP()
        ftp.connect(HOST, timeout=30)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
    def testTimeoutDifferentOrder(self):
        # timeout given to the constructor, connection made later
        ftp = ftplib.FTP(timeout=30)
        ftp.connect(HOST)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
    def testTimeoutDirectAccess(self):
        # timeout assigned directly to the instance attribute
        ftp = ftplib.FTP()
        ftp.timeout = 30
        ftp.connect(HOST)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
def test_main():
    """Run the whole ftplib test suite with threading bookkeeping."""
    suite = [
        TestFTPClass,
        TestTimeouts,
        TestIPv6Environment,
        TestTLS_FTPClassMixin,
        TestTLS_FTPClass,
    ]
    saved_thread_state = test_support.threading_setup()
    try:
        test_support.run_unittest(*suite)
    finally:
        # Always restore thread bookkeeping, even if a test raised.
        test_support.threading_cleanup(*saved_thread_state)
if __name__ == '__main__':
    test_main()
| {
"content_hash": "afaee47855a083df7ea3670e74016917",
"timestamp": "",
"source": "github",
"line_count": 808,
"max_line_length": 85,
"avg_line_length": 32.37871287128713,
"alnum_prop": 0.5779374665545448,
"repo_name": "shiblon/pytour",
"id": "b3d8f7efb26dbcc762b994305bf7e11193a10860",
"size": "26162",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "static/js/pypyjs/pypy-nojit.js-0.3.1/lib/modules/test/test_ftplib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "195977"
},
{
"name": "HTML",
"bytes": "2110262"
},
{
"name": "JavaScript",
"bytes": "5106892"
},
{
"name": "Python",
"bytes": "15081380"
},
{
"name": "Shell",
"bytes": "1018"
}
],
"symlink_target": ""
} |
"""
Module for Input/Output
"""
from __future__ import absolute_import
from .core import save_all_figs # noqa
| {
"content_hash": "3c0c973779b987d30e3eccc74edae518",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 39,
"avg_line_length": 18.5,
"alnum_prop": 0.7027027027027027,
"repo_name": "tonysyu/mpltools",
"id": "9a51c9d47241196b53bc2c1bee557c95dfc8ff0d",
"size": "111",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mpltools/io/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "144"
},
{
"name": "Python",
"bytes": "76057"
}
],
"symlink_target": ""
} |
import copy
from sqlalchemy import exc as sa_exc
import testtools
from sahara.conductor import manager
from sahara import context
from sahara import exceptions as ex
import sahara.tests.unit.conductor.base as test_base
# Representative cluster creation payload used by every test below; the
# node_groups/cluster_configs layout mirrors what the conductor API accepts.
SAMPLE_CLUSTER = {
    "plugin_name": "test_plugin",
    "hadoop_version": "test_version",
    "tenant_id": "tenant_1",
    "name": "test_cluster",
    "user_keypair_id": "my_keypair",
    "node_groups": [
        {
            "name": "ng_1",
            "flavor_id": "42",
            "node_processes": ["p1", "p2"],
            "count": 1,
            "security_groups": None,
            'use_autoconfig': True,
            "shares": None
        },
        {
            "name": "ng_2",
            "flavor_id": "42",
            "node_processes": ["p3", "p4"],
            "count": 3,
            "security_groups": ["group1", "group2"],
            'use_autoconfig': True,
            "shares": None
        }
    ],
    "cluster_configs": {
        "service_1": {
            "config_2": "value_2"
        },
        "service_2": {
            "config_1": "value_1"
        }
    },
}
class ClusterTest(test_base.ConductorManagerTestCase):
    """CRUD tests for clusters, node groups and instances going through
    the conductor API.
    """
    def __init__(self, *args, **kwargs):
        # `checks` are callables whose results must stay unmutated across
        # the test run (verified by the base class).
        super(ClusterTest, self).__init__(
            checks=[
                lambda: SAMPLE_CLUSTER,
                lambda: manager.CLUSTER_DEFAULTS,
                lambda: manager.NODE_GROUP_DEFAULTS,
                lambda: manager.INSTANCE_DEFAULTS,
            ], *args, **kwargs)
    def test_cluster_create_list_delete(self):
        """Create, list, destroy; destroying twice must raise NotFound."""
        ctx = context.ctx()
        cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        self.assertIsInstance(cluster_db_obj, dict)
        lst = self.api.cluster_get_all(ctx)
        self.assertEqual(1, len(lst))
        cl_id = lst[0]["id"]
        self.api.cluster_destroy(ctx, cl_id)
        lst = self.api.cluster_get_all(ctx)
        self.assertEqual(0, len(lst))
        with testtools.ExpectedException(ex.NotFoundException):
            self.api.cluster_destroy(ctx, cl_id)
    def test_duplicate_cluster_create(self):
        ctx = context.ctx()
        self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        with testtools.ExpectedException(ex.DBDuplicateEntry):
            self.api.cluster_create(ctx, SAMPLE_CLUSTER)
    def test_cluster_fields(self):
        """Every input field must round-trip through the DB unchanged."""
        ctx = context.ctx()
        cl_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        self.assertIsInstance(cl_db_obj, dict)
        for key, val in SAMPLE_CLUSTER.items():
            if key == 'node_groups':
                # this will be checked separately
                continue
            self.assertEqual(val, cl_db_obj.get(key),
                             "Key not found %s" % key)
        for ng in cl_db_obj["node_groups"]:
            # Strip the server-generated / defaulted fields so the node
            # groups can be compared against the input template directly.
            ng.pop("created_at")
            ng.pop("updated_at")
            ng.pop("id")
            self.assertEqual(cl_db_obj["id"], ng.pop("cluster_id"))
            ng.pop("image_id")
            self.assertEqual([], ng.pop("instances"))
            ng.pop("node_configs")
            ng.pop("node_group_template_id")
            ng.pop("volume_mount_prefix")
            ng.pop("volumes_size")
            ng.pop("volumes_per_node")
            ng.pop("volumes_availability_zone")
            ng.pop("volume_type")
            ng.pop("floating_ip_pool")
            ng.pop("image_username")
            ng.pop("open_ports")
            ng.pop("auto_security_group")
            ng.pop("is_proxy_gateway")
            ng.pop("tenant_id")
            ng.pop("availability_zone")
            ng.pop('volume_local_to_instance')
        self.assertEqual(SAMPLE_CLUSTER["node_groups"],
                         cl_db_obj["node_groups"])
    def test_cluster_no_ng(self):
        """A cluster may be created without any node groups."""
        ctx = context.ctx()
        cluster_schema = copy.deepcopy(SAMPLE_CLUSTER)
        cluster_schema.pop('node_groups')
        cl_db_obj = self.api.cluster_create(ctx, cluster_schema)
        self.assertIsInstance(cl_db_obj, dict)
        for key, val in cluster_schema.items():
            self.assertEqual(val, cl_db_obj.get(key),
                             "Key not found %s" % key)
        self.assertEqual([], cl_db_obj["node_groups"])
    def test_cluster_update_status(self):
        ctx = context.ctx()
        cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        _id = cluster_db_obj["id"]
        updated_cl = self.api.cluster_update(ctx, _id, {"status": "Active"})
        self.assertIsInstance(updated_cl, dict)
        self.assertEqual("Active", updated_cl["status"])
        get_cl_obj = self.api.cluster_get(ctx, _id)
        self.assertEqual(updated_cl, get_cl_obj)
        with testtools.ExpectedException(ex.NotFoundException):
            self.api.cluster_update(ctx, "bad_id", {"status": "Active"})
    def _ng_in_cluster(self, cluster_db_obj, ng_id):
        # Helper: return the node group with the given id, or None.
        for ng in cluster_db_obj["node_groups"]:
            if ng["id"] == ng_id:
                return ng
        return None
    def test_add_node_group(self):
        ctx = context.ctx()
        cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        _id = cluster_db_obj["id"]
        node_group = {
            "name": "ng_3",
            "flavor_id": "42",
            "node_processes": ["p3", "p4"],
            "count": 5
        }
        ng_id = self.api.node_group_add(ctx, _id, node_group)
        cluster_db_obj = self.api.cluster_get(ctx, _id)
        found_ng = self._ng_in_cluster(cluster_db_obj, ng_id)
        self.assertTrue(found_ng, "New Node Group not found")
    def test_update_node_group(self):
        ctx = context.ctx()
        cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        _id = cluster_db_obj["id"]
        self.assertEqual(2, len(cluster_db_obj["node_groups"]))
        ng_id = cluster_db_obj["node_groups"][-1]["id"]
        self.api.node_group_update(ctx, ng_id, {"image_id": "test_image"})
        cluster_db_obj = self.api.cluster_get(ctx, _id)
        found_ng = self._ng_in_cluster(cluster_db_obj, ng_id)
        self.assertTrue(found_ng, "Updated Node Group not found")
        for ng in cluster_db_obj["node_groups"]:
            if ng["id"] != ng_id:
                continue
            self.assertEqual("test_image", ng["image_id"])
    def test_delete_node_group(self):
        """Removing a node group twice must raise NotFound."""
        ctx = context.ctx()
        cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        _id = cluster_db_obj["id"]
        ng_id = cluster_db_obj["node_groups"][-1]["id"]
        self.api.node_group_remove(ctx, ng_id)
        cluster_db_obj = self.api.cluster_get(ctx, _id)
        found_ng = self._ng_in_cluster(cluster_db_obj, ng_id)
        self.assertFalse(found_ng, "Node Group is still in a CLuster")
        with testtools.ExpectedException(ex.NotFoundException):
            self.api.node_group_remove(ctx, ng_id)
    def _add_instance(self, ctx, ng_id):
        # Helper: attach one bare instance to the given node group.
        instance = {
            "instance_name": "additional_vm"
        }
        return self.api.instance_add(ctx, ng_id, instance)
    def test_add_instance(self):
        """Adding an instance must bump the node group's count."""
        ctx = context.ctx()
        cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        _id = cluster_db_obj["id"]
        ng_id = cluster_db_obj["node_groups"][-1]["id"]
        count = cluster_db_obj["node_groups"][-1]["count"]
        self._add_instance(ctx, ng_id)
        cluster_db_obj = self.api.cluster_get(ctx, _id)
        for ng in cluster_db_obj["node_groups"]:
            if ng["id"] != ng_id:
                continue
            # tenant_id is discarded; it is not needed by the assertions.
            ng.pop('tenant_id')
            self.assertEqual(count + 1, ng["count"])
            self.assertEqual("additional_vm",
                             ng["instances"][0]["instance_name"])
    def test_update_instance(self):
        ctx = context.ctx()
        cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        _id = cluster_db_obj["id"]
        ng_id = cluster_db_obj["node_groups"][-1]["id"]
        instance_id = self._add_instance(ctx, ng_id)
        self.api.instance_update(ctx, instance_id,
                                 {"management_ip": "1.1.1.1"})
        cluster_db_obj = self.api.cluster_get(ctx, _id)
        for ng in cluster_db_obj["node_groups"]:
            if ng["id"] != ng_id:
                continue
            self.assertEqual("1.1.1.1", ng["instances"][0]["management_ip"])
    def test_remove_instance(self):
        """Removing an instance decrements count; removing twice raises."""
        ctx = context.ctx()
        cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        _id = cluster_db_obj["id"]
        ng_id = cluster_db_obj["node_groups"][-1]["id"]
        count = cluster_db_obj["node_groups"][-1]["count"]
        instance_id = self._add_instance(ctx, ng_id)
        cluster_db_obj = self.api.cluster_get(ctx, _id)
        for ng in cluster_db_obj["node_groups"]:
            if ng["id"] != ng_id:
                continue
            self.assertEqual(count + 1, ng["count"])
        self.api.instance_remove(ctx, instance_id)
        cluster_db_obj = self.api.cluster_get(ctx, _id)
        for ng in cluster_db_obj["node_groups"]:
            if ng["id"] != ng_id:
                continue
            self.assertEqual(count, ng["count"])
        with testtools.ExpectedException(ex.NotFoundException):
            self.api.instance_remove(ctx, instance_id)
    def test_cluster_search(self):
        """cluster_get_all filtering: match, no-match and invalid field."""
        ctx = context.ctx()
        self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        lst = self.api.cluster_get_all(ctx)
        self.assertEqual(1, len(lst))
        kwargs = {'name': SAMPLE_CLUSTER['name'],
                  'plugin_name': SAMPLE_CLUSTER['plugin_name']}
        lst = self.api.cluster_get_all(ctx, **kwargs)
        self.assertEqual(1, len(lst))
        # Valid field but no matching value
        kwargs = {'name': SAMPLE_CLUSTER['name']+'foo'}
        lst = self.api.cluster_get_all(ctx, **kwargs)
        self.assertEqual(0, len(lst))
        # Invalid field
        self.assertRaises(sa_exc.InvalidRequestError,
                          self.api.cluster_get_all,
                          ctx, **{'badfield': 'somevalue'})
| {
"content_hash": "22de0cdb33f4b1496f04108c7fde864e",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 76,
"avg_line_length": 33.19016393442623,
"alnum_prop": 0.5488491553887188,
"repo_name": "ekasitk/sahara",
"id": "7b68a7bd36d687c58c42e8574e00aaa5752c272a",
"size": "10706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sahara/tests/unit/conductor/manager/test_clusters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3609"
},
{
"name": "Mako",
"bytes": "19620"
},
{
"name": "PigLatin",
"bytes": "792"
},
{
"name": "Python",
"bytes": "3141724"
},
{
"name": "Shell",
"bytes": "52399"
}
],
"symlink_target": ""
} |
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import logging
from optparse import OptionParser
import sleekxmpp
from sleekxmpp import Iq
from sleekxmpp.exceptions import XMPPError
from sleekxmpp.xmlstream import register_stanza_plugin
from jibrixmppclient import JibriElement
YSI = 'id-of-the-youtube-stream'  # placeholder YouTube stream id sent in the jibri IQ
# Module-level holder; send_custom_iq() replaces it with the last IQ sent.
jibriiq = False
class ActionUserBot(sleekxmpp.ClientXMPP):
    """
    A simple SleekXMPP bot that sends a custom action stanza
    to another client.

    On session start it joins a MUC room, then sends a <jibri>
    "start" IQ to `other` with the stream URL and id.
    """
    def __init__(self, jid, password, other, room, nick, roompass, url):
        sleekxmpp.ClientXMPP.__init__(self, jid, password)
        self.action_provider = other
        self.room = room
        self.nick = nick
        self.roompass = roompass
        self.url = url
        # The session_start event will be triggered when
        # the bot establishes its connection with the server
        # and the XML streams are ready for use. We want to
        # listen for this event so that we can initialize
        # our roster.
        self.add_event_handler("session_start", self.start, threaded=True)
        self.add_event_handler("message", self.message)
        # Teach sleekxmpp how to (de)serialize the custom <jibri>
        # payload inside Iq stanzas.
        register_stanza_plugin(Iq, JibriElement)
    def start(self, event):
        """
        Process the session_start event.
        Typical actions for the session_start event are
        requesting the roster and broadcasting an initial
        presence stanza.
        Arguments:
            event -- An empty dictionary. The session_start
                     event does not provide any additional
                     data.
        """
        self.send_presence()
        self.get_roster()
        # Join the MUC room before issuing the jibri start command.
        self.plugin['xep_0045'].joinMUC(self.room,
                                        self.nick,
                                        password=self.roompass)
                                        #wait=True)
        self.send_custom_iq()
    def send_custom_iq(self):
        """Create and send two custom actions.
        If the first action was successful, then send
        a shutdown command and then disconnect.
        """
        iq = self.Iq()
        iq['to'] = self.action_provider
        iq['type'] = 'set'
        iq['jibri']._setAttr('action', 'start')
        iq['jibri']._setAttr('url', self.url)
        iq['jibri']._setAttr('streamid', YSI)
        #iq['jibri']._setAttr('token','token')
        global jibriiq
        # Expose the last-sent IQ at module level (see `jibriiq` above).
        jibriiq = iq
        try:
            logging.info("Sending IQ: %s" % iq)
            resp = iq.send()
            logging.info("Got response: %s" % resp)
            # The wait=True delays the disconnect until the queue
            # of stanzas to be sent becomes empty.
            self.disconnect(wait=True)
        except XMPPError:
            print('There was an error sending the custom action.')
    def message(self, msg):
        """
        Process incoming message stanzas.
        Arguments:
            msg -- The received message stanza.
        """
        logging.info(msg['body'])
if __name__ == '__main__':
    # Setup the command line arguments.
    optp = OptionParser()
    # Output verbosity options.
    optp.add_option('-q', '--quiet', help='set logging to ERROR',
                    action='store_const', dest='loglevel',
                    const=logging.ERROR, default=logging.INFO)
    optp.add_option('-d', '--debug', help='set logging to DEBUG',
                    action='store_const', dest='loglevel',
                    const=logging.DEBUG, default=logging.INFO)
    optp.add_option('-v', '--verbose', help='set logging to COMM',
                    action='store_const', dest='loglevel',
                    const=5, default=logging.INFO)
    # JID and password options.
    optp.add_option("-j", "--jid", dest="jid",
                    help="JID to use")
    optp.add_option("-p", "--password", dest="password",
                    help="password to use")
    optp.add_option("-o", "--other", dest="other",
                    help="JID providing custom stanza")
    optp.add_option("-r", "--room", dest="room",
                    help="MUC")
    optp.add_option("-n", "--nick", dest="nick",
                    help="MUC nick")
    optp.add_option("-u", "--url", dest="url",
                    help="url")
    optp.add_option("-R", "--roompass", dest="roompass",
                    help="MUC password")
    opts, args = optp.parse_args()
    # Setup logging.
    logging.basicConfig(level=opts.loglevel,
                        format='%(levelname)-8s %(message)s')
    # jid, password, url and room are required; the rest fall back to
    # hard-coded defaults below.
    if opts.jid is None:
        exit('no user')
    if opts.password is None:
        exit('no pass')
    if opts.url is None:
        exit('no url')
    if opts.other is None:
        opts.other = 'jibri@auth.boris.jitsi.net'
    if opts.nick is None:
        opts.nick = 'nick'
    if opts.room is None:
        exit('no room')
    if opts.roompass is None:
        opts.roompass = 'password'
    # Setup the CommandBot and register plugins. Note that while plugins may
    # have interdependencies, the order in which you register them does
    # not matter.
    xmpp = ActionUserBot(opts.jid, opts.password, opts.other, opts.room, opts.nick, opts.roompass, opts.url)
    #xmpp.register_plugin('xep_0030') # Service Discovery
    #xmpp.register_plugin('xep_0004') # Data Forms
    xmpp.register_plugin('xep_0045') # Multi-User Chat
    #xmpp.register_plugin('xep_0050') # Adhoc Commands
    # If you are working with an OpenFire server, you may need
    # to adjust the SSL version used:
    # xmpp.ssl_version = ssl.PROTOCOL_SSLv3
    # If you want to verify the SSL certificates offered by a server:
    # xmpp.ca_certs = "path/to/ca/cert"
    # Connect to the XMPP server and start processing XMPP stanzas.
    if xmpp.connect():
        # If you do not have the dnspython library installed, you will need
        # to manually specify the name of the server if it does not match
        # the one in the JID. For example, to use Google Talk you would
        # need to use:
        #
        # if xmpp.connect(('talk.google.com', 5222)):
        #     ...
        xmpp.process(block=True)
        print("Done")
    else:
        print("Unable to connect.")
| {
"content_hash": "e2db8408a1a5cff38654d6f99eace0d7",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 108,
"avg_line_length": 34.14594594594595,
"alnum_prop": 0.5816051923381352,
"repo_name": "aaronkvanmeerten/jibri",
"id": "8109814f42674f0a1bbf003fda2e8eba1292f735",
"size": "6364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jibri-xmpp-client/custom_stanza_user.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "82007"
},
{
"name": "Shell",
"bytes": "7472"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
	"""Build the shared Rodian-female hooligan creature template.

	Standard SWGANH object-template factory: instantiate a Creature,
	point it at its .iff template and STF display name, and return it.
	The BEGIN/END MODIFICATIONS markers delimit the area reserved for
	server-specific tweaks.
	"""
	result = Creature()
	result.template = "object/mobile/shared_dressed_hooligan_rodian_female_01.iff"
	result.attribute_template_id = 9
	result.stfName("npc_name","rodian_base_female")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result | {
"content_hash": "5e94937b9c9e4f875c6360bd50e2a406",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 79,
"avg_line_length": 24.307692307692307,
"alnum_prop": 0.7025316455696202,
"repo_name": "obi-two/Rebelion",
"id": "887a91a80188726a60da22a040ce1f8b8ea76a4b",
"size": "461",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/mobile/shared_dressed_hooligan_rodian_female_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
import os
import os.path
import shutil
import stat
def build(source_path, build_path, install_path, targets):
    """rez build hook: copy the source tree into the install location.

    Python 2 script (print statement). When "install" is among *targets*,
    the empty directory rez pre-creates at install_path is removed and the
    whole source tree is copied there instead.
    """
    print source_path, build_path, install_path, targets
    if "install" in (targets or []):
        # rez-build creates the folder by default. We'll replace it with a
        # copy of the source tree for simplicity.
        if os.path.exists(install_path) and os.path.isdir(install_path):
            os.rmdir(install_path) # Directory should be empty; let it crash if it fails.
            #os.symlink(source_path, install_path)
        shutil.copytree(source_path, install_path)
| {
"content_hash": "2d99723c49dbb32d4aaea3b9c348bc7a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 95,
"avg_line_length": 35.3125,
"alnum_prop": 0.6902654867256637,
"repo_name": "SqueezeStudioAnimation/omtk",
"id": "bc54a747793656a3584aea735f71d6492071b791",
"size": "565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rezbuild.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Mathematica",
"bytes": "1124321"
},
{
"name": "Python",
"bytes": "1054644"
},
{
"name": "Shell",
"bytes": "143"
}
],
"symlink_target": ""
} |
from subprocess import call
import os
from SiddhiCEP3 import SiddhiLoader
# Download extension jars.
# NOTE(review): this runs Maven at import time, so merely importing this
# test module shells out to `mvn install` (network/build side effect).
call(["mvn", "install"], cwd=os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/Resources/Extensions3/")
# Add extensions: register every jar built above with the Siddhi loader.
extensions_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/Resources/Extensions3/jars/*"
SiddhiLoader.addExtensionPath(extensions_path)
import unittest
import logging
from time import sleep
from SiddhiCEP3.DataTypes.LongType import LongType
from SiddhiCEP3.core.SiddhiManager import SiddhiManager
from SiddhiCEP3.core.query.output.callback.QueryCallback import QueryCallback
from SiddhiCEP3.core.util.EventPrinter import PrintEvent
# Configure root logging before the test cases run.
logging.basicConfig(level=logging.INFO)
from unittest.case import TestCase
from Tests.Util.AtomicInt import AtomicInt
class TestExtensions(TestCase):
    """Integration tests for Siddhi extension functions (timeseries
    regression, math:rand, str:regexp, str:contains) driven through the
    Python wrapper API. Each test builds a runtime, streams events in,
    and asserts on what the query callback observed."""
    def setUp(self):
        """Reset per-test bookkeeping shared by all callbacks."""
        self.eventArrived = False
        self.count = AtomicInt(0)
    def testTimeSeriesSimpleLinearRegression(self):
        """Feed 50 (y, x) samples through timeseries:regress and check the
        event count and the final beta-zero coefficient."""
        logging.info("Simple Regression TestCase")
        siddhiManager = SiddhiManager()
        siddhiManager.setExtension("timeseries:regress", "org.wso2.extension.siddhi.execution.timeseries.LinearRegressionStreamProcessor")
        inputStream = "define stream InputStream (y int, x int);"
        siddhiApp = "@info(name = 'query1') from InputStream#timeseries:regress(1, 100, 0.95, y, x) " + \
                    "select * " + \
                    "insert into OutputStream;"
        siddhiAppRuntime = siddhiManager.createExecutionPlanRuntime(inputStream + siddhiApp)
        self.betaZero = 0
        # Keep an outer-scope alias so the nested callback can reach the test.
        _self_shaddow = self
        class QueryCallbackImpl(QueryCallback):
            """Records the event count and the last beta-zero value seen."""
            def receive(self, timestamp, inEvents, outEvents):
                PrintEvent(timestamp,inEvents,outEvents)
                _self_shaddow.count.addAndGet(len(inEvents))
                _self_shaddow.betaZero = inEvents[len(inEvents)-1].getData(3)
        siddhiAppRuntime.addCallback("query1", QueryCallbackImpl())
        inputHandler = siddhiAppRuntime.getInputHandler("InputStream")
        siddhiAppRuntime.start()
        inputHandler.send([2500.00, 17.00])
        inputHandler.send([2600.00, 18.00])
        inputHandler.send([3300.00, 31.00])
        inputHandler.send([2475.00, 12.00])
        inputHandler.send([2313.00, 8.00])
        inputHandler.send([2175.00, 26.00])
        inputHandler.send([600.00, 14.00])
        inputHandler.send([460.00, 3.00])
        inputHandler.send([240.00, 1.00])
        inputHandler.send([200.00, 10.00])
        inputHandler.send([177.00, 0.00])
        inputHandler.send([140.00, 6.00])
        inputHandler.send([117.00, 1.00])
        inputHandler.send([115.00, 0.00])
        inputHandler.send([2600.00, 19.00])
        inputHandler.send([1907.00, 13.00])
        inputHandler.send([1190.00, 3.00])
        inputHandler.send([990.00, 16.00])
        inputHandler.send([925.00, 6.00])
        inputHandler.send([365.00, 0.00])
        inputHandler.send([302.00, 10.00])
        inputHandler.send([300.00, 6.00])
        inputHandler.send([129.00, 2.00])
        inputHandler.send([111.00, 1.00])
        inputHandler.send([6100.00, 18.00])
        inputHandler.send([4125.00, 19.00])
        inputHandler.send([3213.00, 1.00])
        inputHandler.send([2319.00, 38.00])
        inputHandler.send([2000.00, 10.00])
        inputHandler.send([1600.00, 0.00])
        inputHandler.send([1394.00, 4.00])
        inputHandler.send([935.00, 4.00])
        inputHandler.send([850.00, 0.00])
        inputHandler.send([775.00, 5.00])
        inputHandler.send([760.00, 6.00])
        inputHandler.send([629.00, 1.00])
        inputHandler.send([275.00, 6.00])
        inputHandler.send([120.00, 0.00])
        inputHandler.send([2567.00, 12.00])
        inputHandler.send([2500.00, 28.00])
        inputHandler.send([2350.00, 21.00])
        inputHandler.send([2317.00, 3.00])
        inputHandler.send([2000.00, 12.00])
        inputHandler.send([715.00, 1.00])
        inputHandler.send([660.00, 9.00])
        inputHandler.send([650.00, 0.00])
        inputHandler.send([260.00, 0.00])
        inputHandler.send([250.00, 1.00])
        inputHandler.send([200.00, 13.00])
        inputHandler.send([180.00, 6.00])
        # Give the asynchronous runtime time to deliver all callbacks.
        sleep(1)
        self.assertEqual(50, self.count.get(),"No of events: ")
        # Condition Loosened from equality due to floating point error
        self.assertTrue(573.1418421169493-0.001<self.betaZero<573.1418421169493+0.001,"Beta0: " + str(573.1418421169493 - self.betaZero))
        siddhiAppRuntime.shutdown()
    def testMathRandomFunctionWithSeed(self):
        """math:rand(seed) must produce three distinct values for three events."""
        logging.info("RandomFunctionExtension TestCase, with seed")
        # Creating SiddhiManager
        siddhiManager = SiddhiManager()
        siddhiManager.setExtension("math:rand",
                                   "org.wso2.extension.siddhi.execution.math.RandomFunctionExtension")
        # Creating Query
        streamDefinition = "define stream inputStream (symbol string, price long, volume long);"
        query ="@info(name = 'query1') from inputStream select symbol , math:rand(12) as randNumber " + \
               "insert into outputStream;"
        # Setting up ExecutionPlan
        executionPlanRuntime = siddhiManager.createExecutionPlanRuntime(streamDefinition + query)
        # Setting up callback
        _self_shaddow = self
        class ConcreteQueryCallback(QueryCallback):
            """Asserts that the three generated random values are pairwise distinct."""
            def receive(self, timestamp, inEvents, outEvents):
                PrintEvent(timestamp, inEvents, outEvents)
                _self_shaddow.count.addAndGet(len(inEvents))
                _self_shaddow.eventArrived = True
                if len(inEvents) == 3:
                    randNumbers = [0,0,0]
                    randNumbers[0] = inEvents[0].getData(1)
                    randNumbers[1] = inEvents[1].getData(1)
                    randNumbers[2] = inEvents[2].getData(1)
                    isDuplicatePresent = False
                    # NOTE(review): randNumbers entries appear to be numeric
                    # (random values), so concatenating them with ", " would
                    # raise TypeError if this branch runs -- confirm getData
                    # return type and intended log formatting.
                    logging.info(randNumbers[0] + ", " + randNumbers[1])
                    if randNumbers[0] == randNumbers[1] or randNumbers[0] == randNumbers[2] or randNumbers[1] == randNumbers[2]:
                        isDuplicatePresent = True
                    _self_shaddow.assertEquals(False, isDuplicatePresent)
        executionPlanRuntime.addCallback("query1", ConcreteQueryCallback())
        # Retrieving input handler to push events into Siddhi
        inputHandler = executionPlanRuntime.getInputHandler("inputStream")
        # Starting event processing
        executionPlanRuntime.start()
        # Sending events to Siddhi
        inputHandler.send(["IBM", 700.0, LongType(100)])
        inputHandler.send(["WSO2", 60.5, LongType(200)])
        inputHandler.send(["XYZ", 60.5, LongType(200)])
        sleep(0.5)
        self.assertEqual(self.count.get(), 3)
        self.assertTrue(self.eventArrived)
        siddhiManager.shutdown()
    def testMathRandomFunctionWithoutSeed(self):
        """math:rand() without a seed must still yield three distinct values."""
        logging.info("RandomFunctionExtension TestCase, without seed")
        # Creating SiddhiManager
        siddhiManager = SiddhiManager()
        # Creating Query
        streamDefinition = "define stream inputStream (symbol string, price long, volume long);"
        query ="@info(name = 'query1') from inputStream select symbol , math:rand() as randNumber " + \
               "insert into outputStream;"
        # Setting up ExecutionPlan
        executionPlanRuntime = siddhiManager.createExecutionPlanRuntime(streamDefinition + query)
        # Setting up callback
        _self_shaddow = self
        class ConcreteQueryCallback(QueryCallback):
            """Asserts that the three generated random values are pairwise distinct."""
            def receive(self, timestamp, inEvents, outEvents):
                PrintEvent(timestamp, inEvents, outEvents)
                _self_shaddow.count.addAndGet(len(inEvents))
                _self_shaddow.eventArrived = True
                if len(inEvents) == 3:
                    randNumbers = [0,0,0]
                    randNumbers[0] = inEvents[0].getData(1)
                    randNumbers[1] = inEvents[1].getData(1)
                    randNumbers[2] = inEvents[2].getData(1)
                    isDuplicatePresent = False
                    if randNumbers[0] == randNumbers[1] or randNumbers[0] == randNumbers[2] or randNumbers[1] == randNumbers[2]:
                        isDuplicatePresent = True
                    _self_shaddow.assertEquals(False, isDuplicatePresent)
        executionPlanRuntime.addCallback("query1", ConcreteQueryCallback())
        # Retrieving input handler to push events into Siddhi
        inputHandler = executionPlanRuntime.getInputHandler("inputStream")
        # Starting event processing
        executionPlanRuntime.start()
        # Sending events to Siddhi
        inputHandler.send(["IBM", 700.0, LongType(100)])
        inputHandler.send(["WSO2", 60.5, LongType(200)])
        inputHandler.send(["XYZ", 60.5, LongType(200)])
        sleep(0.1)
        self.assertEqual(self.count.get(), 3)
        self.assertTrue(self.eventArrived)
        siddhiManager.shutdown()
    def testStringRegexpFunction(self):
        """str:regexp(symbol, regex) must match/reject the three test symbols
        in order: no-match, match, no-match (anchored pattern)."""
        logging.info("RegexpFunctionExtensionTestCase TestCase")
        # Creating SiddhiManager
        siddhiManager = SiddhiManager()
        # Creating Query
        streamDefinition = "define stream inputStream (symbol string, price long, regex string);"
        query = "@info(name = 'query1') from inputStream select symbol , " + \
                "str:regexp(symbol, regex) as beginsWithWSO2 " + \
                "insert into outputStream"
        # Setting up ExecutionPlan
        executionPlanRuntime = siddhiManager.createExecutionPlanRuntime(streamDefinition + query)
        # Setting up callback
        _self_shaddow = self
        class ConcreteQueryCallback(QueryCallback):
            """Checks the boolean match result of each event in arrival order."""
            def receive(self, timestamp, inEvents, outEvents):
                PrintEvent(timestamp, inEvents, outEvents)
                for inEvent in inEvents:
                    _self_shaddow.count.addAndGet(1)
                    if _self_shaddow.count.get() == 1:
                        _self_shaddow.assertEqual(False, inEvent.getData(1))
                    if _self_shaddow.count.get() == 2:
                        _self_shaddow.assertEqual(True, inEvent.getData(1))
                    if _self_shaddow.count.get() == 3:
                        _self_shaddow.assertEqual(False, inEvent.getData(1))
                    _self_shaddow.eventArrived = True
        executionPlanRuntime.addCallback("query1", ConcreteQueryCallback())
        # Retrieving input handler to push events into Siddhi
        inputHandler = executionPlanRuntime.getInputHandler("inputStream")
        # Starting event processing
        executionPlanRuntime.start()
        # Sending events to Siddhi
        inputHandler.send(["hello hi hello", 700.0, "^WSO2(.*)"])
        inputHandler.send(["WSO2 abcdh", 60.5, "WSO(.*h)"])
        inputHandler.send(["aaWSO2 hi hello", 60.5, "^WSO2(.*)"])
        sleep(0.5)
        self.assertEqual(self.count.get(), 3)
        self.assertTrue(self.eventArrived)
        siddhiManager.shutdown()
    def testStringContainsFunction(self):
        """str:contains(symbol, 'WSO2') must be False, True, True for the
        three test symbols."""
        logging.info("ContainsFunctionExtensionTestCase TestCase")
        # Creating SiddhiManager
        siddhiManager = SiddhiManager()
        # Creating Query
        streamDefinition = "define stream inputStream (symbol string, price long, volume long);"
        query = "@info(name = 'query1') " + \
                "from inputStream " + \
                "select symbol , str:contains(symbol, 'WSO2') as isContains " + \
                "insert into outputStream;"
        # Setting up Execution Plan
        executionPlanRuntime = siddhiManager.createExecutionPlanRuntime(streamDefinition + query)
        # Setting up callback
        _self_shaddow = self
        class ConcreteQueryCallback(QueryCallback):
            """Checks the boolean contains result of each event in arrival order."""
            def receive(self, timestamp, inEvents, outEvents):
                PrintEvent(timestamp, inEvents, outEvents)
                for inEvent in inEvents:
                    _self_shaddow.count.addAndGet(1)
                    if _self_shaddow.count.get() == 1:
                        _self_shaddow.assertEqual(False, inEvent.getData(1))
                    if _self_shaddow.count.get() == 2:
                        _self_shaddow.assertEqual(True, inEvent.getData(1))
                    if _self_shaddow.count.get() == 3:
                        _self_shaddow.assertEqual(True, inEvent.getData(1))
                    _self_shaddow.eventArrived = True
        executionPlanRuntime.addCallback("query1", ConcreteQueryCallback())
        # Retrieving input handler to push events into Siddhi
        inputHandler = executionPlanRuntime.getInputHandler("inputStream")
        # Starting event processing
        executionPlanRuntime.start()
        # Sending events to Siddhi
        inputHandler.send(["IBM", 700.0, LongType(100)])
        inputHandler.send(["WSO2", 60.5, LongType(200)])
        inputHandler.send(["One of the best middleware is from WSO2.", 60.5, LongType(200)])
        sleep(0.5)
        self.assertEqual(self.count.get(),3)
        self.assertTrue(self.eventArrived)
        siddhiManager.shutdown()
| {
"content_hash": "3d3a34fe9bc4b6255ecdd9fbc0ac67c4",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 138,
"avg_line_length": 38.97947214076246,
"alnum_prop": 0.6220282876918447,
"repo_name": "madhawav/SiddhiCEPPythonAPI",
"id": "e49de79a93d39ca14c81181b98e272eebf0999b0",
"size": "13311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tests/SiddhiCEP3Tests/ExtensionsTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1074"
},
{
"name": "Java",
"bytes": "53615"
},
{
"name": "Makefile",
"bytes": "1872"
},
{
"name": "Python",
"bytes": "209793"
},
{
"name": "Shell",
"bytes": "974"
}
],
"symlink_target": ""
} |
import pytest
import os
import sys
# Point Django at the test settings (without clobbering an explicit override)
# before pytest imports the application modules.
os.environ.setdefault(
    'DJANGO_SETTINGS_MODULE', 'templatefield.test.settings')
# Run the whole test suite and propagate pytest's exit status to the shell.
# NOTE: executes at import time -- importing this module launches the tests.
sys.exit(pytest.main())
| {
"content_hash": "a583a5d37211cd7325720994d7cfc146",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 60,
"avg_line_length": 18.125,
"alnum_prop": 0.7586206896551724,
"repo_name": "orcasgit/django-template-field",
"id": "c54b8fbd83eeb985ecccad95dab0fb22bd3944fa",
"size": "243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runtests.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1260"
},
{
"name": "Python",
"bytes": "9122"
}
],
"symlink_target": ""
} |
"""
CERN@school - Processing Frames
See the README.md file for more information.
"""
#...for the operating stuff.
import os
#...for the file processing.
import glob
#...for parsing the arguments.
import argparse
#...for the logging.
import logging as lg
#...for file manipulation.
from shutil import rmtree
# Import the JSON library.
import json
#...for processing the datasets.
from cernatschool.dataset import Dataset
#...for making time.
from cernatschool.handlers import make_time_dir
#...for making the frame and clusters images.
from visualisation.visualisation import makeFrameImage
if __name__ == "__main__":
    print("*")
    print("*======================================*")
    print("* CERN@school - local frame processing *")
    print("*======================================*")
    # Get the datafile path from the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument("inputPath", help="Path to the input dataset.")
    parser.add_argument("outputPath", help="The path for the output files.")
    parser.add_argument("-v", "--verbose", help="Increase output verbosity", action="store_true")
    args = parser.parse_args()
    ## The path to the data file.
    datapath = args.inputPath
    #
    # Check if the input directory exists. If it doesn't, quit.
    if not os.path.isdir(datapath):
        raise IOError("* ERROR: '%s' input directory does not exist!" % (datapath))
    ## The output path.
    outputpath = args.outputPath
    # Check if the output directory exists. If it doesn't, quit.
    if not os.path.isdir(outputpath):
        raise IOError("* ERROR: '%s' output directory does not exist!" % (outputpath))
    # Set the logging level (DEBUG with -v, INFO otherwise).
    if args.verbose:
        level = lg.DEBUG
    else:
        level = lg.INFO
    # Configure the logging.
    lg.basicConfig(filename = outputpath + '/log_process-frames.log', filemode='w', level=level)
    print("*")
    print("* Input path  : '%s'" % (datapath))
    print("* Output path : '%s'" % (outputpath))
    print("*")
    # Set up the directories
    #------------------------
    # Create the subdirectories, wiping any stale frame images first so the
    # output always reflects exactly this run.
    ## The path to the frame images.
    frame_output_path = os.path.join(outputpath, "PNG")
    #
    if os.path.isdir(frame_output_path):
        rmtree(frame_output_path)
        lg.info(" * Removing directory '%s'..." % (frame_output_path))
    os.mkdir(frame_output_path)
    lg.info(" * Creating directory '%s'..." % (frame_output_path))
    lg.info("")
    ## The path to the dataset.
    dataset_path = os.path.join(datapath, "RAW/ASCIIxyC")
    ## The dataset to process.
    ds = Dataset(dataset_path)
    # Get the geographic metadata from the JSON.
    ## The frame metadata.
    fmd = None
    #
    with open(os.path.join(datapath, "geo.json"), "r") as fmdf:
        # BUG FIX: the original called json.load(fmdf, fmd), passing a spurious
        # second positional argument. json.load accepts only the file object
        # positionally, so that call raises TypeError on Python 3.
        fmd = json.load(fmdf)
    #
    ## Latitude of the dataset [deg.].
    lat = fmd['lat'] # [deg.]
    #
    ## Longitude of the dataset [deg.].
    lon = fmd['lon'] # [deg.]
    #
    ## Altitude of the dataset [m].
    alt = fmd['alt'] # [m]
    ## The pixel mask: maps the flattened pixel index X = 256*y + x to 1
    ## for every masked (hot/dead) pixel listed in the tab-separated file.
    pixel_mask = {}
    with open(os.path.join(datapath, "masked_pixels.txt"), "r") as mpf:
        rows = mpf.readlines()
        for row in rows:
            vals = [int(val) for val in row.strip().split("\t")]
            x = vals[0]; y = vals[1]; X = (256*y) + x; C = 1
            pixel_mask[X] = C
    ## The frames from the dataset.
    frames = ds.getFrames((lat, lon, alt), pixelmask = pixel_mask)
    lg.info("* Found %d datafiles." % (len(frames)))
    ## A list of frame metadata dictionaries, one per frame.
    mds = []
    # Loop over the frames, write out an image for each and collect metadata.
    for f in frames:
        ## The basename for the data frame, based on frame information.
        bn = "%s_%s" % (f.getChipId(), make_time_dir(f.getStartTimeSec()))
        #bn = "%s_%d-%06d" % (f.getChipId(), f.getStartTimeSec(), f.getStartTimeSubSec())
        # Create the frame image.
        makeFrameImage(bn, f.getPixelMap(), frame_output_path, f.getPixelMask())
        # Create the metadata dictionary for the frame.
        metadata = {
            "id"          : bn,
            #
            "chipid"      : f.getChipId(),
            "hv"          : f.getBiasVoltage(),
            "ikrum"       : f.getIKrum(),
            #
            "lat"         : f.getLatitude(),
            "lon"         : f.getLongitude(),
            "alt"         : f.getAltitude(),
            #
            "start_time"  : f.getStartTimeSec(),
            "end_time"    : f.getEndTimeSec(),
            "acqtime"     : f.getAcqTime(),
            #
            "n_pixel"     : f.getNumberOfUnmaskedPixels(),
            "occ"         : f.getOccupancy(),
            "occ_pc"      : f.getOccupancyPc(),
            #
            "n_kluster"   : f.getNumberOfKlusters(),
            "n_gamma"     : f.getNumberOfGammas(),
            "n_non_gamma" : f.getNumberOfNonGammas(),
            #
            "ismc"        : int(f.isMC())
            }
        # Add the frame metadata to the list of frames.
        mds.append(metadata)
    # Write out the frame information to a JSON file.
    # We will use this later to make the frame plots,
    # rather than processing the whole frame set again.
    #
    with open(os.path.join(outputpath, "frames.json"), "w") as jf:
        json.dump(mds, jf)
| {
"content_hash": "653b67989248c6e7d59792326d3b461b",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 97,
"avg_line_length": 29.646408839779006,
"alnum_prop": 0.5585165859112933,
"repo_name": "CERNatschool/particle-rate-plotter",
"id": "2629cfff9902b42aeb61e84ba8a5374f308be899",
"size": "5413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "process-frames.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "149334"
},
{
"name": "Shell",
"bytes": "187"
}
],
"symlink_target": ""
} |
from __future__ import division
from collections import deque
import time
import math
import random
import networkx as nx
import matplotlib.pyplot as plt
class HeuSearch():
    """Heuristic (best-first) search over a Wikipedia-style link graph.

    The graph arrives as three pre-parsed text blobs:
      * datalist -- lines of "<word> <n_inlinks> <n_outlinks>"
      * inlist   -- lines of "<word> <inlink> <inlink> ..."
      * outlist  -- lines of "<word> <outlink> <outlink> ..."
    ``Search`` expands nodes in order of ``value`` (a link-overlap score
    with the destination article) until *dest* is found, then renders the
    explored graph to HSearch.png.
    """
    def __init__(self, sourceword, dest, datalist, outlist, inlist):
        self.sourceword = sourceword
        self.dest = dest
        self.contentdata = datalist
        self.contentin = inlist
        self.contentout = outlist
        self.numofinlinks = {}   # word -> declared in-link count
        self.numofoutlinks = {}  # word -> declared out-link count
        self.visit = {}          # word -> 1 once expanded
        self.insert = {}         # word -> 1 once queued
        self.links = {}          # word -> list of in-link words
        self.linkss = {}         # word -> list of out-link words
        self.cat = {}
        self.wordlist = []
        self.set2 = set()        # in-links of dest (filled by Search)
        self.set4 = set()        # out-links of dest (filled by Search)
        self.initialize()
    def initialize(self):
        """Parse the word statistics and the in/out link adjacency lists."""
        for line in self.contentdata:
            component = line[:-1].split(' ')
            self.visit[component[0]] = 0
            self.numofinlinks[component[0]] = int(component[1])
            self.numofoutlinks[component[0]] = int(component[2])
            self.wordlist.append(component[0])
            self.insert[component[0]] = 0
        for line in self.contentin:
            component = line.split(' ')
            keyword = component[0]
            neighbours = []
            for item in component:
                # Strip a trailing newline and skip the keyword itself.
                if item[-1] == '\n':
                    item = item[:-1]
                if item == component[0]:
                    continue
                else:
                    neighbours.append(item)
            self.links[keyword] = neighbours
        for line in self.contentout:
            component = line.split(' ')
            keyword = component[0]
            neighbours = []
            for item in component:
                if item[-1] == '\n':
                    item = item[:-1]
                if item == component[0]:
                    continue
                else:
                    neighbours.append(item)
            self.linkss[keyword] = neighbours
    def value(self, word):
        """Heuristic score for *word*.

        Counts the overlap between word's out-links and dest's in-links,
        plus the overlap between word's in-links and dest's out-links.
        (A Jaccard-style normalised variant was tried previously and is
        intentionally not used.) Unknown link counts default to 0.
        """
        try:
            size = len(set(self.linkss[word]) & self.set2)
        except KeyError:
            size = 0
        try:
            size_ = len(set(self.links[word]) & self.set4)
        except KeyError:
            size_ = 0
        if word not in self.numofinlinks:
            self.numofinlinks[word] = 0
        if self.dest not in self.numofinlinks:
            self.numofinlinks[self.dest] = 0
        if word not in self.numofoutlinks:
            self.numofoutlinks[word] = 0
        if self.dest not in self.numofoutlinks:
            # BUG FIX: the original wrote self.numofoutlinks[word] = 0 here,
            # leaving dest's out-link count unset despite the check.
            self.numofoutlinks[self.dest] = 0
        return size + size_
    def Search(self):
        """Run best-first search from sourceword to dest.

        Returns (path_string, expanded_count, elapsed_seconds, None); the
        trailing None is drawgraph's return value, invoked for its side
        effect of saving the explored graph to HSearch.png.
        """
        start = time.clock()  # NOTE: time.clock was removed in Python 3.8
        self.insert[self.sourceword] = 1
        self.set2 = set(self.links[self.dest])
        self.set4 = set(self.linkss[self.dest])
        # --- the search starts here ---------------------------------------
        source = node(self.sourceword, '', self.value(self.sourceword))
        temp = source
        queue = [source]
        flag = False
        index = 0
        q = []  # every node ever expanded or reached, for drawgraph
        while True:
            temp = queue.pop(0)
            q.append(temp)
            if self.visit[temp.key] == 0:
                self.visit[temp.key] = 1
            else:
                continue
            # Skip high-degree utility pages ("Wikimedia Commons", "ISBN").
            if temp.key == '维基共享资源' or temp.key == '国际标准书号':
                continue
            index += 1
            if temp.key == self.dest:
                break
            if temp.key not in self.linkss:
                continue
            # Expand this node via its out-links.
            linklist = self.linkss[temp.key]
            for word in linklist:
                if word == self.dest:
                    temp = node(word, temp.path, self.value(word))
                    q.append(temp)
                    flag = True
                    break
                if self.visit[word] == 0:
                    if self.insert[word] == 0:
                        subnode = node(word, temp.path, self.value(word))
                        queue.append(subnode)
                        self.insert[word] = 1
            if flag == True:
                break
            # Re-sort the frontier so the best-scoring node is expanded next.
            queue = sorted(queue, key=lambda item: item.keyvalue, reverse=True)
        etime = time.clock() - start
        return (temp.path[2:], index, etime, self.drawgraph(temp.path, q))
    def drawgraph(self, path, nodelist):
        """Render the explored search graph to HSearch.png.

        Nodes on the final *path* are coloured green, all others red.
        Smaller glyphs are used for large graphs. Returns None.
        """
        color = {}
        graph = nx.Graph()
        for item in nodelist:
            graph.add_node(item.key.decode('utf-8'))
            if item.key in path.split('->'):
                color[item.key.decode('utf-8')] = 'green'
        for item in nodelist:
            if item.path == '':
                continue
            s = item.path.split('->')
            for i in range(0, len(s) - 1):
                # s[0] is the empty segment before the leading '->'.
                if i == 0:
                    continue
                graph.add_edge(s[i].decode('utf-8'), s[i + 1].decode('utf-8'))
        values = [color.get(key, 'red') for key in graph.nodes()]
        pos = nx.spring_layout(graph)  # computed but unused; draw_networkx lays out internally
        if len(nodelist) > 500:
            nx.draw_networkx(graph, font_family='SimHei', node_size=50, node_color=values, font_size=5)
        else:
            nx.draw_networkx(graph, font_family='SimHei', node_size=1000, node_color=values, font_size=10)
        plt.savefig('HSearch.png')
        plt.close()
        return None
"""
node 为每个节点的类,其中key为关键词,path为当前路径,keyvalue为估价值
"""
class node():
    """A search-tree node.

    key      -- the article title at this node
    path     -- the route taken so far, extended with '->key'
    keyvalue -- the heuristic estimate used to order the frontier
    """
    def __init__(self, key, path, keyvalue):
        self.key = key
        self.keyvalue = keyvalue
        self.path = '->'.join((path, key))
if __name__ == '__main__':
    # Python 2 entry point: prompt for the two endpoints interactively.
    source = raw_input('source is: \n')
    dest = raw_input('dest is :\n')
    print source
    print dest
    #try:
    # NOTE(review): HeuSearch.__init__ takes (sourceword, dest, datalist,
    # outlist, inlist); this two-argument call would raise TypeError --
    # the graph data lists need to be loaded and passed here.
    hsearch = HeuSearch(source,dest)
    # NOTE(review): the method is named `Search` (capital S); `search()` as
    # written would raise AttributeError -- confirm the intended entry point.
    path,num,etime,graph = hsearch.search()
    print path + ' ' + str(num) + ' nodes ' + ' use ' + str(etime) + 's'
| {
"content_hash": "1178b9e56e48b1a130eeeb2d07205f56",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 151,
"avg_line_length": 34.8494623655914,
"alnum_prop": 0.49166923788954026,
"repo_name": "laosiaudi/Wiki-search",
"id": "8f0b2356aaa406a3c8913d7cf18e1722816cb9af",
"size": "6760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/heuristic_search.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "158"
},
{
"name": "Python",
"bytes": "48301"
},
{
"name": "Shell",
"bytes": "255"
}
],
"symlink_target": ""
} |
from llvmlite import ir
from numba import cuda, types
from numba.core import cgutils
from numba.core.errors import RequireLiteralValue
from numba.core.typing import signature
from numba.core.extending import overload_attribute
from numba.cuda import nvvmutils
from numba.cuda.extending import intrinsic
#-------------------------------------------------------------------------------
# Grid functions
def _type_grid_function(ndim):
    """Build the typing signature shared by grid() and gridsize().

    The literal dimension count selects the return type: a single int32
    for 1, a UniTuple of int32 for 2 or 3; anything else is rejected.
    """
    val = ndim.literal_value
    if val not in (1, 2, 3):
        raise ValueError('argument can only be 1, 2, 3')
    restype = types.int32 if val == 1 else types.UniTuple(types.int32, val)
    return signature(restype, types.int32)
@intrinsic
def grid(typingctx, ndim):
    '''grid(ndim)
    Return the absolute position of the current thread in the entire grid of
    blocks. *ndim* should correspond to the number of dimensions declared when
    instantiating the kernel. If *ndim* is 1, a single integer is returned.
    If *ndim* is 2 or 3, a tuple of the given number of integers is returned.
    Computation of the first integer is as follows::
        cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x
    and is similar for the other two indices, but using the ``y`` and ``z``
    attributes.
    '''
    # The dimension count must be a compile-time literal so the result type
    # (int32 vs. a UniTuple of int32) can be fixed during typing.
    if not isinstance(ndim, types.IntegerLiteral):
        raise RequireLiteralValue(ndim)
    sig = _type_grid_function(ndim)
    def codegen(context, builder, sig, args):
        restype = sig.return_type
        # 1-D case: a single global thread index.
        if restype == types.int32:
            return nvvmutils.get_global_id(builder, dim=1)
        # 2-D / 3-D case: pack the per-dimension indices into a tuple value.
        elif isinstance(restype, types.UniTuple):
            ids = nvvmutils.get_global_id(builder, dim=restype.count)
            return cgutils.pack_array(builder, ids)
    return sig, codegen
@intrinsic
def gridsize(typingctx, ndim):
    '''gridsize(ndim)
    Return the absolute size (or shape) in threads of the entire grid of
    blocks. *ndim* should correspond to the number of dimensions declared when
    instantiating the kernel. If *ndim* is 1, a single integer is returned.
    If *ndim* is 2 or 3, a tuple of the given number of integers is returned.
    Computation of the first integer is as follows::
        cuda.blockDim.x * cuda.gridDim.x
    and is similar for the other two indices, but using the ``y`` and ``z``
    attributes.
    '''
    # The dimension count must be a compile-time literal so the result type
    # (int32 vs. a UniTuple of int32) can be fixed during typing.
    if not isinstance(ndim, types.IntegerLiteral):
        raise RequireLiteralValue(ndim)
    sig = _type_grid_function(ndim)
    def _nthreads_for_dim(builder, dim):
        # Threads along one axis = threads/block (ntid) * blocks/grid (nctaid).
        ntid = nvvmutils.call_sreg(builder, f"ntid.{dim}")
        nctaid = nvvmutils.call_sreg(builder, f"nctaid.{dim}")
        return builder.mul(ntid, nctaid)
    def codegen(context, builder, sig, args):
        restype = sig.return_type
        # The x extent is always needed; add y (and z) for the tuple cases.
        nx = _nthreads_for_dim(builder, 'x')
        if restype == types.int32:
            return nx
        elif isinstance(restype, types.UniTuple):
            ny = _nthreads_for_dim(builder, 'y')
            if restype.count == 2:
                return cgutils.pack_array(builder, (nx, ny))
            elif restype.count == 3:
                nz = _nthreads_for_dim(builder, 'z')
                return cgutils.pack_array(builder, (nx, ny, nz))
    return sig, codegen
@intrinsic
def _warpsize(typingctx):
    # Zero-argument intrinsic typed as int32; lowering reads the
    # `warpsize` special register.
    def codegen(context, builder, sig, args):
        return nvvmutils.call_sreg(builder, 'warpsize')
    return signature(types.int32), codegen
@overload_attribute(types.Module(cuda), 'warpsize', target='cuda')
def cuda_warpsize(mod):
    '''
    The size of a warp. All architectures implemented to date have a warp size
    of 32.
    '''
    # Attribute overload: return a getter that delegates to the intrinsic.
    def getter(mod):
        return _warpsize()
    return getter
#-------------------------------------------------------------------------------
# syncthreads
@intrinsic
def syncthreads(typingctx):
    '''
    Synchronize all threads in the same thread block. This function implements
    the same pattern as barriers in traditional multi-threaded programming:
    every thread in the block waits here until all of them have arrived, and
    only then does execution continue.
    '''
    def codegen(context, builder, sig, args):
        # Emit a call to the NVVM block-wide barrier (no args, no result).
        fnty = ir.FunctionType(ir.VoidType(), ())
        barrier = cgutils.get_or_insert_function(builder.module, fnty,
                                                 'llvm.nvvm.barrier0')
        builder.call(barrier, ())
        return context.get_dummy_value()
    return signature(types.none), codegen
def _syncthreads_predicate(typingctx, predicate, fname):
    """Shared typing/lowering helper for the predicated barrier intrinsics.

    Returns None (no match) unless *predicate* types as an integer;
    otherwise lowers to an i32 -> i32 call of the NVVM barrier variant
    named *fname*.
    """
    if not isinstance(predicate, types.Integer):
        return None
    def codegen(context, builder, sig, args):
        i32 = ir.IntType(32)
        fnty = ir.FunctionType(i32, (i32,))
        barrier = cgutils.get_or_insert_function(builder.module, fnty, fname)
        return builder.call(barrier, args)
    return signature(types.i4, types.i4), codegen
@intrinsic
def syncthreads_count(typingctx, predicate):
    '''
    syncthreads_count(predicate)
    Barrier variant of numba.cuda.syncthreads: every thread in the block
    receives the count of threads whose *predicate* was true.
    '''
    return _syncthreads_predicate(typingctx, predicate,
                                  'llvm.nvvm.barrier0.popc')
@intrinsic
def syncthreads_and(typingctx, predicate):
    '''
    syncthreads_and(predicate)
    Barrier variant of numba.cuda.syncthreads: returns 1 if *predicate* is
    true for all threads in the block, 0 otherwise.
    '''
    return _syncthreads_predicate(typingctx, predicate,
                                  'llvm.nvvm.barrier0.and')
@intrinsic
def syncthreads_or(typingctx, predicate):
    '''
    syncthreads_or(predicate)
    Barrier variant of numba.cuda.syncthreads: returns 1 if *predicate* is
    true for any thread in the block, 0 otherwise.
    '''
    return _syncthreads_predicate(typingctx, predicate,
                                  'llvm.nvvm.barrier0.or')
| {
"content_hash": "9f044c90b3e7ee2e8f99414f72959b4d",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 80,
"avg_line_length": 29.918781725888326,
"alnum_prop": 0.6518493383101459,
"repo_name": "cpcloud/numba",
"id": "746669ed8d7927f7d17db8122ca7ad7633341543",
"size": "5894",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "numba/cuda/intrinsics.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3699"
},
{
"name": "C",
"bytes": "573767"
},
{
"name": "C++",
"bytes": "166526"
},
{
"name": "Cuda",
"bytes": "1110"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "9320077"
},
{
"name": "Shell",
"bytes": "13454"
}
],
"symlink_target": ""
} |
"""
Tests for txdav.caldav.datastore.util.
"""
import textwrap
from twisted.trial.unittest import TestCase as BaseTestCase
from txweb2.http_headers import MimeType
from twisted.internet.defer import inlineCallbacks
from twistedcaldav.ical import Component
from twistedcaldav.test.util import TestCase
from txdav.common.datastore.test.util import populateCalendarsFrom, CommonCommonTests
from txdav.caldav.datastore.util import dropboxIDFromCalendarObject, \
StorageTransportBase, migrateHome
from txdav.common.icommondatastore import HomeChildNameAlreadyExistsError
class DropboxIDTests(TestCase):
    """
    Test dropbox ID extraction from calendar data.
    """

    class FakeCalendarResource(object):
        """
        Fake object resource to work with tests.
        """

        def __init__(self, data):
            # Parse the raw iCalendar text once; the tests only need the
            # component and UID accessors below.
            self.ical = Component.fromString(data)

        def component(self):
            return self.ical

        def uid(self):
            return self.ical.resourceUID()

    @inlineCallbacks
    def test_noAttachOrXdash(self):
        # No ATTACH or X-APPLE-DROPBOX property: the dropbox ID is derived
        # from the event UID.
        resource = DropboxIDTests.FakeCalendarResource("""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
UID:12345-67890
DTSTART:20071114T000000Z
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
END:VCALENDAR
""")

        self.assertEquals(
            (yield dropboxIDFromCalendarObject(resource)),
            "12345-67890.dropbox"
        )

    @inlineCallbacks
    def test_okXdash(self):
        # An X-APPLE-DROPBOX property wins: the last path segment of its
        # URI value becomes the dropbox ID.
        resource = DropboxIDTests.FakeCalendarResource("""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
UID:12345-67890
DTSTART:20071114T000000Z
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
X-APPLE-DROPBOX:http://example.com/calendars/__uids__/1234/dropbox/12345-67890X.dropbox
END:VEVENT
END:VCALENDAR
""")

        self.assertEquals(
            (yield dropboxIDFromCalendarObject(resource)),
            "12345-67890X.dropbox"
        )

    @inlineCallbacks
    def test_emptyXdash(self):
        # An empty X-APPLE-DROPBOX value is ignored; fall back to the UID.
        resource = DropboxIDTests.FakeCalendarResource("""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
UID:12345-67890
DTSTART:20071114T000000Z
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
X-APPLE-DROPBOX:
END:VEVENT
END:VCALENDAR
""")

        self.assertEquals((yield dropboxIDFromCalendarObject(resource)), "12345-67890.dropbox")

    @inlineCallbacks
    def test_okAttach(self):
        # An ATTACH URI that points inside a dropbox collection yields the
        # collection (directory) name as the dropbox ID.
        resource = DropboxIDTests.FakeCalendarResource("""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
UID:12345-67890
DTSTART:20071114T000000Z
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
ATTACH;VALUE=URI:http://example.com/calendars/__uids__/1234/dropbox/12345-67890Y.dropbox/text.txt
END:VEVENT
END:VCALENDAR
""")

        self.assertEquals(
            (yield dropboxIDFromCalendarObject(resource)),
            "12345-67890Y.dropbox"
        )

    @inlineCallbacks
    def test_badAttach(self):
        # An ATTACH URI that is not a dropbox URL is ignored; fall back to
        # the UID.
        resource = DropboxIDTests.FakeCalendarResource("""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
UID:12345-67890
DTSTART:20071114T000000Z
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
ATTACH;VALUE=URI:tag:bogus
END:VEVENT
END:VCALENDAR
""")

        self.assertEquals(
            (yield dropboxIDFromCalendarObject(resource)),
            "12345-67890.dropbox"
        )

    @inlineCallbacks
    def test_inlineAttach(self):
        # An inline (base64, non-URI) attachment is ignored; fall back to
        # the UID.
        resource = DropboxIDTests.FakeCalendarResource("""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
UID:12345-67890
DTSTART:20071114T000000Z
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
ATTACH:bmFzZTY0
END:VEVENT
END:VCALENDAR
""")

        self.assertEquals(
            (yield dropboxIDFromCalendarObject(resource)),
            "12345-67890.dropbox"
        )

    @inlineCallbacks
    def test_multipleAttach(self):
        # With several ATTACH properties only the dropbox-style http URI
        # contributes the dropbox ID.
        resource = DropboxIDTests.FakeCalendarResource("""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
UID:12345-67890
DTSTART:20071114T000000Z
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
ATTACH;VALUE=URI:tag:bogus
ATTACH:bmFzZTY0
ATTACH;VALUE=URI:http://example.com/calendars/__uids__/1234/dropbox/12345-67890Z.dropbox/text.txt
END:VEVENT
END:VCALENDAR
""")

        self.assertEquals(
            (yield dropboxIDFromCalendarObject(resource)),
            "12345-67890Z.dropbox"
        )

    @inlineCallbacks
    def test_okAttachRecurring(self):
        # A dropbox ATTACH on an overridden recurrence instance is found
        # even though the master component has none.
        resource = DropboxIDTests.FakeCalendarResource("""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
UID:12345-67890
DTSTART:20071114T000000Z
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=YEARLY
END:VEVENT
BEGIN:VEVENT
UID:12345-67890
RECURRENCE-ID:20081114T000000Z
DTSTART:20071114T000000Z
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
ATTACH;VALUE=URI:http://example.com/calendars/__uids__/1234/dropbox/12345-67890Y.dropbox/text.txt
END:VEVENT
END:VCALENDAR
""")

        self.assertEquals(
            (yield dropboxIDFromCalendarObject(resource)),
            "12345-67890Y.dropbox"
        )

    @inlineCallbacks
    def test_okAttachAlarm(self):
        # ATTACH inside a VALARM does not count as a dropbox attachment;
        # fall back to the UID.
        resource = DropboxIDTests.FakeCalendarResource("""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
UID:12345-67890
DTSTART:20071114T000000Z
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
BEGIN:VALARM
ACTION:AUDIO
ATTACH;VALUE=URI:Ping
TRIGGER:-PT15M
X-WR-ALARMUID:5548D654-8FDA-49DB-8983-8FCAD1F322B1
END:VALARM
END:VEVENT
END:VCALENDAR
""")

        self.assertEquals(
            (yield dropboxIDFromCalendarObject(resource)),
            "12345-67890.dropbox"
        )

    @inlineCallbacks
    def test_UIDbadPath(self):
        # UIDs containing path-hostile characters ("/", ":", ".", URL
        # scheme prefixes) are sanitized before forming the dropbox ID;
        # each pair below maps a raw UID to its sanitized form.
        test_UIDs = (
            ("12345/67890", "12345-67890"),
            ("http://12345,67890", "12345,67890"),
            ("https://12345,67890", "12345,67890"),
            ("12345:67890", "1234567890"),
            ("12345.67890", "1234567890"),
            ("12345/6:7.890", "12345-67890"),
        )

        for uid, result in test_UIDs:
            resource = DropboxIDTests.FakeCalendarResource("""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
UID:%s
DTSTART:20071114T000000Z
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
END:VCALENDAR
""" % (uid,))
            self.assertEquals(
                (yield dropboxIDFromCalendarObject(resource)),
                "%s.dropbox" % (result,),
            )
class StorageTransportTests(TestCase):
    """
    Tests for content-type handling in L{StorageTransportBase}.
    """

    def test_MissingContentType(self):
        """
        When no content type is supplied, the attachment's file name
        extension determines the MIME type (defaulting to
        application/octet-stream); an explicitly supplied type and
        disposition name are preserved as given.
        """
        cases = (
            ("plain.txt", MimeType.fromString("text/plain"),),
            ("word.doc", MimeType.fromString("application/msword"),),
            ("markup.html", MimeType.fromString("text/html"),),
            ("octet", MimeType.fromString("application/octet-stream"),),
            ("bogus.bog", MimeType.fromString("application/octet-stream"),),
        )

        class StubAttachment(object):
            # Minimal stand-in providing only the name() accessor.

            def __init__(self, name):
                self._name = name

            def name(self):
                return self._name

        for filename, expected in cases:
            # No explicit content type: it must be guessed from the name.
            transport = StorageTransportBase(
                StubAttachment(filename), None, None)
            self.assertEquals(transport._contentType, expected)
            self.assertEquals(transport._dispositionName, None)

            # Explicit type and disposition name: both kept verbatim.
            transport = StorageTransportBase(
                StubAttachment(filename), expected, filename)
            self.assertEquals(transport._contentType, expected)
            self.assertEquals(transport._dispositionName, filename)
class HomeMigrationTests(CommonCommonTests, BaseTestCase):
    """
    Tests for L{migrateHome}.
    """

    @inlineCallbacks
    def setUp(self):
        yield super(HomeMigrationTests, self).setUp()
        # Provision the store/directory with the four home UIDs that the
        # tests below operate on.
        yield self.buildStoreAndDirectory(
            extraUids=(
                u"conflict1",
                u"conflict2",
                u"empty_home",
                u"non_empty_home",
            )
        )

    @inlineCallbacks
    def test_migrateEmptyHome(self):
        """
        Migrating an empty home into an existing home should destroy all the
        existing home's calendars.
        """
        yield populateCalendarsFrom({
            "empty_home": {
                # Some of the upgrade logic will ensure that sufficient default
                # calendars exist for basic usage, so this home is actually only
                # *mostly* empty; the important thing is that the default
                # calendar is removed.
                "other-default-calendar": {}
            },
            "non_empty_home": {
                "calendar": {},
                "inbox": {},
                # XXX: implementation is configuration-sensitive regarding the
                # 'tasks' calendar and it shouldn't be.
                "tasks": {},
                "polls": {},
            }
        }, self.storeUnderTest())
        txn = self.transactionUnderTest()
        emptyHome = yield txn.calendarHomeWithUID("empty_home")
        # Sanity check: the source home has no default "calendar".
        self.assertIdentical((yield emptyHome.calendarWithName("calendar")), None)
        nonEmpty = yield txn.calendarHomeWithUID("non_empty_home")
        yield migrateHome(emptyHome, nonEmpty)
        yield self.commit()
        txn = self.transactionUnderTest()
        emptyHome = yield txn.calendarHomeWithUID("empty_home")
        nonEmpty = yield txn.calendarHomeWithUID("non_empty_home")
        # Target's default calendar is destroyed, inbox is preserved, and
        # the source's calendar has been copied over.
        self.assertIdentical((yield nonEmpty.calendarWithName("calendar")), None)
        self.assertNotIdentical((yield nonEmpty.calendarWithName("inbox")), None)
        self.assertNotIdentical((yield nonEmpty.calendarWithName("other-default-calendar")), None)

    @staticmethod
    def sampleEvent(uid, summary=None):
        """
        Create the iCalendar text for a sample event that has no organizer nor
        any attendees.

        Returns a 2-tuple of (iCalendar text, metadata dict) in the shape
        expected by L{populateCalendarsFrom}.
        """
        if summary is None:
            summary = "event " + uid
        return textwrap.dedent(
            """\
BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:{uid}
DTSTART;VALUE=DATE:20060201
DURATION:P1D
CREATED:20060101T210000Z
DTSTAMP:20051222T210146Z
LAST-MODIFIED:20051222T210203Z
SEQUENCE:1
SUMMARY:{summary}
TRANSP:TRANSPARENT
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n").format(uid=uid, summary=summary)
        ), {}

    @inlineCallbacks
    def createConflicted(self, c1=None, c2=None):
        """
        Create two calendar homes with calendars with the same names within
        them. Parameters are both a mapping of calendar object names to
        2-tuples of (iCalendar data, metadata).

        @param c1: the calendar data for conflict1/conflicted/*

        @param c2: the calendar data for conflict2/conflicted/*
        """
        if c1 is None:
            c1 = {"1.ics": self.sampleEvent("uid1")}
        if c2 is None:
            c2 = {"2.ics": self.sampleEvent("uid2")}
        defaults = {"calendar": {}, "inbox": {}, "tasks": {}, "polls": {}}

        def conflicted(caldata):
            # The default calendars plus a "conflicted" calendar holding
            # the given objects.
            d = defaults.copy()
            d.update(conflicted=caldata)
            return d
        yield populateCalendarsFrom({
            "conflict1": conflicted(c1),
            "conflict2": conflicted(c2),
        }, self.storeUnderTest())

    @inlineCallbacks
    def test_migrateConflict(self):
        """
        Migrating a home with conflicting (non-default) calendars will cause an
        error.
        """
        yield self.createConflicted()
        txn = self.transactionUnderTest()
        conflict1 = yield txn.calendarHomeWithUID("conflict1")
        conflict2 = yield txn.calendarHomeWithUID("conflict2")

        try:
            yield migrateHome(conflict1, conflict2)
        except HomeChildNameAlreadyExistsError:
            pass
        else:
            self.fail("No exception raised.")

    @inlineCallbacks
    def test_migrateMergeCalendars(self):
        """
        Migrating a home with a conflicting (non-default) calendar in merge
        mode will cause the properties on the conflicting calendar to be
        overridden by the new calendar of the same name, and calendar objects
        to be copied over.
        """
        yield self.createConflicted()
        from txdav.base.propertystore.base import PropertyName
        from txdav.xml import element as davxml

        class StubConflictingElement(davxml.WebDAVTextElement):
            namespace = "http://example.com/ns/stub-conflict"
            name = "conflict"

        beforeProp = StubConflictingElement.fromString("before")
        afterProp = StubConflictingElement.fromString("after")
        conflictPropName = PropertyName.fromElement(beforeProp)
        txn = self.transactionUnderTest()
        conflict1 = yield txn.calendarHomeWithUID("conflict1")
        conflict2 = yield txn.calendarHomeWithUID("conflict2")
        cal1 = yield conflict1.calendarWithName("conflicted")
        cal2 = yield conflict2.calendarWithName("conflicted")
        p1 = cal1.properties()
        p2 = cal2.properties()
        p1[conflictPropName] = afterProp
        p2[conflictPropName] = beforeProp
        yield migrateHome(conflict1, conflict2, merge=True)
        # The source calendar's property value overrides the target's.
        self.assertEquals(p2[conflictPropName].children[0].data, "after")
        obj1 = yield cal2.calendarObjectWithName("1.ics")
        obj2 = yield cal2.calendarObjectWithName("2.ics")
        # just a really cursory check to make sure they're really there.
        self.assertEquals(obj1.uid(), "uid1")
        self.assertEquals(obj2.uid(), "uid2")

    @inlineCallbacks
    def test_migrateMergeConflictingObjects(self):
        """
        When merging two homes together, calendar objects may conflict in the
        following ways:

        First, an object may have the same name and the same UID as an object
        in the target calendar. We assume the target object is always be newer
        than the source object, so this type of conflict will leave the source
        object unmodified. This type of conflict is expected, and may happen
        as a result of an implicitly scheduled event where the principal owning
        the merged calendars is an attendee of the conflicting object, and
        received a re-invitation.

        Second, an object may have a different name, but the same UID as an
        object in the target calendar. While this type of conflict is not
        expected -- most clients will choose names for objects that correspond
        to the iCalendar UIDs of their main component -- it is treated the same
        way as the first conflict.

        Third, an object may have the same UID as an object on a different
        calendar in the target home. This may also happen if a scheduled event
        was previously on a different (most likely non-default) calendar.
        Technically this is actually valid, and it is possible to have the same
        object in multiple calendars as long as the object is not scheduled;
        however, that type of conflict is extremely unlikely as the client
        would have to generate the same event twice.

        Basically, in all expected cases, conflicts will only occur because an
        update to a scheduled event was sent out and the target home accepted
        it. Therefore, conflicts are always resolved in favor of ignoring the
        source data and trusting that the target data is more reliable.
        """
        # Note: these tests are all performed with un-scheduled data because it
        # is simpler. Although the expected conflicts will involve scheduled
        # data the behavior will be exactly the same.
        yield self.createConflicted(
            {
                "same-name": self.sampleEvent("same-name", "source"),
                "other-name": self.sampleEvent("other-uid", "source other"),
                "other-calendar": self.sampleEvent("oc", "source calendar"),
                "no-conflict": self.sampleEvent("no-conflict", "okay"),
            },
            {
                "same-name": self.sampleEvent("same-name", "target"),
                "different-name": self.sampleEvent("other-uid", "tgt other"),
            },
        )
        txn = self.transactionUnderTest()
        c2 = yield txn.calendarHomeWithUID("conflict2")
        otherCal = yield c2.createCalendarWithName("othercal")
        yield otherCal.createCalendarObjectWithName(
            "some-name", Component.fromString(
                self.sampleEvent("oc", "target calendar")[0]
            )
        )
        yield self.commit()
        txn = self.transactionUnderTest()
        c1 = yield txn.calendarHomeWithUID("conflict1")
        c2 = yield txn.calendarHomeWithUID("conflict2")
        yield migrateHome(c1, c2, merge=True)
        yield self.commit()
        txn = self.transactionUnderTest()
        c2 = yield txn.calendarHomeWithUID("conflict2")
        targetCal = yield c2.calendarWithName("conflicted")
        yield self.checkSummary("same-name", "target", targetCal)
        yield self.checkSummary("different-name", "tgt other", targetCal)
        yield self.checkSummary("other-calendar", None, targetCal)
        yield self.checkSummary("other-name", None, targetCal)
        yield self.checkSummary("no-conflict", "okay", targetCal)
        # NOTE(review): otherCal was obtained in an earlier transaction;
        # presumably still valid for this read — confirm.
        yield self.checkSummary("oc", "target calendar", otherCal)

    @inlineCallbacks
    def checkSummary(self, name, summary, cal):
        """
        Verify that the summary of the calendar object for the given name in
        the given calendar matches.

        A C{summary} of C{None} means the object must not exist at all.
        """
        obj = yield cal.calendarObjectWithName(name)
        if summary is None:
            self.assertIdentical(obj, None,
                                 name + " existed but shouldn't have")
        else:
            txt = ((yield obj.component()).mainComponent()
                   .getProperty("SUMMARY").value())
            self.assertEquals(txt, summary)

    @inlineCallbacks
    def test_migrateMergeDontDeleteDefault(self):
        """
        If we're doing a merge migration, it's quite possible that the user has
        scheduled events onto their default calendar already. In fact the
        whole point of a merge migration is to preserve data that might have
        been created there. So, let's make sure that we I{don't} delete any
        data from the default calendars in the case that we're merging.
        """
        yield populateCalendarsFrom({
            "empty_home": {
                # see test_migrateEmptyHome above.
                "other-default-calendar": {}
            },
            "non_empty_home": {
                "calendar": {
                    "some-name": self.sampleEvent("some-uid", "some summary"),
                }, "inbox": {}, "tasks": {}
            }
        }, self.storeUnderTest())
        txn = self.transactionUnderTest()
        emptyHome = yield txn.calendarHomeWithUID("empty_home")
        self.assertIdentical((yield emptyHome.calendarWithName("calendar")),
                             None)
        nonEmpty = yield txn.calendarHomeWithUID("non_empty_home")
        yield migrateHome(emptyHome, nonEmpty, merge=True)
        yield self.commit()
        txn = self.transactionUnderTest()
        emptyHome = yield txn.calendarHomeWithUID("empty_home")
        nonEmpty = yield txn.calendarHomeWithUID("non_empty_home")
        self.assertNotIdentical(
            (yield nonEmpty.calendarWithName("inbox")), None
        )
        # The default calendar and its pre-existing object must survive.
        defaultCal = (yield nonEmpty.calendarWithName("calendar"))
        self.assertNotIdentical(
            (yield defaultCal.calendarObjectWithName("some-name")), None
        )
| {
"content_hash": "65cc0022e826f598e0fbcf0006789cec",
"timestamp": "",
"source": "github",
"line_count": 595,
"max_line_length": 98,
"avg_line_length": 33.2453781512605,
"alnum_prop": 0.6449117840351852,
"repo_name": "trevor/calendarserver",
"id": "698c15aa2777c1dfbb0f7266ea7d010367830157",
"size": "20388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "txdav/caldav/datastore/test/test_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4214"
},
{
"name": "D",
"bytes": "13143"
},
{
"name": "JavaScript",
"bytes": "76566"
},
{
"name": "Python",
"bytes": "9260291"
},
{
"name": "Shell",
"bytes": "78964"
}
],
"symlink_target": ""
} |
import configparser
class Config(object):
    """
    Main application configuration read from an INI file.

    Exposes the credentials token, the owner's ID and the chat command
    prefix (default ``!``). Raises ValueError when no owner ID is present.
    """

    def __init__(self, config_file):
        parser = configparser.ConfigParser()
        parser.read(config_file)

        self.token = parser.get('Credentials', 'Token', fallback=None)
        self.master_id = parser.get('Permissions', 'OwnerID', fallback=None)
        self.command_prefix = parser.get('Chat', 'CommandPrefix', fallback='!')

        if not self.master_id:
            raise ValueError("Owner wasn\'t specified in the config file")
class DatabaseConfig(object):
    """
    Database connection settings read from an INI file.

    All four values (database name, user, host, password) are mandatory;
    a missing value raises ValueError.
    """

    def __init__(self, config_file):
        parser = configparser.ConfigParser()
        parser.read(config_file)

        self.dbname = parser.get('Database', 'Database_Name', fallback=None)
        self.user = parser.get('Database', 'User', fallback=None)
        self.host = parser.get('Database', 'Host', fallback=None)
        self.password = parser.get('Database', 'Password', fallback=None)

        if not all((self.dbname, self.user, self.host, self.password)):
            raise ValueError("Database configuration isn\'t set")
class WeatherConfig(object):
    """
    Weather API credentials read from an INI file.

    Raises ValueError when the API key is missing.
    """

    def __init__(self, config_file):
        parser = configparser.ConfigParser()
        parser.read(config_file)

        self.weather = parser.get('Weather_API', 'API_KEY', fallback=None)
        if not self.weather:
            raise ValueError("Weather API key wasn\'t specified in the config file")
| {
"content_hash": "3120ea516c514c24a38d869722974394",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 84,
"avg_line_length": 36.23076923076923,
"alnum_prop": 0.6475583864118896,
"repo_name": "Masaliukas/Dorna",
"id": "3839f075b7f2b28dc15dba3874eb69ab6c1a2a0e",
"size": "1413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "initialization/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71093"
}
],
"symlink_target": ""
} |
from flask import Blueprint

# Blueprint grouping the "familias" views; intended to be registered on the
# Flask app elsewhere in the package.
familias = Blueprint('familias', __name__)

# Imported last, after ``familias`` exists, so the views module can refer
# back to this blueprint — presumably to avoid a circular import. The import
# is needed for its side effects (attaching the routes to the blueprint).
from . import views
"content_hash": "6ccb743abd6b19033949ed7a4a50a24b",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 42,
"avg_line_length": 14.285714285714286,
"alnum_prop": 0.68,
"repo_name": "originaltebas/chmembers",
"id": "5f58c6b7776e359387816e9be966f6a74864f5dc",
"size": "128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/familias/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "243593"
},
{
"name": "HTML",
"bytes": "279197"
},
{
"name": "JavaScript",
"bytes": "35043"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "207396"
}
],
"symlink_target": ""
} |
"""Tests for Model subclassing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras._impl import keras
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.training.rmsprop import RMSPropOptimizer
# h5py is optional: it is only needed for HDF5 weight saving/loading, so
# tests that require it (see test_saving) bail out when it is unavailable.
try:
  import h5py  # pylint:disable=g-import-not-at-top
except ImportError:
  h5py = None
class SimpleTestModel(keras.Model):
  """A small single-input test model with optional dropout and batch norm."""

  def __init__(self, use_bn=False, use_dp=False, num_classes=10):
    super(SimpleTestModel, self).__init__(name='test_model')
    self.num_classes = num_classes
    self.use_bn = use_bn
    self.use_dp = use_dp
    # Layers are created here and applied in call(); creation order is kept
    # stable so weight ordering is deterministic.
    self.dense1 = keras.layers.Dense(32, activation='relu')
    self.dense2 = keras.layers.Dense(num_classes, activation='softmax')
    if self.use_dp:
      self.dp = keras.layers.Dropout(0.5)
    if self.use_bn:
      self.bn = keras.layers.BatchNormalization(axis=-1)

  def call(self, inputs):
    hidden = self.dense1(inputs)
    if self.use_dp:
      hidden = self.dp(hidden)
    if self.use_bn:
      hidden = self.bn(hidden)
    return self.dense2(hidden)
class MultiIOTestModel(keras.Model):
  """A two-input/two-output test model with optional dropout and batch norm."""

  def __init__(self, use_bn=False, use_dp=False, num_classes=(2, 3)):
    super(MultiIOTestModel, self).__init__(name='test_model')
    self.use_bn = use_bn
    self.use_dp = use_dp
    self.num_classes = num_classes
    self.dense1 = keras.layers.Dense(32, activation='relu')
    self.dense2 = keras.layers.Dense(num_classes[0], activation='softmax')
    self.dense3 = keras.layers.Dense(num_classes[1], activation='softmax')
    if use_dp:
      self.dp = keras.layers.Dropout(0.5)
    if use_bn:
      self.bn = keras.layers.BatchNormalization()

  def call(self, inputs):
    first, second = inputs
    # The first dense layer is shared between both input branches.
    first = self.dense1(first)
    second = self.dense1(second)
    if self.use_dp:
      first = self.dp(first)
    if self.use_bn:
      second = self.bn(second)
    return [self.dense2(first), self.dense3(second)]
class NestedTestModel1(keras.Model):
  """A model subclass nested inside a model subclass.
  """

  def __init__(self, num_classes=2):
    super(NestedTestModel1, self).__init__(name='nested_model_1')
    self.num_classes = num_classes
    self.dense1 = keras.layers.Dense(32, activation='relu')
    self.dense2 = keras.layers.Dense(num_classes, activation='relu')
    self.bn = keras.layers.BatchNormalization()
    # The inner subclassed model is tracked like any other layer.
    self.test_net = SimpleTestModel(num_classes=4, use_bn=True, use_dp=True)

  def call(self, inputs):
    hidden = self.dense1(inputs)
    hidden = self.bn(hidden)
    hidden = self.test_net(hidden)  # pylint: disable=not-callable
    return self.dense2(hidden)
def get_functional_graph_model(input_dim, num_classes):
  """Build a simple functional-API model (a.k.a. graph network)."""
  inputs = keras.Input(shape=(input_dim,))
  net = keras.layers.Dense(32, activation='relu')(inputs)
  net = keras.layers.BatchNormalization()(net)
  outputs = keras.layers.Dense(num_classes)(net)
  return keras.Model(inputs, outputs)
class NestedTestModel2(keras.Model):
  """A model subclass with a functional-API graph network inside.
  """

  def __init__(self, num_classes=2):
    super(NestedTestModel2, self).__init__(name='nested_model_2')
    self.num_classes = num_classes
    self.dense1 = keras.layers.Dense(32, activation='relu')
    self.dense2 = keras.layers.Dense(num_classes, activation='relu')
    # Fixed: the original read `self.bn = self.bn = ...` — a duplicated
    # assignment target (copy-paste typo); a single assignment suffices
    # and is behaviorally identical.
    self.bn = keras.layers.BatchNormalization()
    self.test_net = get_functional_graph_model(32, 4)

  def call(self, inputs):
    x = self.dense1(inputs)
    x = self.bn(x)
    x = self.test_net(x)
    return self.dense2(x)
def get_nested_model_3(input_dim, num_classes):
  """Build a functional-API model with a subclassed model inside.

  NOTE: this requires the inner subclass to implement
  `compute_output_shape` so the outer graph network can infer shapes.
  """
  inputs = keras.Input(shape=(input_dim,))
  net = keras.layers.Dense(32, activation='relu')(inputs)
  net = keras.layers.BatchNormalization()(net)

  class Inner(keras.Model):
    """Inner subclassed model: dense -> dense -> batch norm."""

    def __init__(self):
      super(Inner, self).__init__()
      self.dense1 = keras.layers.Dense(32, activation='relu')
      self.dense2 = keras.layers.Dense(5, activation='relu')
      self.bn = keras.layers.BatchNormalization()

    def call(self, inputs):
      out = self.dense1(inputs)
      out = self.dense2(out)
      return self.bn(out)

    def compute_output_shape(self, input_shape):
      return tensor_shape.TensorShape((input_shape[0], 5))

  test_model = Inner()
  net = test_model(net)  # pylint: disable=not-callable
  outputs = keras.layers.Dense(num_classes)(net)
  return keras.Model(inputs, outputs, name='nested_model_3')
class ModelSubclassingTest(test.TestCase):
  @test_util.run_in_graph_and_eager_modes()
  def test_single_io_workflow_with_np_arrays(self):
    """Single-IO subclassed model: compile/fit/evaluate on numpy data."""
    num_classes = 2
    num_samples = 100
    input_dim = 50

    model = SimpleTestModel(num_classes=num_classes,
                            use_dp=True,
                            use_bn=True)
    model.compile(loss='mse',
                  optimizer=RMSPropOptimizer(learning_rate=0.001),
                  metrics=['acc'])

    x = np.ones((num_samples, input_dim))
    y = np.zeros((num_samples, num_classes))
    model.fit(x, y, epochs=2, batch_size=32, verbose=0)
    _ = model.evaluate(x, y, verbose=0)
  @test_util.run_in_graph_and_eager_modes()
  def test_multi_io_workflow_with_np_arrays(self):
    """Multi-IO subclassed model: compile/fit/evaluate on numpy data."""
    num_classes = (2, 3)
    num_samples = 1000
    input_dim = 50

    model = MultiIOTestModel(num_classes=num_classes,
                             use_dp=True,
                             use_bn=True)
    model.compile(loss='mse',
                  optimizer=RMSPropOptimizer(learning_rate=0.001),
                  metrics=['acc'])

    x1 = np.ones((num_samples, input_dim))
    x2 = np.ones((num_samples, input_dim))
    y1 = np.zeros((num_samples, num_classes[0]))
    y2 = np.zeros((num_samples, num_classes[1]))
    model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
    _ = model.evaluate([x1, x2], [y1, y2], verbose=0)
  def test_single_io_workflow_with_tensors(self):
    """Single-IO model trained directly on symbolic tensors (graph mode)."""
    num_classes = 2
    num_samples = 10
    input_dim = 50

    with self.test_session():
      model = SimpleTestModel(num_classes=num_classes,
                              use_dp=True,
                              use_bn=True)
      model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))

      x = array_ops.ones((num_samples, input_dim))
      y = array_ops.zeros((num_samples, num_classes))

      # Tensor inputs require step counts rather than batch sizes.
      model.fit(x, y, epochs=2, steps_per_epoch=10, verbose=0)
      _ = model.evaluate(steps=10, verbose=0)
  def test_multi_io_workflow_with_tensors(self):
    """Multi-IO model trained directly on symbolic tensors (graph mode)."""
    num_classes = (2, 3)
    num_samples = 10
    input_dim = 50

    with self.test_session():
      model = MultiIOTestModel(num_classes=num_classes,
                               use_dp=True,
                               use_bn=True)
      model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))

      x1 = array_ops.ones((num_samples, input_dim))
      x2 = array_ops.ones((num_samples, input_dim))
      y1 = array_ops.zeros((num_samples, num_classes[0]))
      y2 = array_ops.zeros((num_samples, num_classes[1]))

      # Tensor inputs require step counts rather than batch sizes.
      model.fit([x1, x2], [y1, y2], epochs=2, steps_per_epoch=10, verbose=0)
      _ = model.evaluate(steps=10, verbose=0)
  def test_multi_io_workflow_with_numpy_arrays_and_custom_placeholders(self):
    """Inputs can be pre-bound to custom placeholders via _set_inputs."""
    num_classes = (2, 3)
    num_samples = 1000
    input_dim = 50

    with self.test_session():
      model = MultiIOTestModel(num_classes=num_classes,
                               use_dp=True,
                               use_bn=True)
      model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))

      x1 = np.ones((num_samples, input_dim))
      x2 = np.ones((num_samples, input_dim))
      y1 = np.zeros((num_samples, num_classes[0]))
      y2 = np.zeros((num_samples, num_classes[1]))

      # The second input is bound to an explicit placeholder before fitting.
      x2_placeholder = array_ops.placeholder(
          dtype='float32', shape=(None, input_dim))
      model._set_inputs([x1, x2_placeholder])

      model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
      _ = model.evaluate([x1, x2], [y1, y2], verbose=0)
  @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
  def test_attributes(self):
    """Model introspection attributes are populated once the model is built."""
    # layers, weights, trainable_weights, non_trainable_weights, inputs, outputs
    num_classes = (2, 3)
    num_samples = 100
    input_dim = 50

    model = MultiIOTestModel(num_classes=num_classes, use_bn=True)

    x1 = np.ones((num_samples, input_dim))
    x2 = np.ones((num_samples, input_dim))
    y1 = np.zeros((num_samples, num_classes[0]))
    y2 = np.zeros((num_samples, num_classes[1]))

    self.assertEqual(model.name, 'test_model')
    self.assertEqual(model.built, False)
    # Before the first call no weights have been created.
    self.assertEqual(len(model.weights), 0)
    model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
    model.train_on_batch([x1, x2], [y1, y2])
    self.assertEqual(model.built, True)
    self.assertEqual(len(model.layers), 4)
    self.assertEqual(len(model.weights), 10)
    self.assertEqual(len(model.trainable_weights), 8)
    self.assertEqual(len(model.non_trainable_weights), 2)
    self.assertEqual(len(model.inputs), 2)
    self.assertEqual(len(model.outputs), 2)
  @test_util.run_in_graph_and_eager_modes()
  def test_updates(self):
    # test that updates get run during training
    num_samples = 100
    input_dim = 50

    class BNNet(keras.Model):
      """A model consisting of a single batch-norm layer."""

      def __init__(self):
        super(BNNet, self).__init__()
        self.bn = keras.layers.BatchNormalization(beta_initializer='ones',
                                                  gamma_initializer='ones')

      def call(self, inputs):
        return self.bn(inputs)

    x = np.ones((num_samples, input_dim))
    y = np.ones((num_samples, input_dim))

    model = BNNet()
    model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
    y_ref = model.predict(x)

    model.train_on_batch(x, y)
    y_new = model.predict(x)
    # After one training step the predictions must have changed, proving
    # the layer's updates actually ran.
    self.assertGreater(np.sum(np.abs(y_ref - y_new)), 0.1)
  @test_util.run_in_graph_and_eager_modes()
  def test_training_and_inference_behavior(self):
    # test that dropout is applied in training and not inference
    num_samples = 100
    input_dim = 50

    class DPNet(keras.Model):
      """Dropout followed by a bias-free all-ones dense layer."""

      def __init__(self):
        super(DPNet, self).__init__()
        self.dp = keras.layers.Dropout(0.5)
        self.dense = keras.layers.Dense(1,
                                        use_bias=False,
                                        kernel_initializer='ones')

      def call(self, inputs):
        x = self.dp(inputs)
        return self.dense(x)

    model = DPNet()

    x = np.ones((num_samples, input_dim))
    y = model.predict(x)
    # Inference: dropout is inactive, so the all-ones kernel preserves sums.
    self.assertEqual(np.sum(y), np.sum(x))
    model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
    loss = model.train_on_batch(x, y)
    # Training: dropout is active, so predictions deviate from y.
    self.assertGreater(loss, 0.1)
  @test_util.run_in_graph_and_eager_modes()
  def test_training_methods(self):
    """fit and train_on_batch accept both list and dict input/target types."""
    num_classes = (2, 3)
    num_samples = 100
    input_dim = 50

    x1 = np.ones((num_samples, input_dim))
    x2 = np.ones((num_samples, input_dim))
    y1 = np.zeros((num_samples, num_classes[0]))
    y2 = np.zeros((num_samples, num_classes[1]))

    model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
    model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
    model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
    model.fit({'input_1': x1, 'input_2': x2},
              {'output_1': y1, 'output_2': y2},
              epochs=2, batch_size=32)
    model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0,
              validation_data=([x1, x2], [y1, y2]))

    model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
    model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
    model.train_on_batch([x1, x2], [y1, y2])
    model.train_on_batch({'input_1': x1, 'input_2': x2},
                         {'output_1': y1, 'output_2': y2})
  @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
  def test_inference_methods(self):
    """predict, evaluate, test_on_batch and predict_on_batch run on lists."""
    num_classes = (2, 3)
    num_samples = 100
    input_dim = 50

    x1 = np.ones((num_samples, input_dim))
    x2 = np.ones((num_samples, input_dim))
    y1 = np.zeros((num_samples, num_classes[0]))
    y2 = np.zeros((num_samples, num_classes[1]))

    model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
    model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
    model.evaluate([x1, x2], [y1, y2])
    model.test_on_batch([x1, x2], [y1, y2])

    # Fresh models below are never compiled: prediction must not need it.
    model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
    model.predict([x1, x2])

    model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
    model.predict_on_batch([x1, x2])
  @test_util.run_in_graph_and_eager_modes()
  def test_trainable_mutation(self):
    """Placeholder test; see the TODO below."""
    # test that you can change `trainable` on a model or layer, and that
    # it freezes the model state during training
    # TODO(fchollet): add test after we unify BN behavior in eager and symbolic.
    pass
  @test_util.run_in_graph_and_eager_modes()
  def test_saving(self):
    """Weights saved to HDF5 can be restored into a fresh, built model."""
    if h5py is None:
      return  # Skip test if models cannot be saved.

    num_classes = (2, 3)
    num_samples = 100
    input_dim = 50

    x1 = np.ones((num_samples, input_dim))
    x2 = np.ones((num_samples, input_dim))
    y1 = np.zeros((num_samples, num_classes[0]))
    y2 = np.zeros((num_samples, num_classes[1]))

    model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
    model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
    model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
    y_ref_1, y_ref_2 = model.predict([x1, x2])

    fd, fname = tempfile.mkstemp('.h5')
    model.save_weights(fname)

    model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
    # need to build the model before loading weights
    # (otherwise no weights to load)
    model._set_inputs([x1, x2])
    model.load_weights(fname)

    y1, y2 = model.predict([x1, x2])
    # Restored model must reproduce the original predictions.
    self.assertAllClose(y_ref_1, y1, atol=1e-5)
    self.assertAllClose(y_ref_2, y2, atol=1e-5)

    os.close(fd)
    os.remove(fname)
  @test_util.run_in_graph_and_eager_modes()
  def test_summary(self):
    """model.summary() reports parameter counts for subclassed models."""

    class ToString(object):
      # Collects summary output lines so their content can be asserted on.

      def __init__(self):
        self.contents = ''

      def __call__(self, msg):
        self.contents += msg + '\n'

    # Single-io
    model = SimpleTestModel(num_classes=4, use_bn=True, use_dp=True)
    model._set_inputs(np.ones((3, 4)))  # need to build model first
    print_fn = ToString()
    model.summary(print_fn=print_fn)
    self.assertTrue('Trainable params: 356' in print_fn.contents)

    # Multi-io
    model = MultiIOTestModel(num_classes=(5, 6), use_bn=True, use_dp=True)
    model._set_inputs([np.ones((3, 4)),
                       np.ones((3, 4))])  # need to build model first
    print_fn = ToString()
    model.summary(print_fn=print_fn)
    self.assertTrue('Trainable params: 587' in print_fn.contents)
@test_util.run_in_graph_and_eager_modes()
def test_subclass_nested_in_subclass(self):
    """Trains a subclassed model that owns another subclassed model."""
    num_classes = 2
    num_samples = 100
    input_dim = 50
    model = NestedTestModel1(num_classes=num_classes)
    model.compile(loss='mse',
                  optimizer=RMSPropOptimizer(learning_rate=0.001),
                  metrics=['acc'])
    x = np.ones((num_samples, input_dim))
    y = np.zeros((num_samples, num_classes))
    model.fit(x, y, epochs=2, batch_size=32, verbose=0)
    _ = model.evaluate(x, y, verbose=0)
    # The outer model contributes 8 weights (6 trainable, 2 non-trainable);
    # everything else comes from the nested `test_net` submodel.
    # NOTE(review): the 8/2/6 split must match NestedTestModel1's layers --
    # confirm against its definition if these assertions start failing.
    self.assertEqual(len(model.weights), 8 + len(model.test_net.weights))
    self.assertEqual(len(model.non_trainable_weights),
                     2 + len(model.test_net.non_trainable_weights))
    self.assertEqual(len(model.trainable_weights),
                     6 + len(model.test_net.trainable_weights))
@test_util.run_in_graph_and_eager_modes()
def test_graph_nested_in_subclass(self):
    """Trains a subclassed model that nests a graph-network submodel."""
    num_classes = 2
    num_samples = 100
    input_dim = 50
    model = NestedTestModel2(num_classes=num_classes)
    model.compile(loss='mse',
                  optimizer=RMSPropOptimizer(learning_rate=0.001),
                  metrics=['acc'])
    x = np.ones((num_samples, input_dim))
    y = np.zeros((num_samples, num_classes))
    model.fit(x, y, epochs=2, batch_size=32, verbose=0)
    _ = model.evaluate(x, y, verbose=0)
    # Same weight accounting as the subclass-in-subclass test: 8 outer
    # weights (6 trainable, 2 non-trainable) plus the nested net's weights.
    self.assertEqual(len(model.weights), 8 + len(model.test_net.weights))
    self.assertEqual(len(model.non_trainable_weights),
                     2 + len(model.test_net.non_trainable_weights))
    self.assertEqual(len(model.trainable_weights),
                     6 + len(model.test_net.trainable_weights))
@test_util.run_in_graph_and_eager_modes()
def test_subclass_nested_in_graph(self):
    """Trains a graph network that nests a subclassed model."""
    num_classes = 2
    num_samples = 100
    input_dim = 50
    model = get_nested_model_3(input_dim=input_dim, num_classes=num_classes)
    model.compile(loss='mse',
                  optimizer=RMSPropOptimizer(learning_rate=0.001),
                  metrics=['acc'])
    x = np.ones((num_samples, input_dim))
    y = np.zeros((num_samples, num_classes))
    model.fit(x, y, epochs=2, batch_size=32, verbose=0)
    _ = model.evaluate(x, y, verbose=0)
    # Fixed totals here (rather than outer + inner as in the sibling tests)
    # because the whole network comes from get_nested_model_3.
    self.assertEqual(len(model.weights), 16)
    self.assertEqual(
        len(model.non_trainable_weights), 4)
    self.assertEqual(len(model.trainable_weights), 12)
@test_util.run_in_graph_and_eager_modes()
def test_support_for_manual_training_arg(self):
    # In most cases, the `training` argument is left unspecified, in which
    # case it defaults to value corresponding to the Model method being used
    # (fit -> True, predict -> False, etc).
    # If the user writes their model `call` method to take
    # an explicit `training` argument, we must check that the correct value
    # is being passed to the model for each method call.

    class DPNet(keras.Model):
        """Dropout followed by a ones-initialized, bias-free Dense(1)."""

        def __init__(self):
            super(DPNet, self).__init__()
            self.dp = keras.layers.Dropout(0.5)
            self.dense = keras.layers.Dense(1,
                                            use_bias=False,
                                            kernel_initializer='ones')

        def call(self, inputs, training=False):
            # Forward `training` so Dropout is active only while training.
            x = self.dp(inputs, training=training)
            return self.dense(x)

    model = DPNet()
    x = np.ones((10, 10))
    y = model.predict(x)
    # predict() must pass training=False: dropout is a no-op, and with a
    # ones kernel the output sum equals the input sum exactly.
    self.assertEqual(np.sum(y), np.sum(x))
    model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
    loss = model.train_on_batch(x, y)
    # train_on_batch() must pass training=True: dropout perturbs the output,
    # so the MSE against the no-dropout targets is clearly non-zero.
    self.assertGreater(loss, 0.1)
if __name__ == '__main__':
    # Run every test case in this module via the TensorFlow test runner.
    test.main()
| {
"content_hash": "39c93c36afafd957f4e4e30db435a293",
"timestamp": "",
"source": "github",
"line_count": 564,
"max_line_length": 80,
"avg_line_length": 33.765957446808514,
"alnum_prop": 0.6289119932787229,
"repo_name": "Xeralux/tensorflow",
"id": "58b144365be6cd8ea5b2ea82e275eacdee6b6c84",
"size": "19733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/_impl/keras/model_subclassing_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "340972"
},
{
"name": "C++",
"bytes": "39479562"
},
{
"name": "CMake",
"bytes": "194702"
},
{
"name": "Go",
"bytes": "1046987"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "567239"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "33675501"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "425916"
}
],
"symlink_target": ""
} |
""" Sample command-line program for listing Google Dataproc Clusters"""
import argparse
import os
import time

from google.cloud import storage
import googleapiclient.discovery
# Currently only the "global" region is supported
REGION = 'global'
DEFAULT_FILENAME = 'pyspark_sort.py'
def get_default_pyspark_file():
    """Open the bundled default PySpark file; return (file object, filename)."""
    here = os.path.dirname(os.path.abspath(__file__))
    handle = open(os.path.join(here, DEFAULT_FILENAME), 'r')
    return handle, DEFAULT_FILENAME
def get_pyspark_file(filename):
    """Open *filename* for reading; return (file object, its base name)."""
    handle = open(filename, 'r')
    return handle, os.path.basename(filename)
def upload_pyspark_file(project_id, bucket_name, filename, file):
    """Uploads the PySpark file in this directory to the configured
    input bucket."""
    print('Uploading pyspark file to GCS')
    gcs = storage.Client(project=project_id)
    target_bucket = gcs.get_bucket(bucket_name)
    target_bucket.blob(filename).upload_from_file(file)
def download_output(project_id, cluster_id, output_bucket, job_id):
    """Downloads the output file from Cloud Storage and returns it as a
    string."""
    print('Downloading output file')
    gcs = storage.Client(project=project_id)
    bucket = gcs.get_bucket(output_bucket)
    blob_name = (
        'google-cloud-dataproc-metainfo/{}/jobs/{}/driveroutput.000000000'
        .format(cluster_id, job_id))
    return bucket.blob(blob_name).download_as_string()
# [START create_cluster]
def create_cluster(dataproc, project, cluster_name, zone):
    """Request creation of a minimal Dataproc cluster in *zone*.

    Returns the create-operation resource from the API.
    """
    print('Creating cluster.')
    zone_uri = (
        'https://www.googleapis.com/compute/v1/projects/{}/zones/{}'.format(
            project, zone))
    body = {
        'projectId': project,
        'clusterName': cluster_name,
        'config': {
            'gceClusterConfig': {
                'zoneUri': zone_uri
            }
        }
    }
    return dataproc.projects().regions().clusters().create(
        projectId=project,
        region=REGION,
        body=body).execute()
# [END create_cluster]
def wait_for_cluster_creation(dataproc, project_id, cluster_name, zone):
    """Poll the Dataproc API until *cluster_name* reaches RUNNING.

    Args:
        dataproc: Dataproc API client.
        project_id: GCP project ID.
        cluster_name: name of the cluster to wait for.
        zone: unused; kept for interface compatibility with callers.

    Raises:
        Exception: if the cluster enters the ERROR state.
    """
    print('Waiting for cluster creation')
    while True:
        result = dataproc.projects().regions().clusters().list(
            projectId=project_id,
            region=REGION).execute()
        # The 'clusters' key may be absent while the API catches up; also
        # the cluster itself may not be listed yet -- keep polling.
        cluster = next(
            (c for c in result.get('clusters', [])
             if c['clusterName'] == cluster_name),
            None)
        if cluster is not None:
            state = cluster['status']['state']
            if state == 'ERROR':
                # Previously read result['status'], but the list response has
                # no 'status' key -- the status lives on the cluster resource.
                raise Exception(cluster['status'].get('details', state))
            if state == 'RUNNING':
                print("Cluster created.")
                break
        # Back off between polls instead of hammering the API in a busy loop.
        time.sleep(5)
# [START list_clusters_with_detail]
def list_clusters_with_details(dataproc, project):
    """Print the name and state of every cluster; return the list response."""
    response = dataproc.projects().regions().clusters().list(
        projectId=project,
        region=REGION).execute()
    for cluster in response['clusters']:
        print("{} - {}"
              .format(cluster['clusterName'], cluster['status']['state']))
    return response
# [END list_clusters_with_detail]
def get_cluster_id_by_name(cluster_list, cluster_name):
    """Return (clusterUuid, configBucket) of the cluster named *cluster_name*.

    Args:
        cluster_list: list of cluster resource dicts (from clusters.list).
        cluster_name: name of the cluster to look up.

    Raises:
        ValueError: if no cluster with that name exists (clearer than the
            IndexError the previous `[...][0]` indexing produced).
    """
    cluster = next(
        (c for c in cluster_list if c['clusterName'] == cluster_name), None)
    if cluster is None:
        raise ValueError('Cluster {!r} not found'.format(cluster_name))
    return cluster['clusterUuid'], cluster['config']['configBucket']
# [START submit_pyspark_job]
def submit_pyspark_job(dataproc, project, cluster_name, bucket_name, filename):
    """Submits the Pyspark job to the cluster, assuming `filename` has
    already been uploaded to `bucket_name`.

    Returns the submitted job's ID.
    """
    body = {
        'projectId': project,
        'job': {
            'placement': {
                'clusterName': cluster_name
            },
            'pysparkJob': {
                'mainPythonFileUri': 'gs://{}/{}'.format(bucket_name, filename)
            }
        }
    }
    response = dataproc.projects().regions().jobs().submit(
        projectId=project,
        region=REGION,
        body=body).execute()
    job_id = response['reference']['jobId']
    print('Submitted job ID {}'.format(job_id))
    return job_id
# [END submit_pyspark_job]
# [START delete]
def delete_cluster(dataproc, project, cluster):
    """Delete *cluster* and return the delete-operation resource."""
    print('Tearing down cluster')
    return dataproc.projects().regions().clusters().delete(
        projectId=project,
        region=REGION,
        clusterName=cluster).execute()
# [END delete]
# [START wait]
def wait_for_job(dataproc, project, job_id):
    """Poll *job_id* until it finishes; return the final job resource.

    Raises:
        Exception: if the job ends in the ERROR state.
    """
    print('Waiting for job to finish...')
    while True:
        result = dataproc.projects().regions().jobs().get(
            projectId=project,
            region=REGION,
            jobId=job_id).execute()
        state = result['status']['state']
        if state == 'ERROR':
            raise Exception(result['status']['details'])
        elif state == 'DONE':
            print('Job finished')
            return result
        # Sleep between polls instead of busy-waiting on the API.
        time.sleep(5)
# [END wait]
# [START get_client]
def get_client():
    """Builds an http client authenticated with the service account
    credentials."""
    return googleapiclient.discovery.build('dataproc', 'v1')
# [END get_client]
def main(project_id, zone, cluster_name, bucket_name, pyspark_file=None):
    """End-to-end sample: create a cluster, run a PySpark job, fetch output.

    Returns the job's driver output as a string. The cluster teardown and
    the local file close both always run, even when an earlier step fails.
    """
    dataproc = get_client()
    # Open the job file before entering the try block so `spark_file` is
    # always bound when the cleanup code runs. Previously, a failure before
    # this assignment made the finally clause raise NameError on
    # `spark_file.close()`.
    if pyspark_file:
        spark_file, spark_filename = get_pyspark_file(pyspark_file)
    else:
        spark_file, spark_filename = get_default_pyspark_file()
    try:
        create_cluster(dataproc, project_id, cluster_name, zone)
        wait_for_cluster_creation(dataproc, project_id, cluster_name, zone)
        upload_pyspark_file(project_id, bucket_name,
                            spark_filename, spark_file)
        cluster_list = list_clusters_with_details(
            dataproc, project_id)['clusters']
        (cluster_id, output_bucket) = (
            get_cluster_id_by_name(cluster_list, cluster_name))
        # [START call_submit_pyspark_job]
        job_id = submit_pyspark_job(
            dataproc, project_id, cluster_name, bucket_name, spark_filename)
        # [END call_submit_pyspark_job]
        wait_for_job(dataproc, project_id, job_id)
        output = download_output(project_id, cluster_id, output_bucket, job_id)
        print('Received job output {}'.format(output))
        return output
    finally:
        try:
            delete_cluster(dataproc, project_id, cluster_name)
        finally:
            # Close the file even if the cluster teardown itself raised;
            # previously a delete failure leaked the file handle.
            spark_file.close()
if __name__ == '__main__':
    # Command-line entry point: parse required GCP arguments and run the
    # full create-cluster / submit-job / download-output workflow.
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        '--project_id', help='Project ID you want to access.', required=True),
    # NOTE(review): the trailing comma above turns this statement into a
    # 1-tuple; harmless, but it should be removed.
    parser.add_argument(
        '--zone', help='Region to create clusters in', required=True)
    parser.add_argument(
        '--cluster_name', help='Name of the cluster to create', required=True)
    parser.add_argument(
        '--gcs_bucket', help='Bucket to upload Pyspark file to', required=True)
    parser.add_argument(
        '--pyspark_file', help='Pyspark filename. Defaults to pyspark_sort.py')
    args = parser.parse_args()
    main(
        args.project_id, args.zone,
        args.cluster_name, args.gcs_bucket, args.pyspark_file)
| {
"content_hash": "7b4a4253325fcfe1375996671fca61a8",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 79,
"avg_line_length": 33.35294117647059,
"alnum_prop": 0.6258309591642925,
"repo_name": "JavaRabbit/CS496_capstone",
"id": "f165e145b67dea62af8cdf0105dc80768b4db8e0",
"size": "7934",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "dataproc/create_cluster_and_submit_job.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36001"
},
{
"name": "HTML",
"bytes": "178761"
},
{
"name": "JavaScript",
"bytes": "51998"
},
{
"name": "Makefile",
"bytes": "881"
},
{
"name": "Protocol Buffer",
"bytes": "10818"
},
{
"name": "Python",
"bytes": "3450332"
},
{
"name": "Shell",
"bytes": "9121"
}
],
"symlink_target": ""
} |
__author__ = 'Bohdan Mushkevych'
import sys
import types
from six import class_types
from synergy.conf import context
def get_class(kls):
    """Resolve a dotted path to a starter function or a class + starter method.

    :param kls - string of fully identified starter function or starter method path
        for instance:
        - workers.abstract_worker.AbstractWorker.start
        - workers.example_script_worker.main
    :return tuple (type, object, starter)
        for instance:
        - (FunctionType, <function_main>, None)
        - (type, <Class_...>, 'start')
    """
    parts = kls.split('.')
    try:
        # First, try to import module hosting starter function
        fqn_module = '.'.join(parts[:-1])
        m = __import__(fqn_module)
    except ImportError:
        # Alternatively, try to import module hosting Class with a starter method
        fqn_module = '.'.join(parts[:-2])
        m = __import__(fqn_module)
    t = None
    starter = None
    for i in range(1, len(parts)):
        comp = parts[i]
        # Provisional value; replaced below once `m` resolves to a
        # class or function.
        starter = parts[i:]
        # Walk one attribute deeper along the dotted path.
        m = getattr(m, comp)
        if isinstance(m, class_types):
            t = type
            # starter stays None when the path ends at the class itself.
            starter = None if len(parts[i:]) == 1 else '.'.join(parts[i + 1:])
            break
        if isinstance(m, types.FunctionType):
            t = types.FunctionType
            starter = None
            break
    return t, m, starter
def start_by_process_name(process_name, *args):
    """
    Function starts the process by:
    1. retrieving its fully specified path name
    2. if the path name ends with starter method - then creates an instance of the wrapping class
       and calls <code>starter(*args)</code> method on it
    3. if the path name ends with starter function - then retrieves its module
       and calls <code>starter(*args)</code> function on it
    """
    sys.stdout.write('INFO: Starter path {0} \n'.format(context.process_context[process_name].classname))
    t, m, starter = get_class(context.process_context[process_name].classname)
    if isinstance(m, class_types):
        # Class path: instantiate with the process name, then invoke the
        # named starter method on the instance.
        sys.stdout.write('INFO: Starting process by calling starter method {0} \n'.format(starter))
        instance = m(process_name)
        method = getattr(instance, starter)
        method(*args)
    elif isinstance(m, types.FunctionType):
        # Function path: call the module-level function directly.
        sys.stdout.write('INFO: Starting module.\n')
        _function = m
        _function(*args)
    else:
        raise ValueError('Improper starter path {0}'.format(context.process_context[process_name].classname))
if __name__ == '__main__':
    # CLI entry point: first argument is the process name, the rest are
    # forwarded to the starter.
    if len(sys.argv) < 2:
        sys.stderr.write('ERROR: no Process Name specified to start \n')
    elif len(sys.argv) == 2:
        process_name = sys.argv[1]
        # No extra arguments: the starter receives a bare None.
        start_by_process_name(process_name, None)
    else:
        process_name = sys.argv[1]
        args = sys.argv[2:]
        # NOTE(review): here the starter receives a list while the branch
        # above passes a scalar None -- confirm callees accept both shapes.
        start_by_process_name(process_name, args)
| {
"content_hash": "c38949b8048389803523b9a707573c0c",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 109,
"avg_line_length": 33.87951807228916,
"alnum_prop": 0.6173541963015647,
"repo_name": "mushkevych/scheduler",
"id": "3b5e27d87ee802ff58ec0a27aeece18cccd7703b",
"size": "2812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "process_starter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "44059"
},
{
"name": "HTML",
"bytes": "48705"
},
{
"name": "JavaScript",
"bytes": "85240"
},
{
"name": "Python",
"bytes": "593827"
},
{
"name": "Shell",
"bytes": "4570"
}
],
"symlink_target": ""
} |
from workspacemanager.utils import *
import sh
"""
This file will install all req which can be find in the current prject directory
"""
# il y avait installReqs
if __name__ == '__main__':
    # Install all requirements found in the current project directory.
    # `installReqs` comes from the wildcard import of workspacemanager.utils.
    installReqs()
| {
"content_hash": "0e8d46fd61939e3a22f64d443d90a2df",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 84,
"avg_line_length": 18.083333333333332,
"alnum_prop": 0.6682027649769585,
"repo_name": "hayj/WorkspaceManager",
"id": "8d9babb3717c52e3c3e3c1e14b966fcc532d2f2a",
"size": "234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workspacemanager/_trash/pew-install-current-req.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74947"
},
{
"name": "Shell",
"bytes": "4914"
}
],
"symlink_target": ""
} |
"""Support for Netgear LTE sensors."""
import attr
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_HOST, CONF_SENSORS
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from ..netgear_lte import DATA_KEY
# This platform requires the netgear_lte component to be set up first.
DEPENDENCIES = ['netgear_lte']

# Supported sensor type identifiers, referenced by the config schema below.
SENSOR_SMS = 'sms'
SENSOR_USAGE = 'usage'

# Platform configuration: an optional modem host plus a list of sensor
# types, each restricted to the two supported kinds.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_HOST): cv.string,
    vol.Required(CONF_SENSORS): vol.All(
        cv.ensure_list, [vol.In([SENSOR_SMS, SENSOR_USAGE])]),
})
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info):
    """Set up Netgear LTE sensor devices."""
    modem_data = hass.data[DATA_KEY].get_modem_data(config)
    if not modem_data:
        raise PlatformNotReady

    # Map each configured sensor type to its entity class; the platform
    # schema guarantees only these two types can appear in the config.
    entity_classes = {SENSOR_SMS: SMSSensor, SENSOR_USAGE: UsageSensor}
    sensors = [entity_classes[sensor_type](modem_data, sensor_type)
               for sensor_type in config[CONF_SENSORS]
               if sensor_type in entity_classes]

    async_add_entities(sensors, True)
@attr.s
class LTESensor(Entity):
    """Base LTE sensor entity."""

    # Shared modem-state wrapper; provides async_update(), serial_number,
    # and the values the concrete sensors expose.
    modem_data = attr.ib()
    # One of SENSOR_SMS / SENSOR_USAGE; used to build the unique ID.
    sensor_type = attr.ib()

    async def async_update(self):
        """Update state."""
        await self.modem_data.async_update()

    @property
    def unique_id(self):
        """Return a unique ID like 'usage_5TG365AB0078V'."""
        return "{}_{}".format(self.sensor_type, self.modem_data.serial_number)
class SMSSensor(LTESensor):
    """Sensor reporting the number of unread SMS messages on the modem."""

    @property
    def state(self):
        """Return the current unread-message count."""
        return self.modem_data.unread_count

    @property
    def name(self):
        """Return the name of the sensor."""
        return "Netgear LTE SMS"
class UsageSensor(LTESensor):
    """Sensor reporting cumulative data usage in mebibytes."""

    @property
    def name(self):
        """Return the name of the sensor."""
        return "Netgear LTE usage"

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return "MiB"

    @property
    def state(self):
        """Return usage in MiB rounded to one decimal, or None if unknown."""
        usage_bytes = self.modem_data.usage
        if usage_bytes is None:
            return None
        return round(usage_bytes / 1024**2, 1)
| {
"content_hash": "47e1d56063143f8af10d9b28f4fdd121",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 78,
"avg_line_length": 26.204301075268816,
"alnum_prop": 0.6512105047189167,
"repo_name": "HydrelioxGitHub/home-assistant",
"id": "339fa678d61cf41512dffcb60e42f6cf0adb2d0b",
"size": "2437",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/netgear_lte/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "14330009"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
} |
class Solution(object):
    """LeetCode 40 -- Combination Sum II, via DP over reachable sums."""

    def combinationSum2(self, candidates, target):
        """Return all unique combinations of candidates summing to target.

        Each candidate may be used at most once; duplicate combinations are
        eliminated by storing sorted tuples in per-sum sets.

        Args:
            candidates: list of positive ints (may contain duplicates).
            target: the desired sum (non-negative int).

        Returns:
            A list of lists, each summing to `target`; order is unspecified.
        """
        candidates.sort()
        # table[s] holds every distinct ascending tuple of candidates with sum s.
        table = [set() for _ in range(target + 1)]  # range: Py3 (was xrange)
        for num in candidates:
            if num > target:
                break  # candidates are sorted, so nothing further can fit
            # Iterate downwards so each candidate extends only combinations
            # built from earlier candidates (use-at-most-once semantics).
            for j in range(target - num, 0, -1):
                table[num + j] |= {combo + (num,) for combo in table[j]}
            table[num].add((num,))
        # `map` is lazy on Python 3; materialize an actual list of lists.
        return [list(combo) for combo in table[target]]
# Manual smoke check for Solution.combinationSum2 (classic LeetCode 40
# example; expected sum-8 combinations: [1,1,6], [1,2,5], [1,7], [2,6]).
candidates = [10,1,2,7,6,1,5]
target = 8
s = Solution()
print(s.combinationSum2(candidates, target)) | {
"content_hash": "db4be9b4eda25b2deb3e3d2d9fbabc08",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 68,
"avg_line_length": 33.125,
"alnum_prop": 0.5509433962264151,
"repo_name": "seanxwzhang/LeetCode",
"id": "7227fa1b7a34cb402fe8c85ec02b9d69a4a8f140",
"size": "1022",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "040 Combination Sum II/solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "74546"
},
{
"name": "CMake",
"bytes": "3974"
},
{
"name": "Go",
"bytes": "3619"
},
{
"name": "Java",
"bytes": "4530"
},
{
"name": "JavaScript",
"bytes": "20929"
},
{
"name": "Makefile",
"bytes": "75752"
},
{
"name": "Python",
"bytes": "112650"
},
{
"name": "SQLPL",
"bytes": "653"
},
{
"name": "Shell",
"bytes": "809"
}
],
"symlink_target": ""
} |
import unittest
from mock import patch
from securionpay import blacklist
@patch('securionpay.resource.Resource.request')
class TestBlacklist(unittest.TestCase):
    """Verifies each blacklist helper issues the expected HTTP request."""

    def test_create(self, request):
        params = {'some_param': 'some_value'}
        blacklist.create(params)
        request.assert_called_once_with('POST', '/blacklist', params)

    def test_get(self, request):
        blacklist.get("blackId")
        request.assert_called_once_with('GET', '/blacklist/blackId', None)

    def test_delete(self, request):
        blacklist.delete("blackId")
        request.assert_called_once_with('DELETE', '/blacklist/blackId', None)

    def test_list(self, request):
        params = {'some_param': 'some_value'}
        blacklist.list(params)
        request.assert_called_once_with('GET', '/blacklist', params)
| {
"content_hash": "83e51edf8a2d9bfb09ef1691cd399f89",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 69,
"avg_line_length": 37.93333333333333,
"alnum_prop": 0.4833040421792619,
"repo_name": "wesolutki/securionpay-python",
"id": "1ace870cc3283c876f5fa75922c6dd7f2a64a053",
"size": "1138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_blacklist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33598"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.