""" Sample pyspark script to be uploaded to Cloud Storage and run on
Cloud Dataproc.
Note this file is not intended to be run directly, but run inside a PySpark
environment.
"""
# [START dataproc_pyspark_sort]
import pyspark
sc = pyspark.SparkContext()
rdd = sc.parallelize(["Hello,", "world!", "dog", "elephant", "panther"])
words = sorted(rdd.collect())
print(words)
# [END dataproc_pyspark_sort]
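# A distributed variant (a sketch, not part of the original sample): sort on
# the executors with sortBy and only then collect to the driver.
words_distributed = rdd.sortBy(lambda word: word).collect()
print(words_distributed)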
| {
"content_hash": "76fd6212dfa64c611db5963d8efd6d05",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 75,
"avg_line_length": 26.8,
"alnum_prop": 0.7263681592039801,
"repo_name": "GoogleCloudPlatform/python-docs-samples",
"id": "cecba896164a3b8b49421548096bc0254fb72724",
"size": "999",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dataproc/snippets/pyspark_sort.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8008"
},
{
"name": "Dockerfile",
"bytes": "62031"
},
{
"name": "HTML",
"bytes": "69878"
},
{
"name": "JavaScript",
"bytes": "26494"
},
{
"name": "Jinja",
"bytes": "1892"
},
{
"name": "Jupyter Notebook",
"bytes": "47951698"
},
{
"name": "Makefile",
"bytes": "932"
},
{
"name": "Procfile",
"bytes": "138"
},
{
"name": "PureBasic",
"bytes": "11115"
},
{
"name": "Python",
"bytes": "5323502"
},
{
"name": "Shell",
"bytes": "78261"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
def set_null_to_blank(queryset, fields):
for element in queryset:
for field in fields:
value = getattr(element, field)
if value is None:
setattr(element, field, '')
element.save()
def run_data_migration(apps, schema_editor):
AdditionalField = apps.get_model('accounts', 'AdditionalField')
set_null_to_blank(AdditionalField.objects.all(), [
'text_lang1',
'text_lang2',
'text_lang3',
'text_lang4',
'text_lang5',
'help_lang1',
'help_lang2',
'help_lang3',
'help_lang4',
'help_lang5',
])
class Migration(migrations.Migration):
dependencies = [
('accounts', '0014_add_language_fields'),
]
operations = [
migrations.RunPython(run_data_migration),
]
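def set_null_to_blank_bulk(model, fields):
    # Hypothetical bulk variant (not wired into this migration): one UPDATE
    # query per field instead of a save() per row, trading per-object
    # signals for speed.
    for field in fields:
        model.objects.filter(**{field: None}).update(**{field: ''})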
| {
"content_hash": "404047757d29ac0023d446a76b7672c1",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 67,
"avg_line_length": 22.725,
"alnum_prop": 0.5786578657865786,
"repo_name": "rdmorganiser/rdmo",
"id": "53d11bd7991817f20c073ea0767cd6db43ef8d36",
"size": "983",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rdmo/accounts/migrations/0015_data_migration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "426256"
},
{
"name": "JavaScript",
"bytes": "110821"
},
{
"name": "Python",
"bytes": "1265092"
},
{
"name": "SCSS",
"bytes": "20373"
}
],
"symlink_target": ""
} |
from django.shortcuts import HttpResponse, redirect
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate
from django.middleware import csrf
def index(request):
if 'csrf_token' not in request.GET:
added_query_string = '?csrf_token=%s' if len(request.GET) == 0 else '&csrf_token=%s'
return redirect(request.get_full_path() + added_query_string % (csrf._get_new_csrf_key()))
if request.user.is_active is False:
if not request.session.exists(request.session.session_key):
request.session.create()
user = User.objects.create_user(
username=request.session._get_session_key(),
password='Anniversary110yr'
)
user = authenticate(username=request.session._get_session_key(), password="Anniversary110yr")
login(request, user)
with open('templates/index.html', 'rb') as f:
s = f.read()
return HttpResponse(s)
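# Both views share the same CSRF + anonymous-session bootstrap; a hypothetical
# helper (an illustration only, not part of the original module) could factor
# out the duplicated template serving:
def _read_template(template_path):
    """Return the raw bytes of a template file."""
    with open(template_path, 'rb') as f:
        return f.read()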
def articles(request):
if 'csrf_token' not in request.GET:
added_query_string = '?csrf_token=%s' if len(request.GET) == 0 else '&csrf_token=%s'
return redirect(request.get_full_path() + added_query_string % (csrf._get_new_csrf_key()))
if request.user.is_active is False:
if not request.session.exists(request.session.session_key):
request.session.create()
user = User.objects.create_user(
username=request.session._get_session_key(),
password='Anniversary110yr'
)
user = authenticate(username=request.session._get_session_key(), password="Anniversary110yr")
login(request, user)
with open('templates/articles.html', 'rb') as f:
s = f.read()
    return HttpResponse(s)
| {
"content_hash": "d4c8c22e49f834f52c42270864f52740",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 101,
"avg_line_length": 41.476190476190474,
"alnum_prop": 0.6555683122847302,
"repo_name": "STU-Fudan/Chitoge",
"id": "bf8181b80ba0d5034d4d972a6d05018ba431813e",
"size": "1742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "API/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "26393"
},
{
"name": "HTML",
"bytes": "4922"
},
{
"name": "JavaScript",
"bytes": "9018"
},
{
"name": "Python",
"bytes": "10651"
}
],
"symlink_target": ""
} |
from si7021 import *
from bookshelf import *
from adjustThermostat import *
from setLightOn import *
from setLightOff import *
try:
print ("Execute getTempC")
print (getTempC())
except (IOError):
print ("Failure to get Temperature")
try:
print ("Execute getHumidity")
print (getHumidity())
except (IOError):
print ("Failure to get Humidity")
try:
print ("Get from Bookshelf")
print (takeOffShelf("targetTemp"))
except (RuntimeError):
print ("Failure to get value for key")
try:
print ("Check Thermostat High")
print (adjustThermostat(75))
except (RuntimeError):
print ("Failure adjusting thermostat: High")
try:
print ("Check Thermostat Low")
print (adjustThermostat(15))
except (RuntimeError):
print ("Failure adjusting thermostat: Low")
try:
print ("Turn Light On")
print (setLightOn())
except (RuntimeError):
print ("Failure to turn light ON")
try:
print ("Turn Light OFF")
print (setLightOff())
except (RuntimeError):
print ("Failure to turn light OFF")
| {
"content_hash": "285d9d5fc7c9d91321718a3bcd817488",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 48,
"avg_line_length": 21.918367346938776,
"alnum_prop": 0.6675977653631285,
"repo_name": "webbhm/OpenAg-MVP",
"id": "f77fd5948ab7c15c33874ae4ff93b817956b4ddd",
"size": "1199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/testScript.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8940"
},
{
"name": "Shell",
"bytes": "3578"
}
],
"symlink_target": ""
} |
from Isis.expressions import numpy_parse_eval_expr
from Isis.iboost.general import sendErrorMsg, sendWarningMsg
from Isis.iboost.rootio import treeToArray, arrayToTree
from Isis.iroot import ROOT as rt
from Isis.utils import string_list_filter
from Isis.statistics import freq_eff
import collections, math, numpy, pandas
class DataMgr( pandas.DataFrame ):
'''
    Class to manage data, especially designed to work together
    with ROOT files and trees
'''
def __init__( self, name = '<unnamed>', **kwargs ):
'''
Constructor mainly inherited from pandas.DataFrame. In
addition, a name is stored.
'''
pandas.DataFrame.__init__(self, **kwargs)
self.name = name
def __add__( self, other ):
'''
Allows merging two objects of this class
'''
new_name = self.name + '__' + other.name
no_booked = set(self.columns).symmetric_difference(other.columns)
if no_booked:
            sendWarningMsg('{} => The following variables are not being '\
                           'booked: {}'.format(new_name, no_booked))
v = list(set(self.columns).intersection(other.columns))
m = pandas.DataFrame.__add__(self[v], other[v])
mgr = DataMgr(data = m, name = new_name)
return mgr
def __iadd__( self, other ):
        '''
        Allows adding another manager's variables to this class
        '''
        if len(self):
            self = self + other
        else:
            self = other.copy()
        return self
def copy( self, name = None, **kwargs ):
'''
Returns a copy of this class
'''
name = name or self.name + '_copy'
        m = pandas.DataFrame.copy(self, **kwargs)
        return DataMgr(data = m, name = name)
def evaluate( self, expr, mathmod = None ):
'''
Evaluate the given expression
'''
mathmod = mathmod or numpy
pexpr = numpy_parse_eval_expr(expr, mathmod)
rc = list(reversed(sorted(self.columns)))
for i, v in enumerate(rc):
pexpr = pexpr.replace(v, 'self["{{{}}}"]'.format(i))
pexpr = pexpr.format(*rc)
try:
return eval(pexpr)
except SyntaxError:
sendErrorMsg('Unable to evaluate expression "{}", which '\
'transforms into "{}"'.format(expr, pexpr))
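    # Example (a sketch): with columns "px" and "py", evaluate('px > 2*py')
    # effectively runs eval('self["px"] > 2*self["py"]') and returns the
    # resulting boolean Series.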
def cutidxs( self, cut, mathmod = None ):
'''
        Return the indices of the events that satisfy the given
        cuts
'''
if cut.strip():
mask = self.evaluate(cut, mathmod)
return numpy.nonzero(mask)[0]
else:
return numpy.array(self.index)
def eff( self, cut, mathmod = None, **kwargs ):
'''
Calculate the efficiency for a given cut. For more
information about the returned values, see "freq_eff".
'''
k = self.entries(cut, mathmod)
n = len(self)
return freq_eff(n, k, **kwargs)
def entries( self, cuts = '', mathmod = None ):
'''
        Gets the number of entries in the class. If a cut
        selection is given, the number of events satisfying
        those cuts is returned.
'''
if cuts:
return numpy.count_nonzero(self.evaluate(cuts, mathmod))
else:
return len(self)
def make_var( self, name, arg, function = False, mathmod = None ):
'''
        Makes another variable using those in the class. There
        are two ways to do it. The first consists in specifying
        the new variable name, the names of the variables used
        by the function (ordered in a list) and the function
        itself; the new variable is computed by passing the
        column values to the function as positional arguments.
        The second consists in specifying only the name of the
        variable and an expression in "arg"; the values are
        then computed for each entry by evaluating the
        expression.
'''
if function:
v = function(*self.as_matrix(arg).T)
else:
v = self.evaluate(arg, mathmod)
self[name] = v
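    # Example (a sketch): both calls below would build the same variable,
    #   mgr.make_var('p', ['px', 'py'], function=numpy.hypot)
    #   mgr.make_var('p', 'sqrt(px**2 + py**2)')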
def nvars( self ):
'''
Gets the number of variables in the class
'''
return len(self.columns)
def display( self, variables = None, cuts = '', mathmod = None, evts = None, prec = 3 ):
'''
        Prints the information of the class as well as the values
        for the first 20 events. If "evts" is given, that number
        of events is shown instead. If "cuts" is specified, only
        the events that satisfy the given cuts are shown. If
        "prec" is given, the number of decimal places is set to
        this value.
'''
variables = variables or []
if not self.nvars():
sendErrorMsg('{} => No variables booked in this '\
'manager'.format(self.name))
return
# If no variables are specified all are printed
if variables == []:
variables = list(self.columns)
variables.sort()
# Prints the name of the manager
if self.name:
ln = 3*len(self.name)
out = '\n{0}\n{1:^{2}}\n{0}'
sep = ln*'*'
print out.format(sep, self.name, ln)
# Prints the variables. The variable "maxsize" is the
# maximum size of the numbers in the print
maxsize = 8 + prec
shortvars = []
for var in variables:
if len(var) > maxsize:
var = var[:maxsize] + '*'
shortvars.append('{0:^{1}}'.format(var, maxsize))
print '{}: {}'.format('Variables', ', '.join(variables))
vout = '| {} |'.format(' | '.join(shortvars))
deco = len(vout)*'='
print '{0}\n{1}\n{0}'.format(deco, vout)
# Prints the values of the variables
evtlst = self.cutidxs(cuts, mathmod)
form = '{:' + str(maxsize) + '.' + str(prec) + 'e}'
buildLine = lambda ievt: '| {} |'.format(
' | '.join(form.format(v) for v in self.iloc[ievt][variables])
)
if evts != None:
for ievt in evtlst[:evts]:
print buildLine(ievt)
print '{}\n'.format(deco)
else:
            for pos, ievt in enumerate(evtlst):
                if pos and pos % 20 == 0:
                    if raw_input(
                        '< Introduce q to exit, and any other '\
                        'input to continue printing >: '
                    ) == 'q': break
                print buildLine(ievt)
                if pos + 1 == len(evtlst):
                    print deco + '\n'
@staticmethod
def from_root( path, tname, columns = None, name = '<unnamed>', cuts = '', regex = False ):
'''
        Create a DataMgr instance from a ROOT file
'''
cols = columns or []
v = treeToArray(path, tname, cols, cuts, regex)
return DataMgr(data = v, columns = v.dtype.names, name = name)
def run_cut( self, var,
sense = '>',
npoints = 100,
vmin = None,
vmax = None,
endpoint = True ):
'''
        Return an array with the number of elements satisfying a
        given cut, with the cut value running from "vmin" to
        "vmax" in "npoints" steps.
'''
if sense not in ('>', '>=', '<', '<='):
sendErrorMsg('Unable to parse "{}" as a sense-like '\
'symbol'.format(sense))
return
values = self[var]
if vmin == None:
vmin = min(values)
if vmax == None:
vmax = max(values)
cuts = numpy.linspace(vmin, vmax, npoints, endpoint = endpoint)
var += sense
        points = numpy.zeros(npoints, dtype=int)
        for i, ic in enumerate(cuts):
            ct = var + str(ic)
            points[i] = self.entries(ct)
return points
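    # Example (a sketch): run_cut('pt', sense='>', npoints=3, vmin=0., vmax=1.)
    # counts the entries passing 'pt>0.0', 'pt>0.5' and 'pt>1.0'.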
def subsample( self, cuts = '', mathmod = None, evts = None, columns = None, name = None ):
'''
Returns a copy of this class satisfying the given
requirements. A set of cuts can be specified. The range
of the events to be copied can be specified, as well as
the variables to be copied. By default the entire class
is copied.
'''
columns = columns or self.columns
name = name or self.name + '_subsample'
if evts != None:
if isinstance(evts, collections.Iterable):
evts = numpy.array(evts)
else:
evts = self.index[:evts]
else:
evts = self.index
cut_lst = self.cutidxs(cuts, mathmod)
evtlst = numpy.intersect1d(evts, cut_lst)
cmgr = DataMgr(data = self[columns].iloc[evtlst], name = name)
return cmgr
def to_root( self, name = '', tname = False, columns = None, mode = 'recreate', regex = False ):
'''
        Save this instance in a ROOT file
'''
columns = columns or list(self.columns)
if name != '':
ofile = rt.TFile.Open(name, mode)
else:
ofile = False
name = rt.gDirectory.GetName()
print '{} => Saving tree with name "{}" in '\
'"{}"'.format(self.name, tname, name)
v = self.to_records(index = False)
arrayToTree(v, name = tname, variables = columns, use_regex = regex)
print self.name, '=> Written', len(self), 'entries'
if ofile:
ofile.Close()
def vars_in_root_tree( tree = None, fname = '', tpath = '', regexps = None ):
'''
    Return variables in a tree. If "regexps" are provided, only
    variables matching them will be returned.
'''
regexps = regexps or []
    opened_here = False
    if not tree:
        rfile = rt.TFile.Open(fname)
        tree = rfile.Get(tpath)
        opened_here = True
    brnames = [el.GetName() for el in tree.GetListOfBranches()]
    if opened_here:
        rfile.Close()
if regexps != []:
truenames = []
for expr in regexps:
addlst = string_list_filter(brnames, expr)
if addlst != []:
truenames += addlst
else:
sendWarningMsg('No variables found matching '\
'expression "{}"'.format(expr))
return truenames
else:
return brnames
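# Usage sketch (hypothetical file, tree and branch names; relies on the Isis
# helpers imported above):
#
#   mgr = DataMgr.from_root('data.root', 'DecayTree', name='demo')
#   mgr.make_var('pt', 'sqrt(px**2 + py**2)')
#   print mgr.entries('pt > 1.')
#   print mgr.eff('pt > 1.')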
| {
"content_hash": "970f863a9fad457ea973920051e6b1f9",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 100,
"avg_line_length": 33.60307692307693,
"alnum_prop": 0.5212892592253456,
"repo_name": "mrpPhys/Isis",
"id": "da2174d193dc87c9a9d1234de66d267a78cb0b7b",
"size": "11496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/Isis/data_management.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "333499"
},
{
"name": "CMake",
"bytes": "19658"
},
{
"name": "Python",
"bytes": "96556"
},
{
"name": "Shell",
"bytes": "6628"
}
],
"symlink_target": ""
} |
import pandas as pd
import numpy as np
import capstone_model as ccd
import contestdata_preprocess as pp
#*************************** STATICS ************************************
outputFile = "D:/DataScience/submission60.csv"
IDcol = 'row_id'
file_TrainValues = "D:/DataScience/pp_train_values.csv"
file_TrainLabels = "D:/DataScience/pp_train_labels.csv"
file_TestValues = "D:/DataScience/pp_test_values.csv"
#*************************** MAIN ROUTINE ************************************
#pre-process all input data
pp.preprocess_data()
# Load the training data
X = pd.read_csv(file_TrainValues,index_col=0)
y = pd.read_csv(file_TrainLabels,index_col=0)
T = pd.read_csv(file_TestValues, index_col=0)
predictors = [x for x in X.columns if x not in [IDcol]]
print('Build and fit StackingCV model (no bias, complex, 15 folds)...')
model = ccd.build_StackingModelCV(posBias=False, simple=False, cvRun=15)
model.fit(X[predictors].as_matrix(), y['income'].as_matrix())
#*************************** PRODUCE OUTPUT ************************************
print("Predicting new values...")
y = model.predict(T[predictors].as_matrix())
T['income'] = y[:]
#Note: removed logarithm smoothing for income / nominal effect on score
#print("correcting income value predictions...")
#T.income = np.exp(T.income)
Z = pd.DataFrame(T['income'])
Z.index.rename('row_id', inplace=True)
Z.to_csv(outputFile)
print ("File created!")
print ("\nTotal income estimated: {0}.".format(X.income.sum()))
| {
"content_hash": "cc51094e3849f57eb37539b497a99610",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 80,
"avg_line_length": 34.72727272727273,
"alnum_prop": 0.6138743455497382,
"repo_name": "cbenge509/DataScienceCapstone_Oct2017",
"id": "615b1c4c9d18cf9a00b0c4e152fd17d46f316ccf",
"size": "1603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "submission.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18570"
}
],
"symlink_target": ""
} |
class QuotesbotPipeline(object):
def process_item(self, item, spider):
return item
| {
"content_hash": "65dad5885f736ad062a2ad4225bf5437",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 41,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.6947368421052632,
"repo_name": "thetypist/scrappybot",
"id": "2aff2d76ba60e71b0b4f006e955991239bacb4a8",
"size": "289",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "quotesbot/pipelines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6174"
}
],
"symlink_target": ""
} |
"""
randomMedia_Test.py
Created by Nikolaus Sonnenschein on 2008-02-20.
Copyright (c) 2008 Jacobs University of Bremen. All rights reserved.
"""
import unittest
import copy
from ifba.GlpkWrap.metabolism import Metabolism
from ifba.GlpkWrap.randomMedia import Almaas
from ifba.GlpkWrap import fluxdist, util
class randomMedia_Test(unittest.TestCase):
def setUp(self):
self.lp = util.ImportCplex('../models/iAF1260template.lp')
self.glp = Almaas(Metabolism(self.lp))
        # self.glp.toggleVerbosity() # Verbosity is already toggled
def testGenerateFluxDist(self):
fDist = self.glp.generateFluxdist()
self.assert_(isinstance(fDist, fluxdist.FluxDist))
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "32432f8415f2f9ce726317b1ef287b27",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 69,
"avg_line_length": 29,
"alnum_prop": 0.7122015915119363,
"repo_name": "phantomas1234/fbaproject",
"id": "2d5a287a55b9ac4549310eaa50ad7ddab6c4087b",
"size": "794",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ifba/GlpkWrap/randomMedia_Test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "225057"
},
{
"name": "Batchfile",
"bytes": "3609"
},
{
"name": "C",
"bytes": "3725206"
},
{
"name": "C#",
"bytes": "3511"
},
{
"name": "M4",
"bytes": "3578"
},
{
"name": "Makefile",
"bytes": "70125"
},
{
"name": "Python",
"bytes": "339645"
},
{
"name": "Shell",
"bytes": "241216"
},
{
"name": "TeX",
"bytes": "453757"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from builtins import str
import unittest
from django.contrib.contenttypes.models import ContentType
from isisdata.models import ISODateValue, ISODateRangeValue, Attribute, AttributeType
import datetime
class TestISODateValue(unittest.TestCase):
def setUp(self):
value_type = ContentType.objects.get_for_model(ISODateValue)
subject_type = ContentType.objects.get_for_model(AttributeType)
self.attribute_type = AttributeType.objects.create(
name='TestType',
value_content_type=value_type
)
self.attribute = Attribute.objects.create(
type_controlled=self.attribute_type,
source_content_type=subject_type,
source_instance_id=str(self.attribute_type.id)
)
def _do_test(self, date, precision):
value = ISODateValue.objects.create(value=date, attribute=self.attribute)
self.assertIsInstance(value.value, list)
self.assertIsInstance(value.precision, str)
self.assertEqual(value.precision, precision)
return value.value
def test_create_datevalue_from_date(self):
value = self._do_test(datetime.date.today(), 'day')
def test_create_datevalue_from_datetime(self):
value = self._do_test(datetime.datetime.now(), 'day')
def test_create_datevalue_from_str_full(self):
value = self._do_test('1999-01-05', 'day')
def test_create_datevalue_from_str_month(self):
value = self._do_test('1999-01', 'month')
def test_create_datevalue_from_str_year(self):
value = self._do_test('1999', 'year')
def test_create_datevalue_from_str_bc(self):
value = self._do_test('-1999-01-05', 'day')
self.assertLess(value[0], 0)
def test_create_datevalue_from_str_bc_month(self):
value = self._do_test('-1999-01', 'month')
self.assertLess(value[0], 0)
def test_create_datevalue_from_str_bc_year(self):
value = self._do_test('-1999', 'year')
self.assertLess(value[0], 0)
def test_create_datevalue_from_list_full(self):
value = self._do_test([1999, 0o1, 0o5], 'day')
def test_create_datevalue_from_tuple_full(self):
value = self._do_test((1999, 0o1, 0o5), 'day')
def test_create_datevalue_from_int(self):
value = self._do_test(1999, 'year')
def test_create_datevalue_from_int_bc(self):
value = self._do_test(-1999, 'year')
self.assertLess(value[0], 0)
class TestISODateRangeValue(unittest.TestCase):
def setUp(self):
value_type = ContentType.objects.get_for_model(ISODateRangeValue)
subject_type = ContentType.objects.get_for_model(AttributeType)
self.attribute_type = AttributeType.objects.create(
name='TestType',
value_content_type=value_type
)
self.attribute = Attribute.objects.create(
type_controlled=self.attribute_type,
source_content_type=subject_type,
source_instance_id=str(self.attribute_type.id)
)
def _do_test(self, date):
value = ISODateRangeValue.objects.create(value=date, attribute=self.attribute)
self.assertIsInstance(value.value, list)
return value.value
def test_create_daterangevalue_from_date(self):
date = [datetime.date.today(), datetime.date.today()]
value = self._do_test(date)
def test_create_daterangevalue_from_datetime(self):
date = [datetime.datetime.now(), datetime.datetime.now()]
value = self._do_test(date)
def test_create_daterangevalue_from_str_full(self):
value = self._do_test(['1999-01-05', '1999-01-06'])
def test_create_daterangevalue_from_str_month(self):
value = self._do_test(['1999-01', '1999-02'])
def test_create_daterangevalue_from_str_year(self):
date = ['1999', '2000']
value = self._do_test(date)
def test_create_daterangevalue_from_str_bc(self):
date = ['-1999-01-05', '-1999-01-04']
value = self._do_test(date)
self.assertLess(value[0][0], 0)
def test_create_daterangevalue_from_str_bc_month(self):
date = ['-1999-01', '-1998-01']
value = self._do_test(date)
self.assertLess(value[0][0], 0)
def test_create_daterangevalue_from_str_bc_year(self):
date = ['-1999', '-1998']
value = self._do_test(date)
self.assertLess(value[0][0], 0)
def test_create_daterangevalue_from_list_full(self):
date = [[1999, 0o1, 0o5], [1999, 0o1, 0o6]]
value = self._do_test(date)
def test_create_daterangevalue_from_tuple_full(self):
date = [(1999, 0o1, 0o5), (1999, 0o1, 0o6)]
value = self._do_test(date)
def test_create_daterangevalue_from_int(self):
date = [1999, 2000]
value = self._do_test(date)
def test_create_daterangevalue_from_int_bc(self):
date = [-1999, -1998]
value = self._do_test(date)
self.assertLess(value[0][0], 0)
| {
"content_hash": "c57529e78dc187e071a7fd12261182a9",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 86,
"avg_line_length": 35.645390070921984,
"alnum_prop": 0.6350974930362117,
"repo_name": "upconsulting/IsisCB",
"id": "a141fb348d236e36021d03eded1dce6da8190d51",
"size": "5026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "isiscb/isisdata/tests/test_isodatevalues.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34013"
},
{
"name": "Dockerfile",
"bytes": "420"
},
{
"name": "HTML",
"bytes": "1182137"
},
{
"name": "JavaScript",
"bytes": "221954"
},
{
"name": "Less",
"bytes": "67102"
},
{
"name": "Procfile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "1758838"
},
{
"name": "Roff",
"bytes": "285"
},
{
"name": "SCSS",
"bytes": "67969"
},
{
"name": "Shell",
"bytes": "12632"
}
],
"symlink_target": ""
} |
import sys
from pprint import pprint
from django.core.management.base import BaseCommand, CommandError
from fo2.connections import db_cursor_so
class Command(BaseCommand):
    help = 'Shows the count(*) of all Systêxtil tables'
def handle(self, *args, **options):
try:
systextil_conn = db_cursor_so()
tables = systextil_conn.introspection.table_names()
cursor = systextil_conn.cursor()
tbl_counts = {}
sql = '''
SELECT
count(*)
FROM {table}
'''
for table in tables:
# print(sql.format(table=table))
cursor.execute(sql.format(table=table))
row = cursor.fetchone()
# print(row[0])
tbl_counts[table] = row[0]
print('{:12} {}'.format(row[0], table))
# pprint(tbl_counts)
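            # To list the largest tables first (a sketch, not part of the
            # original command), sort before printing:
            #   for table, cnt in sorted(tbl_counts.items(), key=lambda kv: -kv[1]):
            #       print('{:12} {}'.format(cnt, table))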
except Exception as e:
raise CommandError('Error counting in tables. {}'.format(e))
| {
"content_hash": "c8c7691304c6127dcd83b9d9ce47b0ee",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 72,
"avg_line_length": 29.742857142857144,
"alnum_prop": 0.5312199807877042,
"repo_name": "anselmobd/fo2",
"id": "84e9a84cf9e30adb5e515e5da96e8abd3135372f",
"size": "1042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/utils/management/commands/tables_counts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "160899"
},
{
"name": "HTML",
"bytes": "855985"
},
{
"name": "JavaScript",
"bytes": "203109"
},
{
"name": "PLSQL",
"bytes": "2762"
},
{
"name": "Python",
"bytes": "3228268"
},
{
"name": "Shell",
"bytes": "2161"
}
],
"symlink_target": ""
} |
"""
setup.py
-allows the foodlog package to be installed via:
$ python setup.py install
-or installed for development via:
$ python setup.py develop
-once installed, the package can be imported from anywhere on the system
(as opposed to just when within the same directory)
"""
from setuptools import setup
setup(name='FoodLog',
version='0.1.0',
description='Web service backend to my food log application webpage',
author='Austin Jones',
author_email='oostin623@gmail.com',
packages=['foodlog', 'foodlog.resources', 'foodlog.common', 'foodlog.test'],
zip_safe=False,
install_requires=["Flask == 0.10.1",
"flask_restful == 0.3.4"])
| {
"content_hash": "787e2d0c275ae8e875cd18e6a7f2f7d2",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 82,
"avg_line_length": 33.22727272727273,
"alnum_prop": 0.6470588235294118,
"repo_name": "oostin623/FoodLog",
"id": "2ddc51521de9c0a1acc87c57feea432bd014f04a",
"size": "731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15235"
}
],
"symlink_target": ""
} |
"""Reconstruction Evaluation."""
from os import path
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_graphics.projects.nasa.lib import datasets
from tensorflow_graphics.projects.nasa.lib import models
from tensorflow_graphics.projects.nasa.lib import utils
tf.disable_eager_execution()
flags = tf.app.flags
logging = tf.logging
tf.logging.set_verbosity(tf.logging.INFO)
utils.define_flags()
FLAGS = flags.FLAGS
def build_eval_graph(input_fn, model_fn, hparams):
"""Build the evaluation computation graph."""
dataset = input_fn(None)
batch = dataset.make_one_shot_iterator().get_next()
batch_holder = {
"transform":
tf.placeholder(
tf.float32,
[1, 1, hparams.n_parts, hparams.n_dims + 1, hparams.n_dims + 1]),
"joint":
tf.placeholder(tf.float32, [1, 1, hparams.n_parts, hparams.n_dims]),
"point":
tf.placeholder(tf.float32, [1, 1, None, hparams.n_dims]),
"label":
tf.placeholder(tf.float32, [1, 1, None, 1]),
}
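  # A note on the shapes above: the two leading 1s are (presumably) batch and
  # frame dimensions, while the `None` dimension lets each feed supply a
  # variable number of sample points.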
latent_holder, latent, occ = model_fn(batch_holder, None, None, "gen_mesh")
# Eval Summary
iou_holder = tf.placeholder(tf.float32, [])
best_holder = tf.placeholder(tf.float32, [])
tf.summary.scalar("IoU", iou_holder)
tf.summary.scalar("Best_IoU", best_holder)
return {
"batch_holder": batch_holder,
"latent_holder": latent_holder,
"latent": latent,
"occ": occ,
"batch": batch,
"iou_holder": iou_holder,
"best_holder": best_holder,
"merged_summary": tf.summary.merge_all(),
}
def evaluate(hook_dict, ckpt, saver, best_iou, hparams):
"""Evaluate a checkpoint on the whole test set."""
batch = hook_dict["batch"]
merged_summary = hook_dict["merged_summary"]
iou_holder = hook_dict["iou_holder"]
best_holder = hook_dict["best_holder"]
batch_holder = hook_dict["batch_holder"]
latent_holder = hook_dict["latent_holder"]
latent = hook_dict["latent"]
occ = hook_dict["occ"]
global_step = utils.parse_global_step(ckpt)
assignment_map = {
"shape/": "shape/",
}
tf.train.init_from_checkpoint(ckpt, assignment_map)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
accum_iou = 0.
example_cnt = 0
while True:
try:
batch_val = sess.run(batch)
feed_dict = {
batch_holder["transform"]: batch_val["transform"],
batch_holder["joint"]: batch_val["joint"],
}
iou = utils.compute_iou(sess, feed_dict, latent_holder,
batch_holder["point"], latent, occ[:, -1:],
batch_val["points"], batch_val["labels"],
hparams)
accum_iou += iou
example_cnt += 1
if hparams.gen_mesh_only > 0:
# Generate meshes for evaluation
unused_var = utils.save_mesh(
sess,
feed_dict,
latent_holder,
batch_holder["point"],
latent,
occ,
batch_val,
hparams,
)
logging.info("Generated mesh No.{}".format(example_cnt))
except tf.errors.OutOfRangeError:
accum_iou /= example_cnt
if best_iou < accum_iou:
best_iou = accum_iou
saver.save(sess, path.join(hparams.train_dir, "best", "model.ckpt"),
global_step)
summary = sess.run(
merged_summary,
utils.make_summary_feed_dict(
iou_holder,
accum_iou,
best_holder,
best_iou,
))
        # If only generating meshes for the sequence, we can terminate the
        # evaluation after the first full loop over the test set.
if hparams.gen_mesh_only:
exit(0)
break
return summary, global_step
def main(unused_argv):
tf.random.set_random_seed(20200823)
np.random.seed(20200823)
input_fn = datasets.get_dataset("test", FLAGS)
model_fn = models.get_model(FLAGS)
best_iou = 0.
with tf.summary.FileWriter(path.join(FLAGS.train_dir, "eval")) as eval_writer:
hook_dict = build_eval_graph(input_fn, model_fn, FLAGS)
saver = tf.train.Saver()
for ckpt in tf.train.checkpoints_iterator(FLAGS.train_dir, timeout=1800):
summary, global_step = evaluate(hook_dict, ckpt, saver, best_iou, FLAGS)
eval_writer.add_summary(summary, global_step)
eval_writer.flush()
if __name__ == "__main__":
tf.app.run(main)
| {
"content_hash": "82795909eef8a21dbade35956412ccd7",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 80,
"avg_line_length": 29.84967320261438,
"alnum_prop": 0.5947011167068097,
"repo_name": "tensorflow/graphics",
"id": "3e7335ceb844d3fdd0595df24d693543b2aa483b",
"size": "5153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_graphics/projects/nasa/eval.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2403"
},
{
"name": "C++",
"bytes": "115377"
},
{
"name": "Cython",
"bytes": "12955"
},
{
"name": "JavaScript",
"bytes": "22252"
},
{
"name": "Jupyter Notebook",
"bytes": "246839"
},
{
"name": "Python",
"bytes": "2222139"
},
{
"name": "Shell",
"bytes": "4281"
},
{
"name": "Starlark",
"bytes": "2233"
}
],
"symlink_target": ""
} |
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def can_paginate(operation_name=None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
"""
pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to\nClientMethod.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid\nfor. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By\ndefault, the http method is whatever is used in the method\'s model.
"""
pass
def get_paginator(operation_name=None):
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
:rtype: L{botocore.paginate.Paginator}
    :returns: A paginator object.
"""
pass
def get_waiter(waiter_name=None):
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters\nsection of the service docs for a list of available waiters.
:rtype: botocore.waiter.Waiter
"""
pass
def invoke_endpoint(EndpointName=None, Body=None, ContentType=None, Accept=None, CustomAttributes=None, TargetModel=None):
"""
After you deploy a model into production using Amazon SageMaker hosting services, your client applications use this API to get inferences from the model hosted at the specified endpoint.
For an overview of Amazon SageMaker, see How It Works .
Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add additional headers. You should not rely on the behavior of headers outside those enumerated in the request syntax.
Calls to InvokeEndpoint are authenticated by using AWS Signature Version 4. For information, see Authenticating Requests (AWS Signature Version 4) in the Amazon S3 API Reference .
A customer\'s model containers must respond to requests within 60 seconds. The model itself can have a maximum processing time of 60 seconds before responding to the /invocations. If your model is going to take 50-60 seconds of processing time, the SDK socket timeout should be set to be 70 seconds.
See also: AWS API Documentation
Exceptions
:example: response = client.invoke_endpoint(
EndpointName='string',
Body=b'bytes'|file,
ContentType='string',
Accept='string',
CustomAttributes='string',
TargetModel='string'
)
:type EndpointName: string
:param EndpointName: [REQUIRED]\nThe name of the endpoint that you specified when you created the endpoint using the CreateEndpoint API.\n
:type Body: bytes or seekable file-like object
    :param Body: [REQUIRED]\nProvides input data, in the format specified in the ContentType request header. Amazon SageMaker passes all of the data in the body to the model.\nFor information about the format of the request body, see Common Data Formats - Inference .\n
:type ContentType: string
:param ContentType: The MIME type of the input data in the request body.
:type Accept: string
:param Accept: The desired MIME type of the inference in the response.
:type CustomAttributes: string
:param CustomAttributes: Provides additional information about a request for an inference submitted to a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to provide an ID that you can use to track a request or to provide other metadata that a service endpoint was programmed to process. The value must consist of no more than 1024 visible US-ASCII characters as specified in Section 3.3.6. Field Value Components of the Hypertext Transfer Protocol (HTTP/1.1). This feature is currently supported in the AWS SDKs but not in the Amazon SageMaker Python SDK.
:type TargetModel: string
:param TargetModel: Specifies the model to be requested for an inference when invoking a multi-model endpoint.
:rtype: dict
    :returns: Response syntax:
{
'Body': StreamingBody(),
'ContentType': 'string',
'InvokedProductionVariant': 'string',
'CustomAttributes': 'string'
}
Response Structure
(dict) --
Body (StreamingBody) --
Includes the inference provided by the model.
    For information about the format of the response body, see Common Data Formats - Inference .
ContentType (string) --
The MIME type of the inference returned in the response body.
InvokedProductionVariant (string) --
Identifies the production variant that was invoked.
CustomAttributes (string) --
Provides additional information in the response about the inference returned by a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to return an ID received in the CustomAttributes header of a request or other metadata that a service endpoint was programmed to produce. The value must consist of no more than 1024 visible US-ASCII characters as specified in Section 3.3.6. Field Value Components of the Hypertext Transfer Protocol (HTTP/1.1). If the customer wants the custom attribute returned, the model must set the custom attribute to be included on the way back.
This feature is currently supported in the AWS SDKs but not in the Amazon SageMaker Python SDK.
Exceptions
SageMakerRuntime.Client.exceptions.InternalFailure
SageMakerRuntime.Client.exceptions.ServiceUnavailable
SageMakerRuntime.Client.exceptions.ValidationError
SageMakerRuntime.Client.exceptions.ModelError
:return: {
'Body': StreamingBody(),
'ContentType': 'string',
'InvokedProductionVariant': 'string',
'CustomAttributes': 'string'
}
:returns:
SageMakerRuntime.Client.exceptions.InternalFailure
SageMakerRuntime.Client.exceptions.ServiceUnavailable
SageMakerRuntime.Client.exceptions.ValidationError
SageMakerRuntime.Client.exceptions.ModelError
"""
pass
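# Usage sketch (hypothetical endpoint name and payload; this module only
# documents the API, the real client comes from boto3):
#
#   import boto3
#   client = boto3.client('sagemaker-runtime')
#   response = client.invoke_endpoint(
#       EndpointName='my-endpoint',
#       ContentType='text/csv',
#       Body=b'1.0,2.0,3.0',
#   )
#   print(response['Body'].read())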
| {
"content_hash": "f135609f2f52d74472357faf64f03f21",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 665,
"avg_line_length": 44.729281767955804,
"alnum_prop": 0.7561758893280632,
"repo_name": "wavycloud/pyboto3",
"id": "fdffd5917166a55fe1f4a39d5361bee3fcf43964",
"size": "8096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyboto3/sagemakerruntime.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9182360"
}
],
"symlink_target": ""
} |
import numpy as np
import itertools
import copy
from dipy.segment.clustering import Cluster, ClusterCentroid
from dipy.segment.clustering import ClusterMap, ClusterMapCentroid
from dipy.segment.clustering import Clustering
from nose.tools import assert_equal, assert_true, assert_false
from numpy.testing import assert_array_equal, assert_raises, run_module_suite
from dipy.testing import assert_arrays_equal
features_shape = (1, 10)
dtype = "float32"
features = np.ones(features_shape, dtype=dtype)
data = [np.arange(3*5, dtype=dtype).reshape((-1, 3)),
np.arange(3*10, dtype=dtype).reshape((-1, 3)),
np.arange(3*15, dtype=dtype).reshape((-1, 3)),
np.arange(3*17, dtype=dtype).reshape((-1, 3)),
np.arange(3*20, dtype=dtype).reshape((-1, 3))]
expected_clusters = [[2, 4], [0, 3], [1]]
def test_cluster_attributes_and_constructor():
cluster = Cluster()
assert_equal(type(cluster), Cluster)
assert_equal(cluster.id, 0)
assert_array_equal(cluster.indices, [])
assert_equal(len(cluster), 0)
# Duplicate
assert_equal(cluster, Cluster(cluster.id, cluster.indices, cluster.refdata))
assert_false(cluster != Cluster(cluster.id, cluster.indices, cluster.refdata))
# Invalid comparison
assert_raises(TypeError, cluster.__cmp__, cluster)
def test_cluster_assign():
cluster = Cluster()
indices = []
for idx in range(1, 10):
cluster.assign(idx)
indices.append(idx)
assert_equal(len(cluster), idx)
assert_equal(type(cluster.indices), list)
assert_array_equal(cluster.indices, indices)
# Test add multiples indices at the same time
cluster = Cluster()
cluster.assign(*range(1, 10))
assert_array_equal(cluster.indices, indices)
def test_cluster_iter():
indices = list(range(len(data)))
    np.random.shuffle(indices)  # Non-trivial ordering
# Test without specifying refdata
cluster = Cluster()
cluster.assign(*indices)
assert_array_equal(cluster.indices, indices)
assert_array_equal(list(cluster), indices)
# Test with specifying refdata in ClusterMap
cluster.refdata = data
assert_arrays_equal(list(cluster), [data[i] for i in indices])
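# Note: a Cluster stores only integer indices; assigning `refdata` (as above)
# makes iteration and indexing dereference those indices into the backing
# data sequence.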
def test_cluster_getitem():
indices = list(range(len(data)))
    np.random.shuffle(indices)  # Non-trivial ordering
advanced_indices = indices + [0, 1, 2, -1, -2, -3]
# Test without specifying refdata in ClusterMap
cluster = Cluster()
cluster.assign(*indices)
# Test indexing
for i in advanced_indices:
assert_equal(cluster[i], indices[i])
# Test advanced indexing
assert_array_equal(cluster[advanced_indices], [indices[i] for i in advanced_indices])
# Test index out of bounds
assert_raises(IndexError, cluster.__getitem__, len(cluster))
assert_raises(IndexError, cluster.__getitem__, -len(cluster)-1)
# Test slicing and negative indexing
assert_equal(cluster[-1], indices[-1])
assert_array_equal(cluster[::2], indices[::2])
assert_arrays_equal(cluster[::-1], indices[::-1])
assert_arrays_equal(cluster[:-1], indices[:-1])
assert_arrays_equal(cluster[1:], indices[1:])
# Test with wrong indexing object
assert_raises(TypeError, cluster.__getitem__, "wrong")
# Test with specifying refdata in ClusterMap
cluster.refdata = data
# Test indexing
for i in advanced_indices:
assert_array_equal(cluster[i], data[indices[i]])
# Test advanced indexing
assert_array_equal(cluster[advanced_indices], [data[indices[i]] for i in advanced_indices])
# Test index out of bounds
assert_raises(IndexError, cluster.__getitem__, len(cluster))
assert_raises(IndexError, cluster.__getitem__, -len(cluster)-1)
# Test slicing and negative indexing
assert_array_equal(cluster[-1], data[indices[-1]])
assert_arrays_equal(cluster[::2], [data[i] for i in indices[::2]])
assert_arrays_equal(cluster[::-1], [data[i] for i in indices[::-1]])
assert_arrays_equal(cluster[:-1], [data[i] for i in indices[:-1]])
assert_arrays_equal(cluster[1:], [data[i] for i in indices[1:]])
# Test with wrong indexing object
assert_raises(TypeError, cluster.__getitem__, "wrong")
def test_cluster_str_and_repr():
indices = list(range(len(data)))
    np.random.shuffle(indices)  # Non-trivial ordering
# Test without specifying refdata in ClusterMap
cluster = Cluster()
cluster.assign(*indices)
assert_equal(str(cluster), "[" + ", ".join(map(str, indices)) + "]")
assert_equal(repr(cluster), "Cluster([" + ", ".join(map(str, indices)) + "])")
# Test with specifying refdata in ClusterMap
cluster.refdata = data
assert_equal(str(cluster), "[" + ", ".join(map(str, indices)) + "]")
assert_equal(repr(cluster), "Cluster([" + ", ".join(map(str, indices)) + "])")
def test_cluster_centroid_attributes_and_constructor():
centroid = np.zeros(features_shape)
cluster = ClusterCentroid(centroid)
assert_equal(type(cluster), ClusterCentroid)
assert_equal(cluster.id, 0)
assert_array_equal(cluster.indices, [])
assert_array_equal(cluster.centroid, np.zeros(features_shape))
assert_equal(len(cluster), 0)
# Duplicate
assert_equal(cluster, ClusterCentroid(centroid))
assert_false(cluster != ClusterCentroid(centroid))
assert_false(cluster == ClusterCentroid(centroid+1))
# Invalid comparison
assert_raises(TypeError, cluster.__cmp__, cluster)
def test_cluster_centroid_assign():
centroid = np.zeros(features_shape)
cluster = ClusterCentroid(centroid)
indices = []
centroid = np.zeros(features_shape, dtype=dtype)
for idx in range(1, 10):
cluster.assign(idx, (idx+1) * features)
cluster.update()
indices.append(idx)
centroid = (centroid * (idx-1) + (idx+1) * features) / idx
assert_equal(len(cluster), idx)
assert_equal(type(cluster.indices), list)
assert_array_equal(cluster.indices, indices)
assert_equal(type(cluster.centroid), np.ndarray)
assert_array_equal(cluster.centroid, centroid)
def test_cluster_centroid_iter():
indices = list(range(len(data)))
    np.random.shuffle(indices)  # Non-trivial ordering
# Test without specifying refdata in ClusterCentroid
centroid = np.zeros(features_shape)
cluster = ClusterCentroid(centroid)
for idx in indices:
cluster.assign(idx, (idx+1)*features)
assert_array_equal(cluster.indices, indices)
assert_array_equal(list(cluster), indices)
# Test with specifying refdata in ClusterCentroid
cluster.refdata = data
assert_arrays_equal(list(cluster), [data[i] for i in indices])
def test_cluster_centroid_getitem():
indices = list(range(len(data)))
    np.random.shuffle(indices)  # Non-trivial ordering
advanced_indices = indices + [0, 1, 2, -1, -2, -3]
# Test without specifying refdata in ClusterCentroid
centroid = np.zeros(features_shape)
cluster = ClusterCentroid(centroid)
for idx in indices:
cluster.assign(idx, (idx+1)*features)
# Test indexing
for i in advanced_indices:
assert_equal(cluster[i], indices[i])
# Test advanced indexing
assert_array_equal(cluster[advanced_indices], [indices[i] for i in advanced_indices])
# Test index out of bounds
assert_raises(IndexError, cluster.__getitem__, len(cluster))
assert_raises(IndexError, cluster.__getitem__, -len(cluster)-1)
# Test slicing and negative indexing
assert_equal(cluster[-1], indices[-1])
assert_array_equal(cluster[::2], indices[::2])
assert_arrays_equal(cluster[::-1], indices[::-1])
assert_arrays_equal(cluster[:-1], indices[:-1])
assert_arrays_equal(cluster[1:], indices[1:])
# Test with specifying refdata in ClusterCentroid
cluster.refdata = data
# Test indexing
for i in advanced_indices:
assert_array_equal(cluster[i], data[indices[i]])
# Test advanced indexing
assert_array_equal(cluster[advanced_indices], [data[indices[i]] for i in advanced_indices])
# Test index out of bounds
assert_raises(IndexError, cluster.__getitem__, len(cluster))
assert_raises(IndexError, cluster.__getitem__, -len(cluster)-1)
# Test slicing and negative indexing
assert_array_equal(cluster[-1], data[indices[-1]])
assert_arrays_equal(cluster[::2], [data[i] for i in indices[::2]])
assert_arrays_equal(cluster[::-1], [data[i] for i in indices[::-1]])
assert_arrays_equal(cluster[:-1], [data[i] for i in indices[:-1]])
assert_arrays_equal(cluster[1:], [data[i] for i in indices[1:]])
def test_cluster_map_attributes_and_constructor():
clusters = ClusterMap()
assert_equal(len(clusters), 0)
assert_array_equal(clusters.clusters, [])
assert_array_equal(list(clusters), [])
assert_raises(IndexError, clusters.__getitem__, 0)
assert_raises(AttributeError, setattr, clusters, 'clusters', [])
def test_cluster_map_add_cluster():
clusters = ClusterMap()
list_of_cluster_objects = []
list_of_indices = []
for i in range(3):
cluster = Cluster()
list_of_cluster_objects.append(cluster)
list_of_indices.append([])
for id_data in range(2 * i):
list_of_indices[-1].append(id_data)
cluster.assign(id_data)
clusters.add_cluster(cluster)
assert_equal(type(cluster), Cluster)
assert_equal(len(clusters), i+1)
assert_equal(cluster, clusters[-1])
assert_array_equal(list(itertools.chain(*clusters)), list(itertools.chain(*list_of_indices)))
# Test adding multiple clusters at once.
clusters = ClusterMap()
clusters.add_cluster(*list_of_cluster_objects)
assert_array_equal(list(itertools.chain(*clusters)), list(itertools.chain(*list_of_indices)))
def test_cluster_map_remove_cluster():
clusters = ClusterMap()
cluster1 = Cluster(indices=[1])
clusters.add_cluster(cluster1)
cluster2 = Cluster(indices=[1, 2])
clusters.add_cluster(cluster2)
cluster3 = Cluster(indices=[1, 2, 3])
clusters.add_cluster(cluster3)
assert_equal(len(clusters), 3)
clusters.remove_cluster(cluster2)
assert_equal(len(clusters), 2)
assert_array_equal(list(itertools.chain(*clusters)), list(itertools.chain(*[cluster1, cluster3])))
assert_equal(clusters[0], cluster1)
assert_equal(clusters[1], cluster3)
clusters.remove_cluster(cluster3)
assert_equal(len(clusters), 1)
assert_array_equal(list(itertools.chain(*clusters)), list(cluster1))
assert_equal(clusters[0], cluster1)
clusters.remove_cluster(cluster1)
assert_equal(len(clusters), 0)
assert_array_equal(list(itertools.chain(*clusters)), [])
# Test removing multiple clusters at once.
clusters = ClusterMap()
clusters.add_cluster(cluster1, cluster2, cluster3)
clusters.remove_cluster(cluster3, cluster2)
assert_equal(len(clusters), 1)
assert_array_equal(list(itertools.chain(*clusters)), list(cluster1))
assert_equal(clusters[0], cluster1)
clusters = ClusterMap()
clusters.add_cluster(cluster2, cluster1, cluster3)
clusters.remove_cluster(cluster1, cluster3, cluster2)
assert_equal(len(clusters), 0)
assert_array_equal(list(itertools.chain(*clusters)), [])
def test_cluster_map_clear():
nb_clusters = 11
clusters = ClusterMap()
for i in range(nb_clusters):
new_cluster = Cluster(indices=range(i))
clusters.add_cluster(new_cluster)
clusters.clear()
assert_equal(len(clusters), 0)
assert_array_equal(list(itertools.chain(*clusters)), [])
def test_cluster_map_iter():
rng = np.random.RandomState(42)
nb_clusters = 11
# Test without specifying refdata in ClusterMap
cluster_map = ClusterMap()
clusters = []
for i in range(nb_clusters):
new_cluster = Cluster(indices=rng.randint(0, len(data), size=10))
cluster_map.add_cluster(new_cluster)
clusters.append(new_cluster)
assert_true(all([c1 is c2 for c1, c2 in zip(cluster_map.clusters, clusters)]))
assert_array_equal(cluster_map, clusters)
assert_array_equal(cluster_map.clusters, clusters)
assert_array_equal(cluster_map, [cluster.indices for cluster in clusters])
# Set refdata
cluster_map.refdata = data
assert_array_equal(cluster_map, [[data[i] for i in cluster.indices] for cluster in clusters])
# Remove refdata, i.e. back to indices
cluster_map.refdata = None
assert_array_equal(cluster_map, [cluster.indices for cluster in clusters])
def test_cluster_map_getitem():
nb_clusters = 11
indices = list(range(nb_clusters))
    np.random.shuffle(indices)  # Non-trivial ordering
advanced_indices = indices + [0, 1, 2, -1, -2, -3]
cluster_map = ClusterMap()
clusters = []
for i in range(nb_clusters):
new_cluster = Cluster(indices=range(i))
cluster_map.add_cluster(new_cluster)
clusters.append(new_cluster)
# Test indexing
for i in advanced_indices:
assert_equal(cluster_map[i], clusters[i])
# Test advanced indexing
assert_array_equal(cluster_map[advanced_indices], [clusters[i] for i in advanced_indices])
# Test index out of bounds
assert_raises(IndexError, cluster_map.__getitem__, len(clusters))
assert_raises(IndexError, cluster_map.__getitem__, -len(clusters)-1)
# Test slicing and negative indexing
assert_equal(cluster_map[-1], clusters[-1])
assert_array_equal(cluster_map[::2], clusters[::2])
assert_arrays_equal(cluster_map[::-1], clusters[::-1])
assert_arrays_equal(cluster_map[:-1], clusters[:-1])
assert_arrays_equal(cluster_map[1:], clusters[1:])
def test_cluster_map_str_and_repr():
nb_clusters = 11
cluster_map = ClusterMap()
clusters = []
for i in range(nb_clusters):
new_cluster = Cluster(indices=range(i))
cluster_map.add_cluster(new_cluster)
clusters.append(new_cluster)
expected_str = "[" + ", ".join(map(str, clusters)) + "]"
assert_equal(str(cluster_map), expected_str)
assert_equal(repr(cluster_map), "ClusterMap(" + expected_str + ")")
def test_cluster_map_get_size():
nb_clusters = 11
cluster_map = ClusterMap()
clusters = [Cluster() for i in range(nb_clusters)]
cluster_map.add_cluster(*clusters)
assert_equal(len(cluster_map), nb_clusters)
assert_equal(cluster_map.get_size(), nb_clusters)
def test_cluster_map_get_clusters_sizes():
rng = np.random.RandomState(42)
nb_clusters = 11
# Generate random indices
indices = [range(rng.randint(1, 10)) for i in range(nb_clusters)]
cluster_map = ClusterMap()
clusters = [Cluster(indices=indices[i]) for i in range(nb_clusters)]
cluster_map.add_cluster(*clusters)
assert_equal(cluster_map.get_clusters_sizes(), list(map(len, indices)))
def test_cluster_map_get_small_and_large_clusters():
rng = np.random.RandomState(42)
nb_clusters = 11
cluster_map = ClusterMap()
# Randomly generate small clusters
indices = [rng.randint(0, 10, size=i) for i in range(1, nb_clusters+1)]
small_clusters = [Cluster(indices=indices[i]) for i in range(nb_clusters)]
cluster_map.add_cluster(*small_clusters)
    # Randomly generate large clusters
indices = [rng.randint(0, 10, size=i) for i in range(nb_clusters+1, 2*nb_clusters+1)]
large_clusters = [Cluster(indices=indices[i]) for i in range(nb_clusters)]
cluster_map.add_cluster(*large_clusters)
assert_equal(len(cluster_map), 2*nb_clusters)
assert_equal(len(cluster_map.get_small_clusters(nb_clusters)), len(small_clusters))
assert_arrays_equal(cluster_map.get_small_clusters(nb_clusters), small_clusters)
assert_equal(len(cluster_map.get_large_clusters(nb_clusters+1)), len(large_clusters))
assert_arrays_equal(cluster_map.get_large_clusters(nb_clusters+1), large_clusters)
def test_cluster_map_comparison_with_int():
clusters1_indices = range(10)
clusters2_indices = range(10, 15)
clusters3_indices = [15]
# Build a test ClusterMap
clusters = ClusterMap()
cluster1 = Cluster()
cluster1.assign(*clusters1_indices)
clusters.add_cluster(cluster1)
cluster2 = Cluster()
cluster2.assign(*clusters2_indices)
clusters.add_cluster(cluster2)
cluster3 = Cluster()
cluster3.assign(*clusters3_indices)
clusters.add_cluster(cluster3)
subset = clusters < 5
assert_equal(subset.sum(), 1)
assert_array_equal(list(clusters[subset][0]), clusters3_indices)
subset = clusters <= 5
assert_equal(subset.sum(), 2)
assert_array_equal(list(clusters[subset][0]), clusters2_indices)
assert_array_equal(list(clusters[subset][1]), clusters3_indices)
subset = clusters == 5
assert_equal(subset.sum(), 1)
assert_array_equal(list(clusters[subset][0]), clusters2_indices)
subset = clusters != 5
assert_equal(subset.sum(), 2)
assert_array_equal(list(clusters[subset][0]), clusters1_indices)
assert_array_equal(list(clusters[subset][1]), clusters3_indices)
subset = clusters > 5
assert_equal(subset.sum(), 1)
assert_array_equal(list(clusters[subset][0]), clusters1_indices)
subset = clusters >= 5
assert_equal(subset.sum(), 2)
assert_array_equal(list(clusters[subset][0]), clusters1_indices)
assert_array_equal(list(clusters[subset][1]), clusters2_indices)
def test_cluster_map_comparison_with_object():
nb_clusters = 4
cluster_map = ClusterMap()
#clusters = []
for i in range(nb_clusters):
new_cluster = Cluster(indices=range(i))
cluster_map.add_cluster(new_cluster)
#clusters.append(new_cluster)
# Comparison with another ClusterMap object
other_cluster_map = copy.deepcopy(cluster_map)
assert_true(cluster_map == other_cluster_map)
other_cluster_map = copy.deepcopy(cluster_map)
assert_false(cluster_map != other_cluster_map)
other_cluster_map = copy.deepcopy(cluster_map)
assert_raises(NotImplementedError, cluster_map.__le__, other_cluster_map)
# Comparison with an object that is not a ClusterMap or int
assert_raises(NotImplementedError, cluster_map.__le__, float(42))
def test_cluster_map_centroid_attributes_and_constructor():
clusters = ClusterMapCentroid()
assert_array_equal(clusters.centroids, [])
assert_raises(AttributeError, setattr, clusters, 'centroids', [])
def test_cluster_map_centroid_add_cluster():
clusters = ClusterMapCentroid()
centroids = []
for i in range(3):
cluster = ClusterCentroid(centroid=np.zeros_like(features))
centroids.append(np.zeros_like(features))
for id_data in range(2*i):
centroids[-1] = (centroids[-1]*id_data + (id_data+1)*features) / (id_data+1)
cluster.assign(id_data, (id_data+1)*features)
cluster.update()
clusters.add_cluster(cluster)
assert_array_equal(cluster.centroid, centroids[-1])
assert_equal(type(cluster), ClusterCentroid)
assert_equal(cluster, clusters[-1])
assert_equal(type(clusters.centroids), list)
assert_array_equal(list(itertools.chain(*clusters.centroids)), list(itertools.chain(*centroids)))
# Check adding features of different sizes (shorter and longer)
features_shape_short = (1, features_shape[1]-3)
features_too_short = np.ones(features_shape_short, dtype=dtype)
assert_raises(ValueError, cluster.assign, 123, features_too_short)
features_shape_long = (1, features_shape[1]+3)
features_too_long = np.ones(features_shape_long, dtype=dtype)
assert_raises(ValueError, cluster.assign, 123, features_too_long)
def test_cluster_map_centroid_remove_cluster():
clusters = ClusterMapCentroid()
centroid1 = np.random.rand(*features_shape).astype(dtype)
cluster1 = ClusterCentroid(centroid1, indices=[1])
clusters.add_cluster(cluster1)
centroid2 = np.random.rand(*features_shape).astype(dtype)
cluster2 = ClusterCentroid(centroid2, indices=[1, 2])
clusters.add_cluster(cluster2)
centroid3 = np.random.rand(*features_shape).astype(dtype)
cluster3 = ClusterCentroid(centroid3, indices=[1, 2, 3])
clusters.add_cluster(cluster3)
assert_equal(len(clusters), 3)
clusters.remove_cluster(cluster2)
assert_equal(len(clusters), 2)
assert_array_equal(list(itertools.chain(*clusters)), list(itertools.chain(*[cluster1, cluster3])))
assert_array_equal(clusters.centroids, np.array([centroid1, centroid3]))
assert_equal(clusters[0], cluster1)
assert_equal(clusters[1], cluster3)
clusters.remove_cluster(cluster3)
assert_equal(len(clusters), 1)
assert_array_equal(list(itertools.chain(*clusters)), list(cluster1))
assert_array_equal(clusters.centroids, np.array([centroid1]))
assert_equal(clusters[0], cluster1)
clusters.remove_cluster(cluster1)
assert_equal(len(clusters), 0)
assert_array_equal(list(itertools.chain(*clusters)), [])
assert_array_equal(clusters.centroids, [])
def test_cluster_map_centroid_iter():
rng = np.random.RandomState(42)
nb_clusters = 11
cluster_map = ClusterMapCentroid()
clusters = []
for i in range(nb_clusters):
new_centroid = np.zeros_like(features)
new_cluster = ClusterCentroid(new_centroid, indices=rng.randint(0, len(data), size=10))
cluster_map.add_cluster(new_cluster)
clusters.append(new_cluster)
assert_true(all([c1 is c2 for c1, c2 in zip(cluster_map.clusters, clusters)]))
assert_array_equal(cluster_map, clusters)
assert_array_equal(cluster_map.clusters, clusters)
assert_array_equal(cluster_map, [cluster.indices for cluster in clusters])
# Set refdata
cluster_map.refdata = data
assert_array_equal(cluster_map, [[data[i] for i in cluster.indices] for cluster in clusters])
def test_cluster_map_centroid_getitem():
nb_clusters = 11
indices = list(range(len(data)))
np.random.shuffle(indices)  # Non-trivial ordering
advanced_indices = indices + [0, 1, 2, -1, -2, -3]
cluster_map = ClusterMapCentroid()
clusters = []
for i in range(nb_clusters):
centroid = np.zeros_like(features)
cluster = ClusterCentroid(centroid)
cluster.id = cluster_map.add_cluster(cluster)
clusters.append(cluster)
# Test indexing
for i in advanced_indices:
assert_equal(cluster_map[i], clusters[i])
# Test advanced indexing
assert_array_equal(cluster_map[advanced_indices], [clusters[i] for i in advanced_indices])
# Test index out of bounds
assert_raises(IndexError, cluster_map.__getitem__, len(clusters))
assert_raises(IndexError, cluster_map.__getitem__, -len(clusters)-1)
# Test slicing and negative indexing
assert_equal(cluster_map[-1], clusters[-1])
assert_array_equal(cluster_map[::2], clusters[::2])
assert_arrays_equal(cluster_map[::-1], clusters[::-1])
assert_arrays_equal(cluster_map[:-1], clusters[:-1])
assert_arrays_equal(cluster_map[1:], clusters[1:])
def test_cluster_map_centroid_comparison_with_int():
clusters1_indices = range(10)
clusters2_indices = range(10, 15)
clusters3_indices = [15]
# Build a test ClusterMapCentroid
centroid = np.zeros_like(features)
cluster1 = ClusterCentroid(centroid.copy())
for i in clusters1_indices:
cluster1.assign(i, features)
cluster2 = ClusterCentroid(centroid.copy())
for i in clusters2_indices:
cluster2.assign(i, features)
cluster3 = ClusterCentroid(centroid.copy())
for i in clusters3_indices:
cluster3.assign(i, features)
# Update centroids
cluster1.update()
cluster2.update()
cluster3.update()
clusters = ClusterMapCentroid()
clusters.add_cluster(cluster1)
clusters.add_cluster(cluster2)
clusters.add_cluster(cluster3)
subset = clusters < 5
assert_equal(subset.sum(), 1)
assert_array_equal(list(clusters[subset][0]), clusters3_indices)
subset = clusters <= 5
assert_equal(subset.sum(), 2)
assert_array_equal(list(clusters[subset][0]), clusters2_indices)
assert_array_equal(list(clusters[subset][1]), clusters3_indices)
subset = clusters == 5
assert_equal(subset.sum(), 1)
assert_array_equal(list(clusters[subset][0]), clusters2_indices)
subset = clusters != 5
assert_equal(subset.sum(), 2)
assert_array_equal(list(clusters[subset][0]), clusters1_indices)
assert_array_equal(list(clusters[subset][1]), clusters3_indices)
subset = clusters > 5
assert_equal(subset.sum(), 1)
assert_array_equal(list(clusters[subset][0]), clusters1_indices)
subset = clusters >= 5
assert_equal(subset.sum(), 2)
assert_array_equal(list(clusters[subset][0]), clusters1_indices)
assert_array_equal(list(clusters[subset][1]), clusters2_indices)
def test_subclassing_clustering():
class SubClustering(Clustering):
def cluster(self, data, ordering=None):
pass
clustering_algo = SubClustering()
assert_raises(NotImplementedError, super(SubClustering, clustering_algo).cluster, None)
if __name__ == '__main__':
run_module_suite()
| {
"content_hash": "c4e4e357a43c33f2a5a387cbb6a3679a",
"timestamp": "",
"source": "github",
"line_count": 728,
"max_line_length": 102,
"avg_line_length": 34.61126373626374,
"alnum_prop": 0.680080962019288,
"repo_name": "rfdougherty/dipy",
"id": "24b44f1dae65e238cf432f7c2474f980f4b16f58",
"size": "25198",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "dipy/segment/tests/test_clustering.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2694"
},
{
"name": "Makefile",
"bytes": "3639"
},
{
"name": "Python",
"bytes": "2301531"
}
],
"symlink_target": ""
} |
import os, psycopg2, urllib.parse
from flask import Flask, request, g, url_for, render_template
from contextlib import closing
app = Flask(__name__)
app.config.from_object(__name__)
def connect_db():
urllib.parse.uses_netloc.append("postgres")
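# DATABASE_URL is expected in the Heroku-style form
# postgres://user:password@host:port/dbname; urlparse splits it into the
# pieces psycopg2 needs.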
url = urllib.parse.urlparse(os.environ["DATABASE_URL"])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
return conn
def init_db():
with closing(connect_db()) as db:
with app.open_resource('populate_db.sql', mode='r') as f:
# psycopg2 cursors have no executescript() (that is sqlite3 API);
# execute() can run the whole SQL script in one call.
db.cursor().execute(f.read())
db.commit()
@app.before_request
def before_request():
g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
db.close()
@app.route('/')
def select_team():
cur = g.db.cursor()
cur.execute("select teamabbr, teamfull from Dates order by teamfull;")
teamData = [dict(abbr=row[0], full=row[1]) for row in cur.fetchall()]
return render_template('select_team.html', teams=teamData)
@app.route('/countdown')
def show_countdown():
cur = g.db.cursor()
cur.execute("select * from Dates where teamabbr = '%s';" % request.args.get('team'))
data = cur.fetchone()
teamData = dict(abbr=data[0], full=data[1], nickname=data[2], pcReport=data[3],
exOpener=data[4], rsOpener=data[5], background_color=data[6], text_color=data[7])
return render_template('countdown.html', team=teamData)
if __name__ == '__main__':
app.run()
| {
"content_hash": "de2d19f4109281180af3de58f34d6119",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 85,
"avg_line_length": 28.471698113207548,
"alnum_prop": 0.6891981444665342,
"repo_name": "drewdez/mlb-countdown",
"id": "8a3c7ac40e06116ed9310e2328e0f3535bedd5f9",
"size": "1519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mlb_countdown.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "153"
},
{
"name": "HTML",
"bytes": "3450"
},
{
"name": "JavaScript",
"bytes": "784"
},
{
"name": "Python",
"bytes": "6215"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('college', models.CharField(choices=[('CAMD', 'CAMD'), ('CCIS', 'CCIS'), ('COS', 'COS'), ('CSSH', 'CSSH'), ('BOUVE', 'BOUVE'), ('DMSB', 'DMSB'), ('COE', 'COE'), ('LAW', 'LAW'), ('CPS', 'CPS'), ('PROVOST', 'PROVOST')], default='NONE', max_length=7)),
('course_number', models.CharField(max_length=10, null=True)),
('CRN', models.CharField(max_length=5, unique=True, validators=[django.core.validators.RegexValidator('^[0-9]*$', 'only numbers allowed')])),
('section', models.IntegerField(null=True)),
],
),
migrations.CreateModel(
name='Faculty',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Faculty Member',
'verbose_name_plural': 'Faculty Members',
},
),
migrations.CreateModel(
name='Partner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='New Partner Organization', max_length=100, null=True, unique=True)),
('is_active', models.BooleanField(default=True)),
('courses', models.ManyToManyField(to='submit_reports.Course')),
],
),
migrations.CreateModel(
name='Staff',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('courses', models.ManyToManyField(to='submit_reports.Course')),
('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Teaching Assistant',
'verbose_name_plural': 'Teaching Assistants',
},
),
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('grad_year', models.CharField(max_length=4, null=True, validators=[django.core.validators.RegexValidator('^[0-9]*$', 'only numbers allowed')])),
('college', models.CharField(choices=[('CAMD', 'CAMD'), ('CCIS', 'CCIS'), ('COS', 'COS'), ('CSSH', 'CSSH'), ('BOUVE', 'BOUVE'), ('DMSB', 'DMSB'), ('COE', 'COE'), ('LAW', 'LAW'), ('CPS', 'CPS'), ('PROVOST', 'PROVOST')], default='NONE', max_length=7)),
('courses', models.ManyToManyField(related_name='students', to='submit_reports.Course')),
('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='SubmitReport',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=30)),
('last_name', models.CharField(max_length=30)),
('start_date', models.DateField(default=None)),
('end_date', models.DateField(default=None)),
('start_time', models.TimeField(default=None)),
('end_time', models.TimeField(default=None)),
('service_type', models.CharField(choices=[('DIRECT_SERVICE', 'Direct Service'), ('TRAINING', 'Trainings & Orientations'), ('IND_RESEARCH', 'Individual Research & Planning'), ('TEAM_RESEARCH', 'Team Research & Planning')], default='default', max_length=14, null=True)),
('status', models.CharField(choices=[('PENDING', 'PENDING'), ('APPROVED', 'APPROVED'), ('REJECTED', 'REJECTED')], default='PENDING', max_length=8)),
('summary', models.CharField(blank=True, max_length=150, null=True)),
('courses', models.ManyToManyField(blank=True, to='submit_reports.Course')),
('partner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='submit_reports.Partner')),
('submitter', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='submit_reports.Student')),
],
options={
'verbose_name': 'Submitted Time Sheet',
'verbose_name_plural': 'Submitted Time Sheets',
},
),
migrations.AddField(
model_name='course',
name='instructor',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='submit_reports.Faculty'),
),
]
| {
"content_hash": "7b48c8d7162cbacfa202d64c5de93742",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 285,
"avg_line_length": 56.72164948453608,
"alnum_prop": 0.5732460923300617,
"repo_name": "ServiceLearningB/ServiceLearningNew",
"id": "63a888474c2bc9c711d0e91272d9ce6bae84280f",
"size": "5574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "submit_reports/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "64400"
},
{
"name": "HTML",
"bytes": "10718"
},
{
"name": "JavaScript",
"bytes": "112369"
},
{
"name": "Python",
"bytes": "32429"
},
{
"name": "Shell",
"bytes": "132"
}
],
"symlink_target": ""
} |
__author__ = 'ar'
import glob
import json
import os
from functools import wraps
from app.backend.core import utils as dlsutils
from app.backend.core.models.cfg import CFG_MODEL, CFG_SOLVER, CFG_MODEL_TRAIN, CFG_PROGRESS, \
PREFIX_SNAPSHOT, EXT_MODEL_WEIGHTS, PREFIX_TASKS_DIR, CFG_EVAL_ROC, PREFIX_EVAL_ROC_DIR, CFG_MODEL_NETWORK, PREFIX_EVAL_FS_DIR
from app.backend.dataset import api as dbapi
from flow_parser import DLSDesignerFlowsParser
from ..utils import getDateTimeForConfig
####################################
class ModelTaskDirBuilder:
@staticmethod
def buildModelTrainTaskDir(cfgModel):
#
if not isinstance(cfgModel, dict):
with open(cfgModel, 'r') as f:
cfgModel = json.load(f)
#
modelParser = DLSDesignerFlowsParser(cfgModel)
modelTrainer, solverConfig = modelParser.buildKerasTrainer()
#
taskId=dlsutils.getUniqueTaskId(PREFIX_TASKS_DIR)
dirWithModels = dlsutils.getPathForModelsDir()
dirWithDatasets = dlsutils.getPathForDatasetDir()
dirTaskOut = os.path.join(dirWithModels, taskId)
#
datasetId = solverConfig['dataset-id']
dirDataset = os.path.join(dirWithDatasets, datasetId)
dlsutils.makeDirIfNotExists(dirTaskOut)
#
# modelAdjusted = modelTrainer.adjustModelInputOutput2DBData(modelTrainer.model, dirDataset)
modelAdjusted = modelTrainer.model
foutConfigModel = os.path.join(dirTaskOut, CFG_MODEL_TRAIN)
foutConfigNetwork = os.path.join(dirTaskOut, CFG_MODEL_NETWORK)
foutConfigSolver = os.path.join(dirTaskOut, CFG_SOLVER)
foutConfig = os.path.join(dirTaskOut, CFG_MODEL)
with open(foutConfigNetwork, 'w') as f:
f.write(json.dumps(cfgModel, indent=4))
with open(foutConfigModel, 'w') as f:
f.write(modelAdjusted.to_json(sort_keys=True, indent=4, separators=(',', ': ')))
with open(foutConfigSolver, 'w') as f:
f.write(json.dumps(solverConfig, indent=4))
# prepare basic model config
tdateTime = getDateTimeForConfig()
if datasetId in dbapi.datasetWatcher.dictDbInfo:
dbName = dbapi.datasetWatcher.dictDbInfo[datasetId].cfg.getDBName()
else:
dbName = 'Unknown DB-Name'
modelConfig = {
'id': taskId,
'dataset-id': datasetId,
'dataset-name': dbName,
'date': tdateTime['date'],
'time': tdateTime['time'],
'type': 'image2d-classification',
'name': cfgModel['name'],
'network': cfgModel['name'],
'description': cfgModel['description']
}
with open(foutConfig, 'w') as f:
f.write(json.dumps(modelConfig, indent=4))
return (taskId, dirTaskOut)
####################################
class ModelInfo:
dirModel=None
pathCfg=None
pathModelCfg=None
pathSolverCfg=None
cfgDict=None
# Decorator that guards API methods until the model info has been loaded
def checkInit(fun):
@wraps(fun)
def wrapped(inst, *args, **kwargs):
if inst.isInitialized():
return fun(inst, *args, **kwargs)
else:
inst.raiseErrorNotInitialized()
return wrapped
def __init__(self, dirModelTask):
self.cleanState()
self.dirModel = dirModelTask
# self.loadModelInfoFromDir(dirModelTask)
def cleanState(self):
self.dirModel = None
self.pathCfg = None
self.pathModelCfg = None
self.pathSolverCfg = None
self.cfgDict = None
def loadModelInfoById(self, modelId):
dirWithModels = dlsutils.getPathForModelsDir()
tdirModel = os.path.join(dirWithModels, modelId)
self.loadModelInfoFromDir(tdirModel)
def loadModelInfoFromDir(self, paramModelDir=None):
if paramModelDir is not None:
self.dirModel = paramModelDir
dlsutils.checkFilePathNotFoundError(self.dirModel, isDir=True)
#
self.pathCfg = os.path.join(self.dirModel, CFG_MODEL)
self.pathModelCfg = os.path.join(self.dirModel, CFG_MODEL_TRAIN)
self.pathSolverCfg = os.path.join(self.dirModel, CFG_SOLVER)
self.pathProgress = os.path.join(self.dirModel, CFG_PROGRESS)
#
dlsutils.checkFilePathNotFoundError(self.pathCfg)
dlsutils.checkFilePathNotFoundError(self.pathModelCfg)
dlsutils.checkFilePathNotFoundError(self.pathSolverCfg)
#
with open(self.pathCfg, 'r') as f:
tmpCfg = json.load(f)
with open(self.pathModelCfg, 'r') as f:
tmpModelCfg = json.load(f)
with open(self.pathSolverCfg, 'r') as f:
tmpSolverCfg = json.load(f)
progressJson = None
if os.path.isfile(self.pathProgress):
with open(self.pathProgress, 'r') as f:
progressJson = json.load(f)
sizeModelDir = dlsutils.getDirectorySizeInBytes(self.dirModel)
sizeModelStr = dlsutils.humanReadableSize(sizeModelDir)
tmpCfg['size'] = {
'bytes': sizeModelDir,
'str': sizeModelStr
}
listSnapshots = glob.glob('%s/%s*.%s' % (self.dirModel, PREFIX_SNAPSHOT, EXT_MODEL_WEIGHTS))
lstSnapshotsId = [os.path.splitext(os.path.basename(xx))[0] for xx in listSnapshots]
lstIdROC=self.getListIdROC()
self.cfgDict = {
'info': tmpCfg,
'solver': tmpSolverCfg,
'snapshots': lstSnapshotsId,
'progress': progressJson,
'rocid': lstIdROC
}
def isInitialized(self):
return (self.cfgDict is not None)
def toString(self):
if self.isInitialized():
tstr = '%s (%s)' % (self.getName(), self.getId())
else:
tstr = 'ModelInfo is not initialized!'
return tstr
def __str__(self):
return self.toString()
def __repr__(self):
return self.toString()
#api
@checkInit
def getId(self):
return self.cfgDict['info']['id']
@checkInit
def getName(self):
return self.cfgDict['info']['name']
@checkInit
def getTraindedSnapshots(self):
return self.cfgDict['snapshots']
@checkInit
def getConfig(self):
return self.cfgDict
@checkInit
def getInfo(self):
return self.cfgDict['info']
@checkInit
def getConfigJson(self):
return json.dumps(self.cfgDict, indent=4)
def getListIdROC(self):
tret = []
lstDirROC = glob.glob('%s/%s*' % (self.dirModel, PREFIX_EVAL_ROC_DIR))
tmplLen=len(PREFIX_EVAL_ROC_DIR)+1
for ll in lstDirROC:
fnROC = os.path.join(ll, CFG_EVAL_ROC)
if os.path.isfile(fnROC):
# FIXME: is this a good solution?
rocId = os.path.basename(ll)
if len(rocId)>tmplLen:
tret.append(rocId[tmplLen:])
return tret
def getDataROC(self):
tret=[]
lstDirROC=glob.glob('%s/%s*' % (self.dirModel, PREFIX_EVAL_ROC_DIR))
for ll in lstDirROC:
fnROC=os.path.join(ll, CFG_EVAL_ROC)
if os.path.isfile(fnROC):
with open(fnROC,'r') as f:
tdataJson = json.load(f)
tret.append(tdataJson)
return tret
def getFeatureSpace(self):
lstDirROC=glob.glob('%s/%s*' % (self.dirModel, PREFIX_EVAL_FS_DIR))
lstDirROC.sort()
if lstDirROC:
path = lstDirROC[-1]
fullPath = os.path.join(path, "fspace-train.json")
if os.path.isfile(fullPath):
with open(fullPath, 'r') as f:
return json.load(f)
return []
class ModelsWatcher:
dirModels = None
dictModelsInfo = {}
def __init__(self, pathDir=None):
if pathDir is None:
self.dirModels = dlsutils.getPathForModelsDir()
else:
self.dirModels = pathDir
def refreshModelsInfo(self):
if os.path.isdir(self.dirModels):
self.dictModelsInfo = {}
lstModelsDir = glob.glob('%s/mdltask-*' % self.dirModels)
for ii, pp in enumerate(lstModelsDir):
tmpModelInfo = ModelInfo(pp)
try:
tmpModelInfo.loadModelInfoFromDir()
if tmpModelInfo.isInitialized():
self.dictModelsInfo[tmpModelInfo.getId()] = tmpModelInfo
except Exception as err:
print ('ERROR::ModelsWatcher:refreshModelsInfo() Model [%s] is invalid \n\tmsg: %s' % (pp, err))
else:
raise Exception("Can't find directory with models [%s]" % self.dirModels)
def toString(self):
tstr = '%s' % self.dictModelsInfo.values()
return tstr
def __str__(self):
return self.toString()
def __repr__(self):
return self.toString()
#api
def getModelsInfoAsList(self):
tret = []
for mdl in self.dictModelsInfo.values():
tret.append(mdl.getConfig())
return tret
def getModelROC(self, modelId):
if modelId in self.dictModelsInfo:
return self.dictModelsInfo[modelId].getDataROC()
else:
raise Exception('Unknown model ID [%s]' % modelId)
def getFeatureSpace(self, modelId):
if modelId in self.dictModelsInfo:
return self.dictModelsInfo[modelId].getFeatureSpace()
else:
raise Exception('Unknown model ID [%s]' % modelId)
####################################
if __name__ == '__main__':
pass | {
"content_hash": "93b14dac9f77cb6a0dbf3231e02a6122",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 130,
"avg_line_length": 38.1796875,
"alnum_prop": 0.5878862287702067,
"repo_name": "SummaLabs/DLS",
"id": "3f721065c0cd33da590da05dcc490f5eb95f7f59",
"size": "9816",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/backend/core/models/mdlpreview.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28477"
},
{
"name": "HTML",
"bytes": "146817"
},
{
"name": "JavaScript",
"bytes": "491364"
},
{
"name": "Jupyter Notebook",
"bytes": "10111"
},
{
"name": "Protocol Buffer",
"bytes": "115393"
},
{
"name": "Python",
"bytes": "877535"
},
{
"name": "Shell",
"bytes": "7969"
}
],
"symlink_target": ""
} |
"""
sudo raspi-config
enable spi
spi documentation
http://tightdev.net/SpiDev_Doc.pdf
using an opamp to amplify audio line
http://www.instructables.com/id/Arduino-Audio-Input/step3/Non-Inverting-Amplifier/
"""
# works with MCP3008, ch0
# returns x/10ths of a volt
import spidev
import time
import csv
spi=spidev.SpiDev() # create a spi object
spiBus = 0 # spi port 0
spiSpeed = 200000
spiDeviceCh = 0 # GPIO CE0
spiDevice = spi.open(spiBus,spiDeviceCh)
spi.max_speed_hz=spiSpeed # mcp3008 requires >10khz
# instructions for this value are found in the MCP3008 datasheet
# Table 5-2: Configure bits for the MCP3008
spiStart = 0b00000001
spiControl = 0b00001000 # single-ended mcp3008 ch0 (reassigned in the loop below)
""" other spi control values to try
0b1000 0000 # single-end ch0
0b1001 0000 # single-end ch1
0b0000 0000 # differential ch0 = in+
0b0001 0000 # differential ch1 = in+
"""
spiControlList = [0b10000000,0b10010000,0b00000000,0b00010000]
# spiControlList = [0b10000000]
spiPlaceholder = 0b00000000
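# Illustrative helper (not in the original script): per the MCP3008
# datasheet, the 10-bit sample comes back in the low two bits of the second
# response byte plus all eight bits of the third, so a common way to recover
# the raw ADC value from spi.xfer()'s 3-byte reply is:
def adc_value(resp):
    return ((resp[1] & 0b11) << 8) | resp[2]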
# to_send = [spiControl,0x02]
# to_send = [spiStart,spiControl,spiPlaceholder]
#with open('spiCSVCapture.csv', 'w') as csvfile:
csvfile = open('spiCSVCapture.csv', 'w')
fieldnames = ['SpiControl', 'Resp Byte One','Resp Byte Two','Resp Byte Three']
CSVwriter = csv.DictWriter(csvfile, fieldnames=fieldnames)
CSVwriter.writeheader()
try:
while True:
# time.sleep(1)
# resp = spi.xfer(to_send)
# print (resp)
for spiControl in spiControlList:
to_send = [spiStart,spiControl,spiPlaceholder]
resp = spi.xfer(to_send)
# print (bin(spiControl) + " - " + str(resp))
CSVwriter.writerow({'SpiControl': bin(spiControl), 'Resp Byte One' : str(resp[0]),'Resp Byte Two' : str(resp[1]),'Resp Byte Three' : str(resp[2])})
time.sleep(.25)
except KeyboardInterrupt: #control-c
spi.close() # close the spi device
| {
"content_hash": "430b8d5bce05ba669492fa5c756e2896",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 159,
"avg_line_length": 30.285714285714285,
"alnum_prop": 0.6939203354297694,
"repo_name": "mnr/rubberfish",
"id": "e4e7f768c908f29bde9ae9f5f0eaafb2c5cc7afc",
"size": "1908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/spi_testing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57058"
},
{
"name": "Shell",
"bytes": "6072"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../exts'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['pngmath', 'blog']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GSoC Website'
copyright = u'2011, PCL'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
#html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GSoCWebsitedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'GSoCWebsite.tex', u'GSoC Website Documentation',
u'PCL', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gsocwebsite', u'GSoC Website Documentation',
[u'PCL'], 1)
]
html_sidebars = {
'**': [],
'using/windows': [],
}
html_add_permalinks = None
needs_sphinx = '1.0'
file_insertion_enabled = True
raw_enabled = True
| {
"content_hash": "6e47f0310882e24ff6ace3504e368761",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 80,
"avg_line_length": 31.685446009389672,
"alnum_prop": 0.703659801452067,
"repo_name": "PointCloudLibrary/blog",
"id": "691a7b33af4441aaf418bfc594d41f8752d4e334",
"size": "7172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blogweb/gsoc/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "27327"
},
{
"name": "CMake",
"bytes": "1077"
},
{
"name": "CSS",
"bytes": "280284"
},
{
"name": "HTML",
"bytes": "1169773"
},
{
"name": "Makefile",
"bytes": "5429"
},
{
"name": "Python",
"bytes": "125176"
},
{
"name": "TeX",
"bytes": "8253"
}
],
"symlink_target": ""
} |
"""Basic Network packet types.
Defines and implements basic network packet types, such as Ethernet and LLDP.
"""
# System imports
from copy import deepcopy
from enum import IntEnum
# Local source tree imports
from pyof.foundation.base import GenericStruct
from pyof.foundation.basic_types import (
BinaryData, FixedTypeList, HWAddress, IPAddress, IPv6Address, UBInt8,
UBInt16, UBInt32)
from pyof.foundation.exceptions import PackException, UnpackException
__all__ = ('ARP', 'Ethernet', 'EtherType', 'GenericTLV', 'IPv4', 'VLAN',
'TLVWithSubType', 'LLDP')
# NETWORK CONSTANTS AND ENUMS
class EtherType(IntEnum):
"""Enumeration with IEEE Ethernet types.
Items are added to this enumeration as we need them.
If you need one EtherType that is not listed below, please, send us a Pull
Request with the addition.
Ref: http://standards-oui.ieee.org/ethertype/eth.txt
"""
#: Internet Protocol version 4 (IPv4)
IPV4 = 0x0800
#: Address Resolution Protocol (ARP)
ARP = 0x0806
#: Reverse Address Resolution Protocol
RARP = 0x8035
#: VLAN-tagged frame (IEEE 802.1Q) and Shortest Path Bridging IEEE 802.1aq
#: with NNI compatibility[8]
#: IEEE Std 802.1Q - Customer VLAN Tag Type
VLAN = 0x8100
#: Internet Protocol Version 6 (IPv6)
IPV6 = 0x86DD
#: Ethernet flow control
ETHERNET_FLOW_CONTROL = 0x8808
#: MPLS (multiprotocol label switching) label stack - unicast
#: reference: RFC 3032 URL: ftp://ftp.rfc-editor.org/in-notes/rfc3032.txt
MPLS_UNICAST = 0x8847
#: MPLS (multiprotocol label switching) label stack - multicast
#: reference: RFC 3032 URL: ftp://ftp.rfc-editor.org/in-notes/rfc3032.txt
MPLS_MULTICAST = 0x8848
#: Link Layer Discovery Protocol (LLDP)
LLDP = 0x88CC
#: VLAN-tagged (IEEE 802.1Q) frame with double tagging
#: Std 802.1Q Service VLAN tag identifier
VLAN_QINQ = 0x88a8
class ARP(GenericStruct):
"""ARP packet "struct".
Contains fields for an ARP packet's header and data.
Designed for Ethernet and IPv4 only: needs to have some attributes changed
for other HTYPE and PTYPE implementations.
Must be encapsulated inside an Ethernet frame.
"""
htype = UBInt16()
ptype = UBInt16()
hlen = UBInt8()
plen = UBInt8()
oper = UBInt16()
sha = HWAddress()
spa = IPAddress()
tha = HWAddress()
tpa = IPAddress()
def __init__(self, htype=1, ptype=EtherType.IPV4, hlen=6, plen=4, oper=1,
sha='00:00:00:00:00:00', spa='0.0.0.0',
tha="00:00:00:00:00:00", tpa='0.0.0.0'):
"""Create an ARP with the parameters below.
Args:
htype (int): Hardware protocol type. Defaults to 1 for Ethernet.
ptype (int): Network protocol type. Defaults to 0x800 for IPv4.
hlen (int): Length of the hardware address. Defaults to 6 for MAC
addresses.
plen (int): Length of the networking protocol address. Defaults to
4 for IPv4 addresses.
oper (int): Determines the operation for this ARP packet. Must be 1
for ARP request or 2 for ARP reply. Defaults to 1.
sha (str): Sender hardware address. Defaults to
'00:00:00:00:00:00'.
spa (str): Sender protocol address. Defaults to '0.0.0.0'.
tha (str): Target hardware address. Defaults to
'00:00:00:00:00:00'.
tpa (str): Target protocol address. Defaults to '0.0.0.0'.
"""
super().__init__()
self.htype = htype
self.ptype = ptype
self.hlen = hlen
self.plen = plen
self.oper = oper
self.sha = sha
self.spa = spa
self.tha = tha
self.tpa = tpa
def is_valid(self):
"""Assure the ARP contains Ethernet and IPv4 information."""
return self.htype == 1 and self.ptype == EtherType.IPV4
def unpack(self, buff, offset=0):
"""Unpack a binary struct into this object's attributes.
Return the values instead of the lib's basic types.
Check if the protocols involved are Ethernet and IPv4. Other protocols
are currently not supported.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails.
"""
super().unpack(buff, offset)
if not self.is_valid():
raise UnpackException("Unsupported protocols in ARP packet")
class VLAN(GenericStruct):
"""802.1q VLAN header."""
#: tpid (:class:`UBInt16`): Tag Protocol Identifier
tpid = UBInt16(EtherType.VLAN)
#: _tci (:class:`UBInt16`): Tag Control Information - has the
#: Priority Code Point, DEI/CFI bit and the VLAN ID
_tci = UBInt16()
def __init__(self, pcp=None, cfi=None, vid=None):
"""Create a VLAN with the parameters below.
If no arguments are set for a particular instance, it is interpreted as
absence of VLAN information, and the pack() method will return an
empty binary string.
Args:
pcp (int): 802.1p Priority Code Point. Defaults to 0 for Best
Effort Queue.
cfi (int): Canonical Format Indicator. Defaults to 0 for Ethernet.
vid (int): VLAN ID. If no VLAN is specified, value is 0.
"""
super().__init__()
self.tpid = EtherType.VLAN
self.pcp = pcp
self.cfi = cfi
self.vid = vid
def pack(self, value=None):
"""Pack the struct in a binary representation.
Merge some fields to ensure correct packing.
If no arguments are set for a particular instance, it is interpreted as
absence of VLAN information, and the pack() method will return an
empty binary string.
Returns:
bytes: Binary representation of this instance.
"""
if isinstance(value, type(self)):
return value.pack()
if self.pcp is None and self.cfi is None and self.vid is None:
return b''
self.pcp = self.pcp if self.pcp is not None else 0
self.cfi = self.cfi if self.cfi is not None else 0
self.vid = self.vid if self.vid is not None else 0
self._tci = self.pcp << 13 | self.cfi << 12 | self.vid
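# e.g. (illustrative) pcp=5, cfi=0, vid=100 packs the TCI as
# 5 << 13 | 0 << 12 | 100 == 0xA064.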
return super().pack()
def _validate(self):
"""Assure this is a valid VLAN header instance."""
if self.tpid.value not in (EtherType.VLAN, EtherType.VLAN_QINQ):
raise UnpackException
def unpack(self, buff, offset=0):
"""Unpack a binary struct into this object's attributes.
Return the values instead of the lib's basic types.
After unpacking, the absence of a `tpid` value causes the assignment
of None to the field values to indicate that there is no VLAN
information.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails.
"""
super().unpack(buff, offset)
if self.tpid.value:
self._validate()
self.tpid = self.tpid.value
self.pcp = self._tci.value >> 13
self.cfi = (self._tci.value >> 12) & 1
self.vid = self._tci.value & 4095
else:
self.tpid = EtherType.VLAN
self.pcp = None
self.cfi = None
self.vid = None
class ListOfVLAN(FixedTypeList):
"""List of VLAN tags.
Represented by instances of VLAN.
"""
def __init__(self, items=None):
"""Create a ListOfVLAN with the optional parameters below.
Args:
items (:class:`~pyof.foundation.network_types.VLAN`):
Instance or a list of instances.
"""
super().__init__(pyof_class=VLAN, items=items)
class Ethernet(GenericStruct):
"""Ethernet "struct".
Objects of this class represents an ethernet packet. It contains the
'Ethernet header', composed by destination (MAC), source (MAC), type
(EtherType)[1] and the payload of the packet, as binary data.
This class does not consider the Ethernet 'Preamble' or the 'CRC'.
There is also a get_hash method that hashes the binary representation of
the object, giving a unique identifier for each ethernet packet so we can
keep track of ethernet packets being flooded over the network.
[1] EtherTypes:
http://www.iana.org/assignments/ieee-802-numbers/ieee-802-numbers.xhtml#ieee-802-numbers-1
"""
destination = HWAddress()
source = HWAddress()
vlans = ListOfVLAN()
ether_type = UBInt16()
data = BinaryData()
def __init__(self, destination=None, source=None, vlans=None,
ether_type=None, data=b''):
"""Create an instance and set its attributes.
Args:
destination (:class:`~pyof.foundation.basic_types.HWAddress`):
The final destination MAC address.
source (:class:`~pyof.foundation.basic_types.HWAddress`):
The source Mac address of the packet.
ether_type (:class:`~pyof.foundation.basic_types.UBInt16`):
The EtherType of packet.
data (:class:`~pyof.foundation.basic_types.BinaryData`):
The content of the packet in binary format.
"""
super().__init__()
self.destination = destination
self.source = source
self.vlans = ListOfVLAN() if vlans is None else vlans
self.ether_type = ether_type
self.data = data
def get_hash(self):
"""Calculate a hash and returns it.
Returns:
int: Integer value that identifies this instance.
"""
return hash(self.pack())
@staticmethod
def _get_vlan_length(buff):
"""Return the total length of VLAN tags in a given Ethernet buffer."""
length = 0
begin = 12
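# VLAN tags, if any, start right after the destination and source MAC
# addresses (6 + 6 bytes); each 802.1Q/802.1ad tag is 4 bytes long.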
while buff[begin:begin+2] in (EtherType.VLAN.to_bytes(2, 'big'),
                              EtherType.VLAN_QINQ.to_bytes(2, 'big')):
length += 4
begin += 4
return length
def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Ethernet headers may have VLAN tags. If no VLAN tag is found, a
'wildcard VLAN tag' is inserted to assure correct unpacking.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
UnpackException: If there is a struct unpacking error.
"""
begin = offset
vlan_length = self._get_vlan_length(buff)
for attribute_name, class_attribute in self.get_class_attributes():
attribute = deepcopy(class_attribute)
if attribute_name == 'vlans':
attribute.unpack(buff[begin:begin+vlan_length])
else:
attribute.unpack(buff, begin)
setattr(self, attribute_name, attribute)
begin += attribute.get_size()
class GenericTLV(GenericStruct):
"""TLV structure of LLDP packets.
This is a Type, Length and Value (TLV) struct.
The LLDP/TLV definition states that the Type field have 7 bits, while
the length have 9 bits. The Value must be between 0-511 octets.
Internally, on the instances of this class, the Type is a integer
(0-127) and the Length is dynamically calculated based on the current
type and value.
"""
def __init__(self, tlv_type=127, value=None):
"""Create an instance and set its attributes.
Args:
tlv_type (int): Type used by this class. Defaults to 127.
value (:class:`~pyof.foundation.basic_types.BinaryData`):
Value stored by GenericTLV.
"""
super().__init__()
self.tlv_type = tlv_type
self._value = BinaryData() if value is None else value
@property
def value(self):
"""Return the value stored by GenericTLV.
Returns:
:class:`~pyof.foundation.basic_types.BinaryData`:
Value stored by GenericTLV.
"""
return self._value
@property
def length(self):
"""Return the length of value stored by GenericTLV.
Returns:
int: Value length in bytes.
"""
return len(self.value.pack())
@property
def header(self):
"""Header of the TLV Packet.
The header is composed by the Type (7 bits) and Length (9 bits),
summing up 16 bits. To achieve that, we need to do some bitshift
operations.
Returns:
:class:`~pyof.foundation.basic_types.UBInt16`:
Result after all operations.
"""
return UBInt16(((self.tlv_type & 127) << 9) | (self.length & 511))
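# Worked example (illustrative): tlv_type=1 with a 7-byte value yields
# ((1 & 127) << 9) | (7 & 511) == 0x0207, i.e. b'\x02\x07' on the wire.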
def pack(self, value=None):
"""Pack the TLV in a binary representation.
Returns:
bytes: Binary representation of the struct object.
Raises:
:exc:`~.exceptions.ValidationError`: If validation fails.
"""
if value is None:
output = self.header.pack()
output += self.value.pack()
return output
if isinstance(value, type(self)):
return value.pack()
msg = "{} is not an instance of {}".format(value, type(self).__name__)
raise PackException(msg)
def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error.
"""
header = UBInt16()
header.unpack(buff[offset:offset+2])
self.tlv_type = header.value >> 9
length = header.value & 511
begin, end = offset + 2, offset + 2 + length
self._value = BinaryData(buff[begin:end])
def get_size(self, value=None):
"""Return struct size.
Returns:
int: Returns the struct size based on inner attributes.
"""
if isinstance(value, type(self)):
return value.get_size()
return 2 + self.length
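# Round-trip sketch (illustrative, not part of the library):
#
#     tlv = GenericTLV(tlv_type=4, value=BinaryData(b'port-1'))
#     raw = tlv.pack()       # 2-byte header followed by the 6-byte value
#     other = GenericTLV()
#     other.unpack(raw)      # other.tlv_type == 4, other.value holds b'port-1'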
class IPv4(GenericStruct):
"""IPv4 packet "struct".
Contains all fields of an IP version 4 packet header, plus the upper layer
content as binary data.
Some of the fields were merged together because of their size being
inferior to 8 bits. They are represented as a single class attribute, but
pack/unpack methods will take into account the values in individual
instance attributes.
"""
#: _version_ihl (:class:`UBInt8`): IP protocol version + Internet Header
#: Length (words)
_version_ihl = UBInt8()
#: _dscp_ecn (:class:`UBInt8`): Differentiated Services Code Point
#: (ToS - Type of Service) + Explicit Congestion Notification
_dscp_ecn = UBInt8()
#: length (:class:`UBInt16`): IP packet length (bytes)
length = UBInt16()
#: identification (:class:`UBInt16`): Packet ID - common to all fragments
identification = UBInt16()
#: _flags_offset (:class:`UBInt16`): Fragmentation flags + fragmentation
#: offset
_flags_offset = UBInt16()
#: ttl (:class:`UBInt8`): Packet time-to-live
ttl = UBInt8()
#: protocol (:class:`UBInt8`): Upper layer protocol number
protocol = UBInt8()
#: checksum (:class:`UBInt16`): Header checksum
checksum = UBInt16()
#: source (:class:`IPAddress`): Source IPv4 address
source = IPAddress()
#: destination (:class:`IPAddress`): Destination IPv4 address
destination = IPAddress()
#: options (:class:`BinaryData`): IP Options - up to 320 bits, always
#: padded to 32 bits
options = BinaryData()
#: data (:class:`BinaryData`): Packet data
data = BinaryData()
def __init__(self, version=4, ihl=5, dscp=0, ecn=0, length=0,
identification=0, flags=0, offset=0, ttl=255, protocol=0,
checksum=0, source="0.0.0.0", destination="0.0.0.0",
options=b'', data=b''):
"""Create an IPv4 with the parameters below.
Args:
version (int): IP protocol version. Defaults to 4.
ihl (int): Internet Header Length. Default is 5.
dscp (int): Differentiated Service Code Point. Defaults to 0.
ecn (int): Explicit Congestion Notification. Defaults to 0.
length (int): IP packet length in bytes. Defaults to 0.
identification (int): Packet Id. Defaults to 0.
flags (int): IPv4 Flags. Defaults to 0.
offset (int): IPv4 offset. Defaults to 0.
ttl (int): Packet time-to-live. Defaults to 255
protocol (int): Upper layer protocol number. Defaults to 0.
checksum (int): Header checksum. Defaults to 0.
source (str): Source IPv4 address. Defaults to "0.0.0.0"
destination (str): Destination IPv4 address. Defaults to "0.0.0.0"
options (bytes): IP options. Defaults to empty bytes.
data (bytes): Packet data. Defaults to empty bytes.
"""
super().__init__()
self.version = version
self.ihl = ihl
self.dscp = dscp
self.ecn = ecn
self.length = length
self.identification = identification
self.flags = flags
self.offset = offset
self.ttl = ttl
self.protocol = protocol
self.checksum = checksum
self.source = source
self.destination = destination
self.options = options
self.data = data
def _update_checksum(self):
"""Update the packet checksum to enable integrity check."""
source_list = [int(octet) for octet in self.source.split(".")]
destination_list = [int(octet) for octet in
self.destination.split(".")]
source_upper = (source_list[0] << 8) + source_list[1]
source_lower = (source_list[2] << 8) + source_list[3]
destination_upper = (destination_list[0] << 8) + destination_list[1]
destination_lower = (destination_list[2] << 8) + destination_list[3]
block_sum = ((self._version_ihl << 8 | self._dscp_ecn) + self.length +
self.identification + self._flags_offset +
(self.ttl << 8 | self.protocol) + source_upper +
source_lower + destination_upper + destination_lower)
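# Fold any carries above 16 bits back into the low word, then take the
# ones' complement, per the standard Internet checksum (RFC 1071).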
while block_sum > 65535:
carry = block_sum >> 16
block_sum = (block_sum & 65535) + carry
self.checksum = ~block_sum & 65535
def pack(self, value=None):
"""Pack the struct in a binary representation.
Merge some fields to ensure correct packing.
Returns:
bytes: Binary representation of this instance.
"""
# Set the correct IHL based on options size
if self.options:
self.ihl += int(len(self.options) / 4)
# Set the correct packet length based on header length and data
self.length = int(self.ihl * 4 + len(self.data))
self._version_ihl = self.version << 4 | self.ihl
self._dscp_ecn = self.dscp << 2 | self.ecn
self._flags_offset = self.flags << 13 | self.offset
# Set the checksum field before packing
self._update_checksum()
return super().pack()
def unpack(self, buff, offset=0):
"""Unpack a binary struct into this object's attributes.
Return the values instead of the lib's basic types.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails.
"""
super().unpack(buff, offset)
self.version = self._version_ihl.value >> 4
self.ihl = self._version_ihl.value & 15
self.dscp = self._dscp_ecn.value >> 2
self.ecn = self._dscp_ecn.value & 3
self.length = self.length.value
self.identification = self.identification.value
self.flags = self._flags_offset.value >> 13
self.offset = self._flags_offset.value & 8191
self.ttl = self.ttl.value
self.protocol = self.protocol.value
self.checksum = self.checksum.value
self.source = self.source.value
self.destination = self.destination.value
if self.ihl > 5:
options_size = (self.ihl - 5) * 4
self.data = self.options.value[options_size:]
self.options = self.options.value[:options_size]
else:
self.data = self.options.value
self.options = b''
class IPv6(GenericStruct):
"""IPv6 packet "struct".
Contains all fields of an IP version 6 packet header, plus the upper layer
content as binary data.
Some of the fields were merged together because of their size being
inferior to 8 bits. They are represented as a single class attribute, but
pack/unpack methods will take into account the values in individual
instance attributes.
"""
#: _version_tclass_flabel (:class:`UBInt32`): IP protocol version +
#: Traffic Class + Flow Label
_version_tclass_flabel = UBInt32()
#: length (:class:`UBInt16`): Payload length (bytes)
length = UBInt16()
#: next_header (:class:`UBInt8`): Next header
next_header = UBInt8()
#: hop_limit (:class:`UBInt8`): Hop limit
hop_limit = UBInt8()
#: source (:class:`IPv6Address`): Source IPv6 address
source = IPv6Address()
#: destination (:class:`IPv6Address`): Destination IPv6 address
destination = IPv6Address()
#: data (:class:`BinaryData`): Packet data
data = BinaryData()
def __init__(self, version=6, tclass=0, flabel=0, length=0,
next_header=0, hop_limit=255, source="0:0:0:0:0:0:0:0",
destination="0:0:0:0:0:0:0:0", data=b''):
"""Create an IPv6 with the parameters below.
Args:
version (int): IP protocol version. Defaults to 6.
tclass (int): DS (6 bits) + ECN (2 bits). Default is 0.
flabel (int): Flow label. Defaults to 0.
length (int): Payload length in bytes. Defaults to 0.
next_header (int): Type of next header or protocol field.
Defaults to 0.
hop_limit (int): Packet hop limit. Defaults to 255.
source (str): Source IPv6 address.
Defaults to "0:0:0:0:0:0:0:0".
destination (str): Destination IPv6 address.
Defaults to "0:0:0:0:0:0:0:0".
data (bytes): Packet data. Defaults to empty bytes.
"""
super().__init__()
self.version = version
self.tclass = tclass
self.flabel = flabel
self.length = length
self.next_header = next_header
self.hop_limit = hop_limit
self.source = IPv6Address(source)
self.destination = IPv6Address(destination)
self.data = data
def pack(self, value=None):
"""Pack the struct in a binary representation.
Merge some fields to ensure correct packing.
Returns:
bytes: Binary representation of this instance.
"""
# Set the correct packet length based on header length and data
self.length = len(self.data)
_version_tclass = (self.version << 28) | (self.tclass << 20)
self._version_tclass_flabel = _version_tclass | self.flabel
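# e.g. (illustrative) version=6, tclass=0, flabel=0 packs the first
# word as 6 << 28 == 0x60000000.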
return super().pack()
def unpack(self, buff, offset=0):
"""Unpack a binary struct into this object's attributes.
Return the values instead of the lib's basic types.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails.
"""
super().unpack(buff, offset)
self.version = self._version_tclass_flabel.value >> 28
self.tclass = (self._version_tclass_flabel.value >> 20) & 255
self.flabel = self._version_tclass_flabel.value & 1048575
self.length = self.length.value
self.next_header = self.next_header.value
self.hop_limit = self.hop_limit.value
self.source = self.source.value
self.destination = self.destination.value
self.data = self.data.value
class TLVWithSubType(GenericTLV):
"""Modify the :class:`GenericTLV` to a Organization Specific TLV structure.
Beyond the standard TLV (type, length, value), we can also have a more
specific structure, with the :attr:`value` field being split into a
:attr:`sub_type` field and a new :attr:`sub_value` field.
"""
def __init__(self, tlv_type=1, sub_type=7, sub_value=None):
"""Create an instance and set its attributes.
Args:
tlv_type (int): Type used by this class. Defaults to 1.
sub_type (int): Sub type value used by this class. Defaults to 7.
sub_value (:class:`~pyof.foundation.basic_types.BinaryData`):
Data stored by TLVWithSubType. Defaults to empty BinaryData.
"""
super().__init__(tlv_type)
self.sub_type = sub_type
self.sub_value = BinaryData() if sub_value is None else sub_value
@property
def value(self):
"""Return sub type and sub value as binary data.
Returns:
:class:`~pyof.foundation.basic_types.BinaryData`:
BinaryData calculated.
"""
binary = UBInt8(self.sub_type).pack() + self.sub_value.pack()
return BinaryData(binary)
def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error.
"""
header = UBInt16()
header.unpack(buff[offset:offset+2])
self.tlv_type = header.value >> 9
length = header.value & 511
begin, end = offset + 2, offset + 2 + length
sub_type = UBInt8()
sub_type.unpack(buff[begin:begin+1])
self.sub_type = sub_type.value
self.sub_value = BinaryData(buff[begin+1:end])
class LLDP(GenericStruct):
"""LLDP class.
Build a LLDP packet with TLVSubtype and Generic Subtypes.
It contains a chassis_id TLV, a port_id TLV, a TTL (Time to live) and
another TLV to represent the end of the LLDP Packet.
"""
#: chassis_id (:class:`~TLVWithSubType`) with tlv_type = 1 and sub_type = 7
chassis_id = TLVWithSubType(tlv_type=1, sub_type=7)
#: port_id (:class:`TLVWithSubType`) with tlv = 2 and sub_type = 7
port_id = TLVWithSubType(tlv_type=2, sub_type=7)
#: TTL (:class:`GenericTLV`) time is given in seconds, between 0 and 65535,
#: with tlv_type = 3
ttl = GenericTLV(tlv_type=3, value=UBInt16(120))
# We are not using list of tlvs for now
# tlvs = ListOfTLVs()
#: end (:class:`GenericTLV`) with tlv_type = 0
end = GenericTLV(tlv_type=0)
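# Illustrative sketch (not part of the library): an LLDP frame is normally
# carried in an Ethernet frame addressed to the IEEE 802.1AB "nearest
# bridge" multicast MAC; the source address below is a placeholder.
#
#     lldp = LLDP()
#     frame = Ethernet(destination='01:80:c2:00:00:0e',
#                      source='00:15:af:d5:38:98',
#                      ether_type=EtherType.LLDP, data=lldp.pack())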
| {
"content_hash": "8a11a59dba4acebdb88fd55cc986cd06",
"timestamp": "",
"source": "github",
"line_count": 792,
"max_line_length": 94,
"avg_line_length": 35.253787878787875,
"alnum_prop": 0.6019125389491781,
"repo_name": "kytos/python-openflow",
"id": "ae6b1d05d44dca6f641d2e32d358f9b1aa5327f8",
"size": "27921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyof/foundation/network_types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "252"
},
{
"name": "Python",
"bytes": "506669"
}
],
"symlink_target": ""
} |
from dronekit import *
from dronedirect import DroneDirect
# Libraries for UDS socket
import socket
import sys
import math
import json
# Create a UDS socket
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
server_address = '/tmp/bci-data.sock'
print >>sys.stderr, 'Connecting to %s' % server_address
try:
sock.connect(server_address)
except socket.error, msg:
print >>sys.stderr, msg
sys.exit(1)
# Initial setup vars for drone
SIM = False
running = True
data_string = ''
packet_depth = 0
takeoff = False # it has not yet taken off
# Connect to drone
print 'Connecting to drone...'
if SIM:
vehicle = connect('tcp:127.0.0.1:5760', wait_ready=True)
else:
vehicle = connect('0.0.0.0:14550', wait_ready=True) # connecting from GCS
#vehicle = connect('udpout:127.0.0.1:14560', wait_ready=True) #connecting from onboard solo
# Take control of the drone
dd = DroneDirect(vehicle)
dd.take_control()
if SIM:
# Arm and take off the drone - DO NOT USE THIS ON A REAL DRONE, ONLY IN SIMULATION
if vehicle.armed == False:
# Don't let the user try to arm until autopilot is ready
print 'Waiting for vehicle to initialise...'
while not vehicle.is_armable:
time.sleep(1)
vehicle.armed = True
print 'Vehicle Armed'
dd.takeoff()
try:
while running:
# Listen for data
print "Waiting for data..."
data = sock.recv(16)
print "Received!"
# Turn data into nice JSON packages
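# Track '{'/'}' nesting depth so we only json.loads() once a complete
# top-level object has been accumulated from the stream.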
for i in data:
if i == '{':
packet_depth = packet_depth + 1
elif i == '}':
packet_depth = packet_depth - 1
data_string += i
if packet_depth == 0:
# Parse JSON
packet = json.loads(data_string)
# Set up appropriate action
x = 0
y = 0
z = 0
degrees = 0
gimbalPitch = 0
# Mapping of thoughts/actions to copter motions
if packet['action'] == 'xval':
x = packet['power'] # sideways (to the right?)
elif packet['action'] == 'pull':
y = 3 # forward
elif packet['action'] == 'push':
z = 3 # upward
elif packet['action'] == 'yaw':
degrees = int(packet['power'])/10 # rotate yaw neg/pos based on X vector of head motion
print "Rotating at a rate of " + str(degrees)
elif packet['action'] == 'gimbal-pitch':
gimbalPitch = int(packet['power'])/10 # rotate yaw neg/pos based on X vector of head motion
elif packet['action'] == 'neutral':
data_string = ''
continue
else:
print 'Unmapped action: "%s"' % packet['action']
# Move the copter accordingly
if not takeoff:
dd.takeoff(altitude_meters=15)
takeoff = True
else:
# Move according to any thoughts
if (x != 0 or y != 0 or z != 0):
dd.translate(x=x, y=y, z=z)
# Rotate according to head motions
if (degrees):
dd.point(degrees=degrees)
# Reset for next JSON packet
data_string = ''
finally:
dd.release()
| {
"content_hash": "71646218d9caff5a8adcaddd4ceabfb8",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 111,
"avg_line_length": 32.527272727272724,
"alnum_prop": 0.529346003353829,
"repo_name": "Frijol/Drone-BCI",
"id": "8e6676d9b778224ce17046ae665db16f38fbfd19",
"size": "3608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drone-control.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "26954"
},
{
"name": "C++",
"bytes": "76734"
},
{
"name": "Java",
"bytes": "46238"
},
{
"name": "Objective-C",
"bytes": "22930"
},
{
"name": "Objective-C++",
"bytes": "17105"
},
{
"name": "Python",
"bytes": "16535"
}
],
"symlink_target": ""
} |
"""Simple example of video playback.
Usage::
video.py <filename>
See the Programming Guide for a partial list of supported video formats.
"""
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import sys
import pyglet
if len(sys.argv) < 2:
print(__doc__)
sys.exit(1)
source = pyglet.media.load(sys.argv[1])
format = source.video_format
if not format:
print('No video track in this source.')
sys.exit(1)
player = pyglet.media.Player()
player.queue(source)
player.play()
window = pyglet.window.Window(width=format.width, height=format.height)
@window.event
def on_draw():
player.get_texture().blit(0, 0)
pyglet.app.run()
| {
"content_hash": "8bb56f80bb165849c17dda0fb857fe43",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 72,
"avg_line_length": 17.394736842105264,
"alnum_prop": 0.686838124054463,
"repo_name": "bitcraft/pyglet",
"id": "a583a329a0233bee7c08b56ee6845d8f2bea213f",
"size": "2398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/video.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1828"
},
{
"name": "HTML",
"bytes": "1652"
},
{
"name": "JavaScript",
"bytes": "6745"
},
{
"name": "PHP",
"bytes": "2192"
},
{
"name": "Python",
"bytes": "6201398"
},
{
"name": "Shell",
"bytes": "251"
}
],
"symlink_target": ""
} |
from tempest_lib import exceptions as lib_exc
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import test
class TokensV3TestJSON(base.BaseIdentityV3AdminTest):
@test.idempotent_id('0f9f5a5f-d5cd-4a86-8a5b-c5ded151f212')
def test_tokens(self):
# Valid user's token is authenticated
# Create a User
u_name = data_utils.rand_name('user')
u_desc = '%s-description' % u_name
u_email = '%s@testmail.tm' % u_name
u_password = data_utils.rand_name('pass')
user = self.client.create_user(
u_name, description=u_desc, password=u_password,
email=u_email)
self.addCleanup(self.client.delete_user, user['id'])
# Perform Authentication
resp = self.token.auth(user_id=user['id'],
password=u_password).response
subject_token = resp['x-subject-token']
# Perform GET Token
token_details = self.client.get_token(subject_token)
self.assertEqual(resp['x-subject-token'], subject_token)
self.assertEqual(token_details['user']['id'], user['id'])
self.assertEqual(token_details['user']['name'], u_name)
# Perform Delete Token
self.client.delete_token(subject_token)
self.assertRaises(lib_exc.NotFound, self.client.get_token,
subject_token)
@test.idempotent_id('565fa210-1da1-4563-999b-f7b5b67cf112')
def test_rescope_token(self):
"""Rescope a token.
An unscoped token can be requested, that token can be used to request a
scoped token. The scoped token can be revoked, and the original token
used to get a token in a different project.
"""
# Create a user.
user_name = data_utils.rand_name(name='user')
user_password = data_utils.rand_name(name='pass')
user = self.client.create_user(user_name, password=user_password)
self.addCleanup(self.client.delete_user, user['id'])
# Create a couple projects
project1_name = data_utils.rand_name(name='project')
project1 = self.client.create_project(project1_name)
self.addCleanup(self.client.delete_project, project1['id'])
project2_name = data_utils.rand_name(name='project')
project2 = self.client.create_project(project2_name)
self.addCleanup(self.client.delete_project, project2['id'])
# Create a role
role_name = data_utils.rand_name(name='role')
role = self.client.create_role(role_name)
self.addCleanup(self.client.delete_role, role['id'])
# Grant the user the role on both projects.
self.client.assign_user_role(project1['id'], user['id'],
role['id'])
self.client.assign_user_role(project2['id'], user['id'],
role['id'])
# Get an unscoped token.
token_auth = self.token.auth(user_id=user['id'],
password=user_password)
token_id = token_auth.response['x-subject-token']
orig_expires_at = token_auth['token']['expires_at']
orig_issued_at = token_auth['token']['issued_at']
orig_user = token_auth['token']['user']
self.assertIsInstance(token_auth['token']['expires_at'], unicode)
self.assertIsInstance(token_auth['token']['issued_at'], unicode)
self.assertEqual(['password'], token_auth['token']['methods'])
self.assertEqual(user['id'], token_auth['token']['user']['id'])
self.assertEqual(user['name'], token_auth['token']['user']['name'])
self.assertEqual('default',
token_auth['token']['user']['domain']['id'])
self.assertEqual('Default',
token_auth['token']['user']['domain']['name'])
self.assertNotIn('catalog', token_auth['token'])
self.assertNotIn('project', token_auth['token'])
self.assertNotIn('roles', token_auth['token'])
# Use the unscoped token to get a scoped token.
token_auth = self.token.auth(token=token_id,
project_name=project1_name,
project_domain_name='Default')
token1_id = token_auth.response['x-subject-token']
self.assertEqual(orig_expires_at, token_auth['token']['expires_at'],
'Expiration time should match original token')
self.assertIsInstance(token_auth['token']['issued_at'], unicode)
self.assertNotEqual(orig_issued_at, token_auth['token']['issued_at'])
self.assertEqual(set(['password', 'token']),
set(token_auth['token']['methods']))
self.assertEqual(orig_user, token_auth['token']['user'],
'User should match original token')
self.assertIsInstance(token_auth['token']['catalog'], list)
self.assertEqual(project1['id'],
token_auth['token']['project']['id'])
self.assertEqual(project1['name'],
token_auth['token']['project']['name'])
self.assertEqual('default',
token_auth['token']['project']['domain']['id'])
self.assertEqual('Default',
token_auth['token']['project']['domain']['name'])
self.assertEqual(1, len(token_auth['token']['roles']))
self.assertEqual(role['id'], token_auth['token']['roles'][0]['id'])
self.assertEqual(role['name'], token_auth['token']['roles'][0]['name'])
# Revoke the unscoped token.
self.client.delete_token(token1_id)
# Now get another scoped token using the unscoped token.
token_auth = self.token.auth(token=token_id,
project_name=project2_name,
project_domain_name='Default')
self.assertEqual(project2['id'],
token_auth['token']['project']['id'])
self.assertEqual(project2['name'],
token_auth['token']['project']['name'])
| {
"content_hash": "6035bdf398579d36445689f830c07493",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 79,
"avg_line_length": 46.30075187969925,
"alnum_prop": 0.5808704124715817,
"repo_name": "NexusIS/tempest",
"id": "951bc78dd92d6a159100270c37174187b272b676",
"size": "6794",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tempest/api/identity/admin/v3/test_tokens.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2703656"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
} |
"""Django Celery Integration."""
# :copyright: (c) 2009 - 2012 by Ask Solem.
# :license: BSD, see LICENSE for more details.
from __future__ import absolute_import
import os
VERSION = (3, 0, 11)
__version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:])
__author__ = "Ask Solem"
__contact__ = "ask@celeryproject.org"
__homepage__ = "http://celeryproject.org"
__docformat__ = "restructuredtext"
__license__ = "BSD (3 clause)"
# -eof meta-
def setup_loader():
os.environ.setdefault("CELERY_LOADER", "djcelery.loaders.DjangoLoader")
# Importing this module enables the Celery Django loader.
setup_loader()
from celery import current_app as celery # noqa
| {
"content_hash": "5c598ab54ed46523284fd5ebe1fd1225",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 75,
"avg_line_length": 27,
"alnum_prop": 0.677037037037037,
"repo_name": "planorama/django-celery",
"id": "ed1cde7381ee7cfea895c2373f74815bf4826518",
"size": "675",
"binary": false,
"copies": "3",
"ref": "refs/heads/3.0",
"path": "djcelery/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "210848"
},
{
"name": "Shell",
"bytes": "2065"
}
],
"symlink_target": ""
} |
import os
import signal
import sys
import traceback
import threading
from typing import Optional
try:
import PyQt5
except Exception:
sys.exit("Error: Could not import PyQt5 on Linux systems, you may try 'sudo apt-get install python3-pyqt5'")
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtWidgets import (QApplication, QSystemTrayIcon, QWidget, QMenu,
QMessageBox)
from PyQt5.QtCore import QObject, pyqtSignal, QTimer
import PyQt5.QtCore as QtCore
from electrum.i18n import _, set_language
from electrum.plugin import run_hook
from electrum.base_wizard import GoBack
from electrum.util import (UserCancelled, profiler,
WalletFileException, BitcoinException, get_new_wallet_name)
from electrum.wallet import Wallet, Abstract_Wallet
from electrum.logging import Logger
from .installwizard import InstallWizard, WalletAlreadyOpenInMemory
from .util import get_default_language, read_QIcon, ColorScheme, custom_message_box
from .main_window import ElectrumWindow
from .network_dialog import NetworkDialog
from .stylesheet_patcher import patch_qt_stylesheet
class OpenFileEventFilter(QObject):
def __init__(self, windows):
self.windows = windows
super(OpenFileEventFilter, self).__init__()
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.FileOpen:
if len(self.windows) >= 1:
self.windows[0].pay_to_URI(event.url().toEncoded())
return True
return False
class QElectrumApplication(QApplication):
new_window_signal = pyqtSignal(str, object)
class QNetworkUpdatedSignalObject(QObject):
network_updated_signal = pyqtSignal(str, object)
class ElectrumGui(Logger):
@profiler
def __init__(self, config, daemon, plugins):
set_language(config.get('language', get_default_language()))
Logger.__init__(self)
# Uncomment this call to verify objects are being properly
# GC-ed when windows are closed
#network.add_jobs([DebugMem([Abstract_Wallet, SPV, Synchronizer,
# ElectrumWindow], interval=5)])
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_X11InitThreads)
if hasattr(QtCore.Qt, "AA_ShareOpenGLContexts"):
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts)
if hasattr(QGuiApplication, 'setDesktopFileName'):
QGuiApplication.setDesktopFileName('electrum.desktop')
self.gui_thread = threading.current_thread()
self.config = config
self.daemon = daemon
self.plugins = plugins
self.windows = []
self.efilter = OpenFileEventFilter(self.windows)
self.app = QElectrumApplication(sys.argv)
self.app.installEventFilter(self.efilter)
self.app.setWindowIcon(read_QIcon("electrum.png"))
# timer
self.timer = QTimer(self.app)
self.timer.setSingleShot(False)
self.timer.setInterval(500) # msec
self.nd = None
self.network_updated_signal_obj = QNetworkUpdatedSignalObject()
self._num_wizards_in_progress = 0
self._num_wizards_lock = threading.Lock()
# init tray
self.dark_icon = self.config.get("dark_icon", False)
self.tray = QSystemTrayIcon(self.tray_icon(), None)
self.tray.setToolTip('Electrum')
self.tray.activated.connect(self.tray_activated)
self.build_tray_menu()
self.tray.show()
self.app.new_window_signal.connect(self.start_new_window)
self.set_dark_theme_if_needed()
run_hook('init_qt', self)
def set_dark_theme_if_needed(self):
use_dark_theme = self.config.get('qt_gui_color_theme', 'default') == 'dark'
if use_dark_theme:
try:
import qdarkstyle
self.app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
except BaseException as e:
use_dark_theme = False
self.logger.warning(f'Error setting dark theme: {repr(e)}')
# Apply any necessary stylesheet patches
patch_qt_stylesheet(use_dark_theme=use_dark_theme)
# Even if we ourselves don't set the dark theme,
# the OS/window manager/etc might set *a dark theme*.
# Hence, try to choose colors accordingly:
ColorScheme.update_from_widget(QWidget(), force_dark=use_dark_theme)
def build_tray_menu(self):
# Avoid immediate GC of old menu when window closed via its action
if self.tray.contextMenu() is None:
m = QMenu()
self.tray.setContextMenu(m)
else:
m = self.tray.contextMenu()
m.clear()
for window in self.windows:
submenu = m.addMenu(window.wallet.basename())
submenu.addAction(_("Show/Hide"), window.show_or_hide)
submenu.addAction(_("Close"), window.close)
m.addAction(_("Dark/Light"), self.toggle_tray_icon)
m.addSeparator()
m.addAction(_("Exit Electrum"), self.close)
def tray_icon(self):
if self.dark_icon:
return read_QIcon('electrum_dark_icon.png')
else:
return read_QIcon('electrum_light_icon.png')
def toggle_tray_icon(self):
self.dark_icon = not self.dark_icon
self.config.set_key("dark_icon", self.dark_icon, True)
self.tray.setIcon(self.tray_icon())
def tray_activated(self, reason):
if reason == QSystemTrayIcon.DoubleClick:
if all([w.is_hidden() for w in self.windows]):
for w in self.windows:
w.bring_to_top()
else:
for w in self.windows:
w.hide()
def close(self):
for window in self.windows:
window.close()
def new_window(self, path, uri=None):
# Use a signal as can be called from daemon thread
self.app.new_window_signal.emit(path, uri)
def show_network_dialog(self, parent):
if not self.daemon.network:
parent.show_warning(_('You are using Electrum in offline mode; restart Electrum if you want to get connected'), title=_('Offline'))
return
if self.nd:
self.nd.on_update()
self.nd.show()
self.nd.raise_()
return
self.nd = NetworkDialog(self.daemon.network, self.config,
self.network_updated_signal_obj)
self.nd.show()
def _create_window_for_wallet(self, wallet):
w = ElectrumWindow(self, wallet)
self.windows.append(w)
self.build_tray_menu()
# FIXME: Remove in favour of the load_wallet hook
run_hook('on_new_window', w)
w.warn_if_testnet()
w.warn_if_watching_only()
return w
def count_wizards_in_progress(func):
def wrapper(self: 'ElectrumGui', *args, **kwargs):
with self._num_wizards_lock:
self._num_wizards_in_progress += 1
try:
return func(self, *args, **kwargs)
finally:
with self._num_wizards_lock:
self._num_wizards_in_progress -= 1
return wrapper
@count_wizards_in_progress
def start_new_window(self, path, uri, *, app_is_starting=False):
'''Raises the window for the wallet if it is open. Otherwise
opens the wallet and creates a new window for it'''
wallet = None
try:
wallet = self.daemon.load_wallet(path, None)
except BaseException as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot load wallet') + ' (1):\n' + str(e))
# if app is starting, still let wizard to appear
if not app_is_starting:
return
if not wallet:
try:
wallet = self._start_wizard_to_select_or_create_wallet(path)
except (WalletFileException, BitcoinException) as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot load wallet') + ' (2):\n' + str(e))
if not wallet:
return
# create or raise window
try:
for window in self.windows:
if window.wallet.storage.path == wallet.storage.path:
break
else:
window = self._create_window_for_wallet(wallet)
except BaseException as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot create window for wallet') + ':\n' + str(e))
if app_is_starting:
wallet_dir = os.path.dirname(path)
path = os.path.join(wallet_dir, get_new_wallet_name(wallet_dir))
self.start_new_window(path, uri)
return
if uri:
window.pay_to_URI(uri)
window.bring_to_top()
window.setWindowState(window.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
window.activateWindow()
return window
def _start_wizard_to_select_or_create_wallet(self, path) -> Optional[Abstract_Wallet]:
wizard = InstallWizard(self.config, self.app, self.plugins)
try:
path, storage = wizard.select_storage(path, self.daemon.get_wallet)
# storage is None if file does not exist
if storage is None:
wizard.path = path # needed by trustedcoin plugin
wizard.run('new')
storage = wizard.create_storage(path)
else:
wizard.run_upgrades(storage)
except (UserCancelled, GoBack):
return
except WalletAlreadyOpenInMemory as e:
return e.wallet
finally:
wizard.terminate()
# return if wallet creation is not complete
if storage is None or storage.get_action():
return
wallet = Wallet(storage)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
return wallet
def close_window(self, window: ElectrumWindow):
if window in self.windows:
self.windows.remove(window)
self.build_tray_menu()
# save wallet path of last open window
if not self.windows:
self.config.save_last_wallet(window.wallet)
run_hook('on_close_window', window)
self.daemon.stop_wallet(window.wallet.storage.path)
def init_network(self):
# Show network dialog if config does not exist
if self.daemon.network:
if self.config.get('auto_connect') is None:
wizard = InstallWizard(self.config, self.app, self.plugins)
wizard.init_network(self.daemon.network)
wizard.terminate()
def main(self):
try:
self.init_network()
except UserCancelled:
return
except GoBack:
return
except BaseException as e:
self.logger.exception('')
return
self.timer.start()
self.config.open_last_wallet()
path = self.config.get_wallet_path()
if not self.start_new_window(path, self.config.get('url'), app_is_starting=True):
return
signal.signal(signal.SIGINT, lambda *args: self.app.quit())
def quit_after_last_window():
# keep daemon running after close
if self.config.get('daemon'):
return
# check if a wizard is in progress
with self._num_wizards_lock:
if self._num_wizards_in_progress > 0 or len(self.windows) > 0:
return
self.app.quit()
self.app.setQuitOnLastWindowClosed(False) # so _we_ can decide whether to quit
self.app.lastWindowClosed.connect(quit_after_last_window)
def clean_up():
# Shut down the timer cleanly
self.timer.stop()
# clipboard persistence. see http://www.mail-archive.com/pyqt@riverbankcomputing.com/msg17328.html
event = QtCore.QEvent(QtCore.QEvent.Clipboard)
self.app.sendEvent(self.app.clipboard(), event)
self.tray.hide()
self.app.aboutToQuit.connect(clean_up)
# main loop
self.app.exec_()
# on some platforms the exec_ call may not return, so use clean_up()
def stop(self):
self.logger.info('closing GUI')
self.app.quit()
| {
"content_hash": "293bceaf4b20798f13e738de6587fb33",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 143,
"avg_line_length": 38.61309523809524,
"alnum_prop": 0.5917989825805458,
"repo_name": "fujicoin/electrum-fjc",
"id": "0fa9095b130313ec1f625e794be57a288a5bd011",
"size": "14139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electrum/gui/qt/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "7756"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "2929"
},
{
"name": "Makefile",
"bytes": "877"
},
{
"name": "NSIS",
"bytes": "7450"
},
{
"name": "Python",
"bytes": "2346736"
},
{
"name": "Shell",
"bytes": "30493"
}
],
"symlink_target": ""
} |
from flask import Flask, request, url_for, render_template, flash, redirect, abort
from jinja2 import evalcontextfilter, Markup, escape
from flask_mail import Mail, Message
from raven.contrib.flask import Sentry, Client
from projects_controller import ProjectsController
from redirects_controller import RedirectsController
import config
import re
import strings
import atexit
app = Flask(__name__)
app.secret_key = config.SECRET_KEY
app.url_map.strict_slashes = False
app.config.update(config.APP_CONFIG)
app.config.update(config.MAIL_SETTINGS)
mail = Mail(app)
app.config.update(config.SENTRY_SETTINGS)
sentry = Sentry(app)
projects_controller = ProjectsController()
redirects_controller = RedirectsController()
def close_db_conn():
projects_controller.close()
atexit.register(close_db_conn)
_paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}')
@app.template_filter()
@evalcontextfilter
def nl2br(eval_ctx, value):
result = u'\n\n'.join(u'<p>%s</p>' % p.replace('\n', '<br>\n') \
for p in _paragraph_re.split(escape(value)))
if eval_ctx.autoescape:
result = Markup(result)
return result
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html', mixpanel_token=mixpanel_token()), 404
@app.route('/')
def index():
current_projects = projects_controller.get_current_projects()
past_projects = projects_controller.get_past_projects()
return render_template('index.html', current_projects=current_projects, past_projects=past_projects, mixpanel_token=mixpanel_token())
@app.route('/start', methods=['GET', 'POST'])
def start_project():
if request.method == 'GET':
return render_template('start.html', form={}, errors={}, mixpanel_token=mixpanel_token())
form = request.form
errors = {}
if not form['name']:
errors['name'] = strings.ERROR_NO_NAME
if not form['email']:
errors['email'] = strings.ERROR_NO_EMAIL_TO_GET_AHOLD
if not form['ptitle']:
errors['ptitle'] = strings.ERROR_NO_PROJ_TITLE
if not form['desc']:
errors['desc'] = strings.ERROR_NO_PROJ_DESC
if not errors:
subject = strings.SUBJ_PROJ_NEW % form.get('ptitle')
msg = Message(subject)
msg.add_recipient(email_address(config.CONTACT_EMAIL))
msg.html = render_template('mail/start.html', form=form)
msg.body = render_template('mail/start.txt', form=form)
mail.send(msg)
flash(strings.SUCCESS_APP_SUBMITTED, 'success')
return redirect(url_for('index'))
flash(strings.ERROR_NOT_SUBMITTED, 'danger')
return render_template('start.html', form=form, errors=errors, mixpanel_token=mixpanel_token())
@app.route('/<dynamic>', methods=['GET', 'POST'])
def dynamic(dynamic):
# First, test if if it's a project
projects = projects_controller.get_all_projects()
if dynamic in projects:
project_data = projects[dynamic]
past_project_url = project_data.get('past_project_url')
if past_project_url:
# The project is over, we should redirect to the post
return redirect(past_project_url)
else:
return render_project(dynamic, project_data)
redirects = redirects_controller.get_redirects()
if dynamic in redirects:
return redirect(redirects[dynamic])
abort(404)
def render_project(project_name, project_data):
if request.method == 'GET':
return render_template('project.html', project_data=project_data, form={}, errors={}, mixpanel_token=mixpanel_token())
form = request.form
errors = {}
if 'join_email' in form:
if not form['join_email']:
errors['join_email'] = strings.ERROR_NO_EMAIL_TO_GET_AHOLD
if not errors:
subject = strings.SUBJ_PROJ_JOIN_REQUESTED % project_data['name']
msg = Message(subject)
msg.add_recipient(email_address(project_data['leaders'][0]['email']))
msg.html = render_template('mail/join_project.html', form=form)
msg.body = render_template('mail/join_project.txt', form=form)
mail.send(msg)
flash_msg = strings.SUCCESS_PROJ_JOINED % project_data['name']
flash(flash_msg, 'success')
return redirect('/' + project_name)
if 'ask_msg' in form:
if not form['ask_msg']:
errors['ask_msg'] = strings.ERROR_DONT_FORGET_MSG
if not form['ask_email']:
errors['ask_email'] = strings.ERROR_NO_EMAIL_TO_ANSWER
if not errors:
subject = strings.SUBJ_PROJ_QUESTION % project_data['name']
msg = Message(subject, reply_to=form.get('ask_email'))
msg.add_recipient(email_address(project_data['leaders'][0]['email']))
msg.html = render_template('mail/project_question.html', form=form)
msg.body = render_template('mail/project_question.txt', form=form)
mail.send(msg)
flash_msg = strings.SUCCESS_MESSAGE_SUBMITTED
flash(flash_msg, 'success')
return redirect('/' + project_name)
flash(strings.ERROR_NOT_SUBMITTED, 'danger')
return render_template('project.html', project_data=project_data, form=form, errors=errors, mixpanel_token=mixpanel_token())
@app.route('/dev_sync')
def dev_save_and_reload_all_data():
save_all_data()
reload_all_data()
return redirect(redirect_url())
@app.route('/dev_reload')
def dev_reload_all_data():
reload_all_data()
return redirect(redirect_url())
def mixpanel_token():
if config.MIXPANEL_SUPPRESS_SEND:
return None
return config.MIXPANEL_TOKEN
def save_all_data():
projects_controller.write_projects()
redirects_controller.load_redirects()
def reload_all_data():
projects_controller.load_projects()
redirects_controller.load_redirects()
def redirect_url():
return request.args.get('next') or request.referrer or url_for('index')
def email_address(email):
if app.debug or app.testing:
return config.DEBUG_EMAIL
return email
if __name__ == '__main__':
app.run()
| {
"content_hash": "50743c10b51a084ab11f17245b01c25b",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 137,
"avg_line_length": 32.547872340425535,
"alnum_prop": 0.6554992645857166,
"repo_name": "teslaworksumn/teslaworks.net",
"id": "c699ed6a5e8a729b18fefd63cb3ec9494e4e611e",
"size": "6119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twnet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5013"
},
{
"name": "JavaScript",
"bytes": "93"
},
{
"name": "Python",
"bytes": "15025"
}
],
"symlink_target": ""
} |
from distutils.core import setup
version = "0.7.1"
setup(name="riemann-sumd",
version=version,
description="Python agent for scheduling event generating processes and sending the results to Riemann",
author="Brian Hatfield",
author_email="bmhatfield@gmail.com",
url="https://github.com/bmhatfield/riemann-sumd",
package_dir={'': 'lib'},
py_modules=['event', 'loader', 'scheduler', 'sender', 'task', 'runner'],
data_files=[('/etc/init/', ["init/ubuntu/sumd.conf"]),
('/etc/sumd', ['examples/etc/sumd/sumd.conf']),
('/etc/sumd/tasks.d', ['examples/etc/sumd/tasks.d/simple.task.example']),
('/etc/sumd/tags.d', ['examples/etc/sumd/tags.d/simple.tag.example'])],
scripts=["bin/sumd"],
install_requires=[
"pyyaml",
"python-daemon",
"bernhard>=0.2.2",
"requests"
]
)
| {
"content_hash": "549430537a58313de1210a8fd700591b",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 110,
"avg_line_length": 38.875,
"alnum_prop": 0.5712754555198285,
"repo_name": "crashlytics/riemann-sumd",
"id": "89b9c9c13bd56effca48e605c22105ef61ec7dc2",
"size": "955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32640"
}
],
"symlink_target": ""
} |
import os
import unittest
import collections
from wordpress_xmlrpc import Client
from wordpress_xmlrpc.compat import ConfigParser
class WordPressTestCase(unittest.TestCase):
def setUp(self):
config = ConfigParser()
with open('wp-config.cfg', 'r') as f:
config.readfp(f)
self.xmlrpc_url = config.get('wordpress', 'url')
self.username = config.get('wordpress', 'username')
self.userid = config.get('wordpress', 'userid')
self.client = Client(self.xmlrpc_url,
self.username,
config.get('wordpress', 'password'))
def assert_list_of_classes(self, lst, kls):
"""
Verifies that a list contains objects of a specific class.
"""
self.assertTrue(isinstance(lst, collections.Iterable))
for obj in lst:
self.assertTrue(isinstance(obj, kls))
| {
"content_hash": "0d3895659b1f6b7e0fa4ec638043cd5e",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 66,
"avg_line_length": 31.466666666666665,
"alnum_prop": 0.590042372881356,
"repo_name": "maxcutler/python-wordpress-xmlrpc",
"id": "d95f10be9e8f582e18ec6e7e2d1e507a625a1202",
"size": "944",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64700"
}
],
"symlink_target": ""
} |
import os
import subprocess
from ajenti.api import *
from ajenti.plugins.main.api import SectionPlugin
from ajenti.plugins.services.api import ServiceMultiplexor
from ajenti.ui import on
from ajenti.ui.binder import Binder
from ajenti.util import platform_select
from reconfigure.configs.base import Reconfig
from reconfigure.parsers import SSVParser
from reconfigure.builders import BoundBuilder
from reconfigure.nodes import Node, PropertyNode
from reconfigure.items.bound import BoundData
# SNMP
class SNMPData (BoundData):
pass
class MIBData (BoundData):
def template(self):
return Node(
'line',
Node('token', PropertyNode('value', 'mibs')),
Node('token', PropertyNode('value', 'IF-MIB')),
)
SNMPData.bind_collection('mibs', selector=lambda x: x.children[0].get('value').value == 'mibs', item_class=MIBData)
MIBData.bind_property('value', 'name', path=lambda x: x.children[1])
class SNMPConfig (Reconfig):
def __init__(self, **kwargs):
k = {
'parser': SSVParser(),
'builder': BoundBuilder(SNMPData),
}
k.update(kwargs)
Reconfig.__init__(self, **k)
# SNMPD
class SNMPDData (BoundData):
pass
class ROCommunityData (BoundData):
def template(self):
return Node(
'line',
Node('token', PropertyNode('value', 'rocommunity')),
Node('token', PropertyNode('value', 'public 192.168.0.0/24')),
)
class RWCommunityData (BoundData):
def template(self):
return Node(
'line',
Node('token', PropertyNode('value', 'rwcommunity')),
Node('token', PropertyNode('value', 'public 192.168.0.0/24')),
)
class Sink1Data (BoundData):
def template(self):
return Node(
'line',
Node('token', PropertyNode('value', 'trapsink')),
Node('token', PropertyNode('value', 'localhost public')),
)
class Sink2Data (BoundData):
def template(self):
return Node(
'line',
Node('token', PropertyNode('value', 'trap2sink')),
Node('token', PropertyNode('value', 'localhost public')),
)
class Sink2cData (BoundData):
def template(self):
return Node(
'line',
Node('token', PropertyNode('value', 'informsink')),
Node('token', PropertyNode('value', 'localhost public')),
)
SNMPDData.bind_collection('rocommunities', selector=lambda x: x.children[0].get('value').value == 'rocommunity', item_class=ROCommunityData)
SNMPDData.bind_collection('rwcommunities', selector=lambda x: x.children[0].get('value').value == 'rwcommunity', item_class=RWCommunityData)
SNMPDData.bind_collection('sinks1', selector=lambda x: x.children[0].get('value').value == 'trapsink', item_class=Sink1Data)
SNMPDData.bind_collection('sinks2', selector=lambda x: x.children[0].get('value').value == 'trap2sink', item_class=Sink2Data)
SNMPDData.bind_collection('sinks2c', selector=lambda x: x.children[0].get('value').value == 'informsink', item_class=Sink2cData)
ROCommunityData.bind_property('value', 'value', path=lambda x: x.children[1])
RWCommunityData.bind_property('value', 'value', path=lambda x: x.children[1])
for s in [Sink1Data, Sink2Data, Sink2cData]:
s.bind_property('value', 'value', path=lambda x: x.children[1])
class SNMPDConfig (Reconfig):
def __init__(self, **kwargs):
k = {
'parser': SSVParser(maxsplit=1),
'builder': BoundBuilder(SNMPDData),
}
k.update(kwargs)
Reconfig.__init__(self, **k)
@plugin
class SNMPDPlugin (SectionPlugin):
service_name = platform_select(
default='snmpd',
)
def init(self):
self.title = 'SNMP'
self.icon = 'exchange'
self.category = _('Software')
self.append(self.ui.inflate('snmpd:main'))
self.find('servicebar').name = self.service_name
self.find('servicebar').reload()
self.snmp_config = SNMPConfig(path=platform_select(
default='/etc/snmp/snmp.conf',
))
self.snmpd_config = SNMPDConfig(path=platform_select(
default='/etc/snmp/snmpd.conf',
))
self.find('rocommunities').new_item = lambda c: ROCommunityData()
self.find('rwcommunities').new_item = lambda c: RWCommunityData()
self.find('sinks1').new_item = lambda c: Sink1Data()
self.find('sinks2').new_item = lambda c: Sink2Data()
self.find('sinks2c').new_item = lambda c: Sink2cData()
self.binder = Binder(None, self)
def on_page_load(self):
self.refresh()
def refresh(self):
self.snmp_config.load()
self.snmpd_config.load()
self.rocommunities = self.snmpd_config.tree.rocommunities
self.rwcommunities = self.snmpd_config.tree.rwcommunities
self.sinks1 = self.snmpd_config.tree.sinks1
self.sinks2 = self.snmpd_config.tree.sinks2
self.sinks2c = self.snmpd_config.tree.sinks2c
enabled_mibs = []
for mib in self.snmp_config.tree.mibs:
for x in mib.name.strip('-+:').split(':'):
enabled_mibs.append(x)
self.mibs = []
for dirpath, dirname, filenames in os.walk('/usr/share/mibs', followlinks=True):
for x in filenames:
if not x.startswith('.'):
mib = MIBData()
mib.name = x
mib.selected = x in enabled_mibs
self.mibs.append(mib)
self.mibs = sorted(self.mibs, key=lambda x: x.name)
self.binder.setup(self).populate()
@on('save', 'click')
def save(self):
self.binder.update()
mib = MIBData()
mib.name = ':'.join([x.name for x in self.mibs if x.selected])
for x in list(self.snmp_config.tree.mibs):
self.snmp_config.tree.mibs.remove(x)
self.snmp_config.tree.mibs.append(mib)
self.snmp_config.save()
self.snmpd_config.save()
self.refresh()
self.context.notify('info', _('Saved'))
ServiceMultiplexor.get().get_one(self.service_name).restart()
| {
"content_hash": "67c76ef51b2cc0c34a6878228134abf2",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 140,
"avg_line_length": 32.170984455958546,
"alnum_prop": 0.6131422129167338,
"repo_name": "lupyuen/RaspberryPiImage",
"id": "21e8f13669d69ecb1d41b05940ca1f0df3c4149c",
"size": "6209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "usr/share/pyshared/ajenti/plugins/snmpd/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "82308"
},
{
"name": "C",
"bytes": "3197439"
},
{
"name": "C#",
"bytes": "33056"
},
{
"name": "C++",
"bytes": "1020255"
},
{
"name": "CSS",
"bytes": "208338"
},
{
"name": "CoffeeScript",
"bytes": "87200"
},
{
"name": "Eagle",
"bytes": "1632170"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "Groff",
"bytes": "286691"
},
{
"name": "HTML",
"bytes": "41527"
},
{
"name": "JavaScript",
"bytes": "403603"
},
{
"name": "Makefile",
"bytes": "33808"
},
{
"name": "Objective-C",
"bytes": "69457"
},
{
"name": "Perl",
"bytes": "96047"
},
{
"name": "Processing",
"bytes": "1304"
},
{
"name": "Python",
"bytes": "13358098"
},
{
"name": "Shell",
"bytes": "68795"
},
{
"name": "TeX",
"bytes": "4317"
}
],
"symlink_target": ""
} |
from threading import Event,Thread
import threading
import time
def conn_mysql():
print('%s waiting...' %threading.current_thread().getName())
e.wait()
print('%s start to connect mysql....' %threading.current_thread().getName())
time.sleep(2)
def check_mysql():
print('%s checking ...'%threading.current_thread().getName())
time.sleep(4)
e.set()
if __name__ == '__main__':
e=Event()
c1=Thread(target=conn_mysql)
c2 = Thread(target=conn_mysql)
c3 = Thread(target=conn_mysql)
c4=Thread(target=check_mysql)
c1.start()
c2.start()
c3.start()
c4.start() | {
"content_hash": "f313dcd82f09b14328aceb566ccc9137",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 80,
"avg_line_length": 26.608695652173914,
"alnum_prop": 0.6290849673202614,
"repo_name": "5StevenWu/Coursepy",
"id": "97b468c92973213b95cbeb0b089d6bbcb4760f80",
"size": "653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "day10/事件.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "27282"
},
{
"name": "Python",
"bytes": "139220"
}
],
"symlink_target": ""
} |
"""
This module provides common parsing regular expressions and functinos which
occur across multiple log formats. End users should never need to reference
this module.
"""
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
from lars import datatypes as dt
str = type('') # pylint: disable=redefined-builtin,invalid-name
# Note - we do NOT try and validate URLs with this regex (as to do so is
# incredibly complicated and much better left to a function), merely perform
# some rudimentary extraction. The complex stuff below is derived from RFC3986
# appendix B.
_URL = r'([^:/?#\s]+:)?(//[^/?#\s]*)?[^?#\s]*(\?[^#\s]*)?(#\S*)?'
# The following regex for paths is ridiculously lax (and practically guaranteed
# to make any undelimited regex containing it ambiguous. Unfortunately there's
# not much we can do about this as none of the log formats escape filename
# fields! In other words, it's down to users not to use nutty filenames and to
# specify log formats containing sensible delims around any paths
_PATH = r'([^\x00-\x1f\x7f]*)'
# Extension methods can potentially be used, hence this regex just matches the
# "token" production in RFC2616 2.2. Note that this regex cannot match "-"
# because a method *within a request* cannot be unknown (see REQUEST below for
# more information).
_METHOD = r'[^\x00-\x1f\x7f(){}<>[\]@,;:\\"/?= \t]+'
# Same goes for HTTP PROTOCOL - can never be "-".
_PROTOCOL = r'HTTP/\d+\.\d+'
# In the following regexes, there must be a single group which covers the
# entire match. The group must be a named group with the name %(name)s, which
# will be substituted for the Python-ified field name in the regex constructed
# for row matching. Note that most regexes also match "-" which is used almost
# universally in web-logging systems to indicate a NULL value.
INTEGER = r'(?P<%(name)s>-|\d+)'
FIXED = r'(?P<%(name)s>-|\d+(\.\d*)?)'
DATE_ISO = r'(?P<%(name)s>-|\d{4}-\d{2}-\d{2})'
TIME_ISO = r'(?P<%(name)s>-|\d{2}:\d{2}:\d{2})'
# The reason for the empty "-" production appearing on the right is due to an
# issue with disjuncts in Perl-style regex implementations, see
# <http://lingpipe-blog.com/2008/05/07/tokenization-vs-eager-regular-expressions/>
#
# Note that the empty production "-" is possible for METHOD, PROTOCOL and
# REQUEST (e.g. due to request timeout), however the method cannot be empty
# ("-") unless the *entire* request is empty hence why the empty match "-" is
# only introduced here and not in the regexes above.
URL = r'(?P<%%(name)s>%s|-)' % _URL
PATH = r'(?P<%%(name)s>%s|-)' % _PATH
METHOD = r'(?P<%%(name)s>%s|-)' % _METHOD
PROTOCOL = r'(?P<%%(name)s>%s|-)' % _PROTOCOL
REQUEST = r'(?P<%%(name)s>%s %s %s|-)' % (_METHOD, _URL, _PROTOCOL)
# Doing DNS (or IP) validation is extremely hard to do properly with regexes so
# here we use a trivial regex to pull out a string containing the right
# alphabet and do validation in a function
HOSTNAME = r'(?P<%(name)s>-|[a-zA-Z0-9:.-]+)'
# Again, regex validation of IP addresses is extremely hard to do properly so
# we perform validation later in a function
ADDRESS = r'(?P<%(name)s>-|[0-9]+(\.[0-9]+){3}|[0-9a-fA-F:]+)'
ADDRESS_PORT = (
r'(?P<%(name)s>'
r'-|([0-9]+(\.[0-9]+){3}|\[[0-9a-fA-F:]+\])(:[0-9]{1,5})?)'
)
def request_parse(s):
"""
Parse an HTTP request line in a log file.
This is a basic function that simply returns the three components of a
request line (method, url, and protocol) as tuple. If URL is "*" (denoting
a missing URL for methods which do not require one, like OPTIONS), the
middle element of the returned tuple will be None.
:param str s: The string containing the request line to parse
:returns: A :class:`~lars.datatypes.Request` tuple representing the
request line
"""
return dt.request(s) if s != '-' else None
def url_parse(s):
"""
Parse a URL string in a log file.
This is a variant on the standard Python urlparse.urlparse function. The
result type has been extended to include a
:meth:`~lars.datatypes.Url.__str__` method which outputs the
reconstructed URL, and to have specialized hostname and path properties
which return enhanced objects instead of simple strings.
:param str s: The string containing the URI to parse
:returns: A :class:`~lars.datatypes.Url` tuple representing the URL
"""
return dt.url(s) if s not in ('-', '') else None
def path_parse(s):
"""
Parse a POSIX-style (slash separated) path string in a log file.
:param str s: The srting containing the POSIX-style path to parse
:returns: A :class:`~lars.datatypes.Path` object representing the path
"""
return dt.path(s) if s != '-' else None
def int_parse(s):
"""
Parse an integer string in a log file.
This is a simple variant on int() that returns None in the case of a single
dash being passed to s.
:param str s: The string containing the integer number to parse
:returns: An int value
"""
return int(s) if s != '-' else None
def fixed_parse(s):
"""
Parse an floating point string in a log file.
This is a simple variant on float() that returns None in the case of a
single dash being passed to s.
:param str s: The string containing the floating point number to parse
:returns: An float value
"""
return float(s) if s != '-' else None
def date_parse(s, format='%Y-%m-%d'):
"""
Parse a date string in a log file.
:param str s: The string containing the date to parse
:param str format: The optional strftime(3) format string
:returns: A :class:`~lars.datatypes.Date` object representing the date
"""
# pylint: disable=redefined-builtin
return dt.date(s, format) if s != '-' else None
def time_parse(s, format='%H:%M:%S'):
"""
Parse a time string in a IIS extended log format file.
:param str s: The string containing the time to parse (HH:MM:SS format)
:param str format: The optional strftime(3) format string
:returns: A :class:`~lars.datatypes.Time` object representing the time
"""
# pylint: disable=redefined-builtin
return dt.time(s, format) if s != '-' else None
def hostname_parse(s):
"""
Parse a DNS name in a log format.
:param str s: The string containing the DNS name to parse
:returns: A :class:`~lars.datatypes.Hostname` value
"""
return dt.hostname(s) if s != '-' else None
def address_parse(s):
"""
Parse an IPv4 or IPv6 address (and optional port) in a log file.
:param str s: The string containing the address to parse
:returns: A :class:`~lars.datatypes.IPv4Address` value
"""
return dt.address(s) if s != '-' else None
| {
"content_hash": "d93c6d41dabb985f703547809fd8519f",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 82,
"avg_line_length": 34.56122448979592,
"alnum_prop": 0.6668142899320934,
"repo_name": "waveform80/lars",
"id": "2320875483ba6bb2e4981283cfb42f0d04d992ec",
"size": "8032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lars/parsers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "7205"
},
{
"name": "Python",
"bytes": "333718"
}
],
"symlink_target": ""
} |
__all__ = [
"BadYamlError",
"MultipleDocumentsInFpYaml",
"HttpFetchError",
"AbortedFetchError",
]
class Error(Exception):
"""Base-class for exceptions in this module."""
class BadYamlError(Error):
"""Raise when the mapreduce.yaml file is invalid."""
class MultipleDocumentsInFpYaml(BadYamlError):
"""There's more than one document in fetch_policy.yaml file."""
class HttpFetchError(Error):
"""Raise when the HTTP Status is errored."""
class RedirectError(Error):
"""Raise when the HTTP Status is errored."""
class AbortedFetchError(Error):
"""Raise when need the fetch abort"""
| {
"content_hash": "9789c5454d10d42f800cfc904dfa65d6",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 65,
"avg_line_length": 24.92,
"alnum_prop": 0.6966292134831461,
"repo_name": "Letractively/lakshmi",
"id": "7cd4f2af5f09a33aa54b88301b1e37f389857a1d",
"size": "1226",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/lakshmi/errors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "93747"
},
{
"name": "Python",
"bytes": "194613"
}
],
"symlink_target": ""
} |
import program as mips
import tkinter as tk
import conversion as conv
import instruction as ins
from tkinter import filedialog
import time
class MIPSApplication(tk.Frame):
def __init__(self, master = None):
tk.Frame.__init__(self, master)
self.pack()
self.create_widgets()
# default configuration of window
root.resizable(0,0)
root.geometry("1200x600")
root.wm_title("MIPS Simulator")
self.job = None
def create_widgets(self):
# create menu
self.menu = tk.Menu(self)
root["menu"] = self.menu
# add file menu
file_menu = tk.Menu(self.menu)
file_menu.add_command(label = "Open", command = self.load_input_file)
file_menu.add_command(label = "Exit", command = root.destroy)
self.menu.add_cascade(menu = file_menu, label = "File")
# create grid layout for progam
self.grid(column = 0, row = 0)
# create text areas
self.input_text = tk.Text(self, width = 43, height = 35)
self.input_text["background"] = "grey"
self.input_text.grid(column = 10, row = 0)
self.bin_text = tk.Text(self, width = 42, height = 35)
self.bin_text["background"] = "grey"
self.bin_text.grid(column = 15, row = 0)
self.stack_text = tk.Text(self, width = 24, height = 35)
self.stack_text["background"] = "grey"
self.stack_text.grid(column = 20, row = 0)
self.register_text = tk.Text(self, width = 19, height = 35)
self.register_text["background"] = "grey"
self.register_text.grid(column = 25, row = 0)
# lock text areas
self.lock_text()
# create controls
# Control Panel
self.control_panel = tk.Frame(self, width=400, height=800)
button_panel = tk.Frame(self.control_panel)
self.control_panel.grid(column = 1, row = 0)
# Slider
self.speed_slider = tk.Scale(self.control_panel, orient="horizontal")
self.speed_slider["from"] = 1
self.speed_slider["to"] = 60
self.speed_slider.grid(column=0, row=0)
# Buttons
button_panel.grid(column=0,row=2, columnspan=2)
self.step_button = tk.Button(button_panel, text="Step", command=self.step_once, state = "disabled")
self.step_button.grid(column=0, row=0)
self.run_button = tk.Button(button_panel, text="Run", command=self.run_prog, state = "disabled")
self.run_button.grid(column = 1, row = 0)
self.stop_button = tk.Button(button_panel, text="Stop", command=self.stop_prog, state = "disabled")
self.stop_button.grid(column = 0, row = 1)
self.reset_button = tk.Button(button_panel, text="Reset", command=self.reset, state = "disabled")
self.reset_button.grid(column = 1, row = 1)
def lock_text(self):
self.input_text["state"] = "disabled"
self.bin_text["state"] = "disabled"
self.stack_text["state"] = "disabled"
self.register_text["state"] = "disabled"
def unlock_text(self):
self.input_text["state"] = "normal"
self.bin_text["state"] = "normal"
self.stack_text["state"] = "normal"
self.register_text["state"] = "normal"
def get_file(self):
filename = tk.filedialog.askopenfilename()
if filename == "": return
# allow editing on our text fields and wipe them
self.input_text["state"] = "normal"
self.bin_text["state"] = "normal"
self.input_text.delete("1.0", "end")
self.bin_text.delete("1.0", "end")
return filename
def get_lines(self, filename):
# create a list to contain the lines
lines = []
# iterate through all the lines in filename
with open(filename) as f:
for line in f:
lines.append(line)
return lines
def update_input(self, lines=None):
if lines != None:
print(">Updating input")
i = 1
for line in lines:
content = str(i) + ":\t" + str(line)
self.input_text.insert("end", content)
i += 1
# remove old contents and any highlighting bin_text may have had
if "highlight" in self.input_text.tag_names():
self.input_text.tag_delete("highlight")
# highlight line indicated by the program counter
# this takes a bit more work, because you need to account for labels
# and the occasional empty line, so I store the line numbe in
# instructions objects as a bit of a workaround
if not self.program.is_finished():
next_inst = self.program.instructions[self.program.pc // ins.Instruction.SIZE]
self.highlight(self.input_text, next_inst.line_number + 1)
def update_bin(self):
print(">Updating binary")
# remove old contents and any highlighting bin_text may have had
self.bin_text.delete("1.0", "end")
if "highlight" in self.bin_text.tag_names():
self.bin_text.tag_delete("highlight")
i = 0
for line in self.program.machine_code:
content = str(hex(i)) + ":\t" + str(line)
self.bin_text.insert("end", content + "\n")
i += 4
# highlight line indicated by the program counter
self.highlight(self.bin_text, self.program.pc // ins.Instruction.SIZE + 1)
# given a text area, highlight a given line
def highlight(self, textarea, line_number):
# remove previous highlighting
if "highlight" in textarea.tag_names():
textarea.tag_delete("highlight")
# now apply highlighting on given line
textarea.tag_add("highlight", str(line_number) + ".0", str(line_number) + ".end")
textarea.tag_config("highlight", background="yellow")
print("highlighting")
def update_stack(self):
print(">Updating stack")
self.stack_text.delete("1.0", "end")
stack = self.program.get_stack()
contents = stack.get_contents()
sp = self.program.get_register(29)
if not(sp in contents):
stack.store_word(sp, "-----")
sorted_stack = sorted(contents, reverse = True)
for item in sorted_stack:
output = str(hex(item)) + ":\t" + str(contents[item])
self.stack_text.insert("end", output)
if(sp == item):
self.stack_text.insert("end", "\t<--$sp")
self.stack_text.insert("end", "\n")
def update_registers(self):
print(">Updating registers")
self.register_text.delete("1.0", "end")
registers = self.program.get_all_registers()
self.register_text["state"] = "normal"
regnum = 0
for register in registers:
output = "$" + str(regnum) + ":\t" + str(register) + "\n"
self.register_text.insert("end", output)
regnum += 1
self.register_text.insert("end", "$HI:\t" + str(self.program.get_hi()) + "\n")
self.register_text.insert("end", "$LO:\t" + str(self.program.get_lo()) + "\n")
def step_once(self):
print(">Stepping once")
self.reset_button["state"] = "normal"
self.program.step_once()
self.unlock_text()
self.update_input()
self.update_bin()
self.update_stack()
self.update_registers()
self.lock_text()
def run_prog(self):
print(">Running program")
self.reset_button["state"] = "normal"
self.stop_button["state"] = "normal"
self.step_once()
if not self.program.is_finished():
self.job = root.after(1000//self.speed_slider.get(), self.run_prog)
else:
self.job = None
def stop_prog(self):
print(">Stopping program")
self.stop_button["state"] = "disabled"
if self.job is not None:
root.after_cancel(self.job)
self.job = None
def reset(self):
print(">Resetting simulator")
self.reset_button["state"] = "disabled"
self.stop_button["state"] = "disabled"
self.program.reset()
self.unlock_text()
self.update_input()
self.update_bin()
self.update_stack()
self.update_registers()
self.lock_text()
def load_input_file(self):
filename = self.get_file()
if filename == None: return
lines = self.get_lines(filename)
self.program = mips.Program(lines)
self.unlock_text()
self.update_input(lines)
self.update_bin()
self.update_stack()
self.update_registers()
self.lock_text()
self.step_button["state"] = "normal"
self.run_button["state"] = "normal"
root = tk.Tk()
app = MIPSApplication(master = root)
app.mainloop()
| {
"content_hash": "7d06ee57f964cde6e05120c1645fd223",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 101,
"avg_line_length": 31.609958506224068,
"alnum_prop": 0.6720924127067471,
"repo_name": "rosshays/mips-simulation",
"id": "d6f2a5ed9733f97e6b969638857c2d1043a14b42",
"size": "7618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33342"
}
],
"symlink_target": ""
} |
from .cz_eru_scraper import CZ_ERU_Scraper
class ScraperFactory(object):
"""docstring for ScraperFactory"""
def __init__(self):
super(ScraperFactory, self).__init__()
def getScraper(country, source_name, url):
if country == 'CZ' and source_name == 'ERU':
return CZ_ERU_Scraper(url) | {
"content_hash": "86fe602ab80db8d1760be78338c86c55",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 46,
"avg_line_length": 29.3,
"alnum_prop": 0.6962457337883959,
"repo_name": "Open-Power-System-Data/renewable_power_plants",
"id": "34683d36a35699ab2c632b819f3c519593a3c7ff",
"size": "293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/scrapers/scraper_factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "24083"
},
{
"name": "Jupyter Notebook",
"bytes": "307060"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
from matplotlib.pyplot import triplot
from mpl_toolkits.mplot3d import Axes3D
from pycrest.mesh import Mesh2d
def plot_triangulation(tri: Mesh2d, standalone=True, *args, **kwargs):
figure = plt.figure() if standalone else None
triplot(tri.vertices[:, 0], tri.vertices[:, 1], tri.elements, *args, **kwargs)
xmin = tri.vertices[:, 0].min()
xmax = tri.vertices[:, 0].max()
ymin = tri.vertices[:, 1].min()
ymax = tri.vertices[:, 1].max()
padding = 0.05 * (max(xmax - xmin, ymax - ymin))
plt.axis('square')
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((xmin - padding, xmax + padding))
plt.ylim((ymin - padding, ymax + padding))
if figure:
figure.show()
| {
"content_hash": "185302847ef07417f44cbd7b9872a816",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 82,
"avg_line_length": 33.63636363636363,
"alnum_prop": 0.6418918918918919,
"repo_name": "Andlon/crest",
"id": "6cc4dbbfdd4b53123ada04b2e051698fcbeaf958",
"size": "740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycrest/visualize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "494891"
},
{
"name": "CMake",
"bytes": "3988"
},
{
"name": "Jupyter Notebook",
"bytes": "17535"
},
{
"name": "Python",
"bytes": "9485"
},
{
"name": "Shell",
"bytes": "605"
}
],
"symlink_target": ""
} |
""" Classes and functions for Symmetric Diffeomorphic Registration """
from __future__ import print_function
import abc
from dipy.utils.six import with_metaclass
import numpy as np
import numpy.linalg as npl
import scipy as sp
import nibabel as nib
import dipy.align.vector_fields as vfu
from dipy.align import floating
from dipy.align import VerbosityLevels
from dipy.align import Bunch
RegistrationStages = Bunch(INIT_START=0,
INIT_END=1,
OPT_START=2,
OPT_END=3,
SCALE_START=4,
SCALE_END=5,
ITER_START=6,
ITER_END=7)
r"""Registration Stages
This enum defines the different stages which the Volumetric Registration
may be in. The value of the stage is passed as a parameter to the call-back
function so that it can react accordingly.
INIT_START: optimizer initialization starts
INIT_END: optimizer initialization ends
OPT_START: optimization starts
OPT_END: optimization ends
SCALE_START: optimization at a new scale space resolution starts
SCALE_END: optimization at the current scale space resolution ends
ITER_START: a new iteration starts
ITER_END: the current iteration ends
"""
def mult_aff(A, B):
r"""Returns the matrix product A.dot(B) considering None as the identity
Parameters
----------
A : array, shape (n,k)
B : array, shape (k,m)
Returns
-------
The matrix product A.dot(B). If any of the input matrices is None, it is
treated as the identity matrix. If both matrices are None, None is returned.
"""
if A is None:
return B
elif B is None:
return A
return A.dot(B)
def get_direction_and_spacings(affine, dim):
r"""Extracts the rotational and spacing components from a matrix
Extracts the rotational and spacing (voxel dimensions) components from a
matrix. An image gradient represents the local variation of the image's gray
values per voxel. Since we are iterating on the physical space, we need to
compute the gradients as variation per millimeter, so we need to divide each
gradient's component by the voxel size along the corresponding axis, that's
what the spacings are used for. Since the image's gradients are oriented
along the grid axes, we also need to re-orient the gradients to be given
in physical space coordinates.
Parameters
----------
affine : array, shape (k, k), k = 3, 4
the matrix transforming grid coordinates to physical space.
Returns
-------
direction : array, shape (k-1, k-1)
the rotational component of the input matrix
spacings : array, shape (k-1,)
the scaling component (voxel size) of the matrix
"""
if affine is None:
return np.eye(dim), np.ones(dim)
dim = affine.shape[1]-1
#Temporary hack: get the zooms by building a nifti image
affine4x4 = np.eye(4)
empty_volume = np.zeros((0,0,0))
affine4x4[:dim, :dim] = affine[:dim, :dim]
affine4x4[:dim, 3] = affine[:dim, dim-1]
nib_nifti = nib.Nifti1Image(empty_volume, affine4x4)
scalings = np.asarray(nib_nifti.get_header().get_zooms())
scalings = np.asarray(scalings[:dim], dtype = np.float64)
A = affine[:dim,:dim]
return A.dot(np.diag(1.0/scalings)), scalings
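# Illustrative decomposition (not part of the original module): a purely
# anisotropic-scaling affine splits into an identity direction matrix and
# the voxel sizes themselves.
#   A = np.diag([2.0, 3.0, 1.0])
#   direction, spacing = get_direction_and_spacings(A, 2)
#   # direction ~ np.eye(2), spacing ~ [2.0, 3.0]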
class ScaleSpace(object):
def __init__(self, image, num_levels,
image_grid2world=None,
input_spacing=None,
sigma_factor=0.2,
mask0=False):
r""" ScaleSpace
Computes the Scale Space representation of an image. The scale space is
simply a list of images produced by smoothing the input image with a
Gaussian kernel with increasing smoothing parameter. If the image's
voxels are isotropic, the smoothing will be the same along all
        directions: at level L = 0, 1, ..., the sigma is given by s * (2^L - 1).
If the voxel dimensions are not isotropic, then the smoothing is
weaker along low resolution directions.
Parameters
----------
image : array, shape (r,c) or (s, r, c) where s is the number of slices,
r is the number of rows and c is the number of columns of the input
image.
num_levels : int
the desired number of levels (resolutions) of the scale space
image_grid2world : array, shape (k, k), k=3,4 (for either 2D or 3D images)
the grid-to-space transform of the image grid
input_spacing : array, shape (k-1,)
the spacing (voxel size) between voxels in physical space
sigma_factor : float
the smoothing factor to be used in the construction of the scale
space.
mask0 : Boolean
if True, all smoothed images will be zero at all voxels that are
zero in the input image.
"""
self.dim = len(image.shape)
self.num_levels = num_levels
input_size = np.array(image.shape)
if mask0:
            mask = np.asarray(image > 0, dtype=np.int32)
        #normalize input image to [0,1]
        img = (image - image.min()) / (image.max() - image.min())
if mask0:
img *= mask
#The properties are saved in separate lists. Insert input image
#properties at the first level of the scale space
self.images = [img.astype(floating)]
self.domain_shapes = [input_size.astype(np.int32)]
        if input_spacing is None:
            input_spacing = np.ones((self.dim,), dtype=np.int32)
self.spacings = [input_spacing]
self.scalings = [np.ones(self.dim)]
self.affines = [image_grid2world]
self.sigmas = [np.zeros(self.dim)]
if image_grid2world is not None:
self.affine_invs = [npl.inv(image_grid2world)]
else:
self.affine_invs = [None]
#compute the rest of the levels
min_spacing = np.min(input_spacing)
for i in range(1, num_levels):
            scaling_factor = 2**i
            #Note: the minimum below is present in ANTS to prevent the scaling
            #from being too large (which would make the sub-sampled image too
            #small): it keeps the sub-sampled image at least 32 voxels along
            #each direction. It is risky to make this decision based on image
            #size, though (the effect of this needs more investigation).
            #scaling = np.minimum(scaling_factor * min_spacing / input_spacing,
            #                     input_size / 32)
            scaling = scaling_factor * min_spacing / input_spacing
output_spacing = input_spacing * scaling
extended = np.append(scaling, [1])
            if image_grid2world is not None:
affine = image_grid2world.dot(np.diag(extended))
else:
affine = np.diag(extended)
output_size = input_size * (input_spacing / output_spacing) + 0.5
output_size = output_size.astype(np.int32)
sigmas = sigma_factor * (output_spacing / input_spacing - 1.0)
#filter along each direction with the appropriate sigma
filtered = sp.ndimage.filters.gaussian_filter(image, sigmas)
filtered = ((filtered - filtered.min())/
(filtered.max() - filtered.min()))
if mask0:
filtered *= mask
#Add current level to the scale space
self.images.append(filtered.astype(floating))
self.domain_shapes.append(output_size)
self.spacings.append(output_spacing)
self.scalings.append(scaling)
self.affines.append(affine)
self.affine_invs.append(npl.inv(affine))
self.sigmas.append(sigmas)
def get_expand_factors(self, from_level, to_level):
r"""Ratio of voxel size from pyramid level from_level to to_level
Given two scale space resolutions a = from_level, b = to_level,
returns the ratio of voxels size at level b to voxel size at level a
(the factor that must be used to multiply voxels at level a to
'expand' them to level b).
Parameters
----------
from_level : int, 0 <= from_level < L, (L = number of resolutions)
the resolution to expand voxels from
to_level : int, 0 <= to_level < from_level
the resolution to expand voxels to
Returns
-------
factors : array, shape (k,), k = 2, 3
the expand factors (a scalar for each voxel dimension)
"""
factors = (np.array(self.spacings[to_level]) /
np.array(self.spacings[from_level]) )
return factors
def print_level(self, level):
r"""Prints properties of a pyramid level
Prints the properties of a level of this scale space to standard output
Parameters
----------
        level : int, 0 <= level < L, (L = number of resolutions)
the scale space level to be printed
"""
print('Domain shape: ', self.get_domain_shape(level))
print('Spacing: ', self.get_spacing(level))
print('Scaling: ', self.get_scaling(level))
print('Affine: ', self.get_affine(level))
print('Sigmas: ', self.get_sigmas(level))
def _get_attribute(self, attribute, level):
r"""Returns an attribute from the Scale Space at a given level
Returns the level-th element of attribute if level is a valid level
of this scale space. Otherwise, returns None.
Parameters
----------
attribute : list
the attribute to retrieve the level-th element from
level : int,
the index of the required element from attribute.
Returns
-------
attribute[level] : object
the requested attribute if level is valid, else it raises
a ValueError
"""
if 0 <= level < self.num_levels:
return attribute[level]
raise ValueError('Invalid pyramid level: '+str(level))
def get_image(self, level):
r"""Smoothed image at a given level
Returns the smoothed image at the requested level in the Scale Space.
Parameters
----------
        level : int, 0 <= level < L, (L = number of resolutions)
the scale space level to get the smooth image from
Returns
-------
        the smooth image at the requested resolution (a ValueError is raised
        if an invalid level is requested)
"""
return self._get_attribute(self.images, level)
def get_domain_shape(self, level):
r"""Shape the sub-sampled image must have at a particular level
Returns the shape the sub-sampled image must have at a particular
resolution of the scale space (note that this object does not explicitly
subsample the smoothed images, but only provides the properties
the sub-sampled images must have).
Parameters
----------
        level : int, 0 <= level < L, (L = number of resolutions)
the scale space level to get the sub-sampled shape from
Returns
-------
        the sub-sampled shape at the requested resolution (a ValueError is
        raised if an invalid level is requested)
"""
return self._get_attribute(self.domain_shapes, level)
def get_spacing(self, level):
r"""Spacings the sub-sampled image must have at a particular level
Returns the spacings (voxel sizes) the sub-sampled image must have at a
particular resolution of the scale space (note that this object does
not explicitly subsample the smoothed images, but only provides the
properties the sub-sampled images must have).
Parameters
----------
        level : int, 0 <= level < L, (L = number of resolutions)
the scale space level to get the sub-sampled shape from
Returns
-------
        the spacings (voxel sizes) at the requested resolution (a ValueError
        is raised if an invalid level is requested)
"""
return self._get_attribute(self.spacings, level)
def get_scaling(self, level):
r"""Adjustment factor for input-spacing to reflect voxel sizes at level
Returns the scaling factor that needs to be applied to the input spacing
(the voxel sizes of the image at level 0 of the scale space) to
transform them to voxel sizes at the requested level.
Parameters
----------
        level : int, 0 <= level < L, (L = number of resolutions)
the scale space level to get the scalings from
Returns
-------
the scaling factors from the original spacing to the spacings at the
requested level
"""
return self._get_attribute(self.scalings, level)
def get_affine(self, level):
r"""Voxel-to-space transformation at a given level
Returns the voxel-to-space transformation associated to the sub-sampled
image at a particular resolution of the scale space (note that this
object does not explicitly subsample the smoothed images, but only
provides the properties the sub-sampled images must have).
Parameters
----------
        level : int, 0 <= level < L, (L = number of resolutions)
the scale space level to get affine transform from
Returns
-------
        the affine (voxel-to-space) transform at the requested resolution (a
        ValueError is raised if an invalid level is requested)
"""
return self._get_attribute(self.affines, level)
def get_affine_inv(self, level):
r"""Space-to-voxel transformation at a given level
Returns the space-to-voxel transformation associated to the sub-sampled
image at a particular resolution of the scale space (note that this
object does not explicitly subsample the smoothed images, but only
provides the properties the sub-sampled images must have).
Parameters
----------
        level : int, 0 <= level < L, (L = number of resolutions)
the scale space level to get the inverse transform from
Returns
-------
        the inverse (space-to-voxel) transform at the requested resolution (a
        ValueError is raised if an invalid level is requested)
"""
return self._get_attribute(self.affine_invs, level)
def get_sigmas(self, level):
r"""Smoothing parameters used at a given level
Returns the smoothing parameters (a scalar for each axis) used at the
requested level of the scale space
Parameters
----------
        level : int, 0 <= level < L, (L = number of resolutions)
the scale space level to get the smoothing parameters from
Returns
-------
the smoothing parameters at the requested level
"""
return self._get_attribute(self.sigmas, level)
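# Example usage (illustrative, not part of the original module):
#   import numpy as np
#   img = np.random.rand(64, 64)
#   ss = ScaleSpace(img, num_levels=3, image_grid2world=np.eye(3),
#                   input_spacing=np.ones(2))
#   ss.get_image(2)              # most-smoothed image (coarsest level)
#   ss.get_domain_shape(2)       # shape the sub-sampled image would have
#   ss.get_expand_factors(2, 0)  # voxel-size ratio from level 2 to level 0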
class DiffeomorphicMap(object):
def __init__(self,
dim,
disp_shape,
disp_grid2world=None,
domain_shape=None,
domain_grid2world=None,
codomain_shape=None,
codomain_grid2world=None,
prealign=None):
r""" DiffeomorphicMap
Implements a diffeomorphic transformation on the physical space. The
deformation fields encoding the direct and inverse transformations
share the same domain discretization (both the discretization grid shape
and voxel-to-space matrix). The input coordinates (physical coordinates)
are first aligned using prealign, and then displaced using the
corresponding vector field interpolated at the aligned coordinates.
Parameters
----------
dim : int, 2 or 3
the transformation's dimension
disp_shape : array, shape (dim,)
the number of slices (if 3D), rows and columns of the deformation
field's discretization
        disp_grid2world : array, shape (dim+1, dim+1)
            the voxel-to-space transformation between the deformation field's
            grid and space
domain_shape : array, shape (dim,)
the number of slices (if 3D), rows and columns of the default
            discretization of this map's domain
domain_grid2world : array, shape (dim+1, dim+1)
the default voxel-to-space transformation between this map's
discretization and physical space
codomain_shape : array, shape (dim,)
the number of slices (if 3D), rows and columns of the images that
are 'normally' warped using this transformation in the forward
direction (this will provide default transformation parameters to
warp images under this transformation). By default, we assume that
the inverse transformation is 'normally' used to warp images with
the same discretization and voxel-to-space transformation as the
deformation field grid.
codomain_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of images that are 'normally'
warped using this transformation (in the forward direction).
prealign : array, shape (dim+1, dim+1)
the linear transformation to be applied to align input images to
the reference space before warping under the deformation field.
"""
        self.dim = dim
        if disp_shape is None:
            raise ValueError("Invalid displacement field discretization")
        self.disp_shape = np.asarray(disp_shape, dtype=np.int32)
        # If the discretization affine is None, we assume it's the identity
        self.disp_grid2world = disp_grid2world
        if self.disp_grid2world is None:
            self.disp_world2grid = None
        else:
            self.disp_world2grid = npl.inv(self.disp_grid2world)
        # If domain_shape is not provided, we use the map's discretization shape
        if domain_shape is None:
            self.domain_shape = self.disp_shape
        else:
            self.domain_shape = np.asarray(domain_shape, dtype=np.int32)
        self.domain_grid2world = domain_grid2world
        if domain_grid2world is None:
            self.domain_world2grid = None
        else:
            self.domain_world2grid = npl.inv(domain_grid2world)
# If codomain shape was not provided, we assume it is an endomorphism:
# use the same domain_shape and codomain_grid2world as the field domain
if codomain_shape is None:
self.codomain_shape = self.domain_shape
else:
            self.codomain_shape = np.asarray(codomain_shape, dtype=np.int32)
self.codomain_grid2world = codomain_grid2world
if codomain_grid2world is None:
self.codomain_world2grid = None
else:
self.codomain_world2grid = npl.inv(codomain_grid2world)
self.prealign = prealign
if prealign is None:
self.prealign_inv = None
else:
self.prealign_inv = npl.inv(prealign)
self.is_inverse = False
self.forward = None
self.backward = None
def get_forward_field(self):
r"""Deformation field to transform an image in the forward direction
Returns the deformation field that must be used to warp an image under
this transformation in the forward direction (note the 'is_inverse'
flag).
"""
if self.is_inverse:
return self.backward
else:
return self.forward
def get_backward_field(self):
r"""Deformation field to transform an image in the backward direction
Returns the deformation field that must be used to warp an image under
this transformation in the backward direction (note the 'is_inverse'
flag).
"""
if self.is_inverse:
return self.forward
else:
return self.backward
def allocate(self):
r"""Creates a zero displacement field
Creates a zero displacement field (the identity transformation).
"""
self.forward = np.zeros(tuple(self.disp_shape)+(self.dim,),
dtype=floating)
self.backward = np.zeros(tuple(self.disp_shape)+(self.dim,),
dtype=floating)
def _get_warping_function(self, interpolation):
r"""Appropriate warping function for the given interpolation type
Returns the right warping function from vector_fields that must be
called for the specified data dimension and interpolation type
"""
if self.dim == 2:
if interpolation == 'linear':
return vfu.warp_2d
else:
return vfu.warp_2d_nn
else:
if interpolation == 'linear':
return vfu.warp_3d
else:
return vfu.warp_3d_nn
def _warp_forward(self, image, interpolation='linear', image_world2grid=None,
out_shape=None, out_grid2world=None):
r"""Warps an image in the forward direction
Deforms the input image under this diffeomorphic map in the forward
direction. Since the mapping is defined in the physical space, the user
must specify the sampling grid shape and its space-to-voxel mapping.
By default, the transformation will use the discretization information
given at initialization.
Parameters
----------
image : array, shape (s, r, c) if dim = 3 or (r, c) if dim = 2
the image to be warped under this transformation in the forward
direction
interpolation : string, either 'linear' or 'nearest'
the type of interpolation to be used for warping, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
image_world2grid : array, shape (dim+1, dim+1)
the transformation bringing world (space) coordinates to voxel
coordinates of the image given as input
out_shape : array, shape (dim,)
the number of slices, rows and columns of the desired warped image
out_grid2world : the transformation bringing voxel coordinates of the
warped image to physical space
Returns
-------
        warped : array, shape = out_shape or self.domain_shape if None
the warped image under this transformation in the forward direction
Notes
-----
A diffeomorphic map must be thought as a mapping between points
in space. Warping an image J towards an image I means transforming
each voxel with (discrete) coordinates i in I to (floating-point) voxel
coordinates j in J. The transformation we consider 'forward' is
precisely mapping coordinates i from the input image to coordinates j
from reference image, which has the effect of warping an image with
reference discretization (typically, the "static image") "towards" an
image with input discretization (typically, the "moving image"). More
precisely, the warped image is produced by the following interpolation:
        warped[i] = image[W * forward[Dinv * P * S * i] + W * P * S * i]
where i denotes the coordinates of a voxel in the input grid, W is
the world-to-grid transformation of the image given as input, Dinv
is the world-to-grid transformation of the deformation field
discretization, P is the pre-aligning matrix (transforming input
points to reference points), S is the voxel-to-space transformation of
the sampling grid (see comment below) and forward is the forward
deformation field.
If we want to warp an image, we also must specify on what grid we
want to sample the resulting warped image (the images are considered as
points in space and its representation on a grid depends on its
grid-to-space transform telling us for each grid voxel what point in
space we need to bring via interpolation). So, S is the matrix that
converts the sampling grid (whose shape is given as parameter
'out_shape' ) to space coordinates.
"""
#if no world-to-image transform is provided, we use the codomain info
if image_world2grid is None:
image_world2grid = self.codomain_world2grid
#if no sampling info is provided, we use the domain info
if out_shape is None:
if self.domain_shape is None:
raise ValueError('Unable to infer sampling info. '
'Provide a valid out_shape.')
out_shape = self.domain_shape
else:
out_shape = np.asarray(out_shape, dtype=np.int32)
if out_grid2world is None:
out_grid2world = self.domain_grid2world
W = None if image_world2grid == 'identity' else image_world2grid
Dinv = self.disp_world2grid
P = self.prealign
S = None if out_grid2world == 'identity' else out_grid2world
#this is the matrix which we need to multiply the voxel coordinates
#to interpolate on the forward displacement field ("in"side the
#'forward' brackets in the expression above)
affine_idx_in = mult_aff(Dinv, mult_aff(P, S))
#this is the matrix which we need to multiply the voxel coordinates
#to add to the displacement ("out"side the 'forward' brackets in the
#expression above)
affine_idx_out = mult_aff(W, mult_aff(P, S))
#this is the matrix which we need to multiply the displacement vector
#prior to adding to the transformed input point
affine_disp = W
#Convert the data to the required types to use the cythonized functions
if interpolation == 'nearest':
if image.dtype is np.dtype('float64') and floating is np.float32:
image = image.astype(floating)
elif image.dtype is np.dtype('int64'):
image = image.astype(np.int32)
else:
image = np.asarray(image, dtype=floating)
warp_f = self._get_warping_function(interpolation)
warped = warp_f(image, self.forward, affine_idx_in, affine_idx_out,
affine_disp, out_shape)
return warped
def _warp_backward(self, image, interpolation='linear', image_world2grid=None,
out_shape=None, out_grid2world=None):
r"""Warps an image in the backward direction
Deforms the input image under this diffeomorphic map in the backward
direction. Since the mapping is defined in the physical space, the user
must specify the sampling grid shape and its space-to-voxel mapping.
By default, the transformation will use the discretization information
given at initialization.
Parameters
----------
image : array, shape (s, r, c) if dim = 3 or (r, c) if dim = 2
the image to be warped under this transformation in the backward
direction
interpolation : string, either 'linear' or 'nearest'
the type of interpolation to be used for warping, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
image_world2grid : array, shape (dim+1, dim+1)
the transformation bringing world (space) coordinates to voxel
coordinates of the image given as input
out_shape : array, shape (dim,)
the number of slices, rows and columns of the desired warped image
out_grid2world : the transformation bringing voxel coordinates of the
warped image to physical space
Returns
-------
        warped : array, shape = out_shape or self.codomain_shape if None
the warped image under this transformation in the backward direction
Notes
-----
A diffeomorphic map must be thought as a mapping between points
in space. Warping an image J towards an image I means transforming
each voxel with (discrete) coordinates i in I to (floating-point) voxel
coordinates j in J. The transformation we consider 'backward' is
precisely mapping coordinates i from the reference grid to coordinates j
from the input image (that's why it's "backward"), which has the effect
of warping the input image (moving) "towards" the reference. More
precisely, the warped image is produced by the following interpolation:
        warped[i] = image[W * Pinv * backward[Dinv * S * i] + W * Pinv * S * i]
where i denotes the coordinates of a voxel in the input grid, W is
the world-to-grid transformation of the image given as input, Dinv
is the world-to-grid transformation of the deformation field
discretization, Pinv is the pre-aligning matrix's inverse (transforming
reference points to input points), S is the grid-to-space transformation
of the sampling grid (see comment below) and backward is the backward
deformation field.
If we want to warp an image, we also must specify on what grid we
want to sample the resulting warped image (the images are considered as
points in space and its representation on a grid depends on its
grid-to-space transform telling us for each grid voxel what point in
space we need to bring via interpolation). So, S is the matrix that
converts the sampling grid (whose shape is given as parameter
'out_shape' ) to space coordinates.
"""
#if no world-to-image transform is provided, we use the domain info
if image_world2grid is None:
image_world2grid = self.domain_world2grid
#if no sampling info is provided, we use the codomain info
if out_shape is None:
if self.codomain_shape is None:
raise ValueError('Unable to infer sampling info. Provide a valid out_shape.')
out_shape = self.codomain_shape
if out_grid2world is None:
out_grid2world = self.codomain_grid2world
W = None if image_world2grid == 'identity' else image_world2grid
Dinv = self.disp_world2grid
Pinv = self.prealign_inv
S = None if out_grid2world == 'identity' else out_grid2world
#this is the matrix which we need to multiply the voxel coordinates
#to interpolate on the backward displacement field ("in"side the
#'backward' brackets in the expression above)
affine_idx_in = mult_aff(Dinv, S)
#this is the matrix which we need to multiply the voxel coordinates
#to add to the displacement ("out"side the 'backward' brackets in the
#expression above)
affine_idx_out = mult_aff(W, mult_aff(Pinv, S))
#this is the matrix which we need to multiply the displacement vector
#prior to adding to the transformed input point
affine_disp = mult_aff(W, Pinv)
if interpolation == 'nearest':
if image.dtype is np.dtype('float64') and floating is np.float32:
image = image.astype(floating)
elif image.dtype is np.dtype('int64'):
image = image.astype(np.int32)
else:
image = np.asarray(image, dtype=floating)
warp_f = self._get_warping_function(interpolation)
warped = warp_f(image, self.backward, affine_idx_in, affine_idx_out,
affine_disp, out_shape)
return warped
def transform(self, image, interpolation='linear', image_world2grid=None,
out_shape=None, out_grid2world=None):
r"""Warps an image in the forward direction
Transforms the input image under this transformation in the forward
direction. It uses the "is_inverse" flag to switch between "forward"
and "backward" (if is_inverse is False, then transform(...) warps the
image forwards, else it warps the image backwards).
Parameters
----------
image : array, shape (s, r, c) if dim = 3 or (r, c) if dim = 2
the image to be warped under this transformation in the forward
direction
interpolation : string, either 'linear' or 'nearest'
the type of interpolation to be used for warping, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
image_world2grid : array, shape (dim+1, dim+1)
the transformation bringing world (space) coordinates to voxel
coordinates of the image given as input
out_shape : array, shape (dim,)
the number of slices, rows and columns of the desired warped image
out_grid2world : the transformation bringing voxel coordinates of the
warped image to physical space
Returns
-------
        warped : array, shape = out_shape, or the transform's default
            sampling shape if None
the warped image under this transformation in the forward direction
Notes
-----
See _warp_forward and _warp_backward documentation for further
information.
"""
if out_shape is not None:
out_shape = np.asarray(out_shape, dtype=np.int32)
if self.is_inverse:
warped = self._warp_backward(image, interpolation, image_world2grid,
out_shape, out_grid2world)
else:
warped = self._warp_forward(image, interpolation, image_world2grid,
out_shape, out_grid2world)
return np.asarray(warped)
def transform_inverse(self, image, interpolation='linear', image_world2grid=None,
out_shape=None, out_grid2world=None):
r"""Warps an image in the backward direction
Transforms the input image under this transformation in the backward
direction. It uses the "is_inverse" flag to switch between "forward"
and "backward" (if is_inverse is False, then transform_inverse(...)
warps the image backwards, else it warps the image forwards)
Parameters
----------
image : array, shape (s, r, c) if dim = 3 or (r, c) if dim = 2
the image to be warped under this transformation in the forward
direction
interpolation : string, either 'linear' or 'nearest'
the type of interpolation to be used for warping, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
image_world2grid : array, shape (dim+1, dim+1)
the transformation bringing world (space) coordinates to voxel
coordinates of the image given as input
out_shape : array, shape (dim,)
the number of slices, rows and columns of the desired warped image
out_grid2world : the transformation bringing voxel coordinates of the
warped image to physical space
Returns
-------
        warped : array, shape = out_shape, or the transform's default
            sampling shape if None
the warped image under this transformation in the backward direction
Notes
-----
See _warp_forward and _warp_backward documentation for further
information.
"""
if self.is_inverse:
warped = self._warp_forward(image, interpolation, image_world2grid,
out_shape, out_grid2world)
else:
warped = self._warp_backward(image, interpolation, image_world2grid,
out_shape, out_grid2world)
return np.asarray(warped)
def inverse(self):
r"""Inverse of this DiffeomorphicMap instance
Returns a diffeomorphic map object representing the inverse of this
transformation. The internal arrays are not copied but just referenced.
Returns
-------
inv : DiffeomorphicMap object
the inverse of this diffeomorphic map.
"""
inv = DiffeomorphicMap(self.dim,
self.disp_shape,
self.disp_grid2world,
self.domain_shape,
self.domain_grid2world,
self.codomain_shape,
self.codomain_grid2world,
self.prealign)
inv.forward = self.forward
inv.backward = self.backward
inv.is_inverse = True
return inv
def expand_fields(self, expand_factors, new_shape):
r"""Expands the displacement fields from current shape to new_shape
Up-samples the discretization of the displacement fields to be of
new_shape shape.
Parameters
----------
expand_factors : array, shape (dim,)
the factors scaling current spacings (voxel sizes) to spacings in
the expanded discretization.
new_shape : array, shape (dim,)
the shape of the arrays holding the up-sampled discretization
"""
if self.dim == 2:
expand_f = vfu.resample_displacement_field_2d
else:
expand_f = vfu.resample_displacement_field_3d
expanded_forward = expand_f(self.forward, expand_factors, new_shape)
expanded_backward = expand_f(self.backward, expand_factors, new_shape)
expand_factors = np.append(expand_factors, [1])
expanded_grid2world = mult_aff(self.disp_grid2world,
np.diag(expand_factors))
expanded_world2grid = npl.inv(expanded_grid2world)
self.forward = expanded_forward
self.backward = expanded_backward
self.disp_shape = new_shape
self.disp_grid2world = expanded_grid2world
self.disp_world2grid = expanded_world2grid
def compute_inversion_error(self):
r"""Inversion error of the displacement fields
Estimates the inversion error of the displacement fields by computing
statistics of the residual vectors obtained after composing the forward
and backward displacement fields.
Returns
-------
        residual : array, shape (R, C, 2) or (S, R, C, 3)
the displacement field resulting from composing the forward and
backward displacement fields of this transformation (the residual
should be zero for a perfect diffeomorphism)
stats : array, shape (3,)
statistics from the norms of the vectors of the residual
displacement field: maximum, mean and standard deviation
Notes
-----
Since the forward and backward displacement fields have the same
discretization, the final composition is given by
comp[i] = forward[ i + Dinv * backward[i]]
where Dinv is the space-to-grid transformation of the displacement
fields
"""
Dinv = self.disp_world2grid
if self.dim == 2:
compose_f = vfu.compose_vector_fields_2d
else:
compose_f = vfu.compose_vector_fields_3d
residual, stats = compose_f(self.backward, self.forward,
None, Dinv, 1.0, None)
return np.asarray(residual), np.asarray(stats)
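    # Illustrative check (not part of the original module; 'mapping' stands
    # for a DiffeomorphicMap whose fields have been populated): near-zero
    # statistics indicate the two fields are close to exact inverses.
    #   residual, stats = mapping.compute_inversion_error()
    #   max_norm, mean_norm, std_norm = stats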
def shallow_copy(self):
r"""Shallow copy of this DiffeomorphicMap instance
Creates a shallow copy of this diffeomorphic map (the arrays are not
copied but just referenced)
Returns
-------
new_map : DiffeomorphicMap object
the shallow copy of this diffeomorphic map
"""
new_map = DiffeomorphicMap(self.dim,
self.disp_shape,
self.disp_grid2world,
self.domain_shape,
self.domain_grid2world,
self.codomain_shape,
self.codomain_grid2world,
self.prealign)
new_map.forward = self.forward
new_map.backward = self.backward
new_map.is_inverse = self.is_inverse
return new_map
def warp_endomorphism(self, phi):
r"""Composition of this DiffeomorphicMap with a given endomorphism
Creates a new DiffeomorphicMap C with the same properties as self and
composes its displacement fields with phi's corresponding fields.
The resulting diffeomorphism is of the form C(x) = phi(self(x)) with
inverse C^{-1}(y) = self^{-1}(phi^{-1}(y)). We assume that phi is an
endomorphism with the same discretization and domain affine as self
to ensure that the composition inherits self's properties (we also
assume that the pre-aligning matrix of phi is None or identity).
Parameters
----------
phi : DiffeomorphicMap object
the endomorphism to be warped by this diffeomorphic map
Returns
-------
composition : the composition of this diffeomorphic map with the
endomorphism given as input
Notes
-----
The problem with our current representation of a DiffeomorphicMap is
that the set of Diffeomorphism that can be represented this way (a
pre-aligning matrix followed by a non-linear endomorphism given as a
displacement field) is not closed under the composition operation.
Supporting a general DiffeomorphicMap class, closed under composition,
may be extremely costly computationally, and the kind of transformations
we actually need for Avants' mid-point algorithm (SyN) are much simpler.
"""
#Compose the forward deformation fields
d1 = self.get_forward_field()
d2 = phi.get_forward_field()
d1_inv = self.get_backward_field()
d2_inv = phi.get_backward_field()
premult_disp = self.disp_world2grid
if self.dim == 2:
compose_f = vfu.compose_vector_fields_2d
else:
compose_f = vfu.compose_vector_fields_3d
forward, stats = compose_f(d1, d2, None, premult_disp, 1.0, None)
        backward, stats = compose_f(d2_inv, d1_inv, None, premult_disp, 1.0, None)
composition = self.shallow_copy()
composition.forward = forward
composition.backward = backward
return composition
def get_simplified_transform(self):
r""" Constructs a simplified version of this Diffeomorhic Map
The simplified version incorporates the pre-align transform, as well as
the domain and codomain affine transforms into the displacement field.
The resulting transformation may be regarded as operating on the
image spaces given by the domain and codomain discretization. As a
result, self.prealign, self.disp_grid2world, self.domain_grid2world and
        self.codomain_grid2world will be None (denoting identity) in the resulting
diffeomorphic map.
"""
if self.dim == 2:
simplify_f = vfu.simplify_warp_function_2d
else:
simplify_f = vfu.simplify_warp_function_3d
# Simplify the forward transform
D = self.domain_grid2world
P = self.prealign
Rinv = self.disp_world2grid
Cinv = self.codomain_world2grid
#this is the matrix which we need to multiply the voxel coordinates
#to interpolate on the forward displacement field ("in"side the
#'forward' brackets in the expression above)
affine_idx_in = mult_aff(Rinv, mult_aff(P, D))
#this is the matrix which we need to multiply the voxel coordinates
#to add to the displacement ("out"side the 'forward' brackets in the
#expression above)
affine_idx_out = mult_aff(Cinv, mult_aff(P, D))
#this is the matrix which we need to multiply the displacement vector
#prior to adding to the transformed input point
affine_disp = Cinv
new_forward = simplify_f(self.forward, affine_idx_in,
affine_idx_out, affine_disp,
self.domain_shape)
# Simplify the backward transform
C = self.codomain_world2grid
Pinv = self.prealign_inv
Dinv = self.domain_world2grid
affine_idx_in = mult_aff(Rinv, C)
affine_idx_out = mult_aff(Dinv, mult_aff(Pinv, C))
affine_disp = mult_aff(Dinv, Pinv)
new_backward = simplify_f(self.backward, affine_idx_in,
affine_idx_out, affine_disp,
self.codomain_shape)
simplified = DiffeomorphicMap(self.dim,
self.disp_shape,
None,
self.domain_shape,
None,
self.codomain_shape,
None,
None)
simplified.forward = new_forward
simplified.backward = new_backward
return simplified
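# Typical warping flow (illustrative, not part of the original module;
# 'mapping' stands for a fitted DiffeomorphicMap, e.g. the result of
# SymmetricDiffeomorphicRegistration.optimize):
#   warped_moving = mapping.transform(moving)          # moving -> static grid
#   warped_static = mapping.transform_inverse(static)  # static -> moving grid
#   simplified = mapping.get_simplified_transform()    # affines folded in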
class DiffeomorphicRegistration(with_metaclass(abc.ABCMeta, object)):
def __init__(self, metric=None):
r""" Diffeomorphic Registration
This abstract class defines the interface to be implemented by any
optimization algorithm for diffeomorphic registration.
Parameters
----------
metric : SimilarityMetric object
the object measuring the similarity of the two images. The
registration algorithm will minimize (or maximize) the provided
similarity.
"""
if metric is None:
raise ValueError('The metric cannot be None')
self.metric = metric
self.dim = metric.dim
def set_level_iters(self, level_iters):
r"""Sets the number of iterations at each pyramid level
Establishes the maximum number of iterations to be performed at each
level of the Gaussian pyramid, similar to ANTS.
Parameters
----------
level_iters : list
the number of iterations at each level of the Gaussian pyramid.
            level_iters[0] corresponds to the coarsest level and
            level_iters[n-1] to the finest, where n is the length of the list
            (this is the order in which the levels are visited during
            optimization)
"""
self.levels = len(level_iters) if level_iters else 0
self.level_iters = level_iters
@abc.abstractmethod
def optimize(self):
r"""Starts the metric optimization
This is the main function each specialized class derived from this must
implement. Upon completion, the deformation field must be available from
the forward transformation model.
"""
@abc.abstractmethod
def get_map(self):
r"""
Returns the resulting diffeomorphic map after optimization
"""
class SymmetricDiffeomorphicRegistration(DiffeomorphicRegistration):
def __init__(self,
metric,
level_iters=None,
step_length=0.25,
ss_sigma_factor=0.2,
opt_tol=1e-5,
inv_iter=20,
inv_tol=1e-3,
callback=None):
r""" Symmetric Diffeomorphic Registration (SyN) Algorithm
Performs the multi-resolution optimization algorithm for non-linear
registration using a given similarity metric.
Parameters
----------
metric : SimilarityMetric object
the metric to be optimized
level_iters : list of int
the number of iterations at each level of the Gaussian Pyramid (the
length of the list defines the number of pyramid levels to be
used)
opt_tol : float
the optimization will stop when the estimated derivative of the
energy profile w.r.t. time falls below this threshold
inv_iter : int
the number of iterations to be performed by the displacement field
inversion algorithm
step_length : float
the length of the maximum displacement vector of the update
displacement field at each iteration
ss_sigma_factor : float
parameter of the scale-space smoothing kernel. For example, the
std. dev. of the kernel will be factor*(2^i) in the isotropic case
where i = 0, 1, ..., n_scales is the scale
inv_tol : float
the displacement field inversion algorithm will stop iterating
when the inversion error falls below this threshold
        callback : function(SymmetricDiffeomorphicRegistration, int)
            a function receiving a SymmetricDiffeomorphicRegistration object
            and the current RegistrationStages value, called at each stage
            transition (this optimizer will call this function passing self
            and the stage as parameters)
"""
super(SymmetricDiffeomorphicRegistration, self).__init__(metric)
if level_iters is None:
level_iters = [100, 100, 25]
if len(level_iters) == 0:
raise ValueError('The iterations list cannot be empty')
self.set_level_iters(level_iters)
self.step_length = step_length
self.ss_sigma_factor = ss_sigma_factor
self.opt_tol = opt_tol
self.inv_tol = inv_tol
self.inv_iter = inv_iter
self.energy_window = 12
self.energy_list = []
self.full_energy_profile = []
self.verbosity = VerbosityLevels.STATUS
self.callback = callback
self.moving_ss = None
self.static_ss = None
self.static_direction = None
self.moving_direction = None
self.mask0 = metric.mask0
def update(self, current_displacement, new_displacement,
disp_world2grid, time_scaling):
r"""Composition of the current displacement field with the given field
Interpolates new displacement at the locations defined by
current_displacement. Equivalently, computes the composition C of the
given displacement fields as C(x) = B(A(x)), where A is
current_displacement and B is new_displacement. This function is
intended to be used with deformation fields of the same sampling
(e.g. to be called by a registration algorithm).
Parameters
----------
current_displacement : array, shape (R', C', 2) or (S', R', C', 3)
the displacement field defining where to interpolate
new_displacement
new_displacement : array, shape (R, C, 2) or (S, R, C, 3)
the displacement field to be warped by current_displacement
disp_world2grid : array, shape (dim+1, dim+1)
the space-to-grid transform associated with the displacements'
grid (we assume that both displacements are discretized over the
same grid)
        time_scaling : float
            scaling factor applied to new_displacement. The effect may be
            interpreted as moving the current displacements along a factor
            (`time_scaling`) of the new ones.
Returns
-------
updated : array, shape (the same as new_displacement)
the warped displacement field
mean_norm : the mean norm of all vectors in current_displacement
"""
mean_norm = np.sqrt(np.sum((np.array(current_displacement) ** 2), -1)).mean()
# We assume that both displacement fields have the same
# grid2world transform, which implies premult_index=Identity
# and premult_disp is the world2grid transform associated with
# the displacements' grid
self.compose(current_displacement, new_displacement, None,
disp_world2grid, time_scaling, current_displacement)
return np.array(current_displacement), np.array(mean_norm)
def get_map(self):
r"""Returns the resulting diffeomorphic map
Returns the DiffeomorphicMap registering the moving image towards
the static image.
"""
return self.static_to_ref
def _connect_functions(self):
r"""Assign the methods to be called according to the image dimension
Assigns the appropriate functions to be called for displacement field
inversion, Gaussian pyramid, and affine / dense deformation composition
according to the dimension of the input images e.g. 2D or 3D.
"""
if self.dim == 2:
self.invert_vector_field = vfu.invert_vector_field_fixed_point_2d
self.compose = vfu.compose_vector_fields_2d
else:
self.invert_vector_field = vfu.invert_vector_field_fixed_point_3d
self.compose = vfu.compose_vector_fields_3d
def _init_optimizer(self, static, moving,
static_grid2world, moving_grid2world, prealign):
r"""Initializes the registration optimizer
Initializes the optimizer by computing the scale space of the input
images and allocating the required memory for the transformation models
at the coarsest scale.
Parameters
----------
static: array, shape (S, R, C) or (R, C)
the image to be used as reference during optimization. The
displacement fields will have the same discretization as the static
image.
moving: array, shape (S, R, C) or (R, C)
the image to be used as "moving" during optimization. Since the
deformation fields' discretization is the same as the static image,
it is necessary to pre-align the moving image to ensure its domain
lies inside the domain of the deformation fields. This is assumed to
be accomplished by "pre-aligning" the moving image towards the
static using an affine transformation given by the 'prealign' matrix
static_grid2world: array, shape (dim+1, dim+1)
the voxel-to-space transformation associated to the static image
moving_grid2world: array, shape (dim+1, dim+1)
the voxel-to-space transformation associated to the moving image
prealign: array, shape (dim+1, dim+1)
the affine transformation (operating on the physical space)
pre-aligning the moving image towards the static
"""
self._connect_functions()
#Extract information from the affine matrices to create the scale space
static_direction, static_spacing = \
get_direction_and_spacings(static_grid2world, self.dim)
moving_direction, moving_spacing = \
get_direction_and_spacings(moving_grid2world, self.dim)
#the images' directions don't change with scale
self.static_direction = np.eye(self.dim + 1)
self.moving_direction = np.eye(self.dim + 1)
self.static_direction[:self.dim, :self.dim] = static_direction
self.moving_direction[:self.dim, :self.dim] = moving_direction
#Build the scale space of the input images
if self.verbosity >= VerbosityLevels.DIAGNOSE:
print('Applying zero mask: ' + str(self.mask0))
if self.verbosity >= VerbosityLevels.STATUS:
print('Creating scale space from the moving image. Levels: %d. '
'Sigma factor: %f.' % (self.levels, self.ss_sigma_factor))
self.moving_ss = ScaleSpace(moving, self.levels, moving_grid2world,
moving_spacing, self.ss_sigma_factor,
self.mask0)
if self.verbosity >= VerbosityLevels.STATUS:
print('Creating scale space from the static image. Levels: %d. '
'Sigma factor: %f.' % (self.levels, self.ss_sigma_factor))
self.static_ss = ScaleSpace(static, self.levels, static_grid2world,
static_spacing, self.ss_sigma_factor,
self.mask0)
if self.verbosity >= VerbosityLevels.DEBUG:
print('Moving scale space:')
for level in range(self.levels):
self.moving_ss.print_level(level)
print('Static scale space:')
for level in range(self.levels):
self.static_ss.print_level(level)
#Get the properties of the coarsest level from the static image. These
#properties will be taken as the reference discretization.
disp_shape = self.static_ss.get_domain_shape(self.levels-1)
disp_grid2world = self.static_ss.get_affine(self.levels-1)
# The codomain discretization of both diffeomorphic maps is
# precisely the discretization of the static image
codomain_shape = static.shape
codomain_grid2world = static_grid2world
#The forward model transforms points from the static image
#to points on the reference (which is the static as well). So the domain
        #properties are taken from the static image. Since it's the same as the
#reference, we don't need to pre-align.
domain_shape = static.shape
domain_grid2world = static_grid2world
self.static_to_ref = DiffeomorphicMap(self.dim,
disp_shape,
disp_grid2world,
domain_shape,
domain_grid2world,
codomain_shape,
codomain_grid2world,
None)
self.static_to_ref.allocate()
#The backward model transforms points from the moving image
#to points on the reference (which is the static). So the input
#properties are taken from the moving image, and we need to pre-align
#points on the moving physical space to the reference physical space by
#applying the inverse of pre-align. This is done this way to make it
#clear for the user: the pre-align matrix is usually obtained by doing
#affine registration of the moving image towards the static image, which
#results in a matrix transforming points in the static physical space to
#points in the moving physical space
prealign_inv = None if prealign is None else npl.inv(prealign)
domain_shape = moving.shape
domain_grid2world = moving_grid2world
self.moving_to_ref = DiffeomorphicMap(self.dim,
disp_shape,
disp_grid2world,
domain_shape,
domain_grid2world,
codomain_shape,
codomain_grid2world,
prealign_inv)
self.moving_to_ref.allocate()
def _end_optimizer(self):
r"""Frees the resources allocated during initialization
"""
del self.moving_ss
del self.static_ss
def _iterate(self):
r"""Performs one symmetric iteration
Performs one iteration of the SyN algorithm:
1.Compute forward
2.Compute backward
3.Update forward
4.Update backward
5.Compute inverses
6.Invert the inverses
Returns
-------
der : float
the derivative of the energy profile, computed by fitting a
quadratic function to the energy values at the latest T iterations,
where T = self.energy_window. If the current iteration is less than
T then np.inf is returned instead.
"""
#Acquire current resolution information from scale spaces
current_moving = self.moving_ss.get_image(self.current_level)
current_static = self.static_ss.get_image(self.current_level)
current_disp_shape = \
self.static_ss.get_domain_shape(self.current_level)
current_disp_grid2world = \
self.static_ss.get_affine(self.current_level)
current_disp_world2grid = \
self.static_ss.get_affine_inv(self.current_level)
current_disp_spacing = \
self.static_ss.get_spacing(self.current_level)
#Warp the input images (smoothed to the current scale) to the common
#(reference) space at the current resolution
wstatic = self.static_to_ref.transform_inverse(current_static, 'linear',
None,
current_disp_shape,
current_disp_grid2world)
wmoving = self.moving_to_ref.transform_inverse(current_moving, 'linear',
None,
current_disp_shape,
current_disp_grid2world)
#Pass both images to the metric. Now both images are sampled on the
#reference grid (equal to the static image's grid) and the direction
#doesn't change across scales
self.metric.set_moving_image(wmoving, current_disp_grid2world,
current_disp_spacing, self.static_direction)
self.metric.use_moving_image_dynamics(
current_moving, self.moving_to_ref.inverse())
self.metric.set_static_image(wstatic, current_disp_grid2world,
current_disp_spacing, self.static_direction)
self.metric.use_static_image_dynamics(
current_static, self.static_to_ref.inverse())
#Initialize the metric for a new iteration
self.metric.initialize_iteration()
if self.callback is not None:
self.callback(self, RegistrationStages.ITER_START)
#Compute the forward step (to be used to update the forward transform)
fw_step = np.array(self.metric.compute_forward())
#set zero displacements at the boundary
fw_step[0, ...] = 0
fw_step[:, 0, ...] = 0
fw_step[-1, ...] = 0
fw_step[:, -1, ...] = 0
        if self.dim == 3:
fw_step[:, :, 0, ...] = 0
fw_step[:, :, -1, ...] = 0
#Normalize the forward step
nrm = np.sqrt(np.sum((fw_step/current_disp_spacing)**2, -1)).max()
        if nrm > 0:
            fw_step /= nrm
#Add to current total field
self.static_to_ref.forward, md_forward = self.update(
self.static_to_ref.forward, fw_step,
current_disp_world2grid, self.step_length)
del fw_step
#Keep track of the forward energy
fw_energy = self.metric.get_energy()
#Compose the backward step (to be used to update the backward transform)
bw_step = np.array(self.metric.compute_backward())
        #set zero displacements at the boundary (mirroring the forward step)
        bw_step[0, ...] = 0
        bw_step[:, 0, ...] = 0
        bw_step[-1, ...] = 0
        bw_step[:, -1, ...] = 0
        if self.dim == 3:
            bw_step[:, :, 0, ...] = 0
            bw_step[:, :, -1, ...] = 0
#Normalize the backward step
nrm = np.sqrt(np.sum((bw_step/current_disp_spacing)**2, -1)).max()
        if nrm > 0:
            bw_step /= nrm
#Add to current total field
self.moving_to_ref.forward, md_backward = self.update(
self.moving_to_ref.forward, bw_step,
current_disp_world2grid, self.step_length)
del bw_step
#Keep track of the energy
bw_energy = self.metric.get_energy()
der = np.inf
n_iter = len(self.energy_list)
if len(self.energy_list) >= self.energy_window:
der = self._get_energy_derivative()
if self.verbosity >= VerbosityLevels.DIAGNOSE:
ch = '-' if np.isnan(der) else der
print('%d:\t%0.6f\t%0.6f\t%0.6f\t%s' %
(n_iter, fw_energy, bw_energy, fw_energy + bw_energy, ch))
self.energy_list.append(fw_energy + bw_energy)
#Invert the forward model's forward field
self.static_to_ref.backward = np.array(
self.invert_vector_field(
self.static_to_ref.forward,
current_disp_world2grid,
current_disp_spacing,
self.inv_iter, self.inv_tol, self.static_to_ref.backward))
#Invert the backward model's forward field
self.moving_to_ref.backward = np.array(
self.invert_vector_field(
self.moving_to_ref.forward,
current_disp_world2grid,
current_disp_spacing,
self.inv_iter, self.inv_tol, self.moving_to_ref.backward))
#Invert the forward model's backward field
self.static_to_ref.forward = np.array(
self.invert_vector_field(
self.static_to_ref.backward,
current_disp_world2grid,
current_disp_spacing,
self.inv_iter, self.inv_tol, self.static_to_ref.forward))
#Invert the backward model's backward field
self.moving_to_ref.forward = np.array(
self.invert_vector_field(
self.moving_to_ref.backward,
current_disp_world2grid,
current_disp_spacing,
self.inv_iter, self.inv_tol, self.moving_to_ref.forward))
#Free resources no longer needed to compute the forward and backward
#steps
if self.callback is not None:
self.callback(self, RegistrationStages.ITER_END)
self.metric.free_iteration()
return der
def _approximate_derivative_direct(self, x, y):
r"""Derivative of the degree-2 polynomial fit of the given x, y pairs
Directly computes the derivative of the least-squares-fit quadratic
function estimated from (x[...],y[...]) pairs.
Parameters
----------
x : array, shape(n,)
increasing array representing the x-coordinates of the points to
be fit
y : array, shape(n,)
array representing the y-coordinates of the points to be fit
Returns
-------
y0 : float
the estimated derivative at x0 = 0.5*len(x)
"""
x = np.asarray(x)
y = np.asarray(y)
X = np.row_stack((x**2, x, np.ones_like(x)))
        XX = X.dot(X.T)
        b = X.dot(y)
        beta = npl.solve(XX, b)
        x0 = 0.5 * len(x)
        y0 = 2.0 * beta[0] * x0 + beta[1]
return y0
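    # Illustrative check (not part of the original module): the fit is exact
    # for a true quadratic, so for y = x**2 the derivative at x0 = 0.5*len(x)
    # equals 2*x0. Since the method never touches self, it can be exercised
    # directly:
    #   x = np.arange(5.0)
    #   y = x ** 2
    #   SymmetricDiffeomorphicRegistration._approximate_derivative_direct(
    #       None, x, y)   # ~ 2 * 2.5 = 5.0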
def _get_energy_derivative(self):
r"""Approximate derivative of the energy profile
Returns the derivative of the estimated energy as a function of "time"
(iterations) at the last iteration
"""
n_iter = len(self.energy_list)
if n_iter < self.energy_window:
raise ValueError('Not enough data to fit the energy profile')
x = range(self.energy_window)
y = self.energy_list[(n_iter - self.energy_window):n_iter]
        ss = sum(y)
        if ss > 0:
            ss *= -1
        y = [v / ss for v in y]
        der = self._approximate_derivative_direct(x, y)
return der
def _optimize(self):
r"""Starts the optimization
The main multi-scale symmetric optimization algorithm
"""
self.full_energy_profile = []
if self.callback is not None:
self.callback(self, RegistrationStages.OPT_START)
for level in range(self.levels - 1, -1, -1):
if self.verbosity >= VerbosityLevels.STATUS:
                print('Optimizing level %d' % level)
self.current_level = level
self.metric.set_levels_below(self.levels - level)
self.metric.set_levels_above(level)
if level < self.levels - 1:
expand_factors = \
self.static_ss.get_expand_factors(level+1, level)
new_shape = self.static_ss.get_domain_shape(level)
self.static_to_ref.expand_fields(expand_factors, new_shape)
self.moving_to_ref.expand_fields(expand_factors, new_shape)
self.niter = 0
self.energy_list = []
derivative = np.inf
if self.callback is not None:
self.callback(self, RegistrationStages.SCALE_START)
while ((self.niter < self.level_iters[self.levels - 1 - level]) and
(self.opt_tol < derivative)):
derivative = self._iterate()
self.niter += 1
self.full_energy_profile.extend(self.energy_list)
if self.callback is not None:
self.callback(self, RegistrationStages.SCALE_END)
# Reporting mean and std in stats[1] and stats[2]
residual, stats = self.static_to_ref.compute_inversion_error()
if self.verbosity >= VerbosityLevels.DIAGNOSE:
print('Static-Reference Residual error: %0.6f (%0.6f)'
% (stats[1], stats[2]))
residual, stats = self.moving_to_ref.compute_inversion_error()
if self.verbosity >= VerbosityLevels.DIAGNOSE:
            print('Moving-Reference Residual error: %0.6f (%0.6f)'
% (stats[1], stats[2]))
#Compose the two partial transformations
self.static_to_ref = self.moving_to_ref.warp_endomorphism(
self.static_to_ref.inverse()).inverse()
# Report mean and std for the composed deformation field
residual, stats = self.static_to_ref.compute_inversion_error()
if self.verbosity >= VerbosityLevels.DIAGNOSE:
print('Final residual error: %0.6f (%0.6f)' % (stats[1], stats[2]))
if self.callback is not None:
self.callback(self, RegistrationStages.OPT_END)
def optimize(self, static, moving, static_grid2world=None, moving_grid2world=None,
prealign=None):
r"""
Starts the optimization
Parameters
----------
static: array, shape (S, R, C) or (R, C)
the image to be used as reference during optimization. The
displacement fields will have the same discretization as the static
image.
moving: array, shape (S, R, C) or (R, C)
the image to be used as "moving" during optimization. Since the
deformation fields' discretization is the same as the static image,
it is necessary to pre-align the moving image to ensure its domain
lies inside the domain of the deformation fields. This is assumed to
be accomplished by "pre-aligning" the moving image towards the
static using an affine transformation given by the 'prealign' matrix
static_grid2world: array, shape (dim+1, dim+1)
the voxel-to-space transformation associated to the static image
moving_grid2world: array, shape (dim+1, dim+1)
the voxel-to-space transformation associated to the moving image
prealign: array, shape (dim+1, dim+1)
the affine transformation (operating on the physical space)
pre-aligning the moving image towards the static
Returns
-------
static_to_ref : DiffeomorphicMap object
the diffeomorphic map that brings the moving image towards the
static one in the forward direction (i.e. by calling
static_to_ref.transform) and the static image towards the
moving one in the backward direction (i.e. by calling
static_to_ref.transform_inverse).
"""
if self.verbosity >= VerbosityLevels.DEBUG:
print("Pre-align:", prealign)
self._init_optimizer(static.astype(floating), moving.astype(floating),
static_grid2world, moving_grid2world, prealign)
self._optimize()
self._end_optimizer()
return self.static_to_ref
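# End-to-end usage sketch (illustrative, not part of the original module;
# SSDMetric is one of the similarity metrics shipped in dipy.align.metrics):
#   import numpy as np
#   from dipy.align.metrics import SSDMetric
#   static = np.random.rand(64, 64)
#   moving = np.random.rand(64, 64)
#   sdr = SymmetricDiffeomorphicRegistration(SSDMetric(2),
#                                            level_iters=[100, 50, 25])
#   mapping = sdr.optimize(static, moving)
#   warped_moving = mapping.transform(moving)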
| {
"content_hash": "8f5703225bc95f9bf709b6a9dbcb380d",
"timestamp": "",
"source": "github",
"line_count": 1721,
"max_line_length": 93,
"avg_line_length": 42.19174898314933,
"alnum_prop": 0.6071723682036028,
"repo_name": "oesteban/dipy",
"id": "fb86cdaf9493c12d8183ee445fbbbc3ba845da9f",
"size": "72612",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dipy/align/imwarp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2694"
},
{
"name": "Makefile",
"bytes": "3639"
},
{
"name": "Python",
"bytes": "2342429"
}
],
"symlink_target": ""
} |
'''
Exodus Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re, urllib, urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser2
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['divxcrawler.tv']
self.base_link = 'http://www.divxcrawler.tv'
self.search_link = '/latest.htm'
self.search_link2 = '/streaming.htm'
self.search_link3 = '/movies.htm'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url is None: return sources
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            imdb = data['imdb']
            link = []
try:
query = urlparse.urljoin(self.base_link, self.search_link)
result = client.request(query)
m = re.findall('Movie Size:(.+?)<.+?href="(.+?)".+?href="(.+?)"\s*onMouse', result, re.DOTALL)
m = [(i[0], i[1], i[2]) for i in m if imdb in i[1]]
if m:
link = m
else:
query = urlparse.urljoin(self.base_link, self.search_link2)
result = client.request(query)
m = re.findall('Movie Size:(.+?)<.+?href="(.+?)".+?href="(.+?)"\s*onMouse', result, re.DOTALL)
m = [(i[0], i[1], i[2]) for i in m if imdb in i[1]]
if m:
link = m
else:
query = urlparse.urljoin(self.base_link, self.search_link3)
result = client.request(query)
m = re.findall('Movie Size:(.+?)<.+?href="(.+?)".+?href="(.+?)"\s*onMouse', result, re.DOTALL)
m = [(i[0], i[1], i[2]) for i in m if imdb in i[1]]
if m: link = m
except:
return
for item in link:
try:
quality, info = source_utils.get_release_quality(item[2], None)
try:
size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[0])[-1]
div = 1 if size.endswith(('GB', 'GiB')) else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
size = '%.2f GB' % size
info.append(size)
except:
pass
info = ' | '.join(info)
url = item[2]
if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
sources.append({'source': 'DL', 'quality': quality, 'language': 'en', 'url': url, 'info': info,
'direct': True, 'debridonly': False})
except:
pass
return sources
except:
return sources
def resolve(self, url):
return url
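# Illustrative round trip (assumed inputs, not part of the add-on): movie()
# packs the lookup data into a query string and sources() reverses it with
# parse_qs before scraping the site.
#
#     url = urllib.urlencode({'imdb': 'tt0133093', 'title': 'The Matrix',
#                             'year': '1999'})
#     data = urlparse.parse_qs(url)
#     data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
#     assert data['imdb'] == 'tt0133093'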
| {
"content_hash": "49bece9897d940b373a186a5ebb82bf0",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 118,
"avg_line_length": 37.2972972972973,
"alnum_prop": 0.49082125603864735,
"repo_name": "TheWardoctor/Wardoctors-repo",
"id": "360fb25ffe7f03a06c96e4d66d102af447391878",
"size": "4165",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "script.module.exodus/lib/resources/lib/sources/en/divxcrawler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3208"
},
{
"name": "JavaScript",
"bytes": "115722"
},
{
"name": "Python",
"bytes": "34405207"
},
{
"name": "Shell",
"bytes": "914"
}
],
"symlink_target": ""
} |
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
Test.Summary = 'Test TSContScheduleOnPool API'
Test.ContinueOnFail = True
# Define default ATS
ts = Test.MakeATSProcess('ts')
Test.testName = 'Test TSContScheduleOnPool API'
ts.Disk.records_config.update({
'proxy.config.exec_thread.autoconfig': 0,
'proxy.config.exec_thread.autoconfig.scale': 1.5,
'proxy.config.exec_thread.limit': 32,
'proxy.config.accept_threads': 1,
'proxy.config.task_threads': 2,
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'TSContSchedule_test'
})
# Load plugin
Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'cont_schedule.so'), ts, 'pool')
# www.example.com Host
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'printf "Test TSContScheduleOnPool API"'
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.StartBefore(ts)
ts.Disk.traffic_out.Content = "gold/schedule_on_pool.gold"
ts.Disk.traffic_out.Content += Testers.ExcludesExpression('fail', 'should not contain "fail"')
| {
"content_hash": "04e6c249193832ac3ab788727177365f",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 102,
"avg_line_length": 36.97959183673469,
"alnum_prop": 0.7472406181015453,
"repo_name": "vmamidi/trafficserver",
"id": "633970cc360eae79f04acedf6b6bbd37e148b523",
"size": "1812",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/gold_tests/cont_schedule/schedule_on_pool.test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1478484"
},
{
"name": "C++",
"bytes": "16571327"
},
{
"name": "CMake",
"bytes": "13151"
},
{
"name": "Dockerfile",
"bytes": "6693"
},
{
"name": "Java",
"bytes": "9881"
},
{
"name": "Lua",
"bytes": "64412"
},
{
"name": "M4",
"bytes": "216563"
},
{
"name": "Makefile",
"bytes": "250981"
},
{
"name": "Objective-C",
"bytes": "12976"
},
{
"name": "Perl",
"bytes": "128436"
},
{
"name": "Python",
"bytes": "1517480"
},
{
"name": "SWIG",
"bytes": "25777"
},
{
"name": "Shell",
"bytes": "177920"
},
{
"name": "Starlark",
"bytes": "987"
},
{
"name": "Vim script",
"bytes": "192"
}
],
"symlink_target": ""
} |
from setuptools import setup
setup(
name='UUTracking',
version='0.1',
description='Monitor framework',
packages=['UUTrack',
'UUTrack.View',
'UUTrack.View.Monitor',
'UUTrack.Model',
'UUTrack.Model.Cameras',
'UUTrack.Controller',
'UUTrack.Controller.devices',
'UUTrack.Controller.devices.keysight',
'UUTrack.Controller.devices.hamamatsu',
'UUTrack.Controller.devices.PhotonicScience'],
url='https://github.com/aquilesC/UUTrack',
license='MIT',
author='Aquiles',
author_email='aquiles@aquicarattino.com',
classifiers=[
'Intended Audience :: End Users/Desktop',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
],
package_data={'UUTrack': ['View/Monitor/Icons/*.*', 'View/Monitor/Icons/*.*']},
include_package_data=True,
install_requires=['lantz',]
)
| {
"content_hash": "2fb3da77974a77f47561176893b819c3",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 83,
"avg_line_length": 33.758620689655174,
"alnum_prop": 0.5914198161389173,
"repo_name": "aquilesC/UUTrack",
"id": "1efad9ed0539a80a308bc7dd437160ef677ec37d",
"size": "979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "368149"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('allmychanges', '0002_auto_20160216_0839'),
]
operations = [
migrations.AddField(
model_name='issue',
name='importance',
field=models.IntegerField(default=0, db_index=True, blank=True),
),
]
| {
"content_hash": "60e53e2efa5dd5d3d6be1bcd27be5d1d",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 76,
"avg_line_length": 22.77777777777778,
"alnum_prop": 0.6073170731707317,
"repo_name": "AllMyChanges/allmychanges.com",
"id": "7a7a00a9cd8eaa6f1a70b1b3944553324a79ec15",
"size": "434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "allmychanges/migrations/0003_issue_importance.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "147634"
},
{
"name": "Dockerfile",
"bytes": "735"
},
{
"name": "Emacs Lisp",
"bytes": "905"
},
{
"name": "HTML",
"bytes": "96639"
},
{
"name": "JavaScript",
"bytes": "2645620"
},
{
"name": "Makefile",
"bytes": "7806"
},
{
"name": "Python",
"bytes": "752509"
},
{
"name": "Shell",
"bytes": "1426"
},
{
"name": "Stylus",
"bytes": "58519"
}
],
"symlink_target": ""
} |
"""
Features of waveforms (e.g waveform_snr).
:copyright: Copyright 2014-2020 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function, unicode_literals
import warnings
import neo
import numpy as np
__all__ = [
"waveform_width",
"waveform_snr"
]
def waveform_width(waveform, cutoff=0.75):
"""
Calculate the width (trough-to-peak TTP) of a waveform.
    Searches for the index of the minimum within the first `cutoff` fraction
    of the waveform vector, then for the index of the maximum after the
    identified minimum, and returns the difference between the two indices.
Parameters
----------
waveform : array-like
Time course of a single waveform. Accepts a list, a numpy array or a
quantity.
cutoff : float, optional
Defines the normalized range `[0, cutoff]` of the input sequence for
computing the minimum. Must be in `[0, 1)` range.
Default: 0.75.
Returns
-------
width : int
Width of a waveform expressed as a number of data points
Raises
------
ValueError
If `waveform` is not a one-dimensional vector with at least two
numbers.
If `cutoff` is not in `[0, 1)` range.
"""
waveform = np.squeeze(waveform)
if np.ndim(waveform) != 1:
raise ValueError('Expected 1-dimensional waveform.')
if len(waveform) < 2:
raise ValueError('Too short waveform.')
if not (0 <= cutoff < 1):
        raise ValueError('Cutoff must be in range [0, 1).')
min_border = max(1, int(len(waveform) * cutoff))
idx_min = np.argmin(waveform[:min_border])
idx_max = np.argmax(waveform[idx_min:]) + idx_min
width = idx_max - idx_min
return width
def waveform_snr(waveforms):
"""
Return the signal-to-noise ratio of the waveforms of one or more
spike trains.
Signal-to-noise ratio is defined as the difference in mean peak-to-trough
voltage divided by twice the mean SD. The mean SD is computed by
measuring the SD of the spike waveform over all acquired spikes
at each of the sample time points of the waveform and then averaging [1]_.
Parameters
----------
waveforms : array-like
A list or a quantity or a numpy array of waveforms of shape
``(n_waveforms, time)`` in case of a single spike train or
``(n_waveforms, n_spiketrains, time)`` in case of one or more spike
trains.
Returns
-------
snr : float or np.ndarray
Signal-to-noise ratio according to [1]_. If the input `waveforms`
shape is ``(n_waveforms, time)`` or ``(n_waveforms, 1, time)``, a
single float is returned. Otherwise, if the shape is
``(n_waveforms, n_spiketrains, time)``, a numpy array of length
``n_spiketrains`` is returned.
Notes
-----
The waveforms of a `neo.SpikeTrain` can be extracted as
`spiketrain.waveforms`, if it's loaded from a file, in which case you need
to set ``load_waveforms=True`` in ``neo.read_block()``.
References
----------
.. [1] Hatsopoulos, N. G., Xu, Q. & Amit, Y.
Encoding of Movement Fragments in the Motor Cortex.
J. Neurosci. 27, 5105–5114 (2007).
"""
if isinstance(waveforms, neo.SpikeTrain):
warnings.warn("spiketrain input is deprecated; "
"pass 'spiketrain.waveforms' directly.")
waveforms = waveforms.waveforms
# asarray removes quantities, if present
waveforms = np.squeeze(np.asarray(waveforms))
# average over all waveforms for each bin
mean_waveform = waveforms.mean(axis=0)
# standard deviation over all waveforms over all bins
std_waveform = waveforms.std(axis=0).mean(axis=-1)
# peak to trough voltage signal
peak_range = mean_waveform.max(axis=-1) - mean_waveform.min(axis=-1)
# noise
noise = 2 * std_waveform
snr = peak_range / noise
if np.isnan(snr).any():
warnings.warn('The waveforms noise was evaluated to 0. Returning NaN')
return snr
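# Minimal usage sketch (synthetic data; names and values are illustrative):
#
#     import numpy as np
#     waveform = np.sin(np.linspace(0, 2 * np.pi, 40))
#     width = waveform_width(waveform)          # trough-to-peak, in samples
#     rng = np.random.RandomState(0)
#     waveforms = waveform + 0.01 * rng.randn(10, waveform.size)
#     snr = waveform_snr(waveforms)             # peak-to-trough / (2 * mean SD)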
| {
"content_hash": "cf0a641aea56e182148820f730ea8919",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 78,
"avg_line_length": 31.62015503875969,
"alnum_prop": 0.6388820789409169,
"repo_name": "JuliaSprenger/elephant",
"id": "84e32272d2b2916d2dd6f780eba66bc2ed4beed3",
"size": "4105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elephant/waveform_features.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1327156"
}
],
"symlink_target": ""
} |
import unittest
from os.path import abspath, basename, dirname, join
from robot.utils.asserts import assert_equals, assert_true
from robot.result.testsuite import TestSuite
from robot.result.testcase import TestCase
from robot.result.keyword import Keyword
from robot.result.message import Message
from robot.result.executionerrors import ExecutionErrors
from robot.model import Statistics
from robot.reporting.jsmodelbuilders import *
from robot.reporting.stringcache import StringIndex
CURDIR = dirname(abspath(__file__))
def remap(model, strings):
if isinstance(model, StringIndex):
return strings[model][1:]
elif isinstance(model, (int, long, type(None))):
return model
elif isinstance(model, tuple):
return tuple(remap(item, strings) for item in model)
else:
raise AssertionError("Item '%s' has invalid type '%s'" % (model, type(model)))
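# Illustrative: remap() resolves StringIndex entries against the strings
# table (the stored strings carry a one-character prefix that is dropped),
# while plain integers and None pass through unchanged.
#
#     remap((StringIndex(1), 42, None), ('*', '*suite'))
#     # -> ('suite', 42, None)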
class TestBuildTestSuite(unittest.TestCase):
def test_default_suite(self):
self._verify_suite(TestSuite())
def test_suite_with_values(self):
suite = TestSuite('Name', 'Doc', {'m1': 'v1', 'M2': 'V2'}, None, 'Message',
'20111204 19:00:00.000', '20111204 19:00:42.001')
self._verify_suite(suite, 'Name', 'Doc', ('m1', '<p>v1</p>', 'M2', '<p>V2</p>'),
message='Message', start=0, elapsed=42001)
def test_relative_source(self):
self._verify_suite(TestSuite(source='non-existing'), source='non-existing')
source = join(CURDIR, 'test_jsmodelbuilders.py')
self._verify_suite(TestSuite(source=source), source=source,
relsource=basename(source))
def test_suite_html_formatting(self):
self._verify_suite(TestSuite(name='*xxx*', doc='*bold* <&>',
metadata={'*x*': '*b*', '<': '>'}),
name='*xxx*', doc='<b>bold</b> <&>',
metadata=('*x*', '<p><b>b</b></p>', '<', '<p>></p>'))
def test_default_test(self):
self._verify_test(TestCase())
def test_test_with_values(self):
test = TestCase('Name', '*Doc*', ['t1', 't2'], '1 minute', 'PASS', 'Msg',
'20111204 19:22:22.222', '20111204 19:22:22.333')
self._verify_test(test, 'Name', '<b>Doc</b>', ('t1', 't2'), 1,
'1 minute', 1, 'Msg', 0, 111)
def test_default_keyword(self):
self._verify_keyword(Keyword())
def test_keyword_with_values(self):
kw = Keyword('KW Name', 'libname', 'http://doc', ('arg1', 'arg2'), ('${v1}', '${v2}'),
'1 second', 'setup', 'PASS',
'20111204 19:42:42.000', '20111204 19:42:42.042')
self._verify_keyword(kw, 1, 'KW Name', 'libname',
'<a href="http://doc">http://doc</a>',
'arg1, arg2', '${v1}, ${v2}', '1 second', 1, 0, 42)
def test_default_message(self):
self._verify_message(Message())
self._verify_min_message_level('INFO')
def test_message_with_values(self):
msg = Message('Message', 'DEBUG', timestamp='20111204 22:04:03.210')
self._verify_message(msg, 'Message', 1, 0)
self._verify_min_message_level('DEBUG')
def test_message_linking(self):
msg = Message('Message', 'WARN', timestamp='20111204 22:04:03.210',
parent=TestCase().keywords.create())
self._verify_message(msg, 'Message', 3, 0)
links = self.context._msg_links
assert_equals(len(links), 1)
key = (msg.message, msg.level, msg.timestamp)
assert_equals(remap(links[key], self.context.strings), 't1-k1')
def test_message_with_html(self):
self._verify_message(Message('<img>'), '<img>')
self._verify_message(Message('<b></b>', html=True), '<b></b>')
def test_nested_structure(self):
suite = TestSuite()
suite.set_criticality(critical_tags=['crit'])
suite.keywords = [Keyword(type='setup'), Keyword(type='teardown')]
K1 = self._verify_keyword(suite.keywords[0], type=1)
K2 = self._verify_keyword(suite.keywords[1], type=2)
suite.suites = [TestSuite()]
suite.suites[0].tests = [TestCase(tags=['crit', 'xxx'])]
t = self._verify_test(suite.suites[0].tests[0], tags=('crit', 'xxx'))
suite.tests = [TestCase(), TestCase(status='PASS')]
S1 = self._verify_suite(suite.suites[0],
status=0, tests=(t,), stats=(1, 0, 1, 0))
suite.tests[0].keywords = [Keyword(type='for'), Keyword()]
suite.tests[0].keywords[0].keywords = [Keyword(type='foritem')]
suite.tests[0].keywords[0].messages = [Message()]
k = self._verify_keyword(suite.tests[0].keywords[0].keywords[0], type=4)
m = self._verify_message(suite.tests[0].keywords[0].messages[0])
k1 = self._verify_keyword(suite.tests[0].keywords[0],
type=3, keywords=(k,), messages=(m,))
suite.tests[0].keywords[1].messages = [Message(), Message('msg', level='TRACE')]
m1 = self._verify_message(suite.tests[0].keywords[1].messages[0])
m2 = self._verify_message(suite.tests[0].keywords[1].messages[1], 'msg', level=0)
k2 = self._verify_keyword(suite.tests[0].keywords[1], messages=(m1, m2))
T1 = self._verify_test(suite.tests[0], critical=0, keywords=(k1, k2))
T2 = self._verify_test(suite.tests[1], critical=0, status=1)
self._verify_suite(suite, status=0, keywords=(K1, K2), suites=(S1,),
tests=(T1, T2), stats=(3, 1, 1, 0))
self._verify_min_message_level('TRACE')
def test_timestamps(self):
suite = TestSuite(starttime='20111205 00:33:33.333')
suite.keywords.create(starttime='20111205 00:33:33.334')
suite.keywords[0].messages.create('Message', timestamp='20111205 00:33:33.343')
suite.keywords[0].messages.create(level='DEBUG', timestamp='20111205 00:33:33.344')
suite.tests.create(starttime='20111205 00:33:34.333')
context = JsBuildingContext()
model = SuiteBuilder(context).build(suite)
self._verify_status(model[5], start=0)
self._verify_status(model[-2][0][7], start=1)
self._verify_mapped(model[-2][0][-1], context.strings,
((10, 2, 'Message'), (11, 1, '')))
self._verify_status(model[-3][0][5], start=1000)
def _verify_status(self, model, status=0, start=None, elapsed=0):
assert_equals(model, (status, start, elapsed))
def _verify_suite(self, suite, name='', doc='', metadata=(), source='',
relsource='', status=1, message='', start=None, elapsed=0,
suites=(), tests=(), keywords=(), stats=(0, 0, 0, 0)):
status = (status, start, elapsed, message) \
if message else (status, start, elapsed)
doc = '<p>%s</p>' % doc if doc else ''
return self._build_and_verify(SuiteBuilder, suite, name, source,
relsource, doc, metadata, status,
suites, tests, keywords, stats)
def _get_status(self, *elements):
return elements if elements[-1] else elements[:-1]
def _verify_test(self, test, name='', doc='', tags=(), critical=1, timeout='',
status=0, message='', start=None, elapsed=0, keywords=()):
status = (status, start, elapsed, message) \
if message else (status, start, elapsed)
doc = '<p>%s</p>' % doc if doc else ''
return self._build_and_verify(TestBuilder, test, name, timeout,
critical, doc, tags, status, keywords)
def _verify_keyword(self, keyword, type=0, kwname='', libname='', doc='',
args='', assign='', timeout='', status=0, start=None,
elapsed=0, keywords=(), messages=()):
status = (status, start, elapsed)
doc = '<p>%s</p>' % doc if doc else ''
return self._build_and_verify(KeywordBuilder, keyword, type, kwname,
libname, timeout, doc, args, assign,
status, keywords, messages)
def _verify_message(self, msg, message='', level=2, timestamp=None):
return self._build_and_verify(MessageBuilder, msg, timestamp, level, message)
def _verify_min_message_level(self, expected):
assert_equals(self.context.min_level, expected)
def _build_and_verify(self, builder_class, item, *expected):
self.context = JsBuildingContext(log_path=join(CURDIR, 'log.html'))
model = builder_class(self.context).build(item)
self._verify_mapped(model, self.context.strings, expected)
return expected
def _verify_mapped(self, model, strings, expected):
mapped_model = tuple(remap(model, strings))
assert_equals(mapped_model, expected)
class TestSplitting(unittest.TestCase):
def test_test_keywords(self):
suite = self._get_suite_with_tests()
expected, _ = self._build_and_remap(suite)
expected_split = [expected[-3][0][-1], expected[-3][1][-1]]
expected[-3][0][-1], expected[-3][1][-1] = 1, 2
model, context = self._build_and_remap(suite, split_log=True)
assert_equals(context.strings, ('*', '*suite', '*t1', '*t2'))
assert_equals(model, expected)
assert_equals([strings for _, strings in context.split_results],
[('*', '*t1-k1', '*t1-k1-k1', '*t1-k2'), ('*', '*t2-k1')])
assert_equals([self._to_list(remap(*res)) for res in context.split_results],
expected_split)
def _get_suite_with_tests(self):
suite = TestSuite(name='suite')
suite.tests = [TestCase('t1'), TestCase('t2')]
suite.tests[0].keywords = [Keyword('t1-k1'), Keyword('t1-k2')]
suite.tests[0].keywords[0].keywords = [Keyword('t1-k1-k1')]
suite.tests[1].keywords = [Keyword('t2-k1')]
return suite
def _build_and_remap(self, suite, split_log=False):
context = JsBuildingContext(split_log=split_log)
model = remap(SuiteBuilder(context).build(suite), context.strings)
return self._to_list(model), context
def _to_list(self, model):
return list(self._to_list(item) if isinstance(item, tuple) else item
for item in model)
def test_suite_keywords(self):
suite = self._get_suite_with_keywords()
expected, _ = self._build_and_remap(suite)
expected_split = [expected[-2][0][-2], expected[-2][1][-2]]
expected[-2][0][-2], expected[-2][1][-2] = 1, 2
model, context = self._build_and_remap(suite, split_log=True)
assert_equals(context.strings, ('*', '*root', '*k1', '*k2'))
assert_equals(model, expected)
assert_equals([strings for _, strings in context.split_results],
[('*', '*k1-k2'), ('*',)])
assert_equals([self._to_list(remap(*res)) for res in context.split_results],
expected_split)
def _get_suite_with_keywords(self):
suite = TestSuite(name='root')
suite.keywords = [Keyword('k1', type='setup'), Keyword('k2', type='teardown')]
suite.keywords[0].keywords = [Keyword('k1-k2')]
return suite
def test_nested_suite_and_test_keywords(self):
suite = self._get_nested_suite_with_tests_and_keywords()
expected, _ = self._build_and_remap(suite)
expected_split = [expected[-4][0][-3][0][-1], expected[-4][0][-3][1][-1],
expected[-4][1][-3][0][-1], expected[-4][1][-2][0][-2],
expected[-2][0][-2], expected[-2][1][-2]]
(expected[-4][0][-3][0][-1], expected[-4][0][-3][1][-1],
expected[-4][1][-3][0][-1], expected[-4][1][-2][0][-2],
expected[-2][0][-2], expected[-2][1][-2]) = 1, 2, 3, 4, 5, 6
model, context = self._build_and_remap(suite, split_log=True)
assert_equals(model, expected)
assert_equals([self._to_list(remap(*res)) for res in context.split_results],
expected_split)
def _get_nested_suite_with_tests_and_keywords(self):
suite = self._get_suite_with_keywords()
sub = TestSuite(name='suite2')
suite.suites = [self._get_suite_with_tests(), sub]
sub.keywords.create('kw', type='setup')
sub.keywords[0].keywords.create('skw')
sub.keywords[0].keywords[0].messages.create('Message')
sub.tests.create('test', doc='tdoc')
sub.tests[0].keywords.create('koowee', doc='kdoc')
return suite
def test_message_linking(self):
suite = self._get_suite_with_keywords()
msg = suite.keywords[0].keywords[0].messages.create(
'Message', 'WARN', timestamp='20111204 22:04:03.210')
context = JsBuildingContext(split_log=True)
SuiteBuilder(context).build(suite)
errors = ErrorsBuilder(context).build(ExecutionErrors([msg]))
assert_equals(remap(errors, context.strings),
((0, 3, 'Message', 's1-k1-k1'),))
assert_equals(remap(context.link(msg), context.strings), 's1-k1-k1')
assert_true('*s1-k1-k1' in context.strings)
for res in context.split_results:
assert_true('*s1-k1-k1' not in res[1])
class TestPruneInput(unittest.TestCase):
def setUp(self):
self.suite = TestSuite()
self.suite.keywords = [Keyword(), Keyword()]
s1 = self.suite.suites.create()
s1.keywords.create()
tc = s1.tests.create()
tc.keywords = [Keyword(), Keyword(), Keyword()]
s2 = self.suite.suites.create()
t1 = s2.tests.create()
t2 = s2.tests.create()
t1.keywords = [Keyword()]
t2.keywords = [Keyword(), Keyword()]
def test_prune_input_false(self):
SuiteBuilder(JsBuildingContext(prune_input=False)).build(self.suite)
assert_equals(len(self.suite.keywords), 2)
assert_equals(len(self.suite.suites[0].keywords), 1)
assert_equals(len(self.suite.suites[0].tests[0].keywords), 3)
assert_equals(len(self.suite.suites[1].keywords), 0)
assert_equals(len(self.suite.suites[1].tests[0].keywords), 1)
assert_equals(len(self.suite.suites[1].tests[1].keywords), 2)
def test_prune_input_true(self):
SuiteBuilder(JsBuildingContext(prune_input=True)).build(self.suite)
assert_equals(len(self.suite.keywords), 0)
assert_equals(len(self.suite.suites), 0)
assert_equals(len(self.suite.tests), 0)
def test_prune_errors(self):
errors = ExecutionErrors([Message(), Message()])
ErrorsBuilder(JsBuildingContext(prune_input=False)).build(errors)
assert_equals(len(errors), 2)
ErrorsBuilder(JsBuildingContext(prune_input=True)).build(errors)
assert_equals(len(errors), 0)
class TestBuildStatistics(unittest.TestCase):
def test_total_stats(self):
critical, all = self._build_statistics()[0]
self._verify_stat(all, 2, 2, 'All Tests', '00:00:33')
self._verify_stat(critical, 2, 0, 'Critical Tests', '00:00:22')
def test_tag_stats(self):
t2, comb, t1, t3 = self._build_statistics()[1]
self._verify_stat(t2, 2, 0, 't2', '00:00:22',
info='critical', doc='doc', links='t:url')
self._verify_stat(comb, 2, 0, 'name', '00:00:22',
info='combined', combined='t1&t2')
self._verify_stat(t1, 2, 2, 't1', '00:00:33')
self._verify_stat(t3, 0, 1, 't3', '00:00:01')
def test_suite_stats(self):
root, sub1, sub2 = self._build_statistics()[2]
self._verify_stat(root, 2, 2, 'root', '00:00:42', name='root', id='s1')
self._verify_stat(sub1, 1, 1, 'root.sub1', '00:00:10', name='sub1', id='s1-s1')
self._verify_stat(sub2, 1, 1, 'root.sub2', '00:00:30', name='sub2', id='s1-s2')
def _build_statistics(self):
return StatisticsBuilder().build(self._get_statistics())
def _get_statistics(self):
return Statistics(self._get_suite(),
suite_stat_level=2,
tag_stat_combine=[('t1&t2', 'name')],
tag_doc=[('t2', 'doc')],
tag_stat_link=[('?2', 'url', '%1')])
def _get_suite(self):
ts = lambda s, ms=0: '20120816 16:09:%02d.%03d' % (s, ms)
suite = TestSuite(name='root', starttime=ts(0), endtime=ts(42))
suite.set_criticality(critical_tags=['t2'])
sub1 = TestSuite(name='sub1', starttime=ts(0), endtime=ts(10))
sub2 = TestSuite(name='sub2')
suite.suites = [sub1, sub2]
sub1.tests = [
TestCase(tags=['t1', 't2'], status='PASS', starttime=ts(0), endtime=ts(1, 500)),
TestCase(tags=['t1', 't3'], status='FAIL', starttime=ts(2), endtime=ts(3, 499))
]
sub2.tests = [
TestCase(tags=['t1', 't2'], status='PASS', starttime=ts(10), endtime=ts(30))
]
sub2.suites.create(name='below suite stat level')\
.tests.create(tags=['t1'], status='FAIL', starttime=ts(30), endtime=ts(40))
return suite
def _verify_stat(self, stat, pass_, fail, label, elapsed, **attrs):
attrs.update({'pass': pass_, 'fail': fail, 'label': label,
'elapsed': elapsed})
assert_equals(stat, attrs)
class TestBuildErrors(unittest.TestCase):
def setUp(self):
msgs = [Message('Error', 'ERROR', timestamp='20111206 14:33:00.000'),
Message('Warning', 'WARN', timestamp='20111206 14:33:00.042')]
self.errors = ExecutionErrors(msgs)
def test_errors(self):
context = JsBuildingContext()
model = ErrorsBuilder(context).build(self.errors)
model = remap(model, context.strings)
assert_equals(model, ((0, 5, 'Error'), (42, 3, 'Warning')))
def test_linking(self):
self.errors.messages.create('Linkable', 'WARN',
timestamp='20111206 14:33:00.001')
context = JsBuildingContext()
kw = TestSuite().tests.create().keywords.create()
MessageBuilder(context).build(kw.messages.create('Linkable', 'WARN',
timestamp='20111206 14:33:00.001'))
model = ErrorsBuilder(context).build(self.errors)
model = remap(model, context.strings)
assert_equals(model, ((-1, 5, 'Error'), (41, 3, 'Warning'),
(0, 3, 'Linkable', 's1-t1-k1')))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "d440354978f4cb4d812c7ce860e040d8",
"timestamp": "",
"source": "github",
"line_count": 398,
"max_line_length": 94,
"avg_line_length": 47.178391959798994,
"alnum_prop": 0.5718698407626351,
"repo_name": "kyle1986/robortframe",
"id": "dee3b96bc8b87e577709f54111462b45c7b47f5e",
"size": "18777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utest/reporting/test_jsmodelbuilders.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "210"
},
{
"name": "CSS",
"bytes": "22850"
},
{
"name": "HTML",
"bytes": "137580"
},
{
"name": "Java",
"bytes": "59216"
},
{
"name": "JavaScript",
"bytes": "160117"
},
{
"name": "Python",
"bytes": "2072305"
},
{
"name": "RobotFramework",
"bytes": "1929991"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
} |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Fisher'] , ['LinearTrend'] , ['Seasonal_DayOfMonth'] , ['MLP'] );
| {
"content_hash": "7e2a53872a42a312dc16bd0155e40b94",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 88,
"avg_line_length": 40.25,
"alnum_prop": 0.7142857142857143,
"repo_name": "antoinecarme/pyaf",
"id": "94117ab76ea06826bc1486d2126b0cdce8ffe639",
"size": "161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_LinearTrend_Seasonal_DayOfMonth_MLP.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
def run_async(func):
"""
run_async(func)
function decorator, intended to make "func" run in a separate
thread (asynchronously).
Returns the created Thread object
E.g.:
@run_async
def task1():
do_something
@run_async
def task2():
do_something_too
t1 = task1()
t2 = task2()
...
t1.join()
t2.join()
"""
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target = func, args = args, kwargs = kwargs)
func_hl.start()
return func_hl
return async_func
if __name__ == '__main__':
from time import sleep
@run_async
def print_somedata():
print 'starting print_somedata'
sleep(2)
print 'print_somedata: 2 sec passed'
sleep(2)
print 'print_somedata: 2 sec passed'
sleep(2)
print 'finished print_somedata'
def main():
print_somedata()
print 'back in main'
print_somedata()
print 'back in main'
print_somedata()
print 'back in main'
main()
| {
"content_hash": "4ca157caa815f040bdc230d7d3fb4926",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 64,
"avg_line_length": 18.12727272727273,
"alnum_prop": 0.645937813440321,
"repo_name": "ActiveState/code",
"id": "b856ec32bf83be880e5f5843b50e6bf47647fc48",
"size": "1020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/576684_Simple_threading_decorator/recipe-576684.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
import logging
import json
import datetime
import math
from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404
from expenses.forms import ExpenseForm, SignupForm
from expenses.models import Expense, Balance
from django.db import IntegrityError
from django.db.models.query import QuerySet
from django.contrib import auth
from django.views.decorators.csrf import csrf_protect
MAX_RETURNED_EXPENSES = 30
def login_required(function=None, redirect_field_name=None):
"""
    Just make sure the user is authenticated to access a certain ajax view.
    Otherwise return an HttpResponse 401 (authentication required)
    instead of the 302 redirect of the original Django decorator.
"""
def _decorator(view_func):
def _wrapped_view(request, *args, **kwargs):
if request.user.is_authenticated():
return view_func(request, *args, **kwargs)
else:
return HttpResponse(status=401)
return _wrapped_view
if function is None:
return _decorator
else:
return _decorator(function)
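# Usage sketch (hypothetical view, for illustration only): anonymous AJAX
# callers get a bare 401 instead of a login redirect.
#
#     @login_required
#     def whoami(request):
#         return HttpResponse(request.user.username)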
def _serialize(model, ignore=[]):
obj = {}
field_names = model._meta.get_all_field_names()
for i in field_names:
do_ignore = False
item = getattr(model, i)
for ig in ignore:
if isinstance(item, ig):
do_ignore = True
if not do_ignore:
if type(item) == datetime.datetime:
obj[i] = item.isoformat('T')
else:
obj[i] = item
return obj
def serialize(models, ignore=[]):
obj = None
if type(models) == QuerySet:
obj = []
for model in models:
obj.append(_serialize(model, ignore))
else:
obj = _serialize(models, ignore)
return obj
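# Illustrative output (field names depend on the model): serialize() turns a
# model or QuerySet into JSON-safe data, ISO-formatting datetimes and
# skipping fields whose type is listed in `ignore`.
#
#     serialize(expense, ignore=[auth.models.User])
#     # -> {'id': 1, 'amount': 9.99, 'created_at': '2014-01-02T12:30:00', ...}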
def login(request):
status = 401
data = json.loads(request.body)
user = auth.authenticate(username=data['username'], password=data['password'])
if user is not None and user.is_active:
auth.login(request, user)
status = 200
data['password'] = ''
return HttpResponse(status=status, content=json.dumps(data), content_type="text/json")
@login_required
def logout(request):
auth.logout(request)
return HttpResponse(status=200)
def signup(request):
status = 406
response = None
data = json.loads(request.body)
form = SignupForm(data)
if form.is_valid():
try:
username = form.cleaned_data['username']
password = form.cleaned_data['password']
auth.models.User.objects.create_user(username, password=password)
user = auth.authenticate(**form.cleaned_data)
auth.login(request, user)
except IntegrityError:
response = {'errors':{'username':"Username is already taken."}}
else:
status = 201
else:
response = {'errors':form.errors}
return HttpResponse(status=status, content_type="text/json", content=json.dumps(response))
@csrf_protect
def index(request):
return render(request, 'index.html')
@login_required
def expense(request, id=0):
""" Get or Create/Update an expense """
status = 400
expense = None
errors = {}
if request.method == 'PUT':
data = json.loads(request.body)
        expense = get_object_or_404(Expense, pk=id, user=request.user)
        # Capture the amount before validation: ModelForm validation updates
        # the bound instance in place with the submitted data.
        old_amount = expense.amount
        form = ExpenseForm(data, instance=expense)
        if form.is_valid():
            balance = Balance.objects.get(user=request.user)
            # Reset balance because we're updating an expense.
            balance.amount -= old_amount
            form.save()
            balance.amount += expense.amount
            balance.save()
status = 200
else:
errors = {'errors':form.errors}
elif request.method == 'POST':
data = json.loads(request.body)
form = ExpenseForm(data)
if form.is_valid():
form.cleaned_data['user'] = request.user
expense = Expense(**form.cleaned_data)
expense.save()
try:
balance = Balance.objects.get(user=request.user)
except Balance.DoesNotExist:
# This user doesn't have a balance yet, create one.
balance = Balance.objects.create(amount=0.0, user=request.user)
balance.amount += expense.amount
balance.save()
status = 201
else:
errors = {'errors':form.errors}
elif request.method == 'DELETE':
expense = get_object_or_404(Expense, pk=id, user=request.user)
try:
# Make sure to update balance depending on the transaction type.
balance = Balance.objects.get(user=request.user)
balance.amount -= expense.amount
balance.save()
expense.delete()
except:
status = 500
return HttpResponse(status=status)
else:
        expense = get_object_or_404(Expense, pk=id)
        status = 200
response = errors if errors else serialize(expense, ignore=[auth.models.User])
return HttpResponse(content=json.dumps(response), status=status, content_type='application/json')
@login_required
def total(request):
""" Display the total """
json_obj = {'amount': 0.0}
try:
balance = Balance.objects.get(user=request.user)
except:
pass
else:
json_obj['amount'] = balance.amount
return HttpResponse(json.dumps(json_obj), content_type='application/json')
@login_required
def list(request):
""" render the list page with 30 entries """
page = int(request.GET.get('page', 1)) - 1
offset = MAX_RETURNED_EXPENSES * int(page)
expenses = Expense.objects.filter(user=request.user).order_by('created_at').reverse()[offset:(offset + MAX_RETURNED_EXPENSES)]
total = Expense.objects.filter(user=request.user).count()
remaining = total - (offset + len(expenses))
pages = math.ceil(float(total) / float(MAX_RETURNED_EXPENSES))
json_object = {'total': total, 'remaining': remaining, 'pages': pages, 'expenses': serialize(expenses, ignore=[auth.models.User])}
return HttpResponse(content=json.dumps(json_object), content_type='application/json')
| {
"content_hash": "89321f025216f7e4ef75d390bce9eb96",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 134,
"avg_line_length": 32.208955223880594,
"alnum_prop": 0.6244979919678715,
"repo_name": "deanproxy/register",
"id": "d6e44412e71e83c4bd8b45d8d5d4ad8f63a443cc",
"size": "6474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "expenses/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "258099"
},
{
"name": "JavaScript",
"bytes": "36133"
},
{
"name": "Python",
"bytes": "27850"
}
],
"symlink_target": ""
} |
import argparse
import tokenize
from difflib import unified_diff
from io import BytesIO
from pathlib import Path
from token import NAME, OP
from typing import Dict, List, Optional, Set
def main() -> None:
args = create_parser().parse_args()
build_files: Set[Path] = {
fp
for folder in args.folders
for fp in [*folder.rglob("BUILD"), *folder.rglob("BUILD.*")]
# Check that it really is a BUILD file
if fp.is_file() and fp.stem == "BUILD"
}
updates: Dict[Path, List[str]] = {}
for build in build_files:
possibly_new_build = maybe_rewrite_build(build)
if possibly_new_build is not None:
updates[build] = possibly_new_build
for build, new_content in updates.items():
if args.preview:
print(generate_diff(build, new_content))
else:
build.write_text("\n".join(new_content) + "\n")
def create_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description="Convert deprecated `source` fields to `sources`.",
)
parser.add_argument(
"folders", type=Path, nargs="+", help="Folders to recursively search for `BUILD` files"
)
parser.add_argument(
"-p",
"--preview",
action="store_true",
help="Output to stdout rather than overwriting BUILD files.",
)
return parser
def maybe_rewrite_line(line: str) -> Optional[str]:
try:
tokens = list(tokenize.tokenize(BytesIO(line.encode()).readline))
except tokenize.TokenError:
return None
source_field = next(
(token for token in tokens if token.type is NAME and token.string == "source"), None
)
if not source_field:
return None
source_field_index = tokens.index(source_field)
# Ensure that the next token is `=`
    if not (
        tokens[source_field_index + 1].type is OP
        and tokens[source_field_index + 1].string == "="
    ):
        return None
source_value = tokens[source_field_index + 2]
prefix = line[: source_field.start[1]]
interfix = line[source_field.end[1] : source_value.start[1]]
suffix = line[source_value.end[1] :]
return f"{prefix}sources{interfix}[{source_value.string}]{suffix}"
def maybe_rewrite_build(build_file: Path) -> Optional[List[str]]:
original_text = build_file.read_text()
original_text_lines = original_text.splitlines()
updated_text_lines = original_text_lines.copy()
for i, line in enumerate(original_text_lines):
maybe_new_line = maybe_rewrite_line(line)
if maybe_new_line is not None:
updated_text_lines[i] = maybe_new_line
return updated_text_lines if updated_text_lines != original_text_lines else None
def generate_diff(build_file: Path, new_content: List[str]) -> str:
def green(s: str) -> str:
return f"\x1b[32m{s}\x1b[0m"
def red(s: str) -> str:
return f"\x1b[31m{s}\x1b[0m"
diff = unified_diff(
build_file.read_text().splitlines(),
new_content,
fromfile=str(build_file),
tofile=str(build_file),
)
msg = ""
for line in diff:
if line.startswith("+") and not line.startswith("+++"):
msg += green(line)
elif line.startswith("-") and not line.startswith("---"):
msg += red(line)
else:
msg += line
if not (line.startswith("+++") or line.startswith("---") or line.startswith("@@ ")):
msg += "\n"
return msg
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| {
"content_hash": "7cd4310f343e7bfabd00aa8461fdd08c",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 95,
"avg_line_length": 31.17094017094017,
"alnum_prop": 0.6048807238826432,
"repo_name": "benjyw/pants",
"id": "623963bfc7f741385cb455881d0c91e28b74681f",
"size": "3802",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "build-support/migration-support/convert_source_to_sources.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "688"
},
{
"name": "Go",
"bytes": "67315"
},
{
"name": "Java",
"bytes": "10690"
},
{
"name": "Kotlin",
"bytes": "6433"
},
{
"name": "Mustache",
"bytes": "3595"
},
{
"name": "Python",
"bytes": "7135320"
},
{
"name": "Rust",
"bytes": "1601736"
},
{
"name": "Scala",
"bytes": "21950"
},
{
"name": "Shell",
"bytes": "31723"
},
{
"name": "Starlark",
"bytes": "72809"
}
],
"symlink_target": ""
} |
AUTH_USER_MODEL = 'backend.ChinoUser'
AUTHENTICATION_BACKENDS = [
'backend.models.ChinoRemoteUserBackend',
]
# PUT YOUR CHINO DATA HERE.
CHINO_ID = '...'
CHINO_KEY = '...'
CHINO_URL = 'https://api.dev.chino.io/'
# production URL:
#CHINO_URL = 'https://api.chino.io/'
CHINO_APPLICATION_ID = '...'
CHINO_APPLICATION_SECRET = '...'
| {
"content_hash": "22615c00ef9d6578226888c971f82edf",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 44,
"avg_line_length": 19.88235294117647,
"alnum_prop": 0.6597633136094675,
"repo_name": "chinoio/chino-django",
"id": "3ae143da9a29f88ecb25c76942a7e9558836e27a",
"size": "378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chino/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26917"
}
],
"symlink_target": ""
} |
"""
Copyright (C) 2018, Zato Source s.r.o. https://zato.io
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# Originally part of Zato - open-source ESB, SOA, REST, APIs and cloud integrations in Python
# https://zato.io
import os, sys
from setuptools import setup, find_packages
version = '1.0.6'
LONG_DESCRIPTION = """
===========
zato-enclog
===========
* Encrypted logger which stores everything using Fernet keys (AES128). Safe to use in environments
that cannot store Personally Identifiable Information (PII) in clear text, such as HIPAA-compliant applications.
* Comes with a command line tool that is used to decrypt logs, including both open and tail -f functionality.
* Learn more about Fernet: https://cryptography.io/en/latest/fernet/
::
# stdlib
import logging
# Zato
from zato.enclog import EncryptedLogFormatter, genkey
level = logging.INFO
format = '%(levelname)s - %(message)s'
key = genkey()
formatter = EncryptedLogFormatter(key, format)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger('')
logger.addHandler(handler)
logger.setLevel(level)
logger.info(b'{"user":"Jane Xi"}')
# Shows the following
INFO - gAAAAABWYa17oiDoSMVjF8JM9DWzB3dtEvenW9laKqgsFl4d4ksbLCkoJzTyrI3nXKYVOcC03dhJ_BwfWlBN3CdGxJZAwMmfUbUzLHkqw2JeTzdgtz0YEGU=
"""
def parse_requirements(requirements):
with open(requirements) as f:
return [line.strip('\n') for line in f if line.strip('\n') and not line[0] in ('#',)]
package_dir = b'src' if sys.version_info.major == 2 else 'src'
setup(
name = 'zato-enclog',
version = version,
scripts = ['src/zato/enclog/console/enclog'],
author = 'Dariusz Suchojad',
author_email = 'dsuch at zato.io',
url = 'https://zato.io/docs/progguide/enclog/index.html',
description = 'Encrypted logger reusable in any Python application',
long_description = LONG_DESCRIPTION,
platforms = ['OS Independent'],
license = 'BSD License',
package_dir = {'':package_dir},
packages = find_packages(package_dir),
namespace_packages = ['zato'],
install_requires = parse_requirements(
os.path.join(os.path.dirname(os.path.realpath(__file__)), 'requirements.txt')),
zip_safe = False,
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Other Audience',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Intended Audience :: Developers',
'Topic :: Communications',
'Topic :: Education :: Testing',
'Topic :: Internet',
'Topic :: Internet :: Proxy Servers',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
'Topic :: System :: Networking',
'Topic :: Utilities',
],
)
| {
"content_hash": "4003fee22706a074c6a0ce7c0c29ce62",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 129,
"avg_line_length": 30.684684684684683,
"alnum_prop": 0.6588373458602467,
"repo_name": "zatosource/zato-enclog",
"id": "ab6e7cb362cf73fb01f76c81762582b3bcfe6afd",
"size": "3431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1006"
},
{
"name": "Python",
"bytes": "11830"
}
],
"symlink_target": ""
} |
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from plaid.model.external_payment_schedule_base import ExternalPaymentScheduleBase
from plaid.model.payment_schedule_interval import PaymentScheduleInterval
globals()['ExternalPaymentScheduleBase'] = ExternalPaymentScheduleBase
globals()['PaymentScheduleInterval'] = PaymentScheduleInterval
class ExternalPaymentScheduleRequest(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'interval': (PaymentScheduleInterval,), # noqa: E501
'interval_execution_day': (int,), # noqa: E501
'start_date': (date,), # noqa: E501
'end_date': (date, none_type,), # noqa: E501
'adjusted_start_date': (date, none_type,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'interval': 'interval', # noqa: E501
'interval_execution_day': 'interval_execution_day', # noqa: E501
'start_date': 'start_date', # noqa: E501
'end_date': 'end_date', # noqa: E501
'adjusted_start_date': 'adjusted_start_date', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, interval, interval_execution_day, start_date, *args, **kwargs): # noqa: E501
"""ExternalPaymentScheduleRequest - a model defined in OpenAPI
Args:
interval (PaymentScheduleInterval):
interval_execution_day (int): The day of the interval on which to schedule the payment. If the payment interval is weekly, `interval_execution_day` should be an integer from 1 (Monday) to 7 (Sunday). If the payment interval is monthly, `interval_execution_day` should be an integer indicating which day of the month to make the payment on. Integers from 1 to 28 can be used to make a payment on that day of the month. Negative integers from -1 to -5 can be used to make a payment relative to the end of the month. To make a payment on the last day of the month, use -1; to make the payment on the second-to-last day, use -2, and so on.
start_date (date): A date in [ISO 8601](https://wikipedia.org/wiki/ISO_8601) format (YYYY-MM-DD). Standing order payments will begin on the first `interval_execution_day` on or after the `start_date`. If the first `interval_execution_day` on or after the start date is also the same day that `/payment_initiation/payment/create` was called, the bank *may* make the first payment on that day, but it is not guaranteed to do so.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
end_date (date, none_type): A date in [ISO 8601](https://wikipedia.org/wiki/ISO_8601) format (YYYY-MM-DD). Standing order payments will end on the last `interval_execution_day` on or before the `end_date`. If the only `interval_execution_day` between the start date and the end date (inclusive) is also the same day that `/payment_initiation/payment/create` was called, the bank *may* make a payment on that day, but it is not guaranteed to do so.. [optional] # noqa: E501
adjusted_start_date (date, none_type): The start date sent to the bank after adjusting for holidays or weekends. Will be provided in [ISO 8601](https://wikipedia.org/wiki/ISO_8601) format (YYYY-MM-DD). If the start date did not require adjustment, this field will be `null`.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'interval': interval,
'interval_execution_day': interval_execution_day,
'start_date': start_date,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
ExternalPaymentScheduleBase,
],
'oneOf': [
],
}
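# Construction sketch (values are assumptions for illustration): only the
# three required fields from the docstring are needed; optional fields such
# as end_date are keyword-only.
#
#     from datetime import date
#     schedule = ExternalPaymentScheduleRequest(
#         interval=PaymentScheduleInterval("WEEKLY"),
#         interval_execution_day=2,          # Tuesday for a weekly interval
#         start_date=date(2021, 1, 1),
#     )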
| {
"content_hash": "c39803e38b2522a40c4b015b33e98218",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 649,
"avg_line_length": 47.69198312236287,
"alnum_prop": 0.5984251968503937,
"repo_name": "plaid/plaid-python",
"id": "f323bf6120667c3cc0321012c42b29518b03350b",
"size": "11303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaid/model/external_payment_schedule_request.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "323"
},
{
"name": "Makefile",
"bytes": "622"
},
{
"name": "Mustache",
"bytes": "125163"
},
{
"name": "Python",
"bytes": "9342874"
}
],
"symlink_target": ""
} |
import keyword
import _jsre as re
from browser import html
letters = 'abcdefghijklmnopqrstuvwxyz'
letters += letters.upper()+'_'
digits = '0123456789'
builtin_funcs = """abs|dict|help|min|setattr|
all|dir|hex|next|slice|
any|divmod|id|object|sorted|
ascii|enumerate|input|oct|staticmethod|
bin|eval|int|open|str|
bool|exec|isinstance|ord|sum|
bytearray|filter|issubclass|pow|super|
bytes|float|iter|print|tuple|
callable|format|len|property|type|
chr|frozenset|list|range|vars|
classmethod|getattr|locals|repr|zip|
compile|globals|map|reversed|__import__|
complex|hasattr|max|round|
delattr|hash|memoryview|set|
"""
kw_pattern = '^('+'|'.join(keyword.kwlist)+')$'
bf_pattern = '^('+builtin_funcs.replace("\n", "")+')$'
def escape(txt):
txt = txt.replace('<', '<')
txt = txt.replace('>', '>')
return txt
def highlight(txt):
res = html.PRE()
i = 0
name = ''
while i < len(txt):
car = txt[i]
if car in ["'", '"']:
found_match = False
k = i + 1
while k < len(txt):
if txt[k] == car:
nb_as = 0
j = k - 1
while True:
if txt[j] == '\\':
nb_as += 1
j -= 1
else:
break
if nb_as % 2 == 0:
res <= name + html.SPAN(escape(txt[i:k + 1]),
Class="python-string")
i = k
name = ''
found_match = True
break
k += 1
if not found_match:
name += car
elif car == '#': # comment
end = txt.find('\n', i)
            if end == -1:
res <= html.SPAN(escape(txt[i:]), Class="python-comment")
break
else:
res <= html.SPAN(escape(txt[i:end]), Class="python-comment")
i = end-1
elif car in letters:
name += car
elif car in digits and name:
name += car
else:
if name:
if re.search(kw_pattern,name):
res <= html.SPAN(name, Class="python-keyword")
elif re.search(bf_pattern,name):
res <= html.SPAN(name, Class="python-builtin")
else:
res <= name
name = ''
res <= car
i += 1
res <= name
    return res
| {
"content_hash": "e4d29403e4dbd6cd1db14b5dcccf51a7",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 76,
"avg_line_length": 29.655172413793103,
"alnum_prop": 0.45271317829457364,
"repo_name": "jonathanverner/brython",
"id": "5316a6d5ddb6e251a315b6731d9d53d95f0db858",
"size": "2580",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "www/src/Lib/browser/highlight.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "17046"
},
{
"name": "HTML",
"bytes": "4989399"
},
{
"name": "JavaScript",
"bytes": "5841054"
},
{
"name": "Makefile",
"bytes": "61"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "14816501"
},
{
"name": "Roff",
"bytes": "21080"
},
{
"name": "Shell",
"bytes": "387"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
"""Test Atlas Data Lake."""
import os
import sys
sys.path[0:0] = [""]
from test import IntegrationTest, client_context, unittest
from test.crud_v2_format import TestCrudV2
from test.utils import (
OvertCommandListener,
TestCreator,
rs_client_noauth,
rs_or_single_client,
)
# Location of JSON test specifications.
_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data_lake")
class TestDataLakeMustConnect(IntegrationTest):
def test_connected_to_data_lake(self):
data_lake = os.environ.get("DATA_LAKE")
if not data_lake:
self.skipTest("DATA_LAKE is not set")
self.assertTrue(
client_context.is_data_lake,
"client context.is_data_lake must be True when DATA_LAKE is set",
)
class TestDataLakeProse(IntegrationTest):
# Default test database and collection names.
TEST_DB = "test"
TEST_COLLECTION = "driverdata"
@classmethod
@client_context.require_data_lake
def setUpClass(cls):
super(TestDataLakeProse, cls).setUpClass()
# Test killCursors
def test_1(self):
listener = OvertCommandListener()
client = rs_or_single_client(event_listeners=[listener])
cursor = client[self.TEST_DB][self.TEST_COLLECTION].find({}, batch_size=2)
next(cursor)
# find command assertions
find_cmd = listener.succeeded_events[-1]
self.assertEqual(find_cmd.command_name, "find")
cursor_id = find_cmd.reply["cursor"]["id"]
cursor_ns = find_cmd.reply["cursor"]["ns"]
# killCursors command assertions
cursor.close()
started = listener.started_events[-1]
self.assertEqual(started.command_name, "killCursors")
succeeded = listener.succeeded_events[-1]
self.assertEqual(succeeded.command_name, "killCursors")
self.assertIn(cursor_id, started.command["cursors"])
target_ns = ".".join([started.command["$db"], started.command["killCursors"]])
self.assertEqual(cursor_ns, target_ns)
self.assertIn(cursor_id, succeeded.reply["cursorsKilled"])
# Test no auth
def test_2(self):
client = rs_client_noauth()
client.admin.command("ping")
# Test with auth
def test_3(self):
for mechanism in ["SCRAM-SHA-1", "SCRAM-SHA-256"]:
client = rs_or_single_client(authMechanism=mechanism)
client[self.TEST_DB][self.TEST_COLLECTION].find_one()
class DataLakeTestSpec(TestCrudV2):
# Default test database and collection names.
TEST_DB = "test"
TEST_COLLECTION = "driverdata"
@classmethod
@client_context.require_data_lake
def setUpClass(cls):
super(DataLakeTestSpec, cls).setUpClass()
def setup_scenario(self, scenario_def):
# Spec tests MUST NOT insert data/drop collection for
# data lake testing.
pass
def create_test(scenario_def, test, name):
def run_scenario(self):
self.run_scenario(scenario_def, test)
return run_scenario
TestCreator(create_test, DataLakeTestSpec, _TEST_PATH).create_tests()
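# TestCreator walks the JSON specs under _TEST_PATH and, through create_test
# above, attaches one run_scenario-based method per scenario to
# DataLakeTestSpec, so unittest discovers them like hand-written tests.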
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "5e3ae595e0f3101dec67a332debd6905",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 86,
"avg_line_length": 29.25925925925926,
"alnum_prop": 0.6525316455696203,
"repo_name": "mongodb/mongo-python-driver",
"id": "4fa38435a3cf65435d8f8fbd9305f6927c152119",
"size": "3742",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_data_lake.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "183641"
},
{
"name": "Python",
"bytes": "2983153"
},
{
"name": "Shell",
"bytes": "30026"
}
],
"symlink_target": ""
} |
import unittest
from MyHearthStone.utils import constants
__author__ = 'fyabc'
class TestConstants(unittest.TestCase):
def setUp(self):
self.orig_C = constants.C.copy()
def tearDown(self):
constants.C = self.orig_C
def testLoadArgConfig(self):
constants.load_arg_config({
'Frontend': 'text-single',
'Logging': {
'Level': 'WARNING',
'ScreenLog': False,
}
})
self.assertEqual(constants.C.Frontend, 'text-single')
self.assertEqual(constants.C.Logging.Level, 'WARNING')
self.assertEqual(constants.C.Game.Version, self.orig_C.Game.Version)
def testGetPackagePaths(self):
package_paths = constants.get_package_paths()
self.assertIn(constants.SystemPackageDataPath, package_paths)
| {
"content_hash": "79f94f0afd3ff6570192576f43602ee2",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 76,
"avg_line_length": 27.933333333333334,
"alnum_prop": 0.6217183770883055,
"repo_name": "fyabc/MiniGames",
"id": "eda0637de2914a1fb658c9e44863996acbef9388",
"size": "882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HearthStone2/test/utils/test_constants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "821180"
}
],
"symlink_target": ""
} |
"""
Client side of the conductor RPC API.
"""
import random
import oslo_messaging as messaging
from ironic.common import exception
from ironic.common import hash_ring
from ironic.common.i18n import _
from ironic.common import release_mappings as versions
from ironic.common import rpc
from ironic.conductor import manager
from ironic.conf import CONF
from ironic.objects import base as objects_base
class ConductorAPI(object):
"""Client side of the conductor RPC API.
API version history:
| 1.0 - Initial version.
| Included get_node_power_status
| 1.1 - Added update_node and start_power_state_change.
| 1.2 - Added vendor_passthru.
| 1.3 - Rename start_power_state_change to change_node_power_state.
| 1.4 - Added do_node_deploy and do_node_tear_down.
| 1.5 - Added validate_driver_interfaces.
| 1.6 - change_node_power_state, do_node_deploy and do_node_tear_down
| accept node id instead of node object.
| 1.7 - Added topic parameter to RPC methods.
| 1.8 - Added change_node_maintenance_mode.
| 1.9 - Added destroy_node.
| 1.10 - Remove get_node_power_state
| 1.11 - Added get_console_information, set_console_mode.
| 1.12 - validate_vendor_action, do_vendor_action replaced by single
| vendor_passthru method.
| 1.13 - Added update_port.
| 1.14 - Added driver_vendor_passthru.
| 1.15 - Added rebuild parameter to do_node_deploy.
| 1.16 - Added get_driver_properties.
| 1.17 - Added set_boot_device, get_boot_device and
| get_supported_boot_devices.
| 1.18 - Remove change_node_maintenance_mode.
| 1.19 - Change return value of vendor_passthru and
| driver_vendor_passthru
| 1.20 - Added http_method parameter to vendor_passthru and
| driver_vendor_passthru
| 1.21 - Added get_node_vendor_passthru_methods and
| get_driver_vendor_passthru_methods
| 1.22 - Added configdrive parameter to do_node_deploy.
| 1.23 - Added do_provisioning_action
| 1.24 - Added inspect_hardware method
| 1.25 - Added destroy_port
| 1.26 - Added continue_node_clean
| 1.27 - Convert continue_node_clean to cast
| 1.28 - Change exceptions raised by destroy_node
| 1.29 - Change return value of vendor_passthru and
| driver_vendor_passthru to a dictionary
| 1.30 - Added set_target_raid_config and
| get_raid_logical_disk_properties
| 1.31 - Added Versioned Objects indirection API methods:
| object_class_action_versions, object_action and
| object_backport_versions
| 1.32 - Add do_node_clean
| 1.33 - Added update and destroy portgroup.
| 1.34 - Added heartbeat
| 1.35 - Added destroy_volume_connector and update_volume_connector
| 1.36 - Added create_node
| 1.37 - Added destroy_volume_target and update_volume_target
| 1.38 - Added vif_attach, vif_detach, vif_list
| 1.39 - Added timeout optional parameter to change_node_power_state
| 1.40 - Added inject_nmi
"""
# NOTE(rloo): This must be in sync with manager.ConductorManager's.
RPC_API_VERSION = '1.40'
def __init__(self, topic=None):
super(ConductorAPI, self).__init__()
self.topic = topic
if self.topic is None:
self.topic = manager.MANAGER_TOPIC
target = messaging.Target(topic=self.topic,
version='1.0')
serializer = objects_base.IronicObjectSerializer()
release_ver = versions.RELEASE_MAPPING.get(CONF.pin_release_version)
version_cap = (release_ver['rpc'] if release_ver
else self.RPC_API_VERSION)
self.client = rpc.get_client(target, version_cap=version_cap,
serializer=serializer)
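        # NOTE: version_cap pins outgoing RPCs to the release named in
        # CONF.pin_release_version during rolling upgrades; when no pin is
        # set, the client speaks the latest RPC_API_VERSION.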
# NOTE(deva): this is going to be buggy
self.ring_manager = hash_ring.HashRingManager()
def get_topic_for(self, node):
"""Get the RPC topic for the conductor service the node is mapped to.
:param node: a node object.
:returns: an RPC topic string.
:raises: NoValidHost
"""
self.ring_manager.reset()
try:
ring = self.ring_manager[node.driver]
dest = ring.get_nodes(node.uuid.encode('utf-8'),
replicas=CONF.hash_distribution_replicas)
return '%s.%s' % (self.topic, dest.pop())
except exception.DriverNotFound:
reason = (_('No conductor service registered which supports '
'driver %s.') % node.driver)
raise exception.NoValidHost(reason=reason)
def get_topic_for_driver(self, driver_name):
"""Get RPC topic name for a conductor supporting the given driver.
The topic is used to route messages to the conductor supporting
the specified driver. A conductor is selected at random from the
set of qualified conductors.
:param driver_name: the name of the driver to route to.
:returns: an RPC topic string.
:raises: DriverNotFound
"""
self.ring_manager.reset()
ring = self.ring_manager[driver_name]
host = random.choice(list(ring.nodes))
return self.topic + "." + host
def create_node(self, context, node_obj, topic=None):
"""Synchronously, have a conductor validate and create a node.
Create the node's information in the database and return a node object.
:param context: request context.
:param node_obj: a created (but not saved) node object.
:param topic: RPC topic. Defaults to self.topic.
:returns: created node object.
:raises: InterfaceNotFoundInEntrypoint if validation fails for any
dynamic interfaces (e.g. network_interface).
:raises: NoValidDefaultForInterface if no default can be calculated
for some interfaces, and explicit values must be provided.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.36')
return cctxt.call(context, 'create_node', node_obj=node_obj)
def update_node(self, context, node_obj, topic=None):
"""Synchronously, have a conductor update the node's information.
Update the node's information in the database and return a node object.
The conductor will lock the node while it validates the supplied
information. If driver_info is passed, it will be validated by
the core drivers. If instance_uuid is passed, it will be set or unset
only if the node is properly configured.
Note that power_state should not be passed via this method.
Use change_node_power_state for initiating driver actions.
:param context: request context.
:param node_obj: a changed (but not saved) node object.
:param topic: RPC topic. Defaults to self.topic.
:returns: updated node object, including all fields.
:raises: NoValidDefaultForInterface if no default can be calculated
for some interfaces, and explicit values must be provided.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.1')
return cctxt.call(context, 'update_node', node_obj=node_obj)
def change_node_power_state(self, context, node_id, new_state,
topic=None, timeout=None):
"""Change a node's power state.
Synchronously, acquire lock and start the conductor background task
to change power state of a node.
:param context: request context.
:param node_id: node id or uuid.
:param new_state: one of ironic.common.states power state values
:param timeout: timeout (in seconds) positive integer (> 0) for any
power state. ``None`` indicates to use default timeout.
:param topic: RPC topic. Defaults to self.topic.
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.39')
return cctxt.call(context, 'change_node_power_state', node_id=node_id,
new_state=new_state, timeout=timeout)
def vendor_passthru(self, context, node_id, driver_method, http_method,
info, topic=None):
"""Receive requests for vendor-specific actions.
Synchronously validate driver specific info or get driver status,
and if successful invokes the vendor method. If the method mode
is async the conductor will start background worker to perform
vendor action.
:param context: request context.
:param node_id: node id or uuid.
:param driver_method: name of method for driver.
:param http_method: the HTTP method used for the request.
:param info: info for node driver.
:param topic: RPC topic. Defaults to self.topic.
:raises: InvalidParameterValue if supplied info is not valid.
:raises: MissingParameterValue if a required parameter is missing
:raises: UnsupportedDriverExtension if current driver does not have
vendor interface.
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
:raises: NodeLocked if node is locked by another conductor.
:returns: A dictionary containing:
:return: The response of the invoked vendor method
:async: Boolean value. Whether the method was invoked
asynchronously (True) or synchronously (False). When invoked
asynchronously the response will be always None.
:attach: Boolean value. Whether to attach the response of
the invoked vendor method to the HTTP response object (True)
or return it in the response body (False).
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.20')
return cctxt.call(context, 'vendor_passthru', node_id=node_id,
driver_method=driver_method,
http_method=http_method,
info=info)
def driver_vendor_passthru(self, context, driver_name, driver_method,
http_method, info, topic=None):
"""Pass vendor-specific calls which don't specify a node to a driver.
Handles driver-level vendor passthru calls. These calls don't
require a node UUID and are executed on a random conductor with
the specified driver. If the method mode is async the conductor
will start background worker to perform vendor action.
:param context: request context.
:param driver_name: name of the driver on which to call the method.
:param driver_method: name of the vendor method, for use by the driver.
:param http_method: the HTTP method used for the request.
:param info: data to pass through to the driver.
:param topic: RPC topic. Defaults to self.topic.
:raises: InvalidParameterValue for parameter errors.
:raises: MissingParameterValue if a required parameter is missing
:raises: UnsupportedDriverExtension if the driver doesn't have a vendor
interface, or if the vendor interface does not support the
specified driver_method.
:raises: DriverNotFound if the supplied driver is not loaded.
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
:raises: InterfaceNotFoundInEntrypoint if the default interface for a
hardware type is invalid.
:raises: NoValidDefaultForInterface if no default interface
implementation can be found for this driver's vendor
interface.
:returns: A dictionary containing:
:return: The response of the invoked vendor method
:async: Boolean value. Whether the method was invoked
asynchronously (True) or synchronously (False). When invoked
asynchronously the response will be always None.
:attach: Boolean value. Whether to attach the response of
the invoked vendor method to the HTTP response object (True)
or return it in the response body (False).
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.20')
return cctxt.call(context, 'driver_vendor_passthru',
driver_name=driver_name,
driver_method=driver_method,
http_method=http_method,
info=info)
def get_node_vendor_passthru_methods(self, context, node_id, topic=None):
"""Retrieve information about vendor methods of the given node.
:param context: an admin context.
:param node_id: the id or uuid of a node.
:param topic: RPC topic. Defaults to self.topic.
:returns: dictionary of <method name>:<method metadata> entries.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.21')
return cctxt.call(context, 'get_node_vendor_passthru_methods',
node_id=node_id)
def get_driver_vendor_passthru_methods(self, context, driver_name,
topic=None):
"""Retrieve information about vendor methods of the given driver.
:param context: an admin context.
:param driver_name: name of the driver.
:param topic: RPC topic. Defaults to self.topic.
:raises: UnsupportedDriverExtension if current driver does not have
vendor interface.
:raises: DriverNotFound if the supplied driver is not loaded.
:raises: InterfaceNotFoundInEntrypoint if the default interface for a
hardware type is invalid.
:raises: NoValidDefaultForInterface if no default interface
implementation can be found for this driver's vendor
interface.
:returns: dictionary of <method name>:<method metadata> entries.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.21')
return cctxt.call(context, 'get_driver_vendor_passthru_methods',
driver_name=driver_name)
def do_node_deploy(self, context, node_id, rebuild, configdrive,
topic=None):
"""Signal to conductor service to perform a deployment.
:param context: request context.
:param node_id: node id or uuid.
:param rebuild: True if this is a rebuild request.
:param configdrive: A gzipped and base64 encoded configdrive.
:param topic: RPC topic. Defaults to self.topic.
:raises: InstanceDeployFailure
:raises: InvalidParameterValue if validation fails
:raises: MissingParameterValue if a required parameter is missing
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
The node must already be configured and in the appropriate
undeployed state before this method is called.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.22')
return cctxt.call(context, 'do_node_deploy', node_id=node_id,
rebuild=rebuild, configdrive=configdrive)
def do_node_tear_down(self, context, node_id, topic=None):
"""Signal to conductor service to tear down a deployment.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
:raises: InstanceDeployFailure
:raises: InvalidParameterValue if validation fails
:raises: MissingParameterValue if a required parameter is missing
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
The node must already be configured and in the appropriate
deployed state before this method is called.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.6')
return cctxt.call(context, 'do_node_tear_down', node_id=node_id)
def do_provisioning_action(self, context, node_id, action, topic=None):
"""Signal to conductor service to perform the given action on a node.
:param context: request context.
:param node_id: node id or uuid.
:param action: an action. One of ironic.common.states.VERBS
:param topic: RPC topic. Defaults to self.topic.
:raises: InvalidParameterValue
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
:raises: InvalidStateRequested if the requested action can not
be performed.
This encapsulates some provisioning actions in a single call.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.23')
return cctxt.call(context, 'do_provisioning_action',
node_id=node_id, action=action)
def continue_node_clean(self, context, node_id, topic=None):
"""Signal to conductor service to start the next cleaning action.
NOTE(JoshNang) this is an RPC cast, there will be no response or
exception raised by the conductor for this RPC.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.27')
return cctxt.cast(context, 'continue_node_clean',
node_id=node_id)
def validate_driver_interfaces(self, context, node_id, topic=None):
"""Validate the `core` and `standardized` interfaces for drivers.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
:returns: a dictionary containing the results of each
interface validation.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.5')
return cctxt.call(context, 'validate_driver_interfaces',
node_id=node_id)
def destroy_node(self, context, node_id, topic=None):
"""Delete a node.
:param context: request context.
:param node_id: node id or uuid.
:raises: NodeLocked if node is locked by another conductor.
:raises: NodeAssociated if the node contains an instance
associated with it.
:raises: InvalidState if the node is in the wrong provision
state to perform deletion.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.9')
return cctxt.call(context, 'destroy_node', node_id=node_id)
def get_console_information(self, context, node_id, topic=None):
"""Get connection information about the console.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support console.
:raises: InvalidParameterValue when the wrong driver info is specified.
:raises: MissingParameterValue if a required parameter is missing
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.11')
return cctxt.call(context, 'get_console_information', node_id=node_id)
def set_console_mode(self, context, node_id, enabled, topic=None):
"""Enable/Disable the console.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
:param enabled: Boolean value; whether the console is enabled or
disabled.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support console.
:raises: InvalidParameterValue when the wrong driver info is specified.
:raises: MissingParameterValue if a required parameter is missing
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.11')
return cctxt.call(context, 'set_console_mode', node_id=node_id,
enabled=enabled)
def update_port(self, context, port_obj, topic=None):
"""Synchronously, have a conductor update the port's information.
Update the port's information in the database and return a port object.
The conductor will lock related node and trigger specific driver
actions if they are needed.
:param context: request context.
:param port_obj: a changed (but not saved) port object.
:param topic: RPC topic. Defaults to self.topic.
:returns: updated port object, including all fields.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.13')
return cctxt.call(context, 'update_port', port_obj=port_obj)
def update_portgroup(self, context, portgroup_obj, topic=None):
"""Synchronously, have a conductor update the portgroup's information.
Update the portgroup's information in the database and return a
portgroup object.
The conductor will lock related node and trigger specific driver
actions if they are needed.
:param context: request context.
:param portgroup_obj: a changed (but not saved) portgroup object.
:param topic: RPC topic. Defaults to self.topic.
:returns: updated portgroup object, including all fields.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.33')
return cctxt.call(context, 'update_portgroup',
portgroup_obj=portgroup_obj)
def destroy_portgroup(self, context, portgroup, topic=None):
"""Delete a portgroup.
:param context: request context.
:param portgroup: portgroup object
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked if node is locked by another conductor.
:raises: NodeNotFound if the node associated with the portgroup does
not exist.
:raises: PortgroupNotEmpty if portgroup is not empty
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.33')
return cctxt.call(context, 'destroy_portgroup', portgroup=portgroup)
def get_driver_properties(self, context, driver_name, topic=None):
"""Get the properties of the driver.
:param context: request context.
:param driver_name: name of the driver.
:param topic: RPC topic. Defaults to self.topic.
:returns: a dictionary with <property name>:<property description>
entries.
:raises: DriverNotFound.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.16')
return cctxt.call(context, 'get_driver_properties',
driver_name=driver_name)
def set_boot_device(self, context, node_id, device, persistent=False,
topic=None):
"""Set the boot device for a node.
Set the boot device to use on next reboot of the node. Be aware
that not all drivers support this.
:param context: request context.
:param node_id: node id or uuid.
:param device: the boot device, one of
:mod:`ironic.common.boot_devices`.
:param persistent: Whether to set next-boot, or make the change
permanent. Default: False.
:raises: NodeLocked if node is locked by another conductor.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support management.
:raises: InvalidParameterValue when the wrong driver info is
specified or an invalid boot device is specified.
:raises: MissingParameterValue if missing supplied info.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.17')
return cctxt.call(context, 'set_boot_device', node_id=node_id,
device=device, persistent=persistent)
def get_boot_device(self, context, node_id, topic=None):
"""Get the current boot device.
Returns the current boot device of a node.
:param context: request context.
:param node_id: node id or uuid.
:raises: NodeLocked if node is locked by another conductor.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support management.
:raises: InvalidParameterValue when the wrong driver info is
specified.
:raises: MissingParameterValue if missing supplied info.
:returns: a dictionary containing:
:boot_device: the boot device, one of
:mod:`ironic.common.boot_devices` or None if it is unknown.
:persistent: Whether the boot device will persist to all
future boots or not, None if it is unknown.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.17')
return cctxt.call(context, 'get_boot_device', node_id=node_id)
def inject_nmi(self, context, node_id, topic=None):
"""Inject NMI for a node.
Inject NMI (Non Maskable Interrupt) for a node immediately.
Be aware that not all drivers support this.
:param context: request context.
:param node_id: node id or uuid.
:raises: NodeLocked if node is locked by another conductor.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support management or management.inject_nmi.
:raises: InvalidParameterValue when the wrong driver info is
specified or an invalid boot device is specified.
:raises: MissingParameterValue if missing supplied info.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.40')
return cctxt.call(context, 'inject_nmi', node_id=node_id)
def get_supported_boot_devices(self, context, node_id, topic=None):
"""Get the list of supported devices.
Returns the list of supported boot devices of a node.
:param context: request context.
:param node_id: node id or uuid.
:raises: NodeLocked if node is locked by another conductor.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support management.
:raises: InvalidParameterValue when the wrong driver info is
specified.
:raises: MissingParameterValue if missing supplied info.
:returns: A list with the supported boot devices defined
in :mod:`ironic.common.boot_devices`.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.17')
return cctxt.call(context, 'get_supported_boot_devices',
node_id=node_id)
def inspect_hardware(self, context, node_id, topic=None):
"""Signals the conductor service to perform hardware introspection.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked if node is locked by another conductor.
:raises: HardwareInspectionFailure
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support inspection.
:raises: InvalidStateRequested if 'inspect' is not a valid
action to do in the current state.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.24')
return cctxt.call(context, 'inspect_hardware', node_id=node_id)
def destroy_port(self, context, port, topic=None):
"""Delete a port.
:param context: request context.
:param port: port object
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked if node is locked by another conductor.
:raises: NodeNotFound if the node associated with the port does not
exist.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.25')
return cctxt.call(context, 'destroy_port', port=port)
def set_target_raid_config(self, context, node_id, target_raid_config,
topic=None):
"""Stores the target RAID configuration on the node.
Stores the target RAID configuration on node.target_raid_config
:param context: request context.
:param node_id: node id or uuid.
:param target_raid_config: Dictionary containing the target RAID
configuration. It may be an empty dictionary as well.
:param topic: RPC topic. Defaults to self.topic.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support RAID configuration.
:raises: InvalidParameterValue, if validation of target raid config
fails.
:raises: MissingParameterValue, if some required parameters are
missing.
:raises: NodeLocked if node is locked by another conductor.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.30')
return cctxt.call(context, 'set_target_raid_config',
node_id=node_id,
target_raid_config=target_raid_config)
def get_raid_logical_disk_properties(self, context, driver_name,
topic=None):
"""Get the logical disk properties for RAID configuration.
Gets the information about logical disk properties which can
be specified in the input RAID configuration.
:param context: request context.
:param driver_name: name of the driver
:param topic: RPC topic. Defaults to self.topic.
:raises: UnsupportedDriverExtension if the driver doesn't
support RAID configuration.
:raises: InterfaceNotFoundInEntrypoint if the default interface for a
hardware type is invalid.
:raises: NoValidDefaultForInterface if no default interface
implementation can be found for this driver's RAID
interface.
:returns: A dictionary containing the properties that can be mentioned
for logical disks and a textual description for them.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.30')
return cctxt.call(context, 'get_raid_logical_disk_properties',
driver_name=driver_name)
def do_node_clean(self, context, node_id, clean_steps, topic=None):
"""Signal to conductor service to perform manual cleaning on a node.
:param context: request context.
:param node_id: node ID or UUID.
:param clean_steps: a list of clean step dictionaries.
:param topic: RPC topic. Defaults to self.topic.
:raises: InvalidParameterValue if validation of power driver interface
failed.
:raises: InvalidStateRequested if cleaning can not be performed.
:raises: NodeInMaintenance if node is in maintenance mode.
:raises: NodeLocked if node is locked by another conductor.
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.32')
return cctxt.call(context, 'do_node_clean',
node_id=node_id, clean_steps=clean_steps)
def heartbeat(self, context, node_id, callback_url, topic=None):
"""Process a node heartbeat.
:param context: request context.
:param node_id: node ID or UUID.
:param callback_url: URL to reach back to the ramdisk.
:param topic: RPC topic. Defaults to self.topic.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.34')
return cctxt.call(context, 'heartbeat', node_id=node_id,
callback_url=callback_url)
def object_class_action_versions(self, context, objname, objmethod,
object_versions, args, kwargs):
"""Perform an action on a VersionedObject class.
We want any conductor to handle this, so it is intentional that there
is no topic argument for this method.
:param context: The context within which to perform the action
:param objname: The registry name of the object
:param objmethod: The name of the action method to call
:param object_versions: A dict of {objname: version} mappings
:param args: The positional arguments to the action method
:param kwargs: The keyword arguments to the action method
:raises: NotImplementedError when an operator makes an error during
upgrade
:returns: The result of the action method, which may (or may not)
be an instance of the implementing VersionedObject class.
"""
if not self.client.can_send_version('1.31'):
raise NotImplementedError(_('Incompatible conductor version - '
'please upgrade ironic-conductor '
'first'))
cctxt = self.client.prepare(topic=self.topic, version='1.31')
return cctxt.call(context, 'object_class_action_versions',
objname=objname, objmethod=objmethod,
object_versions=object_versions,
args=args, kwargs=kwargs)
def object_action(self, context, objinst, objmethod, args, kwargs):
"""Perform an action on a VersionedObject instance.
We want any conductor to handle this, so it is intentional that there
is no topic argument for this method.
:param context: The context within which to perform the action
:param objinst: The object instance on which to perform the action
:param objmethod: The name of the action method to call
:param args: The positional arguments to the action method
:param kwargs: The keyword arguments to the action method
:raises: NotImplementedError when an operator makes an error during
upgrade
:returns: A tuple with the updates made to the object and
the result of the action method
"""
if not self.client.can_send_version('1.31'):
raise NotImplementedError(_('Incompatible conductor version - '
'please upgrade ironic-conductor '
'first'))
cctxt = self.client.prepare(topic=self.topic, version='1.31')
return cctxt.call(context, 'object_action', objinst=objinst,
objmethod=objmethod, args=args, kwargs=kwargs)
def object_backport_versions(self, context, objinst, object_versions):
"""Perform a backport of an object instance.
The default behavior of the base VersionedObjectSerializer, upon
receiving an object with a version newer than what is in the local
registry, is to call this method to request a backport of the object.
We want any conductor to handle this, so it is intentional that there
is no topic argument for this method.
:param context: The context within which to perform the backport
:param objinst: An instance of a VersionedObject to be backported
:param object_versions: A dict of {objname: version} mappings
:raises: NotImplementedError when an operator makes an error during
upgrade
:returns: The downgraded instance of objinst
"""
if not self.client.can_send_version('1.31'):
raise NotImplementedError(_('Incompatible conductor version - '
'please upgrade ironic-conductor '
'first'))
cctxt = self.client.prepare(topic=self.topic, version='1.31')
return cctxt.call(context, 'object_backport_versions', objinst=objinst,
object_versions=object_versions)
def destroy_volume_connector(self, context, connector, topic=None):
"""Delete a volume connector.
Delete the volume connector. The conductor will lock the related node
during this operation.
:param context: request context
:param connector: volume connector object
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked if node is locked by another conductor
:raises: NodeNotFound if the node associated with the connector does
not exist
:raises: VolumeConnectorNotFound if the volume connector cannot be
found
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.35')
return cctxt.call(context, 'destroy_volume_connector',
connector=connector)
def update_volume_connector(self, context, connector, topic=None):
"""Update the volume connector's information.
Update the volume connector's information in the database and return
a volume connector object. The conductor will lock the related node
during this operation.
:param context: request context
:param connector: a changed (but not saved) volume connector object
:param topic: RPC topic. Defaults to self.topic.
:raises: InvalidParameterValue if the volume connector's UUID is being
changed
:raises: NodeLocked if node is locked by another conductor
:raises: NodeNotFound if the node associated with the connector does
not exist
:raises: VolumeConnectorNotFound if the volume connector cannot be
found
:raises: VolumeConnectorTypeAndIdAlreadyExists if another connector
already exists with the same values for type and connector_id
fields
:returns: updated volume connector object, including all fields.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.35')
return cctxt.call(context, 'update_volume_connector',
connector=connector)
def destroy_volume_target(self, context, target, topic=None):
"""Delete a volume target.
:param context: request context
:param target: volume target object
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked if node is locked by another conductor
:raises: NodeNotFound if the node associated with the target does
not exist
:raises: VolumeTargetNotFound if the volume target cannot be found
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.37')
return cctxt.call(context, 'destroy_volume_target',
target=target)
def update_volume_target(self, context, target, topic=None):
"""Update the volume target's information.
Update the volume target's information in the database and return a
volume target object. The conductor will lock the related node during
this operation.
:param context: request context
:param target: a changed (but not saved) volume target object
:param topic: RPC topic. Defaults to self.topic.
:raises: InvalidParameterValue if the volume target's UUID is being
changed
:raises: NodeLocked if the node is already locked
:raises: NodeNotFound if the node associated with the volume target
does not exist
:raises: VolumeTargetNotFound if the volume target cannot be found
:raises: VolumeTargetBootIndexAlreadyExists if a volume target already
exists with the same node ID and boot index values
:returns: updated volume target object, including all fields
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.37')
return cctxt.call(context, 'update_volume_target',
target=target)
def vif_attach(self, context, node_id, vif_info, topic=None):
"""Attach VIF to a node
:param context: request context.
:param node_id: node ID or UUID.
:param vif_info: a dictionary representing VIF object.
It must have an 'id' key, whose value is a unique
identifier for that VIF.
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked, if node has an exclusive lock held on it
:raises: NetworkError, if an error occurs during attaching the VIF.
:raises: InvalidParameterValue, if a parameter that's required for
VIF attach is wrong/missing.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.38')
return cctxt.call(context, 'vif_attach', node_id=node_id,
vif_info=vif_info)
def vif_detach(self, context, node_id, vif_id, topic=None):
"""Detach VIF from a node
:param context: request context.
:param node_id: node ID or UUID.
:param vif_id: an ID of a VIF.
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked, if node has an exclusive lock held on it
:raises: NetworkError, if an error occurs during detaching the VIF.
:raises: InvalidParameterValue, if a parameter that's required for
VIF detach is wrong/missing.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.38')
return cctxt.call(context, 'vif_detach', node_id=node_id,
vif_id=vif_id)
def vif_list(self, context, node_id, topic=None):
"""List attached VIFs for a node
:param context: request context.
:param node_id: node ID or UUID.
:param topic: RPC topic. Defaults to self.topic.
:returns: List of VIF dictionaries, each dictionary will have an
'id' entry with the ID of the VIF.
:raises: NetworkError, if an error occurs during listing the VIFs.
:raises: InvalidParameterValue, if a parameter that's required for
VIF list is wrong/missing.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.38')
return cctxt.call(context, 'vif_list', node_id=node_id)
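# A minimal illustrative sketch of how a caller typically uses this client --
# the names below (ctxt, node, states) are placeholders from elsewhere in
# ironic, not defined in this module:
#
#   rpcapi = ConductorAPI()
#   topic = rpcapi.get_topic_for(node)
#   rpcapi.change_node_power_state(ctxt, node.uuid, states.POWER_ON,
#                                  topic=topic)
#
# Routing each call through get_topic_for() ensures it reaches the conductor
# that the hash ring maps the node to.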
| {
"content_hash": "3ab9377309d2e540e587e34e00ba5f87",
"timestamp": "",
"source": "github",
"line_count": 926,
"max_line_length": 79,
"avg_line_length": 46.87257019438445,
"alnum_prop": 0.6358630540963967,
"repo_name": "NaohiroTamura/ironic",
"id": "af7c286a406a8914b4575f05c0cc115a89344219",
"size": "44076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/conductor/rpcapi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "5077786"
},
{
"name": "Shell",
"bytes": "107935"
}
],
"symlink_target": ""
} |
class Dog:
    # The __init__ method is called when an object is created
# We give default values for the fields if none
# are provided
def __init__(self, name="", height=0, weight=0):
# self allows an object to refer to itself
# It is like how you refer to yourself with my
# We will take the values passed in and assign
    # them to the new Dog object's fields (attributes)
self.name = name
self.height = height
self.weight = weight
# Define what happens when the Dog is asked to
# demonstrate its capabilities
def run(self):
print("{} the dog runs".format(self.name))
def eat(self):
print("{} the dog eats".format(self.name))
def bark(self):
print("{} the dog barks".format(self.name))
def main():
# Create a new Dog object
spot = Dog("Spot", 66, 26)
spot.bark()
bowser = Dog()
main()
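# Fields can be read or reassigned directly on the instance -- a quick
# illustrative check, not part of the original lesson:
#
#   rex = Dog("Rex", 50, 20)
#   print(rex.name, rex.height, rex.weight)  # Rex 50 20
#   rex.weight = 22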
# ---------- GETTERS & SETTERS ----------
# Getters and Setters are used to protect our objects
# from assigning bad fields or for providing improved
# output
class Square:
def __init__(self, height="0", width="0"):
self.height = height
self.width = width
# This is the getter
@property
def height(self):
print("Retrieving the height")
# Put a __ before this private field
return self.__height
# This is the setter
@height.setter
def height(self, value):
# We protect the height from receiving a bad value
if value.isdigit():
# Put a __ before this private field
self.__height = value
else:
print("Please only enter numbers for height")
# This is the getter
@property
def width(self):
print("Retrieving the width")
return self.__width
# This is the setter
@width.setter
def width(self, value):
if value.isdigit():
self.__width = value
else:
print("Please only enter numbers for width")
def getArea(self):
return int(self.__width) * int(self.__height)
def main():
aSquare = Square()
height = input("Enter height : ")
width = input("Enter width : ")
aSquare.height = height
aSquare.width = width
print("Height :", aSquare.height)
print("Width :", aSquare.width)
print("The Area is :", aSquare.getArea())
main()
# ---------- WARRIORS BATTLE ----------
# We will create a game with this sample output
'''
Sam attacks Paul and deals 9 damage
Paul is down to 10 health
Paul attacks Sam and deals 7 damage
Sam is down to 7 health
Sam attacks Paul and deals 19 damage
Paul is down to -9 health
Paul has Died and Sam is Victorious
Game Over
'''
# We will create a Warrior & Battle class
import random
import math
# Warriors will have names, health, and attack and block maximums
# They will have the capabilities to attack and block random amounts
class Warrior:
def __init__(self, name="warrior", health=0, attkMax=0, blockMax=0):
self.name = name
self.health = health
self.attkMax = attkMax
self.blockMax = blockMax
def attack(self):
# Randomly calculate the attack amount
# random() returns a value from 0.0 to 1.0
attkAmt = self.attkMax * (random.random() + .5)
return attkAmt
def block(self):
# Randomly calculate how much of the attack was blocked
blockAmt = self.blockMax * (random.random() + .5)
return blockAmt
# The Battle class will have the capability to loop until 1 Warrior dies
# The Warriors will each get a turn to attack each turn
class Battle:
def startFight(self, warrior1, warrior2):
# Continue looping until a Warrior dies switching back and
# forth as the Warriors attack each other
while True:
if self.getAttackResult(warrior1, warrior2) == "Game Over":
print("Game Over")
break
if self.getAttackResult(warrior2, warrior1) == "Game Over":
print("Game Over")
break
# A function will receive each Warrior that will attack the other
# Have the attack and block amounts be integers to make the results clean
# Output the results of the fight as it goes
# If a Warrior dies return that result to end the looping in the
# above function
# Make this method static because we don't need to use self
@staticmethod
def getAttackResult(warriorA, warriorB):
warriorAAttkAmt = warriorA.attack()
warriorBBlockAmt = warriorB.block()
damage2WarriorB = math.ceil(warriorAAttkAmt - warriorBBlockAmt)
warriorB.health = warriorB.health - damage2WarriorB
print("{} attacks {} and deals {} damage".format(warriorA.name,
warriorB.name, damage2WarriorB))
print("{} is down to {} health".format(warriorB.name,
warriorB.health))
if warriorB.health <= 0:
print("{} has Died and {} is Victorious".format(warriorB.name,
warriorA.name))
return "Game Over"
else:
return "Fight Again"
def main():
# Create 2 Warriors
paul = Warrior("Paul", 50, 20, 10)
sam = Warrior("Sam", 50, 20, 10)
# Create Battle object
battle = Battle()
# Initiate Battle
battle.startFight(paul, sam)
main() | {
"content_hash": "9257085433aaafd7948f8fb94b9244f8",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 89,
"avg_line_length": 26.552884615384617,
"alnum_prop": 0.6027521274669564,
"repo_name": "alirsamar/pycourse",
"id": "9c70ce85643563e00dfff6ba91c07e8dc72aa24e",
"size": "5989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OOP.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49487"
}
],
"symlink_target": ""
} |
from tests.test_helper import *
import os
class TestSetup(unittest.TestCase):
def test_packages_includes_all_packages(self):
with open('setup.py', 'r') as f:
setup_contents = f.read()
packages_line = re.findall('packages=.*', setup_contents)
packages_from_setup = re.findall('"(.*?)"', str(packages_line))
packages_from_directories = ['braintree']
directories_that_dont_have_packages = ['braintree.ssl']
for dirname, dirnames, filenames in os.walk('braintree'):
for subdirname in dirnames:
package_from_directory = re.sub('/', '.', os.path.join(dirname, subdirname))
if package_from_directory not in directories_that_dont_have_packages and subdirname != '__pycache__':
packages_from_directories.append(package_from_directory)
mismatch_message = "List of packages in setup.py doesn't match subdirectories of 'braintree' - " \
+ "add your new directory to 'packages, or if none, `git clean -df` to remove a stale directory"
        self.assertEqual(sorted(packages_from_directories), sorted(packages_from_setup), mismatch_message)
| {
"content_hash": "0586790ec2ad92ab4d026cc612145b8f",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 117,
"avg_line_length": 56.42857142857143,
"alnum_prop": 0.6489451476793249,
"repo_name": "felixonmars/braintree_python",
"id": "9b78fc554e1386476601ec49edca8ece2e5f5552",
"size": "1185",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit/test_setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "760931"
},
{
"name": "Ruby",
"bytes": "588"
},
{
"name": "Shell",
"bytes": "232"
}
],
"symlink_target": ""
} |
import sys
import re
sys.path.append("../util")
import Util
import UnsupportedLanguageException
from chunkingConstants import *
# Types of elements on our version stacks
FUNC = "Function"
SBLOCK = "Block"
GENERIC = "Generic"
LINEINDEX = 0
LABELINDEX = 1
#Redo with a polymorphic solution for the languages
class scopeTracker:
#string --> --
#The language tells how the scope changes so we can tell when a block or function ends.
#For example in C/C++ and Java, a { signifies a increase in code block depth.
#In python however, indentation is used.
def __init__(self, language):
#These are pseudo stacks implemented as lists that track the current
#number of open scopes (brackets, tabs, etc), each change gets its own
#entry, which is then deleted when we see a matching closing entry
#Functions and blocks are included in full so they can be matched later
self.oldVerStack = []
self.newVerStack = []
self.lastOldFuncContext = ""
self.lastOldBlockContext = []
self.lastNewFuncContext = ""
self.lastNewBlockContext = []
if(language in Util.supportedLanguages):
self.language = language
else:
            raise UnsupportedLanguageException(language + " is not yet supported.")
def clearScope(self):
self.oldVerStack = []
self.newVerStack = []
self.lastOldFuncContext = ""
self.lastOldBlockContext = []
self.lastNewFuncContext = ""
self.lastNewBlockContext = []
#String -> list
#Returns a list giving the sequence of scope changes in this line.
def scopeOrder(self, line, lineType): #Seems to only matter in Bracket Languages
raise NotImplementedError("Base ScopeTracker is Abstract.")
def scopeIncreaseCount(self, line, lineType): #Seems to only matter in Bracket Languages
raise NotImplementedError("Base ScopeTracker is Abstract.")
def scopeDecreaseCount(self, line, lineType): #Seems to only matter in Bracket Languages
raise NotImplementedError("Base ScopeTracker is Abstract.")
#Returns true if this line contains an increased level of scope.
def isScopeIncrease(self, line, lineType):
raise NotImplementedError("Base ScopeTracker is Abstract.")
#Returns true if this line contains an decreased level of scope.
def isScopeDecrease(self, line, lineType):
raise NotImplementedError("Base ScopeTracker is Abstract.")
def appendFunctionEnding(self, line, functionName):
raise NotImplementedError("Base ScopeTracker is Abstract.")
#Returns true if both oldVerStack and newVerStack are empty.
def areAllContextsClosed(self):
return self.oldVerStack == [] and self.newVerStack == []
#Returns a tuple of lists of open function and block contexts in the old and new versions of the
#program. If all contexts are closed (matching brackets in C/C++, no indent in python)
#returns an empty list
def listOpenContexts(self):
        return ([o for o in self.oldVerStack if o[LABELINDEX] == SBLOCK or o[LABELINDEX] == FUNC],
                [n for n in self.newVerStack if n[LABELINDEX] == SBLOCK or n[LABELINDEX] == FUNC])
#list of tuples, label [FUNC|BLOCK|GENERIC] -> string
#Given a list with tuples of string and a label, and a choosen label, return the string of the last
#item in the list that matches that label. If nothing in the stack is found for that label, return
#"" instead.
def getTopType(self, stack, stackType):
for item in reversed(stack):
if(item[LABELINDEX] == stackType):
return item[LINEINDEX]
return ""
#string, [ADD|REMOVE|OTHER], [GENERIC|FUNC|BLOCK] -> --
#Increase the depth of our tracker and add in function or block contexts if they have been discovered.
def increaseScope(self, line, lineType, changeType):
raise NotImplementedError("Base ScopeTracker is Abstract.")
#string, [ADD|REMOVE|OTHER] -> --
#Decrease our current scope and close out any function or block contexts if necessary.
def decreaseScope(self, line, lineType):
raise NotImplementedError("Base ScopeTracker is Abstract.")
#Return the surrounding functional context or "" if not on the stack
def getFuncContext(self, lineType):
if(lineType == ADD or lineType == OTHER):
return self.lastNewFuncContext
elif(lineType == REMOVE):
return self.lastOldFuncContext
else:
assert("Not a valid line type")
#Return the surrounding block contexts or [] if not on the stack
def getBlockContext(self, lineType):
if(lineType == ADD or lineType == OTHER):
return self.lastNewBlockContext
elif(lineType == REMOVE):
return self.lastOldBlockContext
else:
assert("Not a valid line type")
#A debug function for printing the objects variables.
def printScope(self):
print("------------------<Scope Obj>------------------")
print("Language:")
print(self.language)
print("Old Stack:")
print(self.oldVerStack)
print("Old Func Cache:")
print(self.lastOldFuncContext)
print("Old Block Cache:")
print(self.lastOldBlockContext)
print("New Stack:")
print(self.newVerStack)
print("New Func Cache:")
print(self.lastNewFuncContext)
print("New Block Cache:")
print(self.lastNewBlockContext)
print("------------------<Scope Obj>------------------")
| {
"content_hash": "ac898896deac546578f34a18f1a359af",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 106,
"avg_line_length": 40.87591240875913,
"alnum_prop": 0.6614285714285715,
"repo_name": "Yagniksuchak/CodeParser",
"id": "24546af7012bb4a0693b4b89de5fca661f96c949",
"size": "5600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/logChunk/scopeTracker.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "9896"
},
{
"name": "Python",
"bytes": "300828"
}
],
"symlink_target": ""
} |
import luigi
luigi.namespace("mynamespace")
class Foo(luigi.Task):
p = luigi.Parameter()
class Bar(Foo):
task_namespace = "othernamespace" # namespace override
luigi.namespace()
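# With the declarations above, Foo registers as "mynamespace.Foo", while
# Bar's explicit task_namespace wins and it registers as
# "othernamespace.Bar". A quick check (illustrative, assuming a luigi
# version that exposes get_task_family()):
#
#   assert Foo.get_task_family() == "mynamespace.Foo"
#   assert Bar.get_task_family() == "othernamespace.Bar"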
| {
"content_hash": "7a26d890028af8bd05d86a75ee9a3a5b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 59,
"avg_line_length": 14.846153846153847,
"alnum_prop": 0.7098445595854922,
"repo_name": "cpcloud/luigi",
"id": "4d8439d91c1c6114d1bf8175b0e9e49c43034629",
"size": "768",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/namespace_test_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "35864"
},
{
"name": "Python",
"bytes": "596754"
}
],
"symlink_target": ""
} |
"""fix polymorphic_type
Revision ID: 51063fb35c12
Revises:
Create Date: 2014-11-26 14:41:55.789000
"""
# revision identifiers, used by Alembic.
revision = '51063fb35c12'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
update_pmtype(['language', 'contribution', 'parameter'], 'base', 'custom')
def downgrade():
update_pmtype(['language', 'contribution', 'parameter'], 'custom', 'base')
def update_pmtype(tablenames, before, after):
for table in tablenames:
op.execute(sa.text('UPDATE %s SET polymorphic_type = :after '
'WHERE polymorphic_type = :before' % table
).bindparams(before=before, after=after))
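# For example, upgrade() issues one statement per table, with the values
# bound as parameters by SQLAlchemy -- for the 'language' table:
#
#   UPDATE language SET polymorphic_type = 'custom'
#   WHERE polymorphic_type = 'base'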
| {
"content_hash": "3457112d189364418fa337de21ad1ee1",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 78,
"avg_line_length": 23.612903225806452,
"alnum_prop": 0.6844262295081968,
"repo_name": "clld/nts",
"id": "474339adc52f733f718c9a893e55218285e6a254",
"size": "732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/51063fb35c12_fix_polymorphic_type.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1"
},
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Mako",
"bytes": "15586"
},
{
"name": "Python",
"bytes": "45676"
}
],
"symlink_target": ""
} |
'''
Created on 24 Aug 2017
Queue via stacks
@author: igoroya
'''
from chapter3 import utils
class QueueV2(object):
def __init__(self):
self._input_stack = utils.Stack()
self._output_stack = utils.Stack()
        # Cache the front item so peek() does not have to do the double stack move
self.__cached_peek = None
def add_item(self, item):
'''
add an item at the end of the queue
'''
if(self.__cached_peek is None):
self.__cached_peek = item
self._input_stack.push(item)
def remove(self):
'''
        Removes the first item of the queue
'''
while (self._input_stack):
self._output_stack.push(self._input_stack.peek())
self._input_stack.remove()
        # Pop the one on top: this is the oldest one
self._output_stack.remove()
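        # NOTE: this assumes utils.Stack.peek() returns None (rather than
        # raising) on an empty stack, so the cache stays consistent when the
        # last element is removed.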
self.__cached_peek = self._output_stack.peek()
        # move everything back to the input stack
while (self._output_stack):
self._input_stack.push(self._output_stack.peek())
self._output_stack.remove()
def peek(self):
'''
        Returns the item at the front of the queue (without removing it)
'''
return self.__cached_peek
def is_empty(self):
'''
        Returns True if the queue is empty
'''
return self._input_stack.is_empty()
def __repr__(self):
return self._input_stack.__repr__()
if __name__ == '__main__':
my_queue = QueueV2()
utils.add_some_values_queue(my_queue, 15)
print(my_queue)
print("-------------")
print("peek next: {}".format(my_queue.peek()))
my_queue.remove()
print("peek after removing one: {}".format(my_queue.peek()))
print("-------------")
print(my_queue)
new_val = "perico"
print("Adding entry: {}".format(new_val))
my_queue.add_item(new_val)
print(my_queue)
print("peek next: {}".format(my_queue.peek()))
my_queue.remove()
print("peek after removing one: {}".format(my_queue.peek()))
print("-------------")
print(my_queue)
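# The implementation above shuttles every element across both stacks on each
# remove(), which costs O(n) per call. A common alternative -- sketched below
# with plain Python lists instead of the utils.Stack wrapper, so it is not
# part of the original exercise -- only moves elements when the output side
# runs dry, giving amortized O(1) adds and removes.
class AmortizedQueue(object):
    def __init__(self):
        self._in = []   # newest items are pushed here
        self._out = []  # oldest items are popped from here

    def add_item(self, item):
        self._in.append(item)

    def _shift(self):
        # Refill only when the output side is empty; each element is
        # moved at most once over its lifetime.
        if not self._out:
            while self._in:
                self._out.append(self._in.pop())

    def remove(self):
        # Raises IndexError if the queue is empty.
        self._shift()
        return self._out.pop()

    def peek(self):
        self._shift()
        return self._out[-1]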
| {
"content_hash": "eb29002fceefaf62ca4e6b1273320209",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 80,
"avg_line_length": 26.705128205128204,
"alnum_prop": 0.5530484877580413,
"repo_name": "igoroya/igor-oya-solutions-cracking-coding-interview",
"id": "9ddc461015311ca06e711fc2588c398504bb14f6",
"size": "2083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crackingcointsolutions/chapter3/excersise4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66675"
}
],
"symlink_target": ""
} |
"""Build an index mapping word -> list of occurrences"""
import sys
import re
import collections
WORD_RE = re.compile(r'\w+')
index = collections.defaultdict(list) # <1>
with open(sys.argv[1], encoding='utf-8') as fp:
for line_no, line in enumerate(fp, 1):
for match in WORD_RE.finditer(line):
word = match.group()
column_no = match.start()+1
location = (line_no, column_no)
index[word].append(location) # <2>
# print in alphabetical order
for word in sorted(index, key=str.upper):
print(word, index[word])
# END INDEX_DEFAULT
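# For comparison, the same index can be built with a plain dict via
# dict.setdefault, which fetches-or-inserts the list in a single expression
# (a minimal sketch with inline sample data):
index2 = {}
for word, location in [('hello', (1, 1)), ('hello', (2, 5))]:
index2.setdefault(word, []).append(location)
print(index2)  # {'hello': [(1, 1), (2, 5)]}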
| {
"content_hash": "24aeebe2029d414580304f420f40bcf4",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 56,
"avg_line_length": 28.571428571428573,
"alnum_prop": 0.6283333333333333,
"repo_name": "fluentpython/example-code",
"id": "8d3ae587f823a84bed5f2f7262cff2b0ed078d14",
"size": "788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "03-dict-set/index_default.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5286"
},
{
"name": "Java",
"bytes": "3443"
},
{
"name": "JavaScript",
"bytes": "323"
},
{
"name": "Python",
"bytes": "556101"
},
{
"name": "Shell",
"bytes": "946"
}
],
"symlink_target": ""
} |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Difference'] , ['MovingMedian'] , ['Seasonal_Second'] , ['LSTM'] ); | {
"content_hash": "7e82252d213956cb2a1e8cbf2ffccb71",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 90,
"avg_line_length": 40.75,
"alnum_prop": 0.7177914110429447,
"repo_name": "antoinecarme/pyaf",
"id": "b8f1d757e1e8b91c7921b95c2c41e9b4cbe0229b",
"size": "163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_MovingMedian_Seasonal_Second_LSTM.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import glob
import os
import sys
import unittest
if __name__ == '__main__':
suite = unittest.TestSuite()
cur_dir = os.path.dirname(os.path.realpath(__file__))
for testname in glob.glob(os.path.join(cur_dir, '*_test.py')):
print('Adding Test: ' + testname)
module = __import__(os.path.basename(testname)[:-3])
suite.addTests(unittest.defaultTestLoader.loadTestsFromModule(module))
result = unittest.TextTestRunner(verbosity=2).run(suite)
if result.wasSuccessful():
sys.exit(0)
else:
sys.exit(1)
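On interpreters where unittest's built-in discovery is available (Python 2.7 and later), the glob-and-__import__ loop above collapses into a single loader call; a minimal equivalent sketch, assuming the same *_test.py naming convention:
import os
import sys
import unittest
cur_dir = os.path.dirname(os.path.realpath(__file__))
suite = unittest.defaultTestLoader.discover(cur_dir, pattern='*_test.py')
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(0 if result.wasSuccessful() else 1)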
| {
"content_hash": "a0775ec6e12bb8801751391f8d5c1820",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 74,
"avg_line_length": 29.789473684210527,
"alnum_prop": 0.6890459363957597,
"repo_name": "scheib/chromium",
"id": "3df776ff32d2b1b12a692cb6cd85a75a3c24883c",
"size": "756",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tools/idl_parser/run_tests.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class ConnectCustomConfiguration(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'allow_envelope_publish': 'str',
'allow_salesforce_publish': 'str',
'all_users': 'str',
'all_users_except': 'str',
'configuration_type': 'str',
'connect_id': 'str',
'delivery_mode': 'str',
'enable_log': 'str',
'envelope_events': 'list[str]',
'event_data': 'ConnectEventData',
'events': 'list[str]',
'external_folder_id': 'str',
'external_folder_label': 'str',
'group_ids': 'list[str]',
'include_certificate_of_completion': 'str',
'include_cert_soap_header': 'str',
'include_document_fields': 'str',
'include_documents': 'str',
'include_envelope_void_reason': 'str',
'include_hmac': 'str',
'include_sender_accountas_custom_field': 'str',
'include_time_zone_information': 'str',
'name': 'str',
'password': 'str',
'recipient_events': 'list[str]',
'require_mutual_tls': 'str',
'requires_acknowledgement': 'str',
'salesforce_api_version': 'str',
'salesforce_authcode': 'str',
'salesforce_call_back_url': 'str',
'salesforce_documents_as_content_files': 'str',
'sender_override': 'str',
'sender_selectable_items': 'list[str]',
'sf_objects': 'list[ConnectSalesforceObject]',
'sign_message_with_x509_certificate': 'str',
'soap_namespace': 'str',
'url_to_publish_to': 'str',
'user_ids': 'list[str]',
'user_name': 'str',
'use_soap_interface': 'str'
}
attribute_map = {
'allow_envelope_publish': 'allowEnvelopePublish',
'allow_salesforce_publish': 'allowSalesforcePublish',
'all_users': 'allUsers',
'all_users_except': 'allUsersExcept',
'configuration_type': 'configurationType',
'connect_id': 'connectId',
'delivery_mode': 'deliveryMode',
'enable_log': 'enableLog',
'envelope_events': 'envelopeEvents',
'event_data': 'eventData',
'events': 'events',
'external_folder_id': 'externalFolderId',
'external_folder_label': 'externalFolderLabel',
'group_ids': 'groupIds',
'include_certificate_of_completion': 'includeCertificateOfCompletion',
'include_cert_soap_header': 'includeCertSoapHeader',
'include_document_fields': 'includeDocumentFields',
'include_documents': 'includeDocuments',
'include_envelope_void_reason': 'includeEnvelopeVoidReason',
'include_hmac': 'includeHMAC',
'include_sender_accountas_custom_field': 'includeSenderAccountasCustomField',
'include_time_zone_information': 'includeTimeZoneInformation',
'name': 'name',
'password': 'password',
'recipient_events': 'recipientEvents',
'require_mutual_tls': 'requireMutualTls',
'requires_acknowledgement': 'requiresAcknowledgement',
'salesforce_api_version': 'salesforceApiVersion',
'salesforce_authcode': 'salesforceAuthcode',
'salesforce_call_back_url': 'salesforceCallBackUrl',
'salesforce_documents_as_content_files': 'salesforceDocumentsAsContentFiles',
'sender_override': 'senderOverride',
'sender_selectable_items': 'senderSelectableItems',
'sf_objects': 'sfObjects',
'sign_message_with_x509_certificate': 'signMessageWithX509Certificate',
'soap_namespace': 'soapNamespace',
'url_to_publish_to': 'urlToPublishTo',
'user_ids': 'userIds',
'user_name': 'userName',
'use_soap_interface': 'useSoapInterface'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""ConnectCustomConfiguration - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._allow_envelope_publish = None
self._allow_salesforce_publish = None
self._all_users = None
self._all_users_except = None
self._configuration_type = None
self._connect_id = None
self._delivery_mode = None
self._enable_log = None
self._envelope_events = None
self._event_data = None
self._events = None
self._external_folder_id = None
self._external_folder_label = None
self._group_ids = None
self._include_certificate_of_completion = None
self._include_cert_soap_header = None
self._include_document_fields = None
self._include_documents = None
self._include_envelope_void_reason = None
self._include_hmac = None
self._include_sender_accountas_custom_field = None
self._include_time_zone_information = None
self._name = None
self._password = None
self._recipient_events = None
self._require_mutual_tls = None
self._requires_acknowledgement = None
self._salesforce_api_version = None
self._salesforce_authcode = None
self._salesforce_call_back_url = None
self._salesforce_documents_as_content_files = None
self._sender_override = None
self._sender_selectable_items = None
self._sf_objects = None
self._sign_message_with_x509_certificate = None
self._soap_namespace = None
self._url_to_publish_to = None
self._user_ids = None
self._user_name = None
self._use_soap_interface = None
self.discriminator = None
setattr(self, "_{}".format('allow_envelope_publish'), kwargs.get('allow_envelope_publish', None))
setattr(self, "_{}".format('allow_salesforce_publish'), kwargs.get('allow_salesforce_publish', None))
setattr(self, "_{}".format('all_users'), kwargs.get('all_users', None))
setattr(self, "_{}".format('all_users_except'), kwargs.get('all_users_except', None))
setattr(self, "_{}".format('configuration_type'), kwargs.get('configuration_type', None))
setattr(self, "_{}".format('connect_id'), kwargs.get('connect_id', None))
setattr(self, "_{}".format('delivery_mode'), kwargs.get('delivery_mode', None))
setattr(self, "_{}".format('enable_log'), kwargs.get('enable_log', None))
setattr(self, "_{}".format('envelope_events'), kwargs.get('envelope_events', None))
setattr(self, "_{}".format('event_data'), kwargs.get('event_data', None))
setattr(self, "_{}".format('events'), kwargs.get('events', None))
setattr(self, "_{}".format('external_folder_id'), kwargs.get('external_folder_id', None))
setattr(self, "_{}".format('external_folder_label'), kwargs.get('external_folder_label', None))
setattr(self, "_{}".format('group_ids'), kwargs.get('group_ids', None))
setattr(self, "_{}".format('include_certificate_of_completion'), kwargs.get('include_certificate_of_completion', None))
setattr(self, "_{}".format('include_cert_soap_header'), kwargs.get('include_cert_soap_header', None))
setattr(self, "_{}".format('include_document_fields'), kwargs.get('include_document_fields', None))
setattr(self, "_{}".format('include_documents'), kwargs.get('include_documents', None))
setattr(self, "_{}".format('include_envelope_void_reason'), kwargs.get('include_envelope_void_reason', None))
setattr(self, "_{}".format('include_hmac'), kwargs.get('include_hmac', None))
setattr(self, "_{}".format('include_sender_accountas_custom_field'), kwargs.get('include_sender_accountas_custom_field', None))
setattr(self, "_{}".format('include_time_zone_information'), kwargs.get('include_time_zone_information', None))
setattr(self, "_{}".format('name'), kwargs.get('name', None))
setattr(self, "_{}".format('password'), kwargs.get('password', None))
setattr(self, "_{}".format('recipient_events'), kwargs.get('recipient_events', None))
setattr(self, "_{}".format('require_mutual_tls'), kwargs.get('require_mutual_tls', None))
setattr(self, "_{}".format('requires_acknowledgement'), kwargs.get('requires_acknowledgement', None))
setattr(self, "_{}".format('salesforce_api_version'), kwargs.get('salesforce_api_version', None))
setattr(self, "_{}".format('salesforce_authcode'), kwargs.get('salesforce_authcode', None))
setattr(self, "_{}".format('salesforce_call_back_url'), kwargs.get('salesforce_call_back_url', None))
setattr(self, "_{}".format('salesforce_documents_as_content_files'), kwargs.get('salesforce_documents_as_content_files', None))
setattr(self, "_{}".format('sender_override'), kwargs.get('sender_override', None))
setattr(self, "_{}".format('sender_selectable_items'), kwargs.get('sender_selectable_items', None))
setattr(self, "_{}".format('sf_objects'), kwargs.get('sf_objects', None))
setattr(self, "_{}".format('sign_message_with_x509_certificate'), kwargs.get('sign_message_with_x509_certificate', None))
setattr(self, "_{}".format('soap_namespace'), kwargs.get('soap_namespace', None))
setattr(self, "_{}".format('url_to_publish_to'), kwargs.get('url_to_publish_to', None))
setattr(self, "_{}".format('user_ids'), kwargs.get('user_ids', None))
setattr(self, "_{}".format('user_name'), kwargs.get('user_name', None))
setattr(self, "_{}".format('use_soap_interface'), kwargs.get('use_soap_interface', None))
@property
def allow_envelope_publish(self):
"""Gets the allow_envelope_publish of this ConnectCustomConfiguration. # noqa: E501
When set to **true**, data is sent to the urlToPublishTo web address. This option can be set to false to stop sending data while maintaining the Connect configuration information. # noqa: E501
:return: The allow_envelope_publish of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._allow_envelope_publish
@allow_envelope_publish.setter
def allow_envelope_publish(self, allow_envelope_publish):
"""Sets the allow_envelope_publish of this ConnectCustomConfiguration.
When set to **true**, data is sent to the urlToPublishTo web address. This option can be set to false to stop sending data while maintaining the Connect configuration information. # noqa: E501
:param allow_envelope_publish: The allow_envelope_publish of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._allow_envelope_publish = allow_envelope_publish
@property
def allow_salesforce_publish(self):
"""Gets the allow_salesforce_publish of this ConnectCustomConfiguration. # noqa: E501
# noqa: E501
:return: The allow_salesforce_publish of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._allow_salesforce_publish
@allow_salesforce_publish.setter
def allow_salesforce_publish(self, allow_salesforce_publish):
"""Sets the allow_salesforce_publish of this ConnectCustomConfiguration.
# noqa: E501
:param allow_salesforce_publish: The allow_salesforce_publish of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._allow_salesforce_publish = allow_salesforce_publish
@property
def all_users(self):
"""Gets the all_users of this ConnectCustomConfiguration. # noqa: E501
When set to **true**, the tracked envelope and recipient events for all users, including users that are added a later time, are sent through Connect. # noqa: E501
:return: The all_users of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._all_users
@all_users.setter
def all_users(self, all_users):
"""Sets the all_users of this ConnectCustomConfiguration.
When set to **true**, the tracked envelope and recipient events for all users, including users that are added a later time, are sent through Connect. # noqa: E501
:param all_users: The all_users of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._all_users = all_users
@property
def all_users_except(self):
"""Gets the all_users_except of this ConnectCustomConfiguration. # noqa: E501
# noqa: E501
:return: The all_users_except of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._all_users_except
@all_users_except.setter
def all_users_except(self, all_users_except):
"""Sets the all_users_except of this ConnectCustomConfiguration.
# noqa: E501
:param all_users_except: The all_users_except of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._all_users_except = all_users_except
@property
def configuration_type(self):
"""Gets the configuration_type of this ConnectCustomConfiguration. # noqa: E501
If merge fields are being used, specifies the type of the merge field. The only supported value is **salesforce**. # noqa: E501
:return: The configuration_type of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._configuration_type
@configuration_type.setter
def configuration_type(self, configuration_type):
"""Sets the configuration_type of this ConnectCustomConfiguration.
If merge fields are being used, specifies the type of the merge field. The only supported value is **salesforce**. # noqa: E501
:param configuration_type: The configuration_type of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._configuration_type = configuration_type
@property
def connect_id(self):
"""Gets the connect_id of this ConnectCustomConfiguration. # noqa: E501
Specifies the DocuSign generated ID for the Connect configuration. # noqa: E501
:return: The connect_id of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._connect_id
@connect_id.setter
def connect_id(self, connect_id):
"""Sets the connect_id of this ConnectCustomConfiguration.
Specifies the DocuSign generated ID for the Connect configuration. # noqa: E501
:param connect_id: The connect_id of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._connect_id = connect_id
@property
def delivery_mode(self):
"""Gets the delivery_mode of this ConnectCustomConfiguration. # noqa: E501
# noqa: E501
:return: The delivery_mode of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._delivery_mode
@delivery_mode.setter
def delivery_mode(self, delivery_mode):
"""Sets the delivery_mode of this ConnectCustomConfiguration.
# noqa: E501
:param delivery_mode: The delivery_mode of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._delivery_mode = delivery_mode
@property
def enable_log(self):
"""Gets the enable_log of this ConnectCustomConfiguration. # noqa: E501
This turns Connect logging on or off. When set to **true**, logging is turned on. # noqa: E501
:return: The enable_log of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._enable_log
@enable_log.setter
def enable_log(self, enable_log):
"""Sets the enable_log of this ConnectCustomConfiguration.
This turns Connect logging on or off. When set to **true**, logging is turned on. # noqa: E501
:param enable_log: The enable_log of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._enable_log = enable_log
@property
def envelope_events(self):
"""Gets the envelope_events of this ConnectCustomConfiguration. # noqa: E501
A comma separated list of "Envelope" related events that are tracked through Connect. The possible event values are: Sent, Delivered, Completed, Declined, and Voided. # noqa: E501
:return: The envelope_events of this ConnectCustomConfiguration. # noqa: E501
:rtype: list[str]
"""
return self._envelope_events
@envelope_events.setter
def envelope_events(self, envelope_events):
"""Sets the envelope_events of this ConnectCustomConfiguration.
A comma separated list of "Envelope" related events that are tracked through Connect. The possible event values are: Sent, Delivered, Completed, Declined, and Voided. # noqa: E501
:param envelope_events: The envelope_events of this ConnectCustomConfiguration. # noqa: E501
:type: list[str]
"""
self._envelope_events = envelope_events
@property
def event_data(self):
"""Gets the event_data of this ConnectCustomConfiguration. # noqa: E501
:return: The event_data of this ConnectCustomConfiguration. # noqa: E501
:rtype: ConnectEventData
"""
return self._event_data
@event_data.setter
def event_data(self, event_data):
"""Sets the event_data of this ConnectCustomConfiguration.
:param event_data: The event_data of this ConnectCustomConfiguration. # noqa: E501
:type: ConnectEventData
"""
self._event_data = event_data
@property
def events(self):
"""Gets the events of this ConnectCustomConfiguration. # noqa: E501
# noqa: E501
:return: The events of this ConnectCustomConfiguration. # noqa: E501
:rtype: list[str]
"""
return self._events
@events.setter
def events(self, events):
"""Sets the events of this ConnectCustomConfiguration.
# noqa: E501
:param events: The events of this ConnectCustomConfiguration. # noqa: E501
:type: list[str]
"""
self._events = events
@property
def external_folder_id(self):
"""Gets the external_folder_id of this ConnectCustomConfiguration. # noqa: E501
# noqa: E501
:return: The external_folder_id of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._external_folder_id
@external_folder_id.setter
def external_folder_id(self, external_folder_id):
"""Sets the external_folder_id of this ConnectCustomConfiguration.
# noqa: E501
:param external_folder_id: The external_folder_id of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._external_folder_id = external_folder_id
@property
def external_folder_label(self):
"""Gets the external_folder_label of this ConnectCustomConfiguration. # noqa: E501
# noqa: E501
:return: The external_folder_label of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._external_folder_label
@external_folder_label.setter
def external_folder_label(self, external_folder_label):
"""Sets the external_folder_label of this ConnectCustomConfiguration.
# noqa: E501
:param external_folder_label: The external_folder_label of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._external_folder_label = external_folder_label
@property
def group_ids(self):
"""Gets the group_ids of this ConnectCustomConfiguration. # noqa: E501
# noqa: E501
:return: The group_ids of this ConnectCustomConfiguration. # noqa: E501
:rtype: list[str]
"""
return self._group_ids
@group_ids.setter
def group_ids(self, group_ids):
"""Sets the group_ids of this ConnectCustomConfiguration.
# noqa: E501
:param group_ids: The group_ids of this ConnectCustomConfiguration. # noqa: E501
:type: list[str]
"""
self._group_ids = group_ids
@property
def include_certificate_of_completion(self):
"""Gets the include_certificate_of_completion of this ConnectCustomConfiguration. # noqa: E501
When set to **true**, the Connect Service includes the Certificate of Completion with completed envelopes. # noqa: E501
:return: The include_certificate_of_completion of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._include_certificate_of_completion
@include_certificate_of_completion.setter
def include_certificate_of_completion(self, include_certificate_of_completion):
"""Sets the include_certificate_of_completion of this ConnectCustomConfiguration.
When set to **true**, the Connect Service includes the Certificate of Completion with completed envelopes. # noqa: E501
:param include_certificate_of_completion: The include_certificate_of_completion of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._include_certificate_of_completion = include_certificate_of_completion
@property
def include_cert_soap_header(self):
"""Gets the include_cert_soap_header of this ConnectCustomConfiguration. # noqa: E501
# noqa: E501
:return: The include_cert_soap_header of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._include_cert_soap_header
@include_cert_soap_header.setter
def include_cert_soap_header(self, include_cert_soap_header):
"""Sets the include_cert_soap_header of this ConnectCustomConfiguration.
# noqa: E501
:param include_cert_soap_header: The include_cert_soap_header of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._include_cert_soap_header = include_cert_soap_header
@property
def include_document_fields(self):
"""Gets the include_document_fields of this ConnectCustomConfiguration. # noqa: E501
When set to **true**, the Document Fields associated with envelope documents are included in the data. Document Fields are optional custom name-value pairs added to documents using the API. # noqa: E501
:return: The include_document_fields of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._include_document_fields
@include_document_fields.setter
def include_document_fields(self, include_document_fields):
"""Sets the include_document_fields of this ConnectCustomConfiguration.
When set to **true**, the Document Fields associated with envelope documents are included in the data. Document Fields are optional custom name-value pairs added to documents using the API. # noqa: E501
:param include_document_fields: The include_document_fields of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._include_document_fields = include_document_fields
@property
def include_documents(self):
"""Gets the include_documents of this ConnectCustomConfiguration. # noqa: E501
When set to **true**, Connect will send the PDF document along with the update XML. # noqa: E501
:return: The include_documents of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._include_documents
@include_documents.setter
def include_documents(self, include_documents):
"""Sets the include_documents of this ConnectCustomConfiguration.
When set to **true**, Connect will send the PDF document along with the update XML. # noqa: E501
:param include_documents: The include_documents of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._include_documents = include_documents
@property
def include_envelope_void_reason(self):
"""Gets the include_envelope_void_reason of this ConnectCustomConfiguration. # noqa: E501
When set to **true**, Connect will include the voidedReason for voided envelopes. # noqa: E501
:return: The include_envelope_void_reason of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._include_envelope_void_reason
@include_envelope_void_reason.setter
def include_envelope_void_reason(self, include_envelope_void_reason):
"""Sets the include_envelope_void_reason of this ConnectCustomConfiguration.
When set to **true**, Connect will include the voidedReason for voided envelopes. # noqa: E501
:param include_envelope_void_reason: The include_envelope_void_reason of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._include_envelope_void_reason = include_envelope_void_reason
@property
def include_hmac(self):
"""Gets the include_hmac of this ConnectCustomConfiguration. # noqa: E501
# noqa: E501
:return: The include_hmac of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._include_hmac
@include_hmac.setter
def include_hmac(self, include_hmac):
"""Sets the include_hmac of this ConnectCustomConfiguration.
# noqa: E501
:param include_hmac: The include_hmac of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._include_hmac = include_hmac
@property
def include_sender_accountas_custom_field(self):
"""Gets the include_sender_accountas_custom_field of this ConnectCustomConfiguration. # noqa: E501
When set to **true**, Connect will include the sender account as Custom Field in the data. # noqa: E501
:return: The include_sender_accountas_custom_field of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._include_sender_accountas_custom_field
@include_sender_accountas_custom_field.setter
def include_sender_accountas_custom_field(self, include_sender_accountas_custom_field):
"""Sets the include_sender_accountas_custom_field of this ConnectCustomConfiguration.
When set to **true**, Connect will include the sender account as Custom Field in the data. # noqa: E501
:param include_sender_accountas_custom_field: The include_sender_accountas_custom_field of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._include_sender_accountas_custom_field = include_sender_accountas_custom_field
@property
def include_time_zone_information(self):
"""Gets the include_time_zone_information of this ConnectCustomConfiguration. # noqa: E501
When set to **true**, Connect will include the envelope time zone information. # noqa: E501
:return: The include_time_zone_information of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._include_time_zone_information
@include_time_zone_information.setter
def include_time_zone_information(self, include_time_zone_information):
"""Sets the include_time_zone_information of this ConnectCustomConfiguration.
When set to **true**, Connect will include the envelope time zone information. # noqa: E501
:param include_time_zone_information: The include_time_zone_information of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._include_time_zone_information = include_time_zone_information
@property
def name(self):
"""Gets the name of this ConnectCustomConfiguration. # noqa: E501
The name of the Connect configuration. The name helps identify the configuration in the list. # noqa: E501
:return: The name of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ConnectCustomConfiguration.
The name of the Connect configuration. The name helps identify the configuration in the list. # noqa: E501
:param name: The name of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._name = name
@property
def password(self):
"""Gets the password of this ConnectCustomConfiguration. # noqa: E501
# noqa: E501
:return: The password of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""Sets the password of this ConnectCustomConfiguration.
# noqa: E501
:param password: The password of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._password = password
@property
def recipient_events(self):
"""Gets the recipient_events of this ConnectCustomConfiguration. # noqa: E501
A comma separated list of "Recipient" related events that are tracked through Connect. The possible event values are: Sent, Delivered, Completed, Declined, AuthenticationFailed, and AutoResponded. # noqa: E501
:return: The recipient_events of this ConnectCustomConfiguration. # noqa: E501
:rtype: list[str]
"""
return self._recipient_events
@recipient_events.setter
def recipient_events(self, recipient_events):
"""Sets the recipient_events of this ConnectCustomConfiguration.
A comma separated list of "Recipient" related events that are tracked through Connect. The possible event values are: Sent, Delivered, Completed, Declined, AuthenticationFailed, and AutoResponded. # noqa: E501
:param recipient_events: The recipient_events of this ConnectCustomConfiguration. # noqa: E501
:type: list[str]
"""
self._recipient_events = recipient_events
@property
def require_mutual_tls(self):
"""Gets the require_mutual_tls of this ConnectCustomConfiguration. # noqa: E501
# noqa: E501
:return: The require_mutual_tls of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._require_mutual_tls
@require_mutual_tls.setter
def require_mutual_tls(self, require_mutual_tls):
"""Sets the require_mutual_tls of this ConnectCustomConfiguration.
# noqa: E501
:param require_mutual_tls: The require_mutual_tls of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._require_mutual_tls = require_mutual_tls
@property
def requires_acknowledgement(self):
"""Gets the requires_acknowledgement of this ConnectCustomConfiguration. # noqa: E501
When set to **true**, and a publication message fails to be acknowledged, the message goes back into the queue and the system will retry delivery after a successful acknowledgement is received. If the delivery fails a second time, the message is not returned to the queue for sending until Connect receives a successful acknowledgement and it has been at least 24 hours since the previous retry. There is a maximum of ten retries. Alternately, you can use Republish Connect Information to manually republish the envelope information. # noqa: E501
:return: The requires_acknowledgement of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._requires_acknowledgement
@requires_acknowledgement.setter
def requires_acknowledgement(self, requires_acknowledgement):
"""Sets the requires_acknowledgement of this ConnectCustomConfiguration.
When set to **true**, and a publication message fails to be acknowledged, the message goes back into the queue and the system will retry delivery after a successful acknowledgement is received. If the delivery fails a second time, the message is not returned to the queue for sending until Connect receives a successful acknowledgement and it has been at least 24 hours since the previous retry. There is a maximum of ten retries. Alternately, you can use Republish Connect Information to manually republish the envelope information. # noqa: E501
:param requires_acknowledgement: The requires_acknowledgement of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._requires_acknowledgement = requires_acknowledgement
@property
def salesforce_api_version(self):
"""Gets the salesforce_api_version of this ConnectCustomConfiguration. # noqa: E501
# noqa: E501
:return: The salesforce_api_version of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._salesforce_api_version
@salesforce_api_version.setter
def salesforce_api_version(self, salesforce_api_version):
"""Sets the salesforce_api_version of this ConnectCustomConfiguration.
# noqa: E501
:param salesforce_api_version: The salesforce_api_version of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._salesforce_api_version = salesforce_api_version
@property
def salesforce_authcode(self):
"""Gets the salesforce_authcode of this ConnectCustomConfiguration. # noqa: E501
# noqa: E501
:return: The salesforce_authcode of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._salesforce_authcode
@salesforce_authcode.setter
def salesforce_authcode(self, salesforce_authcode):
"""Sets the salesforce_authcode of this ConnectCustomConfiguration.
# noqa: E501
:param salesforce_authcode: The salesforce_authcode of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._salesforce_authcode = salesforce_authcode
@property
def salesforce_call_back_url(self):
"""Gets the salesforce_call_back_url of this ConnectCustomConfiguration. # noqa: E501
# noqa: E501
:return: The salesforce_call_back_url of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._salesforce_call_back_url
@salesforce_call_back_url.setter
def salesforce_call_back_url(self, salesforce_call_back_url):
"""Sets the salesforce_call_back_url of this ConnectCustomConfiguration.
# noqa: E501
:param salesforce_call_back_url: The salesforce_call_back_url of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._salesforce_call_back_url = salesforce_call_back_url
@property
def salesforce_documents_as_content_files(self):
"""Gets the salesforce_documents_as_content_files of this ConnectCustomConfiguration. # noqa: E501
# noqa: E501
:return: The salesforce_documents_as_content_files of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._salesforce_documents_as_content_files
@salesforce_documents_as_content_files.setter
def salesforce_documents_as_content_files(self, salesforce_documents_as_content_files):
"""Sets the salesforce_documents_as_content_files of this ConnectCustomConfiguration.
# noqa: E501
:param salesforce_documents_as_content_files: The salesforce_documents_as_content_files of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._salesforce_documents_as_content_files = salesforce_documents_as_content_files
@property
def sender_override(self):
"""Gets the sender_override of this ConnectCustomConfiguration. # noqa: E501
# noqa: E501
:return: The sender_override of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._sender_override
@sender_override.setter
def sender_override(self, sender_override):
"""Sets the sender_override of this ConnectCustomConfiguration.
# noqa: E501
:param sender_override: The sender_override of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._sender_override = sender_override
@property
def sender_selectable_items(self):
"""Gets the sender_selectable_items of this ConnectCustomConfiguration. # noqa: E501
# noqa: E501
:return: The sender_selectable_items of this ConnectCustomConfiguration. # noqa: E501
:rtype: list[str]
"""
return self._sender_selectable_items
@sender_selectable_items.setter
def sender_selectable_items(self, sender_selectable_items):
"""Sets the sender_selectable_items of this ConnectCustomConfiguration.
# noqa: E501
:param sender_selectable_items: The sender_selectable_items of this ConnectCustomConfiguration. # noqa: E501
:type: list[str]
"""
self._sender_selectable_items = sender_selectable_items
@property
def sf_objects(self):
"""Gets the sf_objects of this ConnectCustomConfiguration. # noqa: E501
# noqa: E501
:return: The sf_objects of this ConnectCustomConfiguration. # noqa: E501
:rtype: list[ConnectSalesforceObject]
"""
return self._sf_objects
@sf_objects.setter
def sf_objects(self, sf_objects):
"""Sets the sf_objects of this ConnectCustomConfiguration.
# noqa: E501
:param sf_objects: The sf_objects of this ConnectCustomConfiguration. # noqa: E501
:type: list[ConnectSalesforceObject]
"""
self._sf_objects = sf_objects
@property
def sign_message_with_x509_certificate(self):
"""Gets the sign_message_with_x509_certificate of this ConnectCustomConfiguration. # noqa: E501
When set to **true**, Connect messages are signed with an X509 certificate. This provides support for 2-way SSL. # noqa: E501
:return: The sign_message_with_x509_certificate of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._sign_message_with_x509_certificate
@sign_message_with_x509_certificate.setter
def sign_message_with_x509_certificate(self, sign_message_with_x509_certificate):
"""Sets the sign_message_with_x509_certificate of this ConnectCustomConfiguration.
When set to **true**, Connect messages are signed with an X509 certificate. This provides support for 2-way SSL. # noqa: E501
:param sign_message_with_x509_certificate: The sign_message_with_x509_certificate of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._sign_message_with_x509_certificate = sign_message_with_x509_certificate
@property
def soap_namespace(self):
"""Gets the soap_namespace of this ConnectCustomConfiguration. # noqa: E501
The namespace of the SOAP interface. The namespace value must be set if useSoapInterface is set to true. # noqa: E501
:return: The soap_namespace of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._soap_namespace
@soap_namespace.setter
def soap_namespace(self, soap_namespace):
"""Sets the soap_namespace of this ConnectCustomConfiguration.
The namespace of the SOAP interface. The namespace value must be set if useSoapInterface is set to true. # noqa: E501
:param soap_namespace: The soap_namespace of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._soap_namespace = soap_namespace
@property
def url_to_publish_to(self):
"""Gets the url_to_publish_to of this ConnectCustomConfiguration. # noqa: E501
This is the web address and name of your listener or Retrieving Service endpoint. You need to include HTTPS:// in the web address. # noqa: E501
:return: The url_to_publish_to of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._url_to_publish_to
@url_to_publish_to.setter
def url_to_publish_to(self, url_to_publish_to):
"""Sets the url_to_publish_to of this ConnectCustomConfiguration.
This is the web address and name of your listener or Retrieving Service endpoint. You need to include HTTPS:// in the web address. # noqa: E501
:param url_to_publish_to: The url_to_publish_to of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._url_to_publish_to = url_to_publish_to
@property
def user_ids(self):
"""Gets the user_ids of this ConnectCustomConfiguration. # noqa: E501
A comma separated list of userIds. This sets the users associated with the tracked envelope and recipient events. When one of the events occurs for a set user, the information is sent through Connect. ###### Note: If allUsers is set to "false" then you must provide a list of user ids. # noqa: E501
:return: The user_ids of this ConnectCustomConfiguration. # noqa: E501
:rtype: list[str]
"""
return self._user_ids
@user_ids.setter
def user_ids(self, user_ids):
"""Sets the user_ids of this ConnectCustomConfiguration.
A comma separated list of userIds. This sets the users associated with the tracked envelope and recipient events. When one of the events occurs for a set user, the information is sent through Connect. ###### Note: If allUsers is set to "false" then you must provide a list of user ids. # noqa: E501
:param user_ids: The user_ids of this ConnectCustomConfiguration. # noqa: E501
:type: list[str]
"""
self._user_ids = user_ids
@property
def user_name(self):
"""Gets the user_name of this ConnectCustomConfiguration. # noqa: E501
# noqa: E501
:return: The user_name of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._user_name
@user_name.setter
def user_name(self, user_name):
"""Sets the user_name of this ConnectCustomConfiguration.
# noqa: E501
:param user_name: The user_name of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._user_name = user_name
@property
def use_soap_interface(self):
"""Gets the use_soap_interface of this ConnectCustomConfiguration. # noqa: E501
When set to **true**, indicates that the `urlToPublishTo` property contains a SOAP endpoint. # noqa: E501
:return: The use_soap_interface of this ConnectCustomConfiguration. # noqa: E501
:rtype: str
"""
return self._use_soap_interface
@use_soap_interface.setter
def use_soap_interface(self, use_soap_interface):
"""Sets the use_soap_interface of this ConnectCustomConfiguration.
When set to **true**, indicates that the `urlToPublishTo` property contains a SOAP endpoint. # noqa: E501
:param use_soap_interface: The use_soap_interface of this ConnectCustomConfiguration. # noqa: E501
:type: str
"""
self._use_soap_interface = use_soap_interface
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ConnectCustomConfiguration, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ConnectCustomConfiguration):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ConnectCustomConfiguration):
return True
return self.to_dict() != other.to_dict()
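Since the generated constructor routes every keyword argument through setattr, an instance can be configured and serialized in a few lines; a sketch with placeholder values (the name and URL are illustrative, not real endpoints):
config = ConnectCustomConfiguration(
name='example-listener',
url_to_publish_to='https://example.com/docusign/listener',
allow_envelope_publish='true',
envelope_events=['Sent', 'Completed', 'Voided'])
# to_dict() keys use the snake_case attribute names, not the JSON names.
print(config.to_dict()['url_to_publish_to'])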
| {
"content_hash": "6f02a0f5c7cb0a2faa755a8acf153773",
"timestamp": "",
"source": "github",
"line_count": 1173,
"max_line_length": 554,
"avg_line_length": 39.40920716112532,
"alnum_prop": 0.6530599000584074,
"repo_name": "docusign/docusign-python-client",
"id": "1a646ceaf2b5beef6597778de397d228ff11ceb7",
"size": "46286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docusign_esign/models/connect_custom_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9687716"
}
],
"symlink_target": ""
} |
"""Set of basic operations/utilities that are used by repacakging tool.
These functions were mostly imported from build/scripts/common/chromium_utils
and build/scripts/common/slave_utils.
"""
import errno
import os
import re
import shutil
import subprocess
import sys
import time
import zipfile
CREDENTIAL_ERROR_MESSAGE = ('You are attempting to access protected data with '
'no configured credentials')
class ExternalError(Exception):
pass
def IsWindows():
return sys.platform == 'cygwin' or sys.platform.startswith('win')
def IsLinux():
return sys.platform.startswith('linux')
def IsMac():
return sys.platform.startswith('darwin')
WIN_LINK_FUNC = None
try:
if sys.platform.startswith('win'):
import ctypes
# There's 4 possibilities on Windows for links:
# 1. Symbolic file links;
# 2. Symbolic directory links;
# 3. Hardlinked files;
# 4. Junctioned directories.
# (Hardlinked directories don't really exist.)
#
# 7-Zip does not handle symbolic file links as we want (it puts the
# content of the link, not what it refers to, and reports "CRC Error" on
# extraction). It does work as expected for symbolic directory links.
# Because the majority of the large files are in the root of the staging
# directory, we do however need to handle file links, so we do this with
# hardlinking. Junctioning requires a huge whack of code, so we take the
# slightly odd tactic of using #2 and #3, but not #1 and #4. That is,
# hardlinks for files, but symbolic links for directories.
def _WIN_LINK_FUNC(src, dst):
print 'linking %s -> %s' % (src, dst)
if os.path.isdir(src):
if not ctypes.windll.kernel32.CreateSymbolicLinkA(
str(dst), str(os.path.abspath(src)), 1):
raise ctypes.WinError()
else:
if not ctypes.windll.kernel32.CreateHardLinkA(str(dst), str(src), 0):
raise ctypes.WinError()
WIN_LINK_FUNC = _WIN_LINK_FUNC
except ImportError:
# If we don't have ctypes or aren't on Windows, leave WIN_LINK_FUNC as None.
pass
class PathNotFound(Exception):
pass
def IsGitCommitHash(regex_match):
"""Checks if match is a well-formed SHA1 hash."""
return re.match(r'^[0-9A-F]{40}$', regex_match.upper()) is not None
def IsCommitPosition(regex_match):
"""Checks if match is a well-formed revision (commit position) number."""
return re.match(r'^[0-9]{6}$', regex_match) is not None
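# Illustrative checks; the 40-character string below is an arbitrary hex
# value, not a real commit hash.
assert IsGitCommitHash('a' * 40)
assert not IsGitCommitHash('deadbeef')  # too short to be a SHA1
assert IsCommitPosition('123456')
assert not IsCommitPosition('12345')  # exactly six digits expected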
def MaybeMakeDirectory(*path):
"""Creates an entire path, if it doesn't already exist."""
file_path = os.path.join(*path)
try:
os.makedirs(file_path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
def RemovePath(*path):
"""Removes the file or directory at 'path', if it exists."""
file_path = os.path.join(*path)
if os.path.exists(file_path):
if os.path.isdir(file_path):
RemoveDirectory(file_path)
else:
RemoveFile(file_path)
def MoveFile(path, new_path):
"""Moves the file located at 'path' to 'new_path', if it exists."""
try:
RemoveFile(new_path)
os.rename(path, new_path)
except OSError, e:
if e.errno != errno.ENOENT:
raise
def RemoveFile(*path):
"""Removes the file located at 'path', if it exists."""
file_path = os.path.join(*path)
try:
os.remove(file_path)
except OSError, e:
if e.errno != errno.ENOENT:
raise
def CheckDepotToolsInPath():
delimiter = ';' if sys.platform.startswith('win') else ':'
path_list = os.environ['PATH'].split(delimiter)
for path in path_list:
if path.rstrip(os.path.sep).endswith('depot_tools'):
return path
return None
def RunGsutilCommand(args):
gsutil_path = CheckDepotToolsInPath()
if gsutil_path is None:
print ('Follow the instructions in this document '
'http://dev.chromium.org/developers/how-tos/install-depot-tools'
' to install depot_tools and then try again.')
sys.exit(1)
gsutil_path = os.path.join(gsutil_path, 'third_party', 'gsutil', 'gsutil')
gsutil = subprocess.Popen([sys.executable, gsutil_path] + args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=None)
stdout, stderr = gsutil.communicate()
if gsutil.returncode:
if (re.findall(r'status[ |=]40[1|3]', stderr) or
stderr.startswith(CREDENTIAL_ERROR_MESSAGE)):
print ('Follow these steps to configure your credentials and try'
' running the bisect-builds.py again.:\n'
' 1. Run "python %s config" and follow its instructions.\n'
' 2. If you have a @google.com account, use that account.\n'
' 3. For the project-id, just enter 0.' % gsutil_path)
sys.exit(1)
else:
raise Exception('Error running the gsutil command: %s' % stderr)
return stdout
def GSutilList(bucket):
query = '%s/' %(bucket)
stdout = RunGsutilCommand(['ls', query])
return [url[len(query):].strip('/') for url in stdout.splitlines()]
def GSUtilDownloadFile(src, dst):
command = ['cp', src, dst]
return RunGsutilCommand(command)
def GSUtilCopy(source, dest):
if not source.startswith('gs://') and not source.startswith('file://'):
source = 'file://' + source
if not dest.startswith('gs://') and not dest.startswith('file://'):
dest = 'file://' + dest
command = ['cp']
command.extend([source, dest])
return RunGsutilCommand(command)
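# The wrappers above compose into one-liners; an illustrative (placeholder)
# usage, kept commented out so nothing touches the network at import time:
#
#   names = GSutilList('gs://example-bucket')
#   GSUtilDownloadFile('gs://example-bucket/build.zip', '/tmp/build.zip')
#   GSUtilCopy('/tmp/build.zip', 'gs://example-bucket/copies/build.zip')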
def RunCommand(cmd, cwd=None):
"""Runs the given command and returns the exit code.
Args:
cmd: list of command arguments.
cwd: working directory to execute the command, or None if the current
working directory should be used.
Returns:
The exit code of the command.
"""
process = subprocess.Popen(cmd, cwd=cwd)
process.wait()
return process.returncode
def CopyFileToDir(src_path, dest_dir, dest_fn=None, link_ok=False):
"""Copies the file found at src_path to the dest_dir directory, with metadata.
If dest_fn is specified, the src_path is copied to that name in dest_dir,
otherwise it is copied to a file of the same name.
Raises PathNotFound if either the file or the directory is not found.
"""
# Verify the file and directory separately so we can tell them apart and
# raise PathNotFound rather than shutil.copyfile's IOError.
if not os.path.isfile(src_path):
raise PathNotFound('Unable to find file %s' % src_path)
if not os.path.isdir(dest_dir):
raise PathNotFound('Unable to find dir %s' % dest_dir)
src_file = os.path.basename(src_path)
if dest_fn:
# If we have ctypes and the caller doesn't mind links, use that to
# try to make the copy faster on Windows. http://crbug.com/418702.
if link_ok and WIN_LINK_FUNC:
WIN_LINK_FUNC(src_path, os.path.join(dest_dir, dest_fn))
else:
shutil.copy2(src_path, os.path.join(dest_dir, dest_fn))
else:
shutil.copy2(src_path, os.path.join(dest_dir, src_file))
def RemoveDirectory(*path):
"""Recursively removes a directory, even if it's marked read-only.
Remove the directory located at *path, if it exists.
shutil.rmtree() doesn't work on Windows if any of the files or directories
are read-only, which svn repositories and some .svn files are. We need to
be able to force the files to be writable (i.e., deletable) as we traverse
the tree.
Even with all this, Windows still sometimes fails to delete a file, citing
a permission error (maybe something to do with antivirus scans or disk
indexing). The best suggestion any of the user forums had was to wait a
bit and try again, so we do that too. It's hand-waving, but sometimes it
works. :/
"""
file_path = os.path.join(*path)
if not os.path.exists(file_path):
return
if sys.platform == 'win32':
# Give up and use cmd.exe's rd command.
file_path = os.path.normcase(file_path)
for _ in xrange(3):
print 'RemoveDirectory running %s' % (' '.join(
['cmd.exe', '/c', 'rd', '/q', '/s', file_path]))
if not subprocess.call(['cmd.exe', '/c', 'rd', '/q', '/s', file_path]):
break
print ' Failed'
time.sleep(3)
return
def RemoveWithRetry_non_win(rmfunc, path):
if os.path.islink(path):
return os.remove(path)
else:
return rmfunc(path)
remove_with_retry = RemoveWithRetry_non_win
def RmTreeOnError(function, path, excinfo):
r"""This works around a problem whereby python 2.x on Windows has no ability
to check for symbolic links. os.path.islink always returns False. But
shutil.rmtree will fail if invoked on a symbolic link whose target was
deleted before the link. E.g., reproduce like this:
> mkdir test
> mkdir test\1
> mklink /D test\current test\1
> python -c "import chromium_utils; chromium_utils.RemoveDirectory('test')"
To avoid this issue, we pass this error-handling function to rmtree. If
we see the exact sort of failure, we ignore it. All other failures we re-
raise.
"""
exception_type = excinfo[0]
exception_value = excinfo[1]
# If shutil.rmtree encounters a symbolic link on Windows, os.listdir will
# fail with a WindowsError exception with an ENOENT errno (i.e., file not
# found). We'll ignore that error. Note that WindowsError is not defined
# for non-Windows platforms, so we use OSError (of which it is a subclass)
# to avoid lint complaints about an undefined global on non-Windows
# platforms.
if (function is os.listdir) and issubclass(exception_type, OSError):
if exception_value.errno == errno.ENOENT:
# File does not exist, and we're trying to delete, so we can ignore the
# failure.
print 'WARNING: Failed to list %s during rmtree. Ignoring.\n' % path
else:
raise
else:
raise
for root, dirs, files in os.walk(file_path, topdown=False):
# For POSIX: making the directory writable guarantees removability.
# Windows will ignore the non-read-only bits in the chmod value.
os.chmod(root, 0770)
for name in files:
remove_with_retry(os.remove, os.path.join(root, name))
for name in dirs:
remove_with_retry(lambda p: shutil.rmtree(p, onerror=RmTreeOnError),
os.path.join(root, name))
remove_with_retry(os.rmdir, file_path)
def MakeZip(output_dir, archive_name, file_list, file_relative_dir,
raise_error=True, remove_archive_directory=True, strip_files=None,
ignore_sub_folder=False):
"""Packs files into a new zip archive.
Files are first copied into a directory within the output_dir named for
the archive_name, which will be created if necessary and emptied if it
already exists. The files are then packed using archive names
relative to the output_dir. That is, if the zipfile is unpacked in place,
it will create a directory identical to the new archive_name directory, in
the output_dir. The zip file will be named as the archive_name, plus
'.zip'.
Args:
output_dir: Absolute path to the directory in which the archive is to
be created.
archive_name: Name of the output_dir subdirectory (and of the resulting
zip file) into which the files are staged.
file_list: List of paths to files or subdirectories, relative to the
file_relative_dir.
file_relative_dir: Absolute path to the directory containing the files
and subdirectories in the file_list.
raise_error: Whether to raise a PathNotFound error if one of the files in
the list is not found.
remove_archive_directory: Whether to remove the archive staging directory
before copying files over to it.
strip_files: List of executable files from which to strip symbols when zipping.
Returns:
A tuple consisting of (archive_dir, zip_file_path), where archive_dir
is the full path to the newly created archive_name subdirectory.
Raises:
PathNotFound if any of the files in the list is not found, unless
raise_error is False, in which case the error will be ignored.
"""
start_time = time.clock()
# Collect files into the archive directory.
archive_dir = os.path.join(output_dir, archive_name)
print 'output_dir: %s, archive_name: %s' % (output_dir, archive_name)
print 'archive_dir: %s, remove_archive_directory: %s, exists: %s' % (
archive_dir, remove_archive_directory, os.path.exists(archive_dir))
if remove_archive_directory and os.path.exists(archive_dir):
# Move it even if it's not a directory as expected. This can happen with
# FILES.cfg archive creation where we create an archive staging directory
# that is the same name as the ultimate archive name.
if not os.path.isdir(archive_dir):
print 'Moving old "%s" file to create same name directory.' % archive_dir
previous_archive_file = '%s.old' % archive_dir
MoveFile(archive_dir, previous_archive_file)
else:
print 'Removing %s' % archive_dir
RemoveDirectory(archive_dir)
print 'Now, os.path.exists(%s): %s' % (
archive_dir, os.path.exists(archive_dir))
MaybeMakeDirectory(archive_dir)
for needed_file in file_list:
needed_file = needed_file.rstrip()
# These paths are relative to the file_relative_dir. We need to copy
# them over maintaining the relative directories, where applicable.
src_path = os.path.join(file_relative_dir, needed_file)
dirname, basename = os.path.split(needed_file)
try:
if os.path.isdir(src_path):
if WIN_LINK_FUNC:
WIN_LINK_FUNC(src_path, os.path.join(archive_dir, needed_file))
else:
shutil.copytree(src_path, os.path.join(archive_dir, needed_file),
symlinks=True)
elif dirname != '' and basename != '':
dest_dir = os.path.join(archive_dir, dirname)
MaybeMakeDirectory(dest_dir)
CopyFileToDir(src_path, dest_dir, basename, link_ok=True)
if strip_files and basename in strip_files:
cmd = ['strip', os.path.join(dest_dir, basename)]
RunCommand(cmd)
else:
CopyFileToDir(src_path, archive_dir, basename, link_ok=True)
if strip_files and basename in strip_files:
cmd = ['strip', os.path.join(archive_dir, basename)]
RunCommand(cmd)
except PathNotFound:
if raise_error:
raise
end_time = time.clock()
print 'Took %f seconds to create archive directory.' % (end_time - start_time)
# Pack the zip file.
output_file = '%s.zip' % archive_dir
previous_file = '%s_old.zip' % archive_dir
MoveFile(output_file, previous_file)
# If we have 7z, use that as it's much faster. See http://crbug.com/418702.
windows_zip_cmd = None
if os.path.exists('C:\\Program Files\\7-Zip\\7z.exe'):
windows_zip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'a', '-y', '-mx1']
# On Windows we use the python zip module; on Linux and Mac, we use the zip
# command as it will handle links and file bits (executable), which is much
# easier than trying to do that with ZipInfo options.
start_time = time.clock()
if IsWindows() and not windows_zip_cmd:
print 'Creating %s' % output_file
def _Addfiles(to_zip_file, dirname, files_to_add):
for this_file in files_to_add:
archive_name = this_file
this_path = os.path.join(dirname, this_file)
if os.path.isfile(this_path):
# Store files named relative to the outer output_dir.
archive_name = this_path.replace(output_dir + os.sep, '')
if os.path.getsize(this_path) == 0:
compress_method = zipfile.ZIP_STORED
else:
compress_method = zipfile.ZIP_DEFLATED
to_zip_file.write(this_path, archive_name, compress_method)
print 'Adding %s' % archive_name
zip_file = zipfile.ZipFile(output_file, 'w', zipfile.ZIP_DEFLATED,
allowZip64=True)
try:
os.path.walk(archive_dir, _Addfiles, zip_file)
finally:
zip_file.close()
else:
if IsMac() or IsLinux():
zip_cmd = ['zip', '-yr1']
else:
zip_cmd = windows_zip_cmd
if ignore_sub_folder:
zip_cmd.extend(['-j'])
saved_dir = os.getcwd()
os.chdir(os.path.dirname(archive_dir))
command = zip_cmd + [output_file, os.path.basename(archive_dir)]
result = RunCommand(command)
os.chdir(saved_dir)
if result and raise_error:
raise ExternalError('zip failed: %s => %s' %
(str(command), result))
end_time = time.clock()
print 'Took %f seconds to create zip.' % (end_time - start_time)
return (archive_dir, output_file)
def ExtractZip(filename, output_dir, extract_file_list=[], verbose=True):
"""Extract the zip archive in the output directory."""
MaybeMakeDirectory(output_dir)
  # On Linux and Mac, we use the unzip command as it will
  # handle links and file bits (executable), which is much
  # easier than trying to do that with ZipInfo options.
#
# The Mac Version of unzip unfortunately does not support Zip64, whereas
# the python module does, so we have to fallback to the python zip module
# on Mac if the filesize is greater than 4GB.
#
# On Windows, try to use 7z if it is installed, otherwise fall back to python
# zip module and pray we don't have files larger than 512MB to unzip.
unzip_cmd = None
if ((IsMac() and os.path.getsize(filename) < 4 * 1024 * 1024 * 1024)
or IsLinux()):
unzip_cmd = ['unzip', '-o']
elif IsWindows() and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe'):
unzip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']
if unzip_cmd:
# Make sure path is absolute before changing directories.
filepath = os.path.abspath(filename)
saved_dir = os.getcwd()
os.chdir(output_dir)
command = unzip_cmd + [filepath]
command.extend(extract_file_list)
result = RunCommand(command)
os.chdir(saved_dir)
if result:
raise ExternalError('unzip failed: %s => %s' % (str(command), result))
else:
assert IsWindows() or IsMac()
zf = zipfile.ZipFile(filename)
# TODO(hinoka): This can be multiprocessed.
for name in zf.namelist():
if verbose:
print 'Extracting %s' % name
zf.extract(name, output_dir)
if IsMac():
# Restore permission bits.
os.chmod(os.path.join(output_dir, name),
zf.getinfo(name).external_attr >> 16L)
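# A hedged usage sketch for ExtractZip. 'build.zip' and 'out' are
# hypothetical paths, and this helper is illustrative only -- nothing in
# this module calls it.
def _ExampleExtractZipUsage():
  ExtractZip('build.zip', 'out')  # extract the whole archive
  ExtractZip('build.zip', 'out', extract_file_list=['chrome.exe'],
             verbose=False)       # extract a single member quietly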
| {
"content_hash": "51576c855c7e849dc8717af9b2617c0a",
"timestamp": "",
"source": "github",
"line_count": 499,
"max_line_length": 80,
"avg_line_length": 36.92585170340681,
"alnum_prop": 0.6661782264191902,
"repo_name": "Samsung/ChromiumGStreamerBackend",
"id": "517f22714d74286670720a303b03d4d0e194b60c",
"size": "18593",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tools/bisect_repackage/bisect_repackage_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
Using Flask-Restless with jQuery
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This provides a simple example of using Flask-Restless on the server to
create ReSTful API endpoints and [jQuery][0] on the client to make API
requests.
This requires the following Python libraries to be installed:
* Flask
* Flask-Restless
* Flask-SQLAlchemy
To install them using ``pip``, do::
pip install Flask Flask-SQLAlchemy Flask-Restless
To use this example, run this package from the command-line. If you are
using Python 2.7 or later::
python -m jquery
If you are using Python 2.6 or earlier::
python -m jquery.__main__
To view the example in action, direct your web browser to
``http://localhost:5000``. You must have JavaScript enabled in your web
browser for this example to work.
:copyright: 2012 Jeffrey Finkelstein <jeffrey.finkelstein@gmail.com>
:license: GNU AGPLv3+ or BSD
"""
import os
import os.path
from flask import Flask, render_template
from flask.ext.restless import APIManager
from flask.ext.sqlalchemy import SQLAlchemy
# Step 0: the database in this example is at './test.sqlite'.
DATABASE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'test.sqlite')
if os.path.exists(DATABASE):
os.unlink(DATABASE)
# Step 1: setup the Flask application.
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['TESTING'] = True
app.config['SECRET_KEY'] = os.urandom(24)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///%s' % DATABASE
# Step 2: initialize extensions.
db = SQLAlchemy(app)
api_manager = APIManager(app, flask_sqlalchemy_db=db)
# Step 3: create the database model.
class Person(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Unicode)
# Step 4: create the database and add some test people.
db.create_all()
for i in range(1, 10):
person = Person(name=u'person' + unicode(i))
db.session.add(person)
db.session.commit()
print Person.query.all()
# Step 5: create endpoints for the application.
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
# Step 6: create the API endpoints.
api_manager.create_api(Person, methods=['GET'])
# Step 7: run the application.
app.run()
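# A hedged client-side sketch. This code is unreachable at runtime because
# app.run() above blocks; run it from a separate interpreter while the
# server is up. The response fields assume Flask-Restless's default
# paginated JSON format:
#
#   import requests
#   people = requests.get('http://localhost:5000/api/person').json()
#   print people['num_results']  # -> 9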
| {
"content_hash": "2e4c702f5d9e1c26e1787228f236cbca",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 75,
"avg_line_length": 27.5,
"alnum_prop": 0.6813852813852814,
"repo_name": "ternaris/flask-restless",
"id": "496af92c6bc886f55fe3fd5ceee0480b04cd0974",
"size": "2310",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "examples/clients/jquery/__main__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2565"
},
{
"name": "Python",
"bytes": "380919"
},
{
"name": "Shell",
"bytes": "1977"
}
],
"symlink_target": ""
} |
"""Regression test for trailing-whitespace (C0303)."""
# pylint: disable=mixed-line-endings, print-statement
__revision__ = 0
print 'some trailing whitespace'
print 'trailing whitespace does not count towards the line length limit'
print 'windows line ends are ok'
print 'but trailing whitespace on win is not'
| {
"content_hash": "1138a3d8d9685ebfc89fa9587dd0c76d",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 91,
"avg_line_length": 42.125,
"alnum_prop": 0.712166172106825,
"repo_name": "Titulacion-Sistemas/PythonTitulacion-EV",
"id": "2eecb11ba4d3ecd96aecad75d5345c9789c39c7e",
"size": "337",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Lib/site-packages/pylint/test/input/func_trailing_whitespace.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "2117"
},
{
"name": "C",
"bytes": "469338"
},
{
"name": "C++",
"bytes": "93276"
},
{
"name": "CSS",
"bytes": "173812"
},
{
"name": "JavaScript",
"bytes": "203291"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "17198855"
},
{
"name": "Shell",
"bytes": "2237"
},
{
"name": "TeX",
"bytes": "1527"
},
{
"name": "Visual Basic",
"bytes": "904"
},
{
"name": "XSLT",
"bytes": "154751"
}
],
"symlink_target": ""
} |
import tensorflow as tf
import numpy as np
import os
from scipy.io import loadmat
class VGG_Model(object):
def __init__(self):
self.param_path = os.path.join(os.getcwd(), "model", "vgg-face.mat")
self.data = loadmat(self.param_path)
self.meta = self.data['meta']
self.classes = self.meta['classes']
self.class_names = self.classes[0][0]['description'][0][0]
self.normalization = self.meta['normalization']
self.layers = self.data['layers']
self.average_image = np.squeeze(self.normalization[0][0]['averageImage'][0][0][0][0])
self.image_size = np.squeeze(self.normalization[0][0]['imageSize'][0][0])
self.used = False
def vgg(self, input_maps, reuse=False):
with tf.variable_scope("vgg"):
if reuse:
tf.get_variable_scope().reuse_variables()
input_maps = input_maps - tf.constant(self.average_image)
input_maps = tf.image.resize_images(input_maps, size=[self.image_size[0], self.image_size[1]])
# read layer info
current = input_maps
network = {}
for layer in self.layers[0]:
name = layer[0]['name'][0][0]
layer_type = layer[0]['type'][0][0]
if layer_type == 'conv':
if name[:2] == 'fc':
padding = 'VALID'
else:
padding = 'SAME'
stride = layer[0]['stride'][0][0]
kernel, bias = layer[0]['weights'][0][0]
bias = np.squeeze(bias).reshape(-1)
kernel = tf.constant(kernel)
bias = tf.constant(bias)
kernel = tf.get_variable(name+"_W", initializer=kernel)
bias = tf.get_variable(name+"_b", initializer=bias)
conv = tf.nn.conv2d(current, kernel,
strides=(1, stride[0], stride[0], 1), padding=padding)
current = tf.nn.bias_add(conv, bias)
# print(name, 'stride:', stride, 'kernel size:', tf.shape(kernel))
elif layer_type == 'relu':
current = tf.nn.relu(current)
# print(name)
elif layer_type == 'pool':
stride = layer[0]['stride'][0][0]
pool = layer[0]['pool'][0][0]
current = tf.nn.max_pool(current, ksize=(1, pool[0], pool[1], 1),
strides=(1, stride[0], stride[0], 1), padding='SAME')
# print(name, 'stride:', stride)
elif layer_type == 'softmax':
current = tf.nn.softmax(tf.reshape(current, [-1, len(self.class_names)]))
# print(name)
network[name] = current
return network["conv4_3"], network["conv5_3"]
def vgg_loss(self, a, b):
if self.used == False:
conv4_a, conv5_a = self.vgg(a, reuse=False)
self.used = True
else:
conv4_a, conv5_a = self.vgg(a, reuse=True)
conv4_b, conv5_b = self.vgg(b, reuse=True)
return tf.reduce_mean(tf.abs(conv4_a - conv4_b)) + \
tf.reduce_mean(tf.abs(conv5_a - conv5_b))
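# A hedged usage sketch. The 224x224x3 input shape is an assumption (the
# real size comes from self.image_size in the .mat file), and this helper
# is never called by the module itself.
def _example_vgg_loss_usage():
    model = VGG_Model()
    a = tf.placeholder(tf.float32, [None, 224, 224, 3])
    b = tf.placeholder(tf.float32, [None, 224, 224, 3])
    # The first call builds the shared "vgg" scope; later calls reuse it.
    return model.vgg_loss(a, b)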
| {
"content_hash": "7d049a50816c5e9b7f913d83c7203291",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 106,
"avg_line_length": 44.02597402597402,
"alnum_prop": 0.48436578171091443,
"repo_name": "xhchrn/gegan",
"id": "48e0534d77f95dbbc61a9527e4c3d525de14725f",
"size": "3414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model/vgg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "56357"
}
],
"symlink_target": ""
} |
from nose import tools
import subprocess
import tempfile
import os
import shutil
import sys
import data_packager as pkg
TESTDIR = os.path.dirname(os.path.realpath(__file__))
BASEDIR = os.path.dirname(os.path.dirname(TESTDIR))
FIXTURE = os.path.join(TESTDIR, 'test-fixture-package')
FIXTURE_ALTPATH = 'alt_resources'
ENV = {'PYTHONPATH': BASEDIR}
def in_tempdir(function):
def wrapper(*args, **kw):
path = tempfile.mkdtemp()
cwd = os.path.realpath('.')
try:
os.chdir(path)
function(*args, **kw)
        finally:
            os.chdir(cwd)  # leave the temp dir before removing it
            shutil.rmtree(path)
wrapper.__name__ = function.__name__
return wrapper
def with_virtualenv(function):
@in_tempdir
def wrapper(*args, **kw):
venv = os.path.realpath('test-virtualenv')
subprocess.check_call([
sys.executable,
'-c',
"import sys; import pkg_resources; sys.exit(pkg_resources.load_entry_point('virtualenv', 'console_scripts', 'virtualenv')())",
venv,
])
os.chdir(BASEDIR)
subprocess.check_call([
vpython(venv),
'setup.py',
'develop',
])
os.chdir(os.path.realpath(os.path.join(venv, '..')))
return function(*(args + (venv,)), **kw)
wrapper.__name__ = function.__name__
return wrapper
def vpython(venv):
return os.path.join(venv, 'bin', 'python')
def vpip(venv):
return os.path.join(venv, 'bin', 'pip')
def with_fixture(function):
@with_virtualenv
def wrapper(*args, **kw):
venv = args[-1]
# Copy the test fixture into the tempdir,
# and symlink its FIXTURE_ALTPATH as a sibling for path-based tests.
shutil.copytree(FIXTURE, os.path.basename(FIXTURE))
os.symlink(os.path.join(os.path.basename(FIXTURE), FIXTURE_ALTPATH), FIXTURE_ALTPATH)
os.chdir(os.path.basename(FIXTURE))
# Uses write_manifest, write_setup, and write_module
# of the Builder class to produce files for setuptools
# to do a proper sdist.
subprocess.check_call([
vpython(venv),
'-c',
'; '.join([
'import data_packager as p',
'b = p.Builder("tfp")',
'b.write_setup(name="test-fixture-package", version="0.0.1", author="Ethan Rowe", author_email="ethan@the-rowes.com", description="Foo", long_description="fooFoo")',
'b.write_manifest()',
'b.write_module()',
]),
],
)
# Now builds the source distribution installable by pip.
subprocess.check_call([
vpython(venv),
'setup.py',
'sdist'],
)
# And install that source dist using the venv's pip.
os.chdir('..')
subprocess.check_call([
vpip(venv),
'install',
os.path.join(os.path.basename(FIXTURE), 'dist', 'test-fixture-package-0.0.1.tar.gz')],
)
return function(*args, **kw)
wrapper.__name__ = function.__name__
return wrapper
def do_operation(venv, script):
cmd = [
vpython(venv),
'-c',
'import tfp; %s' % script,
]
print "Command:", ' '.join(cmd)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdoutdata, stderrdata = p.communicate()
if p.wait() != 0:
raise Exception, "Return code was non-zero: " + stderrdata
return stdoutdata
def resource_query(venv, function, argument):
result = do_operation(venv, "import pkg_resources as p; print repr(p.%s('tfp', '%s'))" % (function, argument))
return eval(result)
def pkg_operation(venv, script):
return do_operation(venv, 'm = tfp.AssetManager(); %s' % script)
def dir_operation(venv, path, script):
script = "m = tfp.AssetManager('%s'); %s" % (path, script)
return do_operation(venv, script)
def flex_operation(venv, script, path=None):
args = [venv, script]
op = pkg_operation
if path:
args.insert(1, path)
op = dir_operation
return op(*args)
class TestPackager(object):
@with_fixture
def test_package_filename(self, venv):
expect_a = resource_query(venv, 'resource_filename', 'assets/asset_a.txt')
expect_b = resource_query(venv, 'resource_filename', 'assets/asset_b.txt')
tools.assert_equal(
expect_a + "\n",
pkg_operation(venv, 'print m.filename("asset_a.txt")'))
tools.assert_equal(
expect_b + "\n",
pkg_operation(venv, 'print m.filename("asset_b.txt")'))
@with_fixture
def test_dir_filename(self, venv):
expect_a = os.path.realpath(os.path.join('foo', 'asset_a.txt'))
expect_b = os.path.realpath(os.path.join('foo', 'asset_b.txt'))
tools.assert_equal(
expect_a + "\n",
dir_operation(venv, 'foo', 'print m.filename("asset_a.txt")'))
tools.assert_equal(
expect_b + "\n",
dir_operation(venv, 'foo', 'print m.filename("asset_b.txt")'))
def test_exists(self):
@with_fixture
def check(p, a, x, venv):
result = flex_operation(venv, "print repr(m.exists('%s'))" % a, p)
tools.assert_equal(x, eval(result))
for asset, expectation in (('asset_a.txt', True), ('not_real.txt', False)):
for path in (None, FIXTURE_ALTPATH):
yield check, path, asset, expectation
def test_list(self):
@with_fixture
def check(expectation, path, venv):
tools.assert_equal(
sorted(expectation),
sorted(eval(flex_operation(venv, "print repr(m.list())", path))))
for exp, path in (
(['asset_a.txt', 'asset_b.txt', 'package_asset'], None),
(['asset_a.txt', 'asset_b.txt', 'directory_asset'], FIXTURE_ALTPATH)):
yield check, exp, path
@with_fixture
def test_string_pkg(self, venv):
expect = resource_query(venv, 'resource_string', 'assets/asset_a.txt')
result = flex_operation(venv, "print repr(m.string('asset_a.txt'))", None)
tools.assert_equal(expect, eval(result))
@with_fixture
def test_string_dir(self, venv):
expect = open(os.path.join(FIXTURE_ALTPATH, 'asset_a.txt'), 'rb').read()
result = flex_operation(venv, "print repr(m.string('asset_a.txt'))", FIXTURE_ALTPATH)
tools.assert_equal(expect, eval(result))
@with_fixture
def test_stream_pkg(self, venv):
expect = resource_query(venv, 'resource_string', 'assets/asset_b.txt')
result = flex_operation(venv, "print repr(m.stream('asset_b.txt').read())", None)
tools.assert_equal(expect, eval(result))
@with_fixture
def test_stream_dir(self, venv):
expect = open(os.path.join(FIXTURE_ALTPATH, 'asset_b.txt'), 'rb').read()
result = flex_operation(venv, "print repr(m.stream('asset_b.txt').read())", FIXTURE_ALTPATH)
tools.assert_equal(expect, eval(result))
@with_fixture
def test_writer_dir(self, venv):
_ = flex_operation(venv, "s = m.writer('write_asset'); s.write('some junk'); s.close()", FIXTURE_ALTPATH)
received = open(os.path.join(FIXTURE_ALTPATH, 'write_asset'), 'rb').read()
tools.assert_equal('some junk', received)
@tools.raises(NotImplementedError)
@with_fixture
def test_writer_pkg(self, venv):
exception = flex_operation(venv, "\ntry:\n m.writer('write_asset')\nexcept Exception as e:\n print repr(e)", None)
raise eval(exception)
| {
"content_hash": "09fd1d5e12cf6d79466cf7d8e3f10d0a",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 181,
"avg_line_length": 36.9375,
"alnum_prop": 0.5812833528569569,
"repo_name": "ethanrowe/python-data-packager",
"id": "77755712841496fd13aacfbf825d68a6bc2e30bc",
"size": "7683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_packager/test/packager_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18558"
}
],
"symlink_target": ""
} |
"""
HTML PRE-based UI implementation
"""
from urwid import util
from urwid.main_loop import ExitMainLoop
from urwid.display_common import AttrSpec, BaseScreen
# replace control characters with ?'s
_trans_table = "?" * 32 + "".join([chr(x) for x in range(32, 256)])
_default_foreground = 'black'
_default_background = 'light gray'
class HtmlGeneratorSimulationError(Exception):
pass
class HtmlGenerator(BaseScreen):
# class variables
fragments = []
sizes = []
keys = []
started = True
def __init__(self):
super(HtmlGenerator, self).__init__()
self.colors = 16
self.bright_is_bold = False # ignored
self.has_underline = True # ignored
self.register_palette_entry(None,
_default_foreground, _default_background)
def set_terminal_properties(self, colors=None, bright_is_bold=None,
has_underline=None):
if colors is None:
colors = self.colors
if bright_is_bold is None:
bright_is_bold = self.bright_is_bold
if has_underline is None:
has_underline = self.has_underline
self.colors = colors
self.bright_is_bold = bright_is_bold
self.has_underline = has_underline
def set_mouse_tracking(self, enable=True):
"""Not yet implemented"""
pass
def start(self):
pass
def stop(self):
pass
def set_input_timeouts(self, *args):
pass
def reset_default_terminal_palette(self, *args):
pass
def run_wrapper(self,fn):
"""Call fn."""
return fn()
    def draw_screen(self, size, r):
        """Create an html fragment from the render object.
        Append it to HtmlGenerator.fragments list.
        """
        (cols, rows) = size
l = []
assert r.rows() == rows
if r.cursor is not None:
cx, cy = r.cursor
else:
cx = cy = None
y = -1
for row in r.content():
y += 1
col = 0
for a, cs, run in row:
run = run.translate(_trans_table)
if isinstance(a, AttrSpec):
aspec = a
else:
aspec = self._palette[a][
{1: 1, 16: 0, 88:2, 256:3}[self.colors]]
if y == cy and col <= cx:
run_width = util.calc_width(run, 0,
len(run))
if col+run_width > cx:
l.append(html_span(run,
aspec, cx-col))
else:
l.append(html_span(run, aspec))
col += run_width
else:
l.append(html_span(run, aspec))
l.append("\n")
# add the fragment to the list
self.fragments.append( "<pre>%s</pre>" % "".join(l) )
def clear(self):
"""
Force the screen to be completely repainted on the next
call to draw_screen().
(does nothing for html_fragment)
"""
pass
def get_cols_rows(self):
"""Return the next screen size in HtmlGenerator.sizes."""
if not self.sizes:
raise HtmlGeneratorSimulationError("Ran out of screen sizes to return!")
return self.sizes.pop(0)
def get_input(self, raw_keys=False):
"""Return the next list of keypresses in HtmlGenerator.keys."""
if not self.keys:
raise ExitMainLoop()
if raw_keys:
return (self.keys.pop(0), [])
return self.keys.pop(0)
_default_aspec = AttrSpec(_default_foreground, _default_background)
(_d_fg_r, _d_fg_g, _d_fg_b, _d_bg_r, _d_bg_g, _d_bg_b) = (
_default_aspec.get_rgb_values())
def html_span(s, aspec, cursor = -1):
fg_r, fg_g, fg_b, bg_r, bg_g, bg_b = aspec.get_rgb_values()
# use real colours instead of default fg/bg
if fg_r is None:
fg_r, fg_g, fg_b = _d_fg_r, _d_fg_g, _d_fg_b
if bg_r is None:
bg_r, bg_g, bg_b = _d_bg_r, _d_bg_g, _d_bg_b
html_fg = "#%02x%02x%02x" % (fg_r, fg_g, fg_b)
html_bg = "#%02x%02x%02x" % (bg_r, bg_g, bg_b)
if aspec.standout:
html_fg, html_bg = html_bg, html_fg
extra = (";text-decoration:underline" * aspec.underline +
";font-weight:bold" * aspec.bold)
def html_span(fg, bg, s):
if not s: return ""
return ('<span style="color:%s;'
'background:%s%s">%s</span>' %
(fg, bg, extra, html_escape(s)))
if cursor >= 0:
c_off, _ign = util.calc_text_pos(s, 0, len(s), cursor)
c2_off = util.move_next_char(s, c_off, len(s))
return (html_span(html_fg, html_bg, s[:c_off]) +
html_span(html_bg, html_fg, s[c_off:c2_off]) +
html_span(html_fg, html_bg, s[c2_off:]))
else:
return html_span(html_fg, html_bg, s)
def html_escape(text):
"""Escape text so that it will be displayed safely within HTML"""
    text = text.replace('&', '&amp;')
    text = text.replace('<', '&lt;')
    text = text.replace('>', '&gt;')
return text
def screenshot_init( sizes, keys ):
"""
Replace curses_display.Screen and raw_display.Screen class with
HtmlGenerator.
Call this function before executing an application that uses
curses_display.Screen to have that code use HtmlGenerator instead.
sizes -- list of ( columns, rows ) tuples to be returned by each call
to HtmlGenerator.get_cols_rows()
keys -- list of lists of keys to be returned by each call to
HtmlGenerator.get_input()
Lists of keys may include "window resize" to force the application to
call get_cols_rows and read a new screen size.
For example, the following call will prepare an application to:
1. start in 80x25 with its first call to get_cols_rows()
2. take a screenshot when it calls draw_screen(..)
3. simulate 5 "down" keys from get_input()
4. take a screenshot when it calls draw_screen(..)
5. simulate keys "a", "b", "c" and a "window resize"
6. resize to 20x10 on its second call to get_cols_rows()
7. take a screenshot when it calls draw_screen(..)
8. simulate a "Q" keypress to quit the application
screenshot_init( [ (80,25), (20,10) ],
[ ["down"]*5, ["a","b","c","window resize"], ["Q"] ] )
"""
try:
        for (col, row) in sizes:
            assert type(col) == int and type(row) == int
            assert col > 0 and row > 0
except (AssertionError, ValueError):
raise Exception("sizes must be in the form [ (col1,row1), (col2,row2), ...]")
try:
for l in keys:
assert type(l) == list
for k in l:
assert type(k) == str
except (AssertionError, ValueError):
raise Exception("keys must be in the form [ [keyA1, keyA2, ..], [keyB1, ..], ...]")
from . import curses_display
curses_display.Screen = HtmlGenerator
from . import raw_display
raw_display.Screen = HtmlGenerator
HtmlGenerator.sizes = sizes
HtmlGenerator.keys = keys
def screenshot_collect():
"""Return screenshots as a list of HTML fragments."""
l = HtmlGenerator.fragments
HtmlGenerator.fragments = []
return l
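# A hedged end-to-end sketch; 'myapp' is a hypothetical module whose import
# side effect runs an urwid main loop:
#
#   screenshot_init([(80, 25)], [["Q"]])
#   import myapp                       # draws against HtmlGenerator
#   for fragment in screenshot_collect():
#       print fragment                 # one <pre> block per draw_screen()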
| {
"content_hash": "8c82ae99fb053787d7a5098baa20985f",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 91,
"avg_line_length": 31.705128205128204,
"alnum_prop": 0.5507480792559644,
"repo_name": "DarkPurpleShadow/ConnectFour",
"id": "b6785b5f1acef9a4e5d7beb0159d9a9572ffb20c",
"size": "8338",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "urwid/html_fragment.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "824326"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'tempo.users'
verbose_name = "Users"
def ready(self):
"""Override this to put in:
Users system checks
Users signal registration
"""
pass
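# A hedged sketch of what ready() typically contains; the signals module
# below is hypothetical for this app:
#
#   def ready(self):
#       from . import signals  # noqa -- importing registers receivers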
| {
"content_hash": "dc622f658fa5d3ce44ecf9dbd0365069",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 37,
"avg_line_length": 20.846153846153847,
"alnum_prop": 0.5867158671586716,
"repo_name": "EliotBerriot/tempo",
"id": "ca32c65a2e2f68b50e16606361264180f096af49",
"size": "271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempo/users/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "595412"
},
{
"name": "HTML",
"bytes": "74777"
},
{
"name": "JavaScript",
"bytes": "906006"
},
{
"name": "Nginx",
"bytes": "998"
},
{
"name": "Python",
"bytes": "127936"
},
{
"name": "Shell",
"bytes": "8049"
}
],
"symlink_target": ""
} |
import _thread
import time
class shadowManager:
def __init__(self, MQTTClient):
if MQTTClient is None:
raise ValueError("MQTT Client is none")
self._mqttClient = MQTTClient
self._subscribe_mutex = _thread.allocate_lock()
def getClientID(self):
return self._mqttClient.getClientID()
def _getDeltaTopic(self, shadowName):
return "$aws/things/" + str(shadowName) + "/shadow/update/delta"
def _getNonDeltaTopics(self, shadowName, actionName):
generalTopic = "$aws/things/" + str(shadowName) + "/shadow/" + str(actionName)
acceptTopic = "$aws/things/" + str(shadowName) + "/shadow/" + str(actionName) + "/accepted"
rejectTopic = "$aws/things/" + str(shadowName) + "/shadow/" + str(actionName) + "/rejected"
return (generalTopic, acceptTopic, rejectTopic)
def shadowPublish(self, shadowName, shadowAction, payload):
(generalTopic, acceptTopic, rejectTopic) = self._getNonDeltaTopics(shadowName, shadowAction)
self._mqttClient.publish(generalTopic, payload, 0, False)
def shadowSubscribe(self, shadowName, shadowAction, callback):
self._subscribe_mutex.acquire()
if shadowAction == "delta":
deltaTopic = self._getDeltaTopic(shadowName)
self._mqttClient.subscribe(deltaTopic, 0, callback)
else:
(generalTopic, acceptTopic, rejectTopic) = self._getNonDeltaTopics(shadowName, shadowAction)
self._mqttClient.subscribe(acceptTopic, 0, callback)
self._mqttClient.subscribe(rejectTopic, 0, callback)
time.sleep(2)
self._subscribe_mutex.release()
    def shadowUnsubscribe(self, srcShadowName, srcShadowAction):
        self._subscribe_mutex.acquire()
        if srcShadowAction == "delta":
            deltaTopic = self._getDeltaTopic(srcShadowName)
            self._mqttClient.unsubscribe(deltaTopic)
        else:
            (generalTopic, acceptTopic, rejectTopic) = self._getNonDeltaTopics(srcShadowName, srcShadowAction)
            self._mqttClient.unsubscribe(acceptTopic)
            self._mqttClient.unsubscribe(rejectTopic)
        self._subscribe_mutex.release()
def insertShadowCallback(self, callback, payload, status, token):
self._mqttClient.insertShadowCallback(callback, payload, status, token)
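# A hedged usage sketch. 'mqtt_client' is a hypothetical object that must
# provide getClientID/publish/subscribe/unsubscribe/insertShadowCallback,
# matching the calls made by shadowManager above:
#
#   mgr = shadowManager(mqtt_client)
#   mgr.shadowSubscribe('myThing', 'get', on_get_response)
#   mgr.shadowPublish('myThing', 'get', '{"clientToken": "token-1"}')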
| {
"content_hash": "ea862469f9b8e1b6efe419351a164042",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 104,
"avg_line_length": 43.836363636363636,
"alnum_prop": 0.6710908336789714,
"repo_name": "Xykon/pycom-micropython-sigfox",
"id": "a84631786915212189397b523fcd1d26a4368c03",
"size": "2411",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "esp32/frozen/MQTTShadowManager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "55179"
},
{
"name": "C",
"bytes": "32133296"
},
{
"name": "C++",
"bytes": "642137"
},
{
"name": "HTML",
"bytes": "84456"
},
{
"name": "Makefile",
"bytes": "104211"
},
{
"name": "Objective-C",
"bytes": "10903"
},
{
"name": "Python",
"bytes": "1000724"
},
{
"name": "Shell",
"bytes": "13441"
}
],
"symlink_target": ""
} |
from django.db import models
from people.models import User
from utils.behaviors import Permalinkable
class UserGroup(Permalinkable):
name = models.CharField(max_length=255, unique=True)
webpage_url = models.URLField(max_length=255, blank=True)
meetup_url = models.URLField(max_length=255, blank=True)
image = models.FileField(max_length=255, null=True, blank=True, upload_to='uploads/usergroups/')
is_active = models.BooleanField(default=True)
representatives = models.ManyToManyField(User, blank=True)
def __str__(self):
return self.name
| {
"content_hash": "ebab77afeb9888b9999e0be7bb481dc0",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 100,
"avg_line_length": 36.25,
"alnum_prop": 0.7379310344827587,
"repo_name": "WebCampZg/conference-web",
"id": "09d35d36dbc97425775ab7ccae0b75c8d7f90ac1",
"size": "580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "usergroups/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "131971"
},
{
"name": "JavaScript",
"bytes": "3928"
},
{
"name": "Makefile",
"bytes": "1944"
},
{
"name": "Python",
"bytes": "268738"
},
{
"name": "SCSS",
"bytes": "41619"
}
],
"symlink_target": ""
} |
"""
Tests for plotting.
"""
import unittest
from os.path import abspath, curdir, join
from pathlib import Path
import pyensembl
from agfusion import database, model
data = pyensembl.EnsemblRelease(84, "mouse")
db = database.AGFusionDB(abspath(join(curdir, "agfusion.mus_musculus.84.db")))
db.build = "mus_musculus_84"
data_human = pyensembl.EnsemblRelease(75, "human")
db_human = database.AGFusionDB(abspath(join(curdir, "agfusion.homo_sapiens.75.db")))
db_human.build = "homo_sapiens_75"
class TestSequencePredictionHuman(unittest.TestCase):
"""Test correctly predict human fusions"""
def test_1(self):
"""
test CDS and protein correct for junction that is on exon boundaries and
produces an out-of-frame protein.
"""
# test the dna and protein coding sequences are correct by comparing
# with manually generally sequences
fusion = model.Fusion(
gene5prime="TMEM87B",
gene5primejunction=112843681,
gene3prime="MERTK",
gene3primejunction=112722768,
db=db_human,
pyensembl_data=data_human,
protein_databases=["pfam"],
noncanonical=False,
)
fusion.save_images("DLG1-BRAF_mouse")
assert Path(
"DLG1-BRAF_mouse/ENST00000283206_ENST00000295408.png"
).exists(), "Could not save image."
| {
"content_hash": "9ca4e30e24f908b21020510af3e85913",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 84,
"avg_line_length": 29.0625,
"alnum_prop": 0.657347670250896,
"repo_name": "murphycj/AGFusion",
"id": "68588644096020fc09e1bd763f1b63715d5707ba",
"size": "1395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_plots.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "199632"
},
{
"name": "Shell",
"bytes": "1740"
}
],
"symlink_target": ""
} |
"""Tests for email output plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from future.builtins import range
from grr_response_core import config
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_server import email_alerts
from grr_response_server.output_plugins import email_plugin
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
class EmailOutputPluginTest(flow_test_lib.FlowTestsBaseclass):
"""Tests email output plugin."""
def setUp(self):
super(EmailOutputPluginTest, self).setUp()
self.hostname = "somehostname"
self.client_id = self.SetupClient(0, fqdn=self.hostname)
self.results_urn = rdf_client.ClientURN(self.client_id).Add("Results")
self.email_messages = []
self.email_address = "notify@%s" % config.CONFIG["Logging.domain"]
def ProcessResponses(self,
plugin_args=None,
responses=None,
process_responses_separately=False):
plugin_cls = email_plugin.EmailOutputPlugin
plugin, plugin_state = plugin_cls.CreatePluginAndDefaultState(
source_urn=self.results_urn, args=plugin_args, token=self.token)
messages = []
for response in responses:
messages.append(
rdf_flows.GrrMessage(source=self.client_id, payload=response))
def SendEmail(address, sender, title, message, **_):
self.email_messages.append(
dict(address=address, sender=sender, title=title, message=message))
with utils.Stubber(email_alerts.EMAIL_ALERTER, "SendEmail", SendEmail):
if process_responses_separately:
for message in messages:
plugin.ProcessResponses(plugin_state, [message])
else:
plugin.ProcessResponses(plugin_state, messages)
plugin.Flush(plugin_state)
plugin.UpdateState(plugin_state)
  def testEmailPluginSendsEmailPerEveryBatchOfResponses(self):
self.ProcessResponses(
plugin_args=email_plugin.EmailOutputPluginArgs(
email_address=self.email_address),
responses=[rdf_client.Process(pid=42)])
self.assertLen(self.email_messages, 1)
msg = self.email_messages[0]
self.assertEqual(msg["address"], self.email_address)
self.assertIn("got a new result in %s" % self.results_urn, msg["title"])
self.assertIn(self.client_id, msg["message"])
self.assertIn(self.hostname, msg["message"])
def testEmailPluginStopsSendingEmailsAfterLimitIsReached(self):
responses = [rdf_client.Process(pid=i) for i in range(11)]
self.ProcessResponses(
plugin_args=email_plugin.EmailOutputPluginArgs(
email_address=self.email_address, emails_limit=10),
responses=responses,
process_responses_separately=True)
self.assertLen(self.email_messages, 10)
for msg in self.email_messages:
self.assertEqual(msg["address"], self.email_address)
self.assertIn("got a new result in %s" % self.results_urn, msg["title"])
self.assertIn(self.client_id, msg["message"])
self.assertIn(self.hostname, msg["message"])
    for msg in self.email_messages[:9]:
      self.assertNotIn("sending of emails will be disabled now",
                       msg["message"])
    self.assertIn("sending of emails will be disabled now",
                  self.email_messages[9]["message"])
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| {
"content_hash": "6dd4dc661846e6917df4db8aa5403bed",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 78,
"avg_line_length": 35.81,
"alnum_prop": 0.6984082658475286,
"repo_name": "dunkhong/grr",
"id": "cdbd9f5d2a138e6f7e18d25ab3b7e8c7bff6fca6",
"size": "3603",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "grr/server/grr_response_server/output_plugins/email_plugin_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "882"
},
{
"name": "C",
"bytes": "11321"
},
{
"name": "C++",
"bytes": "54535"
},
{
"name": "CSS",
"bytes": "36745"
},
{
"name": "Dockerfile",
"bytes": "1822"
},
{
"name": "HCL",
"bytes": "8451"
},
{
"name": "HTML",
"bytes": "193751"
},
{
"name": "JavaScript",
"bytes": "12795"
},
{
"name": "Jupyter Notebook",
"bytes": "199190"
},
{
"name": "Makefile",
"bytes": "3139"
},
{
"name": "PowerShell",
"bytes": "1984"
},
{
"name": "Python",
"bytes": "7430923"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "49155"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TSQL",
"bytes": "10560"
},
{
"name": "TypeScript",
"bytes": "56756"
}
],
"symlink_target": ""
} |
"""Verify that all OSF Storage files have Glacier backups and parity files,
creating any missing backups.
TODO: Add check against Glacier inventory
Note: Must have par2 installed to run
"""
from __future__ import division
import gc
import os
import math
import thread
import hashlib
import logging
import pyrax
from modularodm import Q
from boto.glacier.layer2 import Layer2
from pyrax.exceptions import NoSuchObject
from framework.celery_tasks import app as celery_app
from website.app import init_app
from website.files import models
from scripts import utils as scripts_utils
from scripts.osfstorage import utils as storage_utils
from scripts.osfstorage import settings as storage_settings
container_primary = None
container_parity = None
vault = None
audit_temp_path = None
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
logging.getLogger('boto').setLevel(logging.CRITICAL)
def delete_temp_file(version):
path = os.path.join(audit_temp_path, version.location['object'])
try:
os.remove(path)
except OSError:
pass
def download_from_cloudfiles(version):
path = os.path.join(audit_temp_path, version.location['object'])
if os.path.exists(path):
# we cannot assume the file is valid and not from a previous failure.
delete_temp_file(version)
try:
obj = container_primary.get_object(version.location['object'])
with open(path, 'wb') as fp:
hasher = hashlib.sha256()
fetcher = obj.fetch(chunk_size=262144000) # 256mb chunks
while True:
try:
chunk = next(fetcher)
except StopIteration:
break
hasher.update(chunk)
fp.write(chunk)
if hasher.hexdigest() != version.metadata['sha256']:
raise Exception('SHA256 mismatch, cannot continue')
return path
except NoSuchObject as err:
logger.error('*** FILE NOT FOUND ***')
logger.error('Exception:')
logger.exception(err)
logger.error('Version info:')
logger.error(version.to_storage())
return None
def ensure_glacier(version, dry_run):
if version.metadata.get('archive'):
return
logger.warn('Glacier archive for version {0} not found'.format(version._id))
if dry_run:
return
file_path = download_from_cloudfiles(version)
if file_path:
glacier_id = vault.upload_archive(file_path, description=version.location['object'])
version.metadata['archive'] = glacier_id
version.save()
def check_parity_files(version):
index = list(container_parity.list_all(prefix='{0}.par2'.format(version.location['object'])))
vols = list(container_parity.list_all(prefix='{0}.vol'.format(version.location['object'])))
return len(index) == 1 and len(vols) >= 1
def ensure_parity(version, dry_run):
if check_parity_files(version):
return
logger.warn('Parity files for version {0} not found'.format(version._id))
if dry_run:
return
file_path = download_from_cloudfiles(version)
if file_path:
parity_paths = storage_utils.create_parity_files(file_path)
for parity_path in parity_paths:
container_parity.create(parity_path)
os.remove(parity_path)
if not check_parity_files(version):
logger.error('Parity files for version {0} not found after update'.format(version._id))
def ensure_backups(version, dry_run):
ensure_glacier(version, dry_run)
ensure_parity(version, dry_run)
delete_temp_file(version)
def glacier_targets():
return models.FileVersion.find(
Q('status', 'ne', 'cached') &
Q('location.object', 'exists', True) &
Q('metadata.archive', 'eq', None)
)
def parity_targets():
# TODO: Add metadata.parity information from wb so we do not need to check remote services
return models.FileVersion.find(
Q('status', 'ne', 'cached') &
Q('location.object', 'exists', True)
# & Q('metadata.parity', 'eq', None)
)
def audit(targets, nworkers, worker_id, dry_run):
maxval = math.ceil(targets.count() / nworkers)
idx = 0
last_progress = -1
for version in targets:
if hash(version._id) % nworkers == worker_id:
if version.size == 0:
continue
ensure_backups(version, dry_run)
idx += 1
progress = int(idx / maxval * 100)
if last_progress < 100 and last_progress < progress:
logger.info(str(progress) + '%')
last_progress = progress
# clear modm cache so we don't run out of memory from the cursor enumeration
models.FileVersion._cache.clear()
models.FileVersion._object_cache.clear()
gc.collect()
def main(nworkers, worker_id, dry_run):
logger.info('glacier audit start')
audit(glacier_targets(), nworkers, worker_id, dry_run)
logger.info('glacier audit complete')
logger.info('parity audit start')
audit(parity_targets(), nworkers, worker_id, dry_run)
logger.info('parity audit complete')
@celery_app.task(name='scripts.osfstorage.files_audit_0')
def file_audit_1(num_of_workers=4, dry_run=True):
run_main(num_of_workers, 0, dry_run)
@celery_app.task(name='scripts.osfstorage.files_audit_1')
def file_audit_2(num_of_workers=4, dry_run=True):
run_main(num_of_workers, 1, dry_run)
@celery_app.task(name='scripts.osfstorage.files_audit_2')
def file_audit_3(num_of_workers=4, dry_run=True):
run_main(num_of_workers, 2, dry_run)
@celery_app.task(name='scripts.osfstorage.files_audit_3')
def file_audit_4(num_of_workers=4, dry_run=True):
run_main(num_of_workers, 3, dry_run)
def run_main(num_of_workers, worker_id, dry_run):
    # These handles are module-level globals used by the audit helpers above.
    global container_primary, container_parity, vault, audit_temp_path
    # Set up storage backends
    init_app(set_backends=True, routes=False)
try:
# Authenticate to Rackspace
pyrax.settings.set('identity_type', 'rackspace')
pyrax.set_credentials(
storage_settings.USERNAME,
storage_settings.API_KEY,
region=storage_settings.REGION
)
container_primary = pyrax.cloudfiles.get_container(storage_settings.PRIMARY_CONTAINER_NAME)
container_parity = pyrax.cloudfiles.get_container(storage_settings.PARITY_CONTAINER_NAME)
# Connect to AWS
layer2 = Layer2(
aws_access_key_id=storage_settings.AWS_ACCESS_KEY,
aws_secret_access_key=storage_settings.AWS_SECRET_KEY,
)
vault = layer2.get_vault(storage_settings.GLACIER_VAULT)
# Log to file
if not dry_run:
scripts_utils.add_file_logger(logger, __file__, suffix=worker_id)
audit_temp_path = os.path.join(storage_settings.AUDIT_TEMP_PATH, str(worker_id))
try:
os.makedirs(audit_temp_path)
except OSError:
pass
main(num_of_workers, worker_id, dry_run)
except Exception as err:
logger.error('=== Unexpected Error ===')
logger.exception(err)
raise err
| {
"content_hash": "beea3e7da367cfefd2a73908e6f637a6",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 99,
"avg_line_length": 32.00892857142857,
"alnum_prop": 0.6443514644351465,
"repo_name": "zachjanicki/osf.io",
"id": "9d266e7d39f61b942b4a872ac3408884cf661f16",
"size": "7211",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "scripts/osfstorage/files_audit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "145253"
},
{
"name": "HTML",
"bytes": "107077"
},
{
"name": "JavaScript",
"bytes": "1579614"
},
{
"name": "Mako",
"bytes": "666549"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "5326873"
}
],
"symlink_target": ""
} |
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._azure_digital_twins_management_client_enums import *
class CheckNameRequest(msrest.serialization.Model):
"""The result returned from a database check name availability request.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. Resource name.
:vartype name: str
:ivar type: The type of resource, for instance Microsoft.DigitalTwins/digitalTwinsInstances.
Has constant value: "Microsoft.DigitalTwins/digitalTwinsInstances".
:vartype type: str
"""
_validation = {
'name': {'required': True},
'type': {'required': True, 'constant': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
type = "Microsoft.DigitalTwins/digitalTwinsInstances"
def __init__(
self,
*,
name: str,
**kwargs
):
"""
:keyword name: Required. Resource name.
:paramtype name: str
"""
super(CheckNameRequest, self).__init__(**kwargs)
self.name = name
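# A hedged usage sketch; the instance name below is a placeholder:
#
#   request = CheckNameRequest(name='my-digital-twins-instance')
#   # request.type is the class constant
#   # "Microsoft.DigitalTwins/digitalTwinsInstances"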
class CheckNameResult(msrest.serialization.Model):
"""The result returned from a check name availability request.
:ivar name_available: Specifies a Boolean value that indicates if the name is available.
:vartype name_available: bool
:ivar message: Message indicating an unavailable name due to a conflict, or a description of
the naming rules that are violated.
:vartype message: str
:ivar reason: Message providing the reason why the given name is invalid. Possible values
include: "Invalid", "AlreadyExists".
:vartype reason: str or ~azure.mgmt.digitaltwins.v2020_12_01.models.Reason
"""
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'message': {'key': 'message', 'type': 'str'},
'reason': {'key': 'reason', 'type': 'str'},
}
def __init__(
self,
*,
name_available: Optional[bool] = None,
message: Optional[str] = None,
reason: Optional[Union[str, "Reason"]] = None,
**kwargs
):
"""
:keyword name_available: Specifies a Boolean value that indicates if the name is available.
:paramtype name_available: bool
:keyword message: Message indicating an unavailable name due to a conflict, or a description of
the naming rules that are violated.
:paramtype message: str
:keyword reason: Message providing the reason why the given name is invalid. Possible values
include: "Invalid", "AlreadyExists".
:paramtype reason: str or ~azure.mgmt.digitaltwins.v2020_12_01.models.Reason
"""
super(CheckNameResult, self).__init__(**kwargs)
self.name_available = name_available
self.message = message
self.reason = reason
class ConnectionProperties(msrest.serialization.Model):
"""The properties of a private endpoint connection.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provisioning_state: The provisioning state. Possible values include: "Pending",
"Approved", "Rejected", "Disconnected".
:vartype provisioning_state: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.ConnectionPropertiesProvisioningState
:ivar private_endpoint:
:vartype private_endpoint:
~azure.mgmt.digitaltwins.v2020_12_01.models.ConnectionPropertiesPrivateEndpoint
:ivar group_ids: The list of group ids for the private endpoint connection.
:vartype group_ids: list[str]
:ivar private_link_service_connection_state:
:vartype private_link_service_connection_state:
~azure.mgmt.digitaltwins.v2020_12_01.models.ConnectionPropertiesPrivateLinkServiceConnectionState
"""
_validation = {
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'private_endpoint': {'key': 'privateEndpoint', 'type': 'ConnectionPropertiesPrivateEndpoint'},
'group_ids': {'key': 'groupIds', 'type': '[str]'},
'private_link_service_connection_state': {'key': 'privateLinkServiceConnectionState', 'type': 'ConnectionPropertiesPrivateLinkServiceConnectionState'},
}
def __init__(
self,
*,
private_endpoint: Optional["ConnectionPropertiesPrivateEndpoint"] = None,
group_ids: Optional[List[str]] = None,
private_link_service_connection_state: Optional["ConnectionPropertiesPrivateLinkServiceConnectionState"] = None,
**kwargs
):
"""
:keyword private_endpoint:
:paramtype private_endpoint:
~azure.mgmt.digitaltwins.v2020_12_01.models.ConnectionPropertiesPrivateEndpoint
:keyword group_ids: The list of group ids for the private endpoint connection.
:paramtype group_ids: list[str]
:keyword private_link_service_connection_state:
:paramtype private_link_service_connection_state:
~azure.mgmt.digitaltwins.v2020_12_01.models.ConnectionPropertiesPrivateLinkServiceConnectionState
"""
super(ConnectionProperties, self).__init__(**kwargs)
self.provisioning_state = None
self.private_endpoint = private_endpoint
self.group_ids = group_ids
self.private_link_service_connection_state = private_link_service_connection_state
class PrivateEndpoint(msrest.serialization.Model):
"""The private endpoint property of a private endpoint connection.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource identifier.
:vartype id: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(PrivateEndpoint, self).__init__(**kwargs)
self.id = None
class ConnectionPropertiesPrivateEndpoint(PrivateEndpoint):
"""ConnectionPropertiesPrivateEndpoint.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource identifier.
:vartype id: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ConnectionPropertiesPrivateEndpoint, self).__init__(**kwargs)
class ConnectionState(msrest.serialization.Model):
"""The current state of a private endpoint connection.
All required parameters must be populated in order to send to Azure.
:ivar status: Required. The status of a private endpoint connection. Possible values include:
"Pending", "Approved", "Rejected", "Disconnected".
:vartype status: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.PrivateLinkServiceConnectionStatus
:ivar description: Required. The description for the current state of a private endpoint
connection.
:vartype description: str
:ivar actions_required: Actions required for a private endpoint connection.
:vartype actions_required: str
"""
_validation = {
'status': {'required': True},
'description': {'required': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'actions_required': {'key': 'actionsRequired', 'type': 'str'},
}
def __init__(
self,
*,
status: Union[str, "PrivateLinkServiceConnectionStatus"],
description: str,
actions_required: Optional[str] = None,
**kwargs
):
"""
:keyword status: Required. The status of a private endpoint connection. Possible values
include: "Pending", "Approved", "Rejected", "Disconnected".
:paramtype status: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.PrivateLinkServiceConnectionStatus
:keyword description: Required. The description for the current state of a private endpoint
connection.
:paramtype description: str
:keyword actions_required: Actions required for a private endpoint connection.
:paramtype actions_required: str
"""
super(ConnectionState, self).__init__(**kwargs)
self.status = status
self.description = description
self.actions_required = actions_required
class ConnectionPropertiesPrivateLinkServiceConnectionState(ConnectionState):
"""ConnectionPropertiesPrivateLinkServiceConnectionState.
All required parameters must be populated in order to send to Azure.
:ivar status: Required. The status of a private endpoint connection. Possible values include:
"Pending", "Approved", "Rejected", "Disconnected".
:vartype status: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.PrivateLinkServiceConnectionStatus
:ivar description: Required. The description for the current state of a private endpoint
connection.
:vartype description: str
:ivar actions_required: Actions required for a private endpoint connection.
:vartype actions_required: str
"""
_validation = {
'status': {'required': True},
'description': {'required': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'actions_required': {'key': 'actionsRequired', 'type': 'str'},
}
def __init__(
self,
*,
status: Union[str, "PrivateLinkServiceConnectionStatus"],
description: str,
actions_required: Optional[str] = None,
**kwargs
):
"""
:keyword status: Required. The status of a private endpoint connection. Possible values
include: "Pending", "Approved", "Rejected", "Disconnected".
:paramtype status: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.PrivateLinkServiceConnectionStatus
:keyword description: Required. The description for the current state of a private endpoint
connection.
:paramtype description: str
:keyword actions_required: Actions required for a private endpoint connection.
:paramtype actions_required: str
"""
super(ConnectionPropertiesPrivateLinkServiceConnectionState, self).__init__(status=status, description=description, actions_required=actions_required, **kwargs)
class DigitalTwinsResource(msrest.serialization.Model):
"""The common properties of a DigitalTwinsInstance.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar location: Required. The resource location.
:vartype location: str
:ivar tags: A set of tags. The resource tags.
:vartype tags: dict[str, str]
:ivar identity: The managed identity for the DigitalTwinsInstance.
:vartype identity: ~azure.mgmt.digitaltwins.v2020_12_01.models.DigitalTwinsIdentity
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True, 'pattern': r'^(?!-)[A-Za-z0-9-]{3,63}(?<!-)$'},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'DigitalTwinsIdentity'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
identity: Optional["DigitalTwinsIdentity"] = None,
**kwargs
):
"""
:keyword location: Required. The resource location.
:paramtype location: str
:keyword tags: A set of tags. The resource tags.
:paramtype tags: dict[str, str]
:keyword identity: The managed identity for the DigitalTwinsInstance.
:paramtype identity: ~azure.mgmt.digitaltwins.v2020_12_01.models.DigitalTwinsIdentity
"""
super(DigitalTwinsResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
self.identity = identity
class DigitalTwinsDescription(DigitalTwinsResource):
"""The description of the DigitalTwins service.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar location: Required. The resource location.
:vartype location: str
:ivar tags: A set of tags. The resource tags.
:vartype tags: dict[str, str]
:ivar identity: The managed identity for the DigitalTwinsInstance.
:vartype identity: ~azure.mgmt.digitaltwins.v2020_12_01.models.DigitalTwinsIdentity
:ivar created_time: Time when DigitalTwinsInstance was created.
:vartype created_time: ~datetime.datetime
:ivar last_updated_time: Time when DigitalTwinsInstance was updated.
:vartype last_updated_time: ~datetime.datetime
:ivar provisioning_state: The provisioning state. Possible values include: "Provisioning",
"Deleting", "Updating", "Succeeded", "Failed", "Canceled", "Deleted", "Warning", "Suspending",
"Restoring", "Moving".
:vartype provisioning_state: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.ProvisioningState
:ivar host_name: Api endpoint to work with DigitalTwinsInstance.
:vartype host_name: str
:ivar private_endpoint_connections:
:vartype private_endpoint_connections:
list[~azure.mgmt.digitaltwins.v2020_12_01.models.PrivateEndpointConnection]
:ivar public_network_access: Public network access for the DigitalTwinsInstance. Possible
values include: "Enabled", "Disabled".
:vartype public_network_access: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.PublicNetworkAccess
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True, 'pattern': r'^(?!-)[A-Za-z0-9-]{3,63}(?<!-)$'},
'type': {'readonly': True},
'location': {'required': True},
'created_time': {'readonly': True},
'last_updated_time': {'readonly': True},
'provisioning_state': {'readonly': True},
'host_name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'DigitalTwinsIdentity'},
'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
'last_updated_time': {'key': 'properties.lastUpdatedTime', 'type': 'iso-8601'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'host_name': {'key': 'properties.hostName', 'type': 'str'},
'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
identity: Optional["DigitalTwinsIdentity"] = None,
private_endpoint_connections: Optional[List["PrivateEndpointConnection"]] = None,
public_network_access: Optional[Union[str, "PublicNetworkAccess"]] = None,
**kwargs
):
"""
:keyword location: Required. The resource location.
:paramtype location: str
:keyword tags: A set of tags. The resource tags.
:paramtype tags: dict[str, str]
:keyword identity: The managed identity for the DigitalTwinsInstance.
:paramtype identity: ~azure.mgmt.digitaltwins.v2020_12_01.models.DigitalTwinsIdentity
:keyword private_endpoint_connections:
:paramtype private_endpoint_connections:
list[~azure.mgmt.digitaltwins.v2020_12_01.models.PrivateEndpointConnection]
:keyword public_network_access: Public network access for the DigitalTwinsInstance. Possible
values include: "Enabled", "Disabled".
:paramtype public_network_access: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.PublicNetworkAccess
"""
super(DigitalTwinsDescription, self).__init__(location=location, tags=tags, identity=identity, **kwargs)
self.created_time = None
self.last_updated_time = None
self.provisioning_state = None
self.host_name = None
self.private_endpoint_connections = private_endpoint_connections
self.public_network_access = public_network_access
class DigitalTwinsDescriptionListResult(msrest.serialization.Model):
"""A list of DigitalTwins description objects with a next link.
:ivar next_link: The link used to get the next page of DigitalTwins description objects.
:vartype next_link: str
:ivar value: A list of DigitalTwins description objects.
:vartype value: list[~azure.mgmt.digitaltwins.v2020_12_01.models.DigitalTwinsDescription]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[DigitalTwinsDescription]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
value: Optional[List["DigitalTwinsDescription"]] = None,
**kwargs
):
"""
:keyword next_link: The link used to get the next page of DigitalTwins description objects.
:paramtype next_link: str
:keyword value: A list of DigitalTwins description objects.
:paramtype value: list[~azure.mgmt.digitaltwins.v2020_12_01.models.DigitalTwinsDescription]
"""
super(DigitalTwinsDescriptionListResult, self).__init__(**kwargs)
self.next_link = next_link
self.value = value
class ExternalResource(msrest.serialization.Model):
"""Definition of a resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: Extension resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ExternalResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class DigitalTwinsEndpointResource(ExternalResource):
"""DigitalTwinsInstance endpoint resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: Extension resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar properties: Required. DigitalTwinsInstance endpoint resource properties.
:vartype properties:
~azure.mgmt.digitaltwins.v2020_12_01.models.DigitalTwinsEndpointResourceProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'},
'type': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'DigitalTwinsEndpointResourceProperties'},
}
def __init__(
self,
*,
properties: "DigitalTwinsEndpointResourceProperties",
**kwargs
):
"""
:keyword properties: Required. DigitalTwinsInstance endpoint resource properties.
:paramtype properties:
~azure.mgmt.digitaltwins.v2020_12_01.models.DigitalTwinsEndpointResourceProperties
"""
super(DigitalTwinsEndpointResource, self).__init__(**kwargs)
self.properties = properties
class DigitalTwinsEndpointResourceListResult(msrest.serialization.Model):
"""A list of DigitalTwinsInstance Endpoints with a next link.
:ivar next_link: The link used to get the next page of DigitalTwinsInstance Endpoints.
:vartype next_link: str
:ivar value: A list of DigitalTwinsInstance Endpoints.
:vartype value: list[~azure.mgmt.digitaltwins.v2020_12_01.models.DigitalTwinsEndpointResource]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[DigitalTwinsEndpointResource]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
value: Optional[List["DigitalTwinsEndpointResource"]] = None,
**kwargs
):
"""
:keyword next_link: The link used to get the next page of DigitalTwinsInstance Endpoints.
:paramtype next_link: str
:keyword value: A list of DigitalTwinsInstance Endpoints.
:paramtype value:
list[~azure.mgmt.digitaltwins.v2020_12_01.models.DigitalTwinsEndpointResource]
"""
super(DigitalTwinsEndpointResourceListResult, self).__init__(**kwargs)
self.next_link = next_link
self.value = value
class DigitalTwinsEndpointResourceProperties(msrest.serialization.Model):
"""Properties related to Digital Twins Endpoint.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: EventGrid, EventHub, ServiceBus.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
    :ivar endpoint_type: Required. The type of Digital Twins endpoint. Constant filled by server.
Possible values include: "EventHub", "EventGrid", "ServiceBus".
:vartype endpoint_type: str or ~azure.mgmt.digitaltwins.v2020_12_01.models.EndpointType
:ivar provisioning_state: The provisioning state. Possible values include: "Provisioning",
"Deleting", "Succeeded", "Failed", "Canceled", "Deleted", "Warning", "Suspending", "Restoring",
"Moving", "Disabled".
:vartype provisioning_state: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.EndpointProvisioningState
:ivar created_time: Time when the Endpoint was added to DigitalTwinsInstance.
:vartype created_time: ~datetime.datetime
:ivar authentication_type: Specifies the authentication type being used for connecting to the
endpoint. Possible values include: "KeyBased", "IdentityBased".
:vartype authentication_type: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.AuthenticationType
:ivar dead_letter_secret: Dead letter storage secret for key-based authentication. Will be
obfuscated during read.
:vartype dead_letter_secret: str
:ivar dead_letter_uri: Dead letter storage URL for identity-based authentication.
:vartype dead_letter_uri: str
"""
_validation = {
'endpoint_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_time': {'readonly': True},
}
_attribute_map = {
'endpoint_type': {'key': 'endpointType', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'dead_letter_secret': {'key': 'deadLetterSecret', 'type': 'str'},
'dead_letter_uri': {'key': 'deadLetterUri', 'type': 'str'},
}
_subtype_map = {
'endpoint_type': {'EventGrid': 'EventGrid', 'EventHub': 'EventHub', 'ServiceBus': 'ServiceBus'}
}
def __init__(
self,
*,
authentication_type: Optional[Union[str, "AuthenticationType"]] = None,
dead_letter_secret: Optional[str] = None,
dead_letter_uri: Optional[str] = None,
**kwargs
):
"""
:keyword authentication_type: Specifies the authentication type being used for connecting to
the endpoint. Possible values include: "KeyBased", "IdentityBased".
:paramtype authentication_type: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.AuthenticationType
:keyword dead_letter_secret: Dead letter storage secret for key-based authentication. Will be
obfuscated during read.
:paramtype dead_letter_secret: str
:keyword dead_letter_uri: Dead letter storage URL for identity-based authentication.
:paramtype dead_letter_uri: str
"""
super(DigitalTwinsEndpointResourceProperties, self).__init__(**kwargs)
self.endpoint_type = None # type: Optional[str]
self.provisioning_state = None
self.created_time = None
self.authentication_type = authentication_type
self.dead_letter_secret = dead_letter_secret
self.dead_letter_uri = dead_letter_uri
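# Note on the _subtype_map above: during deserialization msrest reads the
# endpointType discriminator and instantiates the matching subclass. A
# minimal hypothetical sketch (payload values are made up):
#
#     props = DigitalTwinsEndpointResourceProperties.deserialize(
#         {"endpointType": "EventHub", "endpointUri": "sb://example"}
#     )
#     assert isinstance(props, EventHub)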
class DigitalTwinsIdentity(msrest.serialization.Model):
"""The managed identity for the DigitalTwinsInstance.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The type of Managed Identity used by the DigitalTwinsInstance. Only SystemAssigned
is supported. Possible values include: "None", "SystemAssigned".
:vartype type: str or ~azure.mgmt.digitaltwins.v2020_12_01.models.DigitalTwinsIdentityType
:ivar principal_id: The object id of the Managed Identity Resource. This will be sent to the RP
from ARM via the x-ms-identity-principal-id header in the PUT request if the resource has a
systemAssigned(implicit) identity.
:vartype principal_id: str
:ivar tenant_id: The tenant id of the Managed Identity Resource. This will be sent to the RP
from ARM via the x-ms-client-tenant-id header in the PUT request if the resource has a
systemAssigned(implicit) identity.
:vartype tenant_id: str
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[Union[str, "DigitalTwinsIdentityType"]] = None,
**kwargs
):
"""
:keyword type: The type of Managed Identity used by the DigitalTwinsInstance. Only
SystemAssigned is supported. Possible values include: "None", "SystemAssigned".
:paramtype type: str or ~azure.mgmt.digitaltwins.v2020_12_01.models.DigitalTwinsIdentityType
"""
super(DigitalTwinsIdentity, self).__init__(**kwargs)
self.type = type
self.principal_id = None
self.tenant_id = None
class DigitalTwinsPatchDescription(msrest.serialization.Model):
"""The description of the DigitalTwins service.
:ivar tags: A set of tags. Instance patch properties.
:vartype tags: dict[str, str]
:ivar identity: The managed identity for the DigitalTwinsInstance.
:vartype identity: ~azure.mgmt.digitaltwins.v2020_12_01.models.DigitalTwinsIdentity
:ivar properties: Properties for the DigitalTwinsInstance.
:vartype properties: ~azure.mgmt.digitaltwins.v2020_12_01.models.DigitalTwinsPatchProperties
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'DigitalTwinsIdentity'},
'properties': {'key': 'properties', 'type': 'DigitalTwinsPatchProperties'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
identity: Optional["DigitalTwinsIdentity"] = None,
properties: Optional["DigitalTwinsPatchProperties"] = None,
**kwargs
):
"""
:keyword tags: A set of tags. Instance patch properties.
:paramtype tags: dict[str, str]
:keyword identity: The managed identity for the DigitalTwinsInstance.
:paramtype identity: ~azure.mgmt.digitaltwins.v2020_12_01.models.DigitalTwinsIdentity
:keyword properties: Properties for the DigitalTwinsInstance.
:paramtype properties: ~azure.mgmt.digitaltwins.v2020_12_01.models.DigitalTwinsPatchProperties
"""
super(DigitalTwinsPatchDescription, self).__init__(**kwargs)
self.tags = tags
self.identity = identity
self.properties = properties
class DigitalTwinsPatchProperties(msrest.serialization.Model):
"""The properties of a DigitalTwinsInstance.
:ivar public_network_access: Public network access for the DigitalTwinsInstance. Possible
values include: "Enabled", "Disabled".
:vartype public_network_access: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.PublicNetworkAccess
"""
_attribute_map = {
'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'},
}
def __init__(
self,
*,
public_network_access: Optional[Union[str, "PublicNetworkAccess"]] = None,
**kwargs
):
"""
:keyword public_network_access: Public network access for the DigitalTwinsInstance. Possible
values include: "Enabled", "Disabled".
:paramtype public_network_access: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.PublicNetworkAccess
"""
super(DigitalTwinsPatchProperties, self).__init__(**kwargs)
self.public_network_access = public_network_access
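# Illustrative sketch (hypothetical values): a PATCH body that disables
# public network access while updating tags, combining the two models above.
#
#     patch = DigitalTwinsPatchDescription(
#         tags={"env": "prod"},
#         properties=DigitalTwinsPatchProperties(
#             public_network_access="Disabled",
#         ),
#     )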
class ErrorDefinition(msrest.serialization.Model):
"""Error definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: Service specific error code which serves as the substatus for the HTTP error code.
:vartype code: str
:ivar message: Description of the error.
:vartype message: str
:ivar details: Internal error details.
:vartype details: list[~azure.mgmt.digitaltwins.v2020_12_01.models.ErrorDefinition]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'details': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDefinition]'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ErrorDefinition, self).__init__(**kwargs)
self.code = None
self.message = None
self.details = None
class ErrorResponse(msrest.serialization.Model):
"""Error response.
:ivar error: Error description.
:vartype error: ~azure.mgmt.digitaltwins.v2020_12_01.models.ErrorDefinition
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDefinition'},
}
def __init__(
self,
*,
error: Optional["ErrorDefinition"] = None,
**kwargs
):
"""
:keyword error: Error description.
:paramtype error: ~azure.mgmt.digitaltwins.v2020_12_01.models.ErrorDefinition
"""
super(ErrorResponse, self).__init__(**kwargs)
self.error = error
class EventGrid(DigitalTwinsEndpointResourceProperties):
"""Properties related to EventGrid.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
    :ivar endpoint_type: Required. The type of Digital Twins endpoint. Constant filled by server.
Possible values include: "EventHub", "EventGrid", "ServiceBus".
:vartype endpoint_type: str or ~azure.mgmt.digitaltwins.v2020_12_01.models.EndpointType
:ivar provisioning_state: The provisioning state. Possible values include: "Provisioning",
"Deleting", "Succeeded", "Failed", "Canceled", "Deleted", "Warning", "Suspending", "Restoring",
"Moving", "Disabled".
:vartype provisioning_state: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.EndpointProvisioningState
:ivar created_time: Time when the Endpoint was added to DigitalTwinsInstance.
:vartype created_time: ~datetime.datetime
:ivar authentication_type: Specifies the authentication type being used for connecting to the
endpoint. Possible values include: "KeyBased", "IdentityBased".
:vartype authentication_type: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.AuthenticationType
:ivar dead_letter_secret: Dead letter storage secret for key-based authentication. Will be
obfuscated during read.
:vartype dead_letter_secret: str
:ivar dead_letter_uri: Dead letter storage URL for identity-based authentication.
:vartype dead_letter_uri: str
:ivar topic_endpoint: Required. EventGrid Topic Endpoint.
:vartype topic_endpoint: str
    :ivar access_key1: Required. EventGrid primary accesskey. Will be obfuscated during read.
:vartype access_key1: str
:ivar access_key2: EventGrid secondary accesskey. Will be obfuscated during read.
:vartype access_key2: str
"""
_validation = {
'endpoint_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_time': {'readonly': True},
'topic_endpoint': {'required': True},
'access_key1': {'required': True},
}
_attribute_map = {
'endpoint_type': {'key': 'endpointType', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'dead_letter_secret': {'key': 'deadLetterSecret', 'type': 'str'},
'dead_letter_uri': {'key': 'deadLetterUri', 'type': 'str'},
'topic_endpoint': {'key': 'TopicEndpoint', 'type': 'str'},
'access_key1': {'key': 'accessKey1', 'type': 'str'},
'access_key2': {'key': 'accessKey2', 'type': 'str'},
}
def __init__(
self,
*,
topic_endpoint: str,
access_key1: str,
authentication_type: Optional[Union[str, "AuthenticationType"]] = None,
dead_letter_secret: Optional[str] = None,
dead_letter_uri: Optional[str] = None,
access_key2: Optional[str] = None,
**kwargs
):
"""
:keyword authentication_type: Specifies the authentication type being used for connecting to
the endpoint. Possible values include: "KeyBased", "IdentityBased".
:paramtype authentication_type: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.AuthenticationType
:keyword dead_letter_secret: Dead letter storage secret for key-based authentication. Will be
obfuscated during read.
:paramtype dead_letter_secret: str
:keyword dead_letter_uri: Dead letter storage URL for identity-based authentication.
:paramtype dead_letter_uri: str
:keyword topic_endpoint: Required. EventGrid Topic Endpoint.
:paramtype topic_endpoint: str
        :keyword access_key1: Required. EventGrid primary accesskey. Will be obfuscated during read.
:paramtype access_key1: str
:keyword access_key2: EventGrid secondary accesskey. Will be obfuscated during read.
:paramtype access_key2: str
"""
super(EventGrid, self).__init__(authentication_type=authentication_type, dead_letter_secret=dead_letter_secret, dead_letter_uri=dead_letter_uri, **kwargs)
self.endpoint_type = 'EventGrid' # type: str
self.topic_endpoint = topic_endpoint
self.access_key1 = access_key1
self.access_key2 = access_key2
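# Illustrative sketch (hypothetical values): an EventGrid endpoint uses
# key-based authentication, so both required fields are an endpoint and a
# key rather than an identity.
#
#     endpoint = EventGrid(
#         topic_endpoint="https://example.eventgrid.azure.net",
#         access_key1="<key>",
#     )
#     resource = DigitalTwinsEndpointResource(properties=endpoint)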
class EventHub(DigitalTwinsEndpointResourceProperties):
"""Properties related to EventHub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
    :ivar endpoint_type: Required. The type of Digital Twins endpoint. Constant filled by server.
Possible values include: "EventHub", "EventGrid", "ServiceBus".
:vartype endpoint_type: str or ~azure.mgmt.digitaltwins.v2020_12_01.models.EndpointType
:ivar provisioning_state: The provisioning state. Possible values include: "Provisioning",
"Deleting", "Succeeded", "Failed", "Canceled", "Deleted", "Warning", "Suspending", "Restoring",
"Moving", "Disabled".
:vartype provisioning_state: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.EndpointProvisioningState
:ivar created_time: Time when the Endpoint was added to DigitalTwinsInstance.
:vartype created_time: ~datetime.datetime
:ivar authentication_type: Specifies the authentication type being used for connecting to the
endpoint. Possible values include: "KeyBased", "IdentityBased".
:vartype authentication_type: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.AuthenticationType
:ivar dead_letter_secret: Dead letter storage secret for key-based authentication. Will be
obfuscated during read.
:vartype dead_letter_secret: str
:ivar dead_letter_uri: Dead letter storage URL for identity-based authentication.
:vartype dead_letter_uri: str
:ivar connection_string_primary_key: PrimaryConnectionString of the endpoint for key-based
authentication. Will be obfuscated during read.
:vartype connection_string_primary_key: str
:ivar connection_string_secondary_key: SecondaryConnectionString of the endpoint for key-based
authentication. Will be obfuscated during read.
:vartype connection_string_secondary_key: str
:ivar endpoint_uri: The URL of the EventHub namespace for identity-based authentication. It
must include the protocol sb://.
:vartype endpoint_uri: str
:ivar entity_path: The EventHub name in the EventHub namespace for identity-based
authentication.
:vartype entity_path: str
"""
_validation = {
'endpoint_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_time': {'readonly': True},
}
_attribute_map = {
'endpoint_type': {'key': 'endpointType', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'dead_letter_secret': {'key': 'deadLetterSecret', 'type': 'str'},
'dead_letter_uri': {'key': 'deadLetterUri', 'type': 'str'},
'connection_string_primary_key': {'key': 'connectionStringPrimaryKey', 'type': 'str'},
'connection_string_secondary_key': {'key': 'connectionStringSecondaryKey', 'type': 'str'},
'endpoint_uri': {'key': 'endpointUri', 'type': 'str'},
'entity_path': {'key': 'entityPath', 'type': 'str'},
}
def __init__(
self,
*,
authentication_type: Optional[Union[str, "AuthenticationType"]] = None,
dead_letter_secret: Optional[str] = None,
dead_letter_uri: Optional[str] = None,
connection_string_primary_key: Optional[str] = None,
connection_string_secondary_key: Optional[str] = None,
endpoint_uri: Optional[str] = None,
entity_path: Optional[str] = None,
**kwargs
):
"""
:keyword authentication_type: Specifies the authentication type being used for connecting to
the endpoint. Possible values include: "KeyBased", "IdentityBased".
:paramtype authentication_type: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.AuthenticationType
:keyword dead_letter_secret: Dead letter storage secret for key-based authentication. Will be
obfuscated during read.
:paramtype dead_letter_secret: str
:keyword dead_letter_uri: Dead letter storage URL for identity-based authentication.
:paramtype dead_letter_uri: str
:keyword connection_string_primary_key: PrimaryConnectionString of the endpoint for key-based
authentication. Will be obfuscated during read.
:paramtype connection_string_primary_key: str
:keyword connection_string_secondary_key: SecondaryConnectionString of the endpoint for
key-based authentication. Will be obfuscated during read.
:paramtype connection_string_secondary_key: str
:keyword endpoint_uri: The URL of the EventHub namespace for identity-based authentication. It
must include the protocol sb://.
:paramtype endpoint_uri: str
:keyword entity_path: The EventHub name in the EventHub namespace for identity-based
authentication.
:paramtype entity_path: str
"""
super(EventHub, self).__init__(authentication_type=authentication_type, dead_letter_secret=dead_letter_secret, dead_letter_uri=dead_letter_uri, **kwargs)
self.endpoint_type = 'EventHub' # type: str
self.connection_string_primary_key = connection_string_primary_key
self.connection_string_secondary_key = connection_string_secondary_key
self.endpoint_uri = endpoint_uri
self.entity_path = entity_path
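# Illustrative sketch (hypothetical values): an EventHub endpoint with
# identity-based authentication supplies endpoint_uri/entity_path instead
# of connection strings.
#
#     endpoint = EventHub(
#         authentication_type="IdentityBased",
#         endpoint_uri="sb://example.servicebus.windows.net",
#         entity_path="twins-hub",
#     )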
class GroupIdInformation(msrest.serialization.Model):
"""The group information for creating a private endpoint on Digital Twin.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar properties: Required.
:vartype properties:
~azure.mgmt.digitaltwins.v2020_12_01.models.GroupIdInformationPropertiesAutoGenerated
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
"""
_validation = {
'properties': {'required': True},
'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'},
'type': {'readonly': True},
}
_attribute_map = {
'properties': {'key': 'properties', 'type': 'GroupIdInformationPropertiesAutoGenerated'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
properties: "GroupIdInformationPropertiesAutoGenerated",
id: Optional[str] = None,
**kwargs
):
"""
:keyword properties: Required.
:paramtype properties:
~azure.mgmt.digitaltwins.v2020_12_01.models.GroupIdInformationPropertiesAutoGenerated
:keyword id: The resource identifier.
:paramtype id: str
"""
super(GroupIdInformation, self).__init__(**kwargs)
self.properties = properties
self.id = id
self.name = None
self.type = None
class GroupIdInformationProperties(msrest.serialization.Model):
"""The properties for a group information object.
:ivar group_id: The group id.
:vartype group_id: str
:ivar required_members: The required members for a specific group id.
:vartype required_members: list[str]
:ivar required_zone_names: The required DNS zones for a specific group id.
:vartype required_zone_names: list[str]
"""
_attribute_map = {
'group_id': {'key': 'groupId', 'type': 'str'},
'required_members': {'key': 'requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'requiredZoneNames', 'type': '[str]'},
}
def __init__(
self,
*,
group_id: Optional[str] = None,
required_members: Optional[List[str]] = None,
required_zone_names: Optional[List[str]] = None,
**kwargs
):
"""
:keyword group_id: The group id.
:paramtype group_id: str
:keyword required_members: The required members for a specific group id.
:paramtype required_members: list[str]
:keyword required_zone_names: The required DNS zones for a specific group id.
:paramtype required_zone_names: list[str]
"""
super(GroupIdInformationProperties, self).__init__(**kwargs)
self.group_id = group_id
self.required_members = required_members
self.required_zone_names = required_zone_names
class GroupIdInformationPropertiesAutoGenerated(GroupIdInformationProperties):
"""GroupIdInformationPropertiesAutoGenerated.
:ivar group_id: The group id.
:vartype group_id: str
:ivar required_members: The required members for a specific group id.
:vartype required_members: list[str]
:ivar required_zone_names: The required DNS zones for a specific group id.
:vartype required_zone_names: list[str]
"""
_attribute_map = {
'group_id': {'key': 'groupId', 'type': 'str'},
'required_members': {'key': 'requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'requiredZoneNames', 'type': '[str]'},
}
def __init__(
self,
*,
group_id: Optional[str] = None,
required_members: Optional[List[str]] = None,
required_zone_names: Optional[List[str]] = None,
**kwargs
):
"""
:keyword group_id: The group id.
:paramtype group_id: str
:keyword required_members: The required members for a specific group id.
:paramtype required_members: list[str]
:keyword required_zone_names: The required DNS zones for a specific group id.
:paramtype required_zone_names: list[str]
"""
super(GroupIdInformationPropertiesAutoGenerated, self).__init__(group_id=group_id, required_members=required_members, required_zone_names=required_zone_names, **kwargs)
class GroupIdInformationResponse(msrest.serialization.Model):
"""The available private link resources for a Digital Twin.
:ivar value: The list of available private link resources for a Digital Twin.
:vartype value: list[~azure.mgmt.digitaltwins.v2020_12_01.models.GroupIdInformation]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[GroupIdInformation]'},
}
def __init__(
self,
*,
value: Optional[List["GroupIdInformation"]] = None,
**kwargs
):
"""
:keyword value: The list of available private link resources for a Digital Twin.
:paramtype value: list[~azure.mgmt.digitaltwins.v2020_12_01.models.GroupIdInformation]
"""
super(GroupIdInformationResponse, self).__init__(**kwargs)
self.value = value
class Operation(msrest.serialization.Model):
"""DigitalTwins service REST API operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Operation name: {provider}/{resource}/{read | write | action | delete}.
:vartype name: str
:ivar display: Operation properties display.
:vartype display: ~azure.mgmt.digitaltwins.v2020_12_01.models.OperationDisplay
:ivar origin: The intended executor of the operation.
:vartype origin: str
:ivar is_data_action: If the operation is a data action (for data plane rbac).
:vartype is_data_action: bool
"""
_validation = {
'name': {'readonly': True},
'origin': {'readonly': True},
'is_data_action': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
}
def __init__(
self,
*,
display: Optional["OperationDisplay"] = None,
**kwargs
):
"""
:keyword display: Operation properties display.
:paramtype display: ~azure.mgmt.digitaltwins.v2020_12_01.models.OperationDisplay
"""
super(Operation, self).__init__(**kwargs)
self.name = None
self.display = display
self.origin = None
self.is_data_action = None
class OperationDisplay(msrest.serialization.Model):
"""The object that represents the operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provider: Service provider: Microsoft DigitalTwins.
:vartype provider: str
:ivar resource: Resource Type: DigitalTwinsInstances.
:vartype resource: str
:ivar operation: Name of the operation.
:vartype operation: str
    :ivar description: Friendly description for the operation.
:vartype description: str
"""
_validation = {
'provider': {'readonly': True},
'resource': {'readonly': True},
'operation': {'readonly': True},
'description': {'readonly': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(OperationDisplay, self).__init__(**kwargs)
self.provider = None
self.resource = None
self.operation = None
self.description = None
class OperationListResult(msrest.serialization.Model):
"""A list of DigitalTwins service operations. It contains a list of operations and a URL link to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar next_link: The link used to get the next page of DigitalTwins description objects.
:vartype next_link: str
:ivar value: A list of DigitalTwins operations supported by the Microsoft.DigitalTwins resource
provider.
:vartype value: list[~azure.mgmt.digitaltwins.v2020_12_01.models.Operation]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[Operation]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
**kwargs
):
"""
:keyword next_link: The link used to get the next page of DigitalTwins description objects.
:paramtype next_link: str
"""
super(OperationListResult, self).__init__(**kwargs)
self.next_link = next_link
self.value = None
class PrivateEndpointConnection(msrest.serialization.Model):
"""The private endpoint connection of a Digital Twin.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar properties: Required.
:vartype properties:
~azure.mgmt.digitaltwins.v2020_12_01.models.PrivateEndpointConnectionProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'},
'type': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'PrivateEndpointConnectionProperties'},
}
def __init__(
self,
*,
properties: "PrivateEndpointConnectionProperties",
**kwargs
):
"""
:keyword properties: Required.
:paramtype properties:
~azure.mgmt.digitaltwins.v2020_12_01.models.PrivateEndpointConnectionProperties
"""
super(PrivateEndpointConnection, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.properties = properties
class PrivateEndpointConnectionProperties(ConnectionProperties):
"""PrivateEndpointConnectionProperties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provisioning_state: The provisioning state. Possible values include: "Pending",
"Approved", "Rejected", "Disconnected".
:vartype provisioning_state: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.ConnectionPropertiesProvisioningState
:ivar private_endpoint:
:vartype private_endpoint:
~azure.mgmt.digitaltwins.v2020_12_01.models.ConnectionPropertiesPrivateEndpoint
:ivar group_ids: The list of group ids for the private endpoint connection.
:vartype group_ids: list[str]
:ivar private_link_service_connection_state:
:vartype private_link_service_connection_state:
~azure.mgmt.digitaltwins.v2020_12_01.models.ConnectionPropertiesPrivateLinkServiceConnectionState
"""
_validation = {
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'private_endpoint': {'key': 'privateEndpoint', 'type': 'ConnectionPropertiesPrivateEndpoint'},
'group_ids': {'key': 'groupIds', 'type': '[str]'},
'private_link_service_connection_state': {'key': 'privateLinkServiceConnectionState', 'type': 'ConnectionPropertiesPrivateLinkServiceConnectionState'},
}
def __init__(
self,
*,
private_endpoint: Optional["ConnectionPropertiesPrivateEndpoint"] = None,
group_ids: Optional[List[str]] = None,
private_link_service_connection_state: Optional["ConnectionPropertiesPrivateLinkServiceConnectionState"] = None,
**kwargs
):
"""
:keyword private_endpoint:
:paramtype private_endpoint:
~azure.mgmt.digitaltwins.v2020_12_01.models.ConnectionPropertiesPrivateEndpoint
:keyword group_ids: The list of group ids for the private endpoint connection.
:paramtype group_ids: list[str]
:keyword private_link_service_connection_state:
:paramtype private_link_service_connection_state:
~azure.mgmt.digitaltwins.v2020_12_01.models.ConnectionPropertiesPrivateLinkServiceConnectionState
"""
super(PrivateEndpointConnectionProperties, self).__init__(private_endpoint=private_endpoint, group_ids=group_ids, private_link_service_connection_state=private_link_service_connection_state, **kwargs)
class PrivateEndpointConnectionsResponse(msrest.serialization.Model):
"""The available private link connections for a Digital Twin.
:ivar value: The list of available private link connections for a Digital Twin.
:vartype value: list[~azure.mgmt.digitaltwins.v2020_12_01.models.PrivateEndpointConnection]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'},
}
def __init__(
self,
*,
value: Optional[List["PrivateEndpointConnection"]] = None,
**kwargs
):
"""
:keyword value: The list of available private link connections for a Digital Twin.
:paramtype value: list[~azure.mgmt.digitaltwins.v2020_12_01.models.PrivateEndpointConnection]
"""
super(PrivateEndpointConnectionsResponse, self).__init__(**kwargs)
self.value = value
class ServiceBus(DigitalTwinsEndpointResourceProperties):
"""Properties related to ServiceBus.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
    :ivar endpoint_type: Required. The type of Digital Twins endpoint. Constant filled by server.
Possible values include: "EventHub", "EventGrid", "ServiceBus".
:vartype endpoint_type: str or ~azure.mgmt.digitaltwins.v2020_12_01.models.EndpointType
:ivar provisioning_state: The provisioning state. Possible values include: "Provisioning",
"Deleting", "Succeeded", "Failed", "Canceled", "Deleted", "Warning", "Suspending", "Restoring",
"Moving", "Disabled".
:vartype provisioning_state: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.EndpointProvisioningState
:ivar created_time: Time when the Endpoint was added to DigitalTwinsInstance.
:vartype created_time: ~datetime.datetime
:ivar authentication_type: Specifies the authentication type being used for connecting to the
endpoint. Possible values include: "KeyBased", "IdentityBased".
:vartype authentication_type: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.AuthenticationType
:ivar dead_letter_secret: Dead letter storage secret for key-based authentication. Will be
obfuscated during read.
:vartype dead_letter_secret: str
:ivar dead_letter_uri: Dead letter storage URL for identity-based authentication.
:vartype dead_letter_uri: str
:ivar primary_connection_string: PrimaryConnectionString of the endpoint for key-based
authentication. Will be obfuscated during read.
:vartype primary_connection_string: str
:ivar secondary_connection_string: SecondaryConnectionString of the endpoint for key-based
authentication. Will be obfuscated during read.
:vartype secondary_connection_string: str
:ivar endpoint_uri: The URL of the ServiceBus namespace for identity-based authentication. It
must include the protocol sb://.
:vartype endpoint_uri: str
:ivar entity_path: The ServiceBus Topic name for identity-based authentication.
:vartype entity_path: str
"""
_validation = {
'endpoint_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_time': {'readonly': True},
}
_attribute_map = {
'endpoint_type': {'key': 'endpointType', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'dead_letter_secret': {'key': 'deadLetterSecret', 'type': 'str'},
'dead_letter_uri': {'key': 'deadLetterUri', 'type': 'str'},
'primary_connection_string': {'key': 'primaryConnectionString', 'type': 'str'},
'secondary_connection_string': {'key': 'secondaryConnectionString', 'type': 'str'},
'endpoint_uri': {'key': 'endpointUri', 'type': 'str'},
'entity_path': {'key': 'entityPath', 'type': 'str'},
}
def __init__(
self,
*,
authentication_type: Optional[Union[str, "AuthenticationType"]] = None,
dead_letter_secret: Optional[str] = None,
dead_letter_uri: Optional[str] = None,
primary_connection_string: Optional[str] = None,
secondary_connection_string: Optional[str] = None,
endpoint_uri: Optional[str] = None,
entity_path: Optional[str] = None,
**kwargs
):
"""
:keyword authentication_type: Specifies the authentication type being used for connecting to
the endpoint. Possible values include: "KeyBased", "IdentityBased".
:paramtype authentication_type: str or
~azure.mgmt.digitaltwins.v2020_12_01.models.AuthenticationType
:keyword dead_letter_secret: Dead letter storage secret for key-based authentication. Will be
obfuscated during read.
:paramtype dead_letter_secret: str
:keyword dead_letter_uri: Dead letter storage URL for identity-based authentication.
:paramtype dead_letter_uri: str
:keyword primary_connection_string: PrimaryConnectionString of the endpoint for key-based
authentication. Will be obfuscated during read.
:paramtype primary_connection_string: str
:keyword secondary_connection_string: SecondaryConnectionString of the endpoint for key-based
authentication. Will be obfuscated during read.
:paramtype secondary_connection_string: str
:keyword endpoint_uri: The URL of the ServiceBus namespace for identity-based authentication.
It must include the protocol sb://.
:paramtype endpoint_uri: str
:keyword entity_path: The ServiceBus Topic name for identity-based authentication.
:paramtype entity_path: str
"""
super(ServiceBus, self).__init__(authentication_type=authentication_type, dead_letter_secret=dead_letter_secret, dead_letter_uri=dead_letter_uri, **kwargs)
self.endpoint_type = 'ServiceBus' # type: str
self.primary_connection_string = primary_connection_string
self.secondary_connection_string = secondary_connection_string
self.endpoint_uri = endpoint_uri
self.entity_path = entity_path
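# Illustrative sketch (hypothetical values): the key-based variant of a
# ServiceBus endpoint passes a connection string rather than a namespace URI.
#
#     endpoint = ServiceBus(
#         authentication_type="KeyBased",
#         primary_connection_string="Endpoint=sb://example.servicebus.windows.net/;SharedAccessKey=<key>",
#     )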
| {
"content_hash": "5b46f4161c67db21b4642d01b5abcc64",
"timestamp": "",
"source": "github",
"line_count": 1554,
"max_line_length": 208,
"avg_line_length": 40.30759330759331,
"alnum_prop": 0.652479325648967,
"repo_name": "Azure/azure-sdk-for-python",
"id": "204072a793129ec9ba037666fd3272897aa08394",
"size": "63106",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/digitaltwins/azure-mgmt-digitaltwins/azure/mgmt/digitaltwins/v2020_12_01/models/_models_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
class Student(object):
    def __init__(self, name):
        self.name = name
    def __call__(self, *args, **kwargs):
        # Defining __call__ makes every instance callable like a function.
        print('My name is %s.' % self.name)
s = Student('Kobe')
s()  # Invokes Student.__call__ and prints: My name is Kobe.
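# Because Student defines __call__, its instances are callable objects:
#
#     callable(s)   # -> True
#     s()           # runs __call__ again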
| {
"content_hash": "6de24357283d6ba729312fa42603998a",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 43,
"avg_line_length": 19,
"alnum_prop": 0.531578947368421,
"repo_name": "wuchengang/PythonLearing",
"id": "fb004e90bab4e7c329533792c4f3f6507c1ad743",
"size": "239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "special_call.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "32694"
},
{
"name": "Python",
"bytes": "7070"
}
],
"symlink_target": ""
} |
import logging
import re
import os
import subprocess
from edalize.edatool import Edatool
logger = logging.getLogger(__name__)
class Veriblelint(Edatool):
argtypes = ["vlogdefine", "vlogparam"]
@classmethod
def get_doc(cls, api_ver):
if api_ver == 0:
return {
"description": "Verible lint backend (verible-verilog-lint)",
"members": [
{
"name": "ruleset",
"type": "String",
"desc": "Ruleset: [default|all|none]",
},
],
"lists": [
{
"name": "verible_lint_args",
"type": "String",
"desc": "Extra command line arguments passed to the Verible tool",
},
{
"name": "rules",
"type": "String",
"desc": 'What rules to use. Prefix a rule name with "-" to disable it.',
},
],
}
def build_main(self):
pass
def _get_tool_args(self):
args = ["--lint_fatal", "--parse_fatal"]
if "rules" in self.tool_options:
args.append("--rules=" + ",".join(self.tool_options["rules"]))
if "ruleset" in self.tool_options:
args.append("--ruleset=" + self.tool_options["ruleset"])
if "verible_lint_args" in self.tool_options:
args += self.tool_options["verible_lint_args"]
return args
def run_main(self):
(src_files, incdirs) = self._get_fileset_files(force_slash=True)
src_files_filtered = []
config_files_filtered = []
waiver_files_filtered = []
for src_file in src_files:
ft = src_file.file_type
if ft.startswith("verilogSource") or ft.startswith("systemVerilogSource"):
src_files_filtered.append(src_file.name)
elif ft == "veribleLintRules":
config_files_filtered.append(src_file.name)
elif ft == "veribleLintWaiver":
waiver_files_filtered.append(src_file.name)
if len(src_files_filtered) == 0:
logger.warning("No SystemVerilog source files to be processed.")
return
lint_fail = False
args = self._get_tool_args()
if len(config_files_filtered) > 1:
raise RuntimeError(
"Verible lint only supports a single rules file (type veribleLintRules)"
)
elif len(config_files_filtered) == 1:
args.append("--rules_config=" + config_files_filtered[0])
if waiver_files_filtered:
args.append("--waiver_files=" + ",".join(waiver_files_filtered))
for src_file in src_files_filtered:
cmd = ["verible-verilog-lint"] + args + [src_file]
logger.debug("Running " + " ".join(cmd))
try:
res = subprocess.run(cmd, cwd=self.work_root, check=False)
except FileNotFoundError:
_s = "Command '{}' not found. Make sure it is in $PATH"
raise RuntimeError(_s.format(cmd[0]))
if res.returncode != 0:
lint_fail = True
if lint_fail:
raise RuntimeError("Lint failed")
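# Illustrative sketch (not part of edalize): for a single source file top.sv
# with default tool options, run_main assembles roughly this command:
#
#     verible-verilog-lint --lint_fatal --parse_fatal top.sv
#
# With a ruleset and a waiver file configured (hypothetical names) it becomes:
#
#     verible-verilog-lint --lint_fatal --parse_fatal \
#         --ruleset=default --waiver_files=lint.waiver top.sv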
| {
"content_hash": "2eda3709f0226fada0cd5cc99b6eaa35",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 96,
"avg_line_length": 34.01,
"alnum_prop": 0.5016171714201706,
"repo_name": "lowRISC/edalize",
"id": "58c02e50979219ced3497149a9aeccb8896b4233",
"size": "3543",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "edalize/veriblelint.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "9334"
},
{
"name": "Forth",
"bytes": "985"
},
{
"name": "Jinja",
"bytes": "33184"
},
{
"name": "Makefile",
"bytes": "32637"
},
{
"name": "Python",
"bytes": "414295"
},
{
"name": "Shell",
"bytes": "250"
},
{
"name": "Tcl",
"bytes": "60425"
},
{
"name": "VHDL",
"bytes": "534"
},
{
"name": "Verilog",
"bytes": "19"
}
],
"symlink_target": ""
} |
"""
Component to interface with various switches that can be controlled remotely.
For more details about this component, please refer to the documentation
at https://home-assistant.io/components/switch/
"""
import asyncio
from datetime import timedelta
import logging
import os
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.config import load_yaml_config_file
from homeassistant.loader import bind_hass
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
STATE_ON, SERVICE_TURN_ON, SERVICE_TURN_OFF, SERVICE_TOGGLE,
ATTR_ENTITY_ID)
from homeassistant.components import group
DOMAIN = 'switch'
DEPENDENCIES = ['group']
SCAN_INTERVAL = timedelta(seconds=30)
GROUP_NAME_ALL_SWITCHES = 'all switches'
ENTITY_ID_ALL_SWITCHES = group.ENTITY_ID_FORMAT.format('all_switches')
ENTITY_ID_FORMAT = DOMAIN + '.{}'
ATTR_TODAY_ENERGY_KWH = "today_energy_kwh"
ATTR_CURRENT_POWER_W = "current_power_w"
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
PROP_TO_ATTR = {
'current_power_w': ATTR_CURRENT_POWER_W,
'today_energy_kwh': ATTR_TODAY_ENERGY_KWH,
}
SWITCH_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
_LOGGER = logging.getLogger(__name__)
@bind_hass
def is_on(hass, entity_id=None):
"""Return if the switch is on based on the statemachine.
Async friendly.
"""
entity_id = entity_id or ENTITY_ID_ALL_SWITCHES
return hass.states.is_state(entity_id, STATE_ON)
@bind_hass
def turn_on(hass, entity_id=None):
"""Turn all or specified switch on."""
hass.add_job(async_turn_on, hass, entity_id)
@callback
@bind_hass
def async_turn_on(hass, entity_id=None):
"""Turn all or specified switch on."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
hass.async_add_job(hass.services.async_call(DOMAIN, SERVICE_TURN_ON, data))
@bind_hass
def turn_off(hass, entity_id=None):
"""Turn all or specified switch off."""
hass.add_job(async_turn_off, hass, entity_id)
@callback
@bind_hass
def async_turn_off(hass, entity_id=None):
"""Turn all or specified switch off."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_OFF, data))
@bind_hass
def toggle(hass, entity_id=None):
"""Toggle all or specified switch."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
hass.services.call(DOMAIN, SERVICE_TOGGLE, data)
@asyncio.coroutine
def async_setup(hass, config):
"""Track states and offer events for switches."""
component = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_SWITCHES)
yield from component.async_setup(config)
@asyncio.coroutine
def async_handle_switch_service(service):
"""Handle calls to the switch services."""
target_switches = component.async_extract_from_service(service)
for switch in target_switches:
if service.service == SERVICE_TURN_ON:
yield from switch.async_turn_on()
elif service.service == SERVICE_TOGGLE:
yield from switch.async_toggle()
else:
yield from switch.async_turn_off()
update_tasks = []
for switch in target_switches:
if not switch.should_poll:
continue
update_coro = hass.async_add_job(
switch.async_update_ha_state(True))
if hasattr(switch, 'async_update'):
update_tasks.append(update_coro)
else:
yield from update_coro
if update_tasks:
yield from asyncio.wait(update_tasks, loop=hass.loop)
descriptions = yield from hass.async_add_job(
load_yaml_config_file, os.path.join(
os.path.dirname(__file__), 'services.yaml'))
hass.services.async_register(
DOMAIN, SERVICE_TURN_OFF, async_handle_switch_service,
descriptions.get(SERVICE_TURN_OFF), schema=SWITCH_SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_TURN_ON, async_handle_switch_service,
descriptions.get(SERVICE_TURN_ON), schema=SWITCH_SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_TOGGLE, async_handle_switch_service,
descriptions.get(SERVICE_TOGGLE), schema=SWITCH_SERVICE_SCHEMA)
return True
class SwitchDevice(ToggleEntity):
"""Representation of a switch."""
# pylint: disable=no-self-use
@property
def current_power_w(self):
"""Return the current power usage in W."""
return None
@property
def today_energy_kwh(self):
"""Return the today total energy usage in kWh."""
return None
@property
def is_standby(self):
"""Return true if device is in standby."""
return None
@property
def state_attributes(self):
"""Return the optional state attributes."""
data = {}
for prop, attr in PROP_TO_ATTR.items():
value = getattr(self, prop)
if value:
data[attr] = value
return data
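# Illustrative sketch (hypothetical, not a real platform): a minimal entity
# built on SwitchDevice only needs to track and report its own state.
#
#     class DemoSwitch(SwitchDevice):
#         def __init__(self):
#             self._state = False
#
#         @property
#         def is_on(self):
#             return self._state
#
#         def turn_on(self, **kwargs):
#             self._state = True
#
#         def turn_off(self, **kwargs):
#             self._state = False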
| {
"content_hash": "764a48093d0dd560c2291f2105e9beab",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 79,
"avg_line_length": 29.921787709497206,
"alnum_prop": 0.6708364451082898,
"repo_name": "LinuxChristian/home-assistant",
"id": "a53c6c5c01f21d6e3cfb1bf4baa511379066ed15",
"size": "5356",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/switch/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13788"
},
{
"name": "HTML",
"bytes": "1733802"
},
{
"name": "JavaScript",
"bytes": "15192"
},
{
"name": "Python",
"bytes": "7415265"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "15154"
}
],
"symlink_target": ""
} |
"""Command line utility for querying the Logitech Harmony."""
import argparse
import logging
import pprint
import sys
from harmony import auth
from harmony import client as harmony_client
def login_to_logitech(args):
"""Logs in to the Logitech service.
Args:
args: argparse arguments needed to login.
Returns:
Session token that can be used to log in to the Harmony device.
"""
token = auth.login(args.email, args.password)
if not token:
sys.exit('Could not get token from Logitech server.')
session_token = auth.swap_auth_token(
args.harmony_ip, args.harmony_port, token)
if not session_token:
sys.exit('Could not swap login token for session token.')
return session_token
def show_config(args):
"""Connects to the Harmony and prints its configuration."""
token = login_to_logitech(args)
client = harmony_client.create_and_connect_client(
args.harmony_ip, args.harmony_port, token)
pprint.pprint(client.get_config())
client.disconnect(send_close=True)
return 0
def main():
"""Main method for the script."""
parser = argparse.ArgumentParser(
description='pyharmony utility script',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Required flags go here.
required_flags = parser.add_argument_group('required arguments')
required_flags.add_argument('--email', required=True, help=(
'Logitech username in the form of an email address.'))
required_flags.add_argument(
'--password', required=True, help='Logitech password.')
required_flags.add_argument(
'--harmony_ip', required=True, help='IP Address of the Harmony device.')
# Flags with defaults go here.
parser.add_argument('--harmony_port', default=5222, type=int, help=(
'Network port that the Harmony is listening on.'))
loglevels = dict((logging.getLevelName(level), level)
for level in [10, 20, 30, 40, 50])
parser.add_argument('--loglevel', default='INFO', choices=loglevels.keys(),
help='Logging level to print to the console.')
subparsers = parser.add_subparsers()
list_devices_parser = subparsers.add_parser(
'show_config', help='Print the Harmony device configuration.')
list_devices_parser.set_defaults(func=show_config)
args = parser.parse_args()
logging.basicConfig(
level=loglevels[args.loglevel],
format='%(levelname)s\t%(name)s\t%(message)s')
sys.exit(args.func(args))
if __name__ == '__main__':
main()
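# Example invocation (placeholder credentials and address):
#
#     python -m harmony --email user@example.com --password secret \
#         --harmony_ip 192.168.1.20 show_config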
| {
"content_hash": "42d29a31bb9ce46c2e58c9de5f4df8e3",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 80,
"avg_line_length": 31.790123456790123,
"alnum_prop": 0.6722330097087379,
"repo_name": "petele/pyharmony",
"id": "2624c6c566d054bff522a4ac7e9ae882bdb0a0bd",
"size": "2599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "harmony/__main__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "8594"
}
],
"symlink_target": ""
} |
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import os
import inspect
import multiprocessing
import reveal_user_classification
########################################################################################################################
# Configure path related functions.
########################################################################################################################
def get_package_path():
return os.path.dirname(inspect.getfile(reveal_user_classification))
########################################################################################################################
# Configure optimization related functions.
########################################################################################################################
def get_threads_number():
"""
Automatically determine the number of cores. If that fails, the number defaults to a manual setting.
"""
try:
cores_number = multiprocessing.cpu_count()
return cores_number
except NotImplementedError:
cores_number = 8
return cores_number
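# Minimal usage sketch:
#
#     n_jobs = get_threads_number()  # core count, or the fallback of 8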
| {
"content_hash": "62df74e6c9bce3975d3122ece6da202a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 120,
"avg_line_length": 38.58620689655172,
"alnum_prop": 0.4075067024128686,
"repo_name": "MKLab-ITI/reveal-user-classification",
"id": "40194fad39c84fd5d6f20b4d6b97d2b4da651b0d",
"size": "1119",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "reveal_user_classification/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "128831"
}
],
"symlink_target": ""
} |
"""The tests the MQTT alarm control panel component."""
import copy
import json
import pytest
from homeassistant.components import alarm_control_panel
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMING,
STATE_ALARM_DISARMED,
STATE_ALARM_DISARMING,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
STATE_UNKNOWN,
)
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import assert_setup_component, async_fire_mqtt_message
from tests.components.alarm_control_panel import common
CODE_NUMBER = "1234"
CODE_TEXT = "HELLO_CODE"
DEFAULT_CONFIG = {
alarm_control_panel.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "alarm/state",
"command_topic": "alarm/command",
}
}
DEFAULT_CONFIG_CODE = {
alarm_control_panel.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "alarm/state",
"command_topic": "alarm/command",
"code": "1234",
"code_arm_required": True,
}
}
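# For reference, DEFAULT_CONFIG_CODE corresponds to this configuration.yaml
# snippet (a hand-written equivalent, not taken from the docs):
#
#     alarm_control_panel:
#       - platform: mqtt
#         name: test
#         state_topic: "alarm/state"
#         command_topic: "alarm/command"
#         code: "1234"
#         code_arm_required: true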
async def test_fail_setup_without_state_topic(hass, mqtt_mock):
"""Test for failing with no state topic."""
with assert_setup_component(0) as config:
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
alarm_control_panel.DOMAIN: {
"platform": "mqtt",
"command_topic": "alarm/command",
}
},
)
assert not config[alarm_control_panel.DOMAIN]
async def test_fail_setup_without_command_topic(hass, mqtt_mock):
"""Test failing with no command topic."""
with assert_setup_component(0):
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
alarm_control_panel.DOMAIN: {
"platform": "mqtt",
"state_topic": "alarm/state",
}
},
)
async def test_update_state_via_state_topic(hass, mqtt_mock):
"""Test updating with via state topic."""
assert await async_setup_component(
hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG,
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_UNKNOWN
for state in (
STATE_ALARM_DISARMED,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_PENDING,
STATE_ALARM_ARMING,
STATE_ALARM_DISARMING,
STATE_ALARM_TRIGGERED,
):
async_fire_mqtt_message(hass, "alarm/state", state)
assert hass.states.get(entity_id).state == state
async def test_ignore_update_state_if_unknown_via_state_topic(hass, mqtt_mock):
"""Test ignoring updates via state topic."""
assert await async_setup_component(
hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG,
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_UNKNOWN
async_fire_mqtt_message(hass, "alarm/state", "unsupported state")
assert hass.states.get(entity_id).state == STATE_UNKNOWN
async def test_arm_home_publishes_mqtt(hass, mqtt_mock):
"""Test publishing of MQTT messages while armed."""
assert await async_setup_component(
hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG,
)
await hass.async_block_till_done()
await common.async_alarm_arm_home(hass)
mqtt_mock.async_publish.assert_called_once_with(
"alarm/command", "ARM_HOME", 0, False
)
async def test_arm_home_not_publishes_mqtt_with_invalid_code_when_req(hass, mqtt_mock):
"""Test not publishing of MQTT messages with invalid.
When code_arm_required = True
"""
assert await async_setup_component(
hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE,
)
call_count = mqtt_mock.async_publish.call_count
await common.async_alarm_arm_home(hass, "abcd")
assert mqtt_mock.async_publish.call_count == call_count
async def test_arm_home_publishes_mqtt_when_code_not_req(hass, mqtt_mock):
"""Test publishing of MQTT messages.
When code_arm_required = False
"""
config = copy.deepcopy(DEFAULT_CONFIG_CODE)
config[alarm_control_panel.DOMAIN]["code_arm_required"] = False
assert await async_setup_component(hass, alarm_control_panel.DOMAIN, config,)
await hass.async_block_till_done()
await common.async_alarm_arm_home(hass)
mqtt_mock.async_publish.assert_called_once_with(
"alarm/command", "ARM_HOME", 0, False
)
async def test_arm_away_publishes_mqtt(hass, mqtt_mock):
"""Test publishing of MQTT messages while armed."""
assert await async_setup_component(
hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG,
)
await hass.async_block_till_done()
await common.async_alarm_arm_away(hass)
mqtt_mock.async_publish.assert_called_once_with(
"alarm/command", "ARM_AWAY", 0, False
)
async def test_arm_away_not_publishes_mqtt_with_invalid_code_when_req(hass, mqtt_mock):
"""Test not publishing of MQTT messages with invalid code.
When code_arm_required = True
"""
assert await async_setup_component(
hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE,
)
call_count = mqtt_mock.async_publish.call_count
await common.async_alarm_arm_away(hass, "abcd")
assert mqtt_mock.async_publish.call_count == call_count
async def test_arm_away_publishes_mqtt_when_code_not_req(hass, mqtt_mock):
"""Test publishing of MQTT messages.
When code_arm_required = False
"""
config = copy.deepcopy(DEFAULT_CONFIG_CODE)
config[alarm_control_panel.DOMAIN]["code_arm_required"] = False
assert await async_setup_component(hass, alarm_control_panel.DOMAIN, config,)
await hass.async_block_till_done()
await common.async_alarm_arm_away(hass)
mqtt_mock.async_publish.assert_called_once_with(
"alarm/command", "ARM_AWAY", 0, False
)
async def test_arm_night_publishes_mqtt(hass, mqtt_mock):
"""Test publishing of MQTT messages while armed."""
assert await async_setup_component(
hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG,
)
await hass.async_block_till_done()
await common.async_alarm_arm_night(hass)
mqtt_mock.async_publish.assert_called_once_with(
"alarm/command", "ARM_NIGHT", 0, False
)
async def test_arm_night_not_publishes_mqtt_with_invalid_code_when_req(hass, mqtt_mock):
"""Test not publishing of MQTT messages with invalid code.
When code_arm_required = True
"""
assert await async_setup_component(
hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE,
)
call_count = mqtt_mock.async_publish.call_count
await common.async_alarm_arm_night(hass, "abcd")
assert mqtt_mock.async_publish.call_count == call_count
async def test_arm_night_publishes_mqtt_when_code_not_req(hass, mqtt_mock):
"""Test publishing of MQTT messages.
When code_arm_required = False
"""
config = copy.deepcopy(DEFAULT_CONFIG_CODE)
config[alarm_control_panel.DOMAIN]["code_arm_required"] = False
assert await async_setup_component(hass, alarm_control_panel.DOMAIN, config,)
await hass.async_block_till_done()
await common.async_alarm_arm_night(hass)
mqtt_mock.async_publish.assert_called_once_with(
"alarm/command", "ARM_NIGHT", 0, False
)
async def test_arm_custom_bypass_publishes_mqtt(hass, mqtt_mock):
"""Test publishing of MQTT messages while armed."""
    assert await async_setup_component(
        hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG,
    )
await hass.async_block_till_done()
await common.async_alarm_arm_custom_bypass(hass)
mqtt_mock.async_publish.assert_called_once_with(
"alarm/command", "ARM_CUSTOM_BYPASS", 0, False
)
async def test_arm_custom_bypass_not_publishes_mqtt_with_invalid_code_when_req(
hass, mqtt_mock
):
"""Test not publishing of MQTT messages with invalid code.
When code_arm_required = True
"""
    assert await async_setup_component(
        hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE,
    )
await hass.async_block_till_done()
call_count = mqtt_mock.async_publish.call_count
await common.async_alarm_arm_custom_bypass(hass, "abcd")
assert mqtt_mock.async_publish.call_count == call_count
async def test_arm_custom_bypass_publishes_mqtt_when_code_not_req(hass, mqtt_mock):
"""Test publishing of MQTT messages.
When code_arm_required = False
"""
    config = copy.deepcopy(DEFAULT_CONFIG_CODE)
    config[alarm_control_panel.DOMAIN]["code_arm_required"] = False
    assert await async_setup_component(hass, alarm_control_panel.DOMAIN, config,)
await hass.async_block_till_done()
await common.async_alarm_arm_custom_bypass(hass)
mqtt_mock.async_publish.assert_called_once_with(
"alarm/command", "ARM_CUSTOM_BYPASS", 0, False
)
async def test_disarm_publishes_mqtt(hass, mqtt_mock):
"""Test publishing of MQTT messages while disarmed."""
assert await async_setup_component(
hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG,
)
await hass.async_block_till_done()
await common.async_alarm_disarm(hass)
mqtt_mock.async_publish.assert_called_once_with("alarm/command", "DISARM", 0, False)
async def test_disarm_publishes_mqtt_with_template(hass, mqtt_mock):
"""Test publishing of MQTT messages while disarmed.
When command_template set to output json
"""
config = copy.deepcopy(DEFAULT_CONFIG_CODE)
config[alarm_control_panel.DOMAIN]["code"] = "1234"
config[alarm_control_panel.DOMAIN]["command_template"] = (
'{"action":"{{ action }}",' '"code":"{{ code }}"}'
)
assert await async_setup_component(hass, alarm_control_panel.DOMAIN, config,)
await hass.async_block_till_done()
await common.async_alarm_disarm(hass, 1234)
mqtt_mock.async_publish.assert_called_once_with(
"alarm/command", '{"action":"DISARM","code":"1234"}', 0, False
)
async def test_disarm_publishes_mqtt_when_code_not_req(hass, mqtt_mock):
"""Test publishing of MQTT messages while disarmed.
When code_disarm_required = False
"""
config = copy.deepcopy(DEFAULT_CONFIG_CODE)
config[alarm_control_panel.DOMAIN]["code"] = "1234"
config[alarm_control_panel.DOMAIN]["code_disarm_required"] = False
assert await async_setup_component(hass, alarm_control_panel.DOMAIN, config,)
await hass.async_block_till_done()
await common.async_alarm_disarm(hass)
mqtt_mock.async_publish.assert_called_once_with("alarm/command", "DISARM", 0, False)
async def test_disarm_not_publishes_mqtt_with_invalid_code_when_req(hass, mqtt_mock):
"""Test not publishing of MQTT messages with invalid code.
When code_disarm_required = True
"""
assert await async_setup_component(
hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE,
)
call_count = mqtt_mock.async_publish.call_count
await common.async_alarm_disarm(hass, "abcd")
assert mqtt_mock.async_publish.call_count == call_count
async def test_update_state_via_state_topic_template(hass, mqtt_mock):
"""Test updating with template_value via state topic."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
alarm_control_panel.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test-topic",
"state_topic": "test-topic",
"value_template": "\
{% if (value | int) == 100 %}\
armed_away\
{% else %}\
disarmed\
{% endif %}",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("alarm_control_panel.test")
assert state.state == STATE_UNKNOWN
async_fire_mqtt_message(hass, "test-topic", "100")
state = hass.states.get("alarm_control_panel.test")
assert state.state == STATE_ALARM_ARMED_AWAY
async def test_attributes_code_number(hass, mqtt_mock):
"""Test attributes which are not supported by the vacuum."""
config = copy.deepcopy(DEFAULT_CONFIG)
config[alarm_control_panel.DOMAIN]["code"] = CODE_NUMBER
assert await async_setup_component(hass, alarm_control_panel.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("alarm_control_panel.test")
assert (
state.attributes.get(alarm_control_panel.ATTR_CODE_FORMAT)
== alarm_control_panel.FORMAT_NUMBER
)
async def test_attributes_code_text(hass, mqtt_mock):
"""Test attributes which are not supported by the vacuum."""
config = copy.deepcopy(DEFAULT_CONFIG)
config[alarm_control_panel.DOMAIN]["code"] = CODE_TEXT
assert await async_setup_component(hass, alarm_control_panel.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("alarm_control_panel.test")
assert (
state.attributes.get(alarm_control_panel.ATTR_CODE_FORMAT)
== alarm_control_panel.FORMAT_TEXT
)
async def test_availability_when_connection_lost(hass, mqtt_mock):
"""Test availability after MQTT disconnection."""
await help_test_availability_when_connection_lost(
hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE
)
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE
)
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_payload(
hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE
)
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
await help_test_custom_availability_payload(
hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE
)
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
)
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
await help_test_discovery_update_attr(
hass, mqtt_mock, caplog, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
)
async def test_unique_id(hass, mqtt_mock):
"""Test unique id option only creates one alarm per unique_id."""
config = {
alarm_control_panel.DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"command_topic": "command-topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"state_topic": "test-topic",
"command_topic": "command-topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
}
await help_test_unique_id(hass, mqtt_mock, alarm_control_panel.DOMAIN, config)
async def test_discovery_removal_alarm(hass, mqtt_mock, caplog):
"""Test removal of discovered alarm_control_panel."""
data = json.dumps(DEFAULT_CONFIG[alarm_control_panel.DOMAIN])
await help_test_discovery_removal(
hass, mqtt_mock, caplog, alarm_control_panel.DOMAIN, data
)
async def test_discovery_update_alarm(hass, mqtt_mock, caplog):
"""Test update of discovered alarm_control_panel."""
config1 = copy.deepcopy(DEFAULT_CONFIG[alarm_control_panel.DOMAIN])
config2 = copy.deepcopy(DEFAULT_CONFIG[alarm_control_panel.DOMAIN])
config1["name"] = "Beer"
config2["name"] = "Milk"
data1 = json.dumps(config1)
data2 = json.dumps(config2)
await help_test_discovery_update(
hass, mqtt_mock, caplog, alarm_control_panel.DOMAIN, data1, data2
)
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
data1 = '{ "name": "Beer" }'
data2 = (
'{ "name": "Milk",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
await help_test_discovery_broken(
hass, mqtt_mock, caplog, alarm_control_panel.DOMAIN, data1, data2
)
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT alarm control panel device registry integration."""
await help_test_entity_device_info_with_connection(
hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT alarm control panel device registry integration."""
await help_test_entity_device_info_with_identifier(
hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
await help_test_entity_device_info_update(
hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_remove(hass, mqtt_mock):
"""Test device registry remove."""
await help_test_entity_device_info_remove(
hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
"""Test MQTT discovery update when entity_id is updated."""
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_debug_info_message(hass, mqtt_mock):
"""Test MQTT debug info."""
await help_test_entity_debug_info_message(
hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
)
| {
"content_hash": "16eefebd76ba32ba36bea04acede923a",
"timestamp": "",
"source": "github",
"line_count": 638,
"max_line_length": 88,
"avg_line_length": 33.126959247648905,
"alnum_prop": 0.6535131298793471,
"repo_name": "pschmitt/home-assistant",
"id": "aa6452fd9c882568d70bb628e73d7f00d1ac81a5",
"size": "21135",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/mqtt/test_alarm_control_panel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1522"
},
{
"name": "Python",
"bytes": "24807200"
},
{
"name": "Shell",
"bytes": "4342"
}
],
"symlink_target": ""
} |
"""Invoke tasks. To run a task, run ``$ invoke <COMMAND>``. To see a list of
commands, run ``$ invoke --list``.
"""
import os
import sys
import code
import platform
import subprocess
import logging
from invoke import task, run
from website import settings
logging.getLogger('invoke').setLevel(logging.CRITICAL)
HERE = os.path.dirname(os.path.abspath(__file__))
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
def get_bin_path():
"""Get parent path of current python binary.
"""
return os.path.dirname(sys.executable)
def bin_prefix(cmd):
"""Prefix command with current binary path.
"""
return os.path.join(get_bin_path(), cmd)
try:
__import__('rednose')
except ImportError:
TEST_CMD = 'nosetests'
else:
TEST_CMD = 'nosetests --rednose'
@task
def server(host=None, port=5000, debug=True, live=False):
"""Run the app server."""
from website.app import init_app
app = init_app(set_backends=True, routes=True)
settings.API_SERVER_PORT = port
if live:
from livereload import Server
server = Server(app.wsgi_app)
server.watch(os.path.join(HERE, 'website', 'static', 'public'))
server.serve(port=port)
else:
app.run(host=host, port=port, debug=debug, threaded=debug, extra_files=[settings.ASSET_HASH_PATH])
@task
def apiserver(port=8000, live=False):
"""Run the API server."""
cmd = 'python manage.py runserver {}'.format(port)
if live:
cmd += ' livereload'
run(cmd, echo=True, pty=True)
SHELL_BANNER = """
{version}
+--------------------------------------------------+
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
|ccccccccccccccccccccccOOOOOOOccccccccccccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccOOOOOOOOOOOOccccccccccccccccccc|
|cccccccccOOOOOOOcccOOOOOOOOOOOOcccOOOOOOOccccccccc|
|cccccccOOOOOOOOOOccOOOOOsssOOOOcOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOccOOssssssOOccOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOsOcOssssssOOOOOOOOOOOOOOOccccccc|
|cccccccOOOOOOOOOOOssccOOOOOOcOssOOOOOOOOOOcccccccc|
|cccccccccOOOOOOOsssOccccccccccOssOOOOOOOcccccccccc|
|cccccOOOccccOOssssOccccccccccccOssssOccccOOOcccccc|
|ccOOOOOOOOOOOOOccccccccccccccccccccOOOOOOOOOOOOccc|
|cOOOOOOOOssssssOcccccccccccccccccOOssssssOOOOOOOOc|
|cOOOOOOOssssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOsssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOOssssOOccccccccccccccccccOsssssOOOOOOOOcc|
|cccOOOOOOOOOOOOOOOccccccccccccccOOOOOOOOOOOOOOOccc|
|ccccccccccccOOssssOOccccccccccOssssOOOcccccccccccc|
|ccccccccOOOOOOOOOssOccccOOcccOsssOOOOOOOOccccccccc|
|cccccccOOOOOOOOOOOsOcOOssssOcOssOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOOOsssssssOcOOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOcccOssssssOcccOOOOOOOOOOOccccccc|
|ccccccccOOOOOOOOOcccOOOOOOOOOOcccOOOOOOOOOcccccccc|
|ccccccccccOOOOcccccOOOOOOOOOOOcccccOOOOccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccccOOOOccccccccccccccccccccccc|
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
+--------------------------------------------------+
Welcome to the OSF Python Shell. Happy hacking!
Available variables:
{context}
"""
def make_shell_context():
from modularodm import Q
from framework.auth import User, Auth
from framework.mongo import database
from website.app import init_app
from website.project.model import Node
from website import models # all models
from website import settings
import requests
app = init_app()
context = {
'app': app,
'db': database,
'User': User,
'Auth': Auth,
'Node': Node,
'Q': Q,
'models': models,
'run_tests': test,
'rget': requests.get,
'rpost': requests.post,
'rdelete': requests.delete,
'rput': requests.put,
'settings': settings,
}
try: # Add a fake factory for generating fake names, emails, etc.
from faker import Factory
fake = Factory.create()
context['fake'] = fake
except ImportError:
pass
return context
def format_context(context):
lines = []
for name, obj in context.items():
line = "{name}: {obj!r}".format(**locals())
lines.append(line)
return '\n'.join(lines)
# Shell command adapted from Flask-Script. See NOTICE for license info.
@task
def shell():
context = make_shell_context()
banner = SHELL_BANNER.format(version=sys.version,
context=format_context(context)
)
try:
try:
# 0.10.x
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed(banner=banner)
ipshell(global_ns={}, local_ns=context)
except ImportError:
# 0.12+
from IPython import embed
embed(banner1=banner, user_ns=context)
return
except ImportError:
pass
# fallback to basic python shell
code.interact(banner, local=context)
return
@task(aliases=['mongo'])
def mongoserver(daemon=False, config=None):
"""Run the mongod process.
"""
if not config:
platform_configs = {
'darwin': '/usr/local/etc/tokumx.conf', # default for homebrew install
'linux': '/etc/tokumx.conf',
}
        platform_name = str(sys.platform).lower()
        config = platform_configs.get(platform_name)
port = settings.DB_PORT
cmd = 'mongod --port {0}'.format(port)
if config:
cmd += ' --config {0}'.format(config)
if daemon:
cmd += " --fork"
run(cmd, echo=True)
@task(aliases=['mongoshell'])
def mongoclient():
"""Run the mongo shell for the OSF database."""
db = settings.DB_NAME
port = settings.DB_PORT
run("mongo {db} --port {port}".format(db=db, port=port), pty=True)
@task
def mongodump(path):
"""Back up the contents of the running OSF database"""
db = settings.DB_NAME
port = settings.DB_PORT
cmd = "mongodump --db {db} --port {port} --out {path}".format(
db=db,
port=port,
path=path,
pty=True)
if settings.DB_USER:
cmd += ' --username {0}'.format(settings.DB_USER)
if settings.DB_PASS:
cmd += ' --password {0}'.format(settings.DB_PASS)
run(cmd, echo=True)
print()
print("To restore from the dumped database, run `invoke mongorestore {0}`".format(
os.path.join(path, settings.DB_NAME)))
@task
def mongorestore(path, drop=False):
"""Restores the running OSF database with the contents of the database at
the location given its argument.
By default, the contents of the specified database are added to
the existing database. The `--drop` option will cause the existing database
to be dropped.
A caveat: if you `invoke mongodump {path}`, you must restore with
    `invoke mongorestore {path}/{settings.DB_NAME}`, as that's where the
database dump will be stored.
"""
db = settings.DB_NAME
port = settings.DB_PORT
cmd = "mongorestore --db {db} --port {port}".format(
db=db,
port=port,
pty=True)
if settings.DB_USER:
cmd += ' --username {0}'.format(settings.DB_USER)
if settings.DB_PASS:
cmd += ' --password {0}'.format(settings.DB_PASS)
if drop:
cmd += " --drop"
cmd += " " + path
run(cmd, echo=True)
@task
def sharejs(host=None, port=None, db_host=None, db_port=None, db_name=None, cors_allow_origin=None):
"""Start a local ShareJS server."""
if host:
os.environ['SHAREJS_SERVER_HOST'] = host
if port:
os.environ['SHAREJS_SERVER_PORT'] = port
if db_host:
os.environ['SHAREJS_DB_HOST'] = db_host
if db_port:
os.environ['SHAREJS_DB_PORT'] = db_port
if db_name:
os.environ['SHAREJS_DB_NAME'] = db_name
if cors_allow_origin:
os.environ['SHAREJS_CORS_ALLOW_ORIGIN'] = cors_allow_origin
if settings.SENTRY_DSN:
os.environ['SHAREJS_SENTRY_DSN'] = settings.SENTRY_DSN
share_server = os.path.join(settings.ADDON_PATH, 'wiki', 'shareServer.js')
run("node {0}".format(share_server))
@task(aliases=['celery'])
def celery_worker(level="debug", hostname=None, beat=False):
"""Run the Celery process."""
cmd = 'celery worker -A framework.tasks -l {0}'.format(level)
if hostname:
cmd = cmd + ' --hostname={}'.format(hostname)
# beat sets up a cron like scheduler, refer to website/settings
if beat:
cmd = cmd + ' --beat'
run(bin_prefix(cmd), pty=True)
@task(aliases=['beat'])
def celery_beat(level="debug", schedule=None):
"""Run the Celery process."""
# beat sets up a cron like scheduler, refer to website/settings
cmd = 'celery beat -A framework.tasks -l {0}'.format(level)
if schedule:
cmd = cmd + ' --schedule={}'.format(schedule)
run(bin_prefix(cmd), pty=True)
@task
def rabbitmq():
"""Start a local rabbitmq server.
NOTE: this is for development only. The production environment should start
the server as a daemon.
"""
run("rabbitmq-server", pty=True)
@task(aliases=['elastic'])
def elasticsearch():
"""Start a local elasticsearch server
NOTE: Requires that elasticsearch is installed. See README for instructions
"""
if platform.linux_distribution()[0] == 'Ubuntu':
run("sudo service elasticsearch start")
elif platform.system() == 'Darwin': # Mac OSX
run('elasticsearch')
else:
print("Your system is not recognized, you will have to start elasticsearch manually")
@task
def migrate_search(delete=False, index=settings.ELASTIC_INDEX):
"""Migrate the search-enabled models."""
from website.search_migration.migrate import migrate
migrate(delete, index=index)
@task
def rebuild_search():
"""Delete and recreate the index for elasticsearch"""
run("curl -s -XDELETE {uri}/{index}*".format(uri=settings.ELASTIC_URI,
index=settings.ELASTIC_INDEX))
run("curl -s -XPUT {uri}/{index}".format(uri=settings.ELASTIC_URI,
index=settings.ELASTIC_INDEX))
migrate_search()
@task
def mailserver(port=1025):
"""Run a SMTP test server."""
cmd = 'python -m smtpd -n -c DebuggingServer localhost:{port}'.format(port=port)
run(bin_prefix(cmd), pty=True)
@task
def jshint():
"""Run JSHint syntax check"""
js_folder = os.path.join(HERE, 'website', 'static', 'js')
cmd = 'jshint {}'.format(js_folder)
run(cmd, echo=True)
@task(aliases=['flake8'])
def flake():
run('flake8 .', echo=True)
def pip_install(req_file):
"""Return the proper 'pip install' command for installing the dependencies
defined in ``req_file``.
"""
cmd = bin_prefix('pip install --exists-action w --upgrade -r {} '.format(req_file))
if WHEELHOUSE_PATH:
cmd += ' --no-index --find-links={}'.format(WHEELHOUSE_PATH)
return cmd
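# Illustrative result (assuming a virtualenv at /opt/env and
# WHEELHOUSE_PATH='/wheels'; both paths are examples, not repo settings):
#   pip_install('requirements.txt') returns roughly
#   '/opt/env/bin/pip install --exists-action w --upgrade -r requirements.txt --no-index --find-links=/wheels'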
@task(aliases=['req'])
def requirements(addons=False, release=False, dev=False, metrics=False):
"""Install python dependencies.
Examples:
inv requirements --dev
inv requirements --addons
inv requirements --release
inv requirements --metrics
"""
if release or addons:
addon_requirements()
# "release" takes precedence
if release:
req_file = os.path.join(HERE, 'requirements', 'release.txt')
elif dev: # then dev requirements
req_file = os.path.join(HERE, 'requirements', 'dev.txt')
    elif metrics:  # then metrics requirements
req_file = os.path.join(HERE, 'requirements', 'metrics.txt')
else: # then base requirements
req_file = os.path.join(HERE, 'requirements.txt')
run(pip_install(req_file), echo=True)
@task
def test_module(module=None, verbosity=2):
"""Helper for running tests.
"""
# Allow selecting specific submodule
module_fmt = ' '.join(module) if isinstance(module, list) else module
args = " --verbosity={0} -s {1}".format(verbosity, module_fmt)
# Use pty so the process buffers "correctly"
run(bin_prefix(TEST_CMD) + args, pty=True)
@task
def test_osf():
"""Run the OSF test suite."""
test_module(module="tests/")
@task
def test_api():
"""Run the API test suite."""
test_module(module="api_tests/")
@task
def test_addons():
"""Run all the tests in the addons directory.
"""
modules = []
for addon in settings.ADDONS_REQUESTED:
module = os.path.join(settings.BASE_PATH, 'addons', addon)
modules.append(module)
test_module(module=modules)
@task
def test(all=False, syntax=False):
"""
Run unit tests: OSF (always), plus addons and syntax checks (optional)
"""
if syntax:
flake()
jshint()
test_osf()
test_api()
if all:
test_addons()
karma(single=True, browsers='PhantomJS')
@task
def test_travis_osf():
"""
Run half of the tests to help travis go faster
"""
flake()
jshint()
test_osf()
@task
def test_travis_else():
"""
Run other half of the tests to help travis go faster
"""
test_addons()
test_api()
karma(single=True, browsers='PhantomJS')
@task
def karma(single=False, sauce=False, browsers=None):
"""Run JS tests with Karma. Requires Chrome to be installed."""
karma_bin = os.path.join(
HERE, 'node_modules', 'karma', 'bin', 'karma'
)
cmd = '{} start'.format(karma_bin)
if sauce:
cmd += ' karma.saucelabs.conf.js'
if single:
cmd += ' --single-run'
# Use browsers if specified on the command-line, otherwise default
# what's specified in karma.conf.js
if browsers:
cmd += ' --browsers {}'.format(browsers)
run(cmd, echo=True)
@task
def wheelhouse(addons=False, release=False, dev=False, metrics=False):
"""Install python dependencies.
Examples:
inv wheelhouse --dev
inv wheelhouse --addons
inv wheelhouse --release
inv wheelhouse --metrics
"""
if release or addons:
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory)
if os.path.isdir(path):
req_file = os.path.join(path, 'requirements.txt')
if os.path.exists(req_file):
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
run(cmd, pty=True)
if release:
req_file = os.path.join(HERE, 'requirements', 'release.txt')
elif dev:
req_file = os.path.join(HERE, 'requirements', 'dev.txt')
elif metrics:
req_file = os.path.join(HERE, 'requirements', 'metrics.txt')
else:
req_file = os.path.join(HERE, 'requirements.txt')
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
run(cmd, pty=True)
@task
def addon_requirements():
"""Install all addon requirements."""
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory)
if os.path.isdir(path):
try:
requirements_file = os.path.join(path, 'requirements.txt')
open(requirements_file)
print('Installing requirements for {0}'.format(directory))
cmd = 'pip install --exists-action w --upgrade -r {0}'.format(requirements_file)
if WHEELHOUSE_PATH:
cmd += ' --no-index --find-links={}'.format(WHEELHOUSE_PATH)
run(bin_prefix(cmd))
except IOError:
pass
print('Finished')
@task
def encryption(owner=None):
"""Generate GnuPG key.
For local development:
> invoke encryption
On Linode:
> sudo env/bin/invoke encryption --owner www-data
"""
if not settings.USE_GNUPG:
print('GnuPG is not enabled. No GnuPG key will be generated.')
return
import gnupg
gpg = gnupg.GPG(gnupghome=settings.GNUPG_HOME, gpgbinary=settings.GNUPG_BINARY)
keys = gpg.list_keys()
if keys:
print('Existing GnuPG key found')
return
print('Generating GnuPG key')
input_data = gpg.gen_key_input(name_real='OSF Generated Key')
gpg.gen_key(input_data)
if owner:
run('sudo chown -R {0} {1}'.format(owner, settings.GNUPG_HOME))
@task
def travis_addon_settings():
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory, 'settings')
if os.path.isdir(path):
try:
open(os.path.join(path, 'local-travis.py'))
run('cp {path}/local-travis.py {path}/local.py'.format(path=path))
except IOError:
pass
@task
def copy_addon_settings():
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory, 'settings')
if os.path.isdir(path) and not os.path.isfile(os.path.join(path, 'local.py')):
try:
open(os.path.join(path, 'local-dist.py'))
run('cp {path}/local-dist.py {path}/local.py'.format(path=path))
except IOError:
pass
@task
def copy_settings(addons=False):
# Website settings
if not os.path.isfile('website/settings/local.py'):
print('Creating local.py file')
run('cp website/settings/local-dist.py website/settings/local.py')
# Addon settings
if addons:
copy_addon_settings()
@task
def packages():
brew_commands = [
'update',
'upgrade',
'install libxml2',
'install libxslt',
'install elasticsearch',
'install gpg',
'install node',
'tap tokutek/tokumx',
'install tokumx-bin',
]
if platform.system() == 'Darwin':
print('Running brew commands')
for item in brew_commands:
command = 'brew {cmd}'.format(cmd=item)
run(command)
elif platform.system() == 'Linux':
# TODO: Write a script similar to brew bundle for Ubuntu
# e.g., run('sudo apt-get install [list of packages]')
pass
@task
def npm_bower():
print('Installing bower')
run('npm install -g bower', echo=True)
@task(aliases=['bower'])
def bower_install():
print('Installing bower-managed packages')
bower_bin = os.path.join(HERE, 'node_modules', 'bower', 'bin', 'bower')
run('{} prune'.format(bower_bin), echo=True)
run('{} install'.format(bower_bin), echo=True)
@task
def setup():
"""Creates local settings, installs requirements, and generates encryption key"""
copy_settings(addons=True)
packages()
requirements(addons=True, dev=True)
encryption()
from website.app import build_js_config_files
from website import settings
# Build nodeCategories.json before building assets
build_js_config_files(settings)
assets(dev=True, watch=False)
@task
def analytics():
from website.app import init_app
import matplotlib
matplotlib.use('Agg')
init_app()
from scripts.analytics import (
logs, addons, comments, folders, links, watch, email_invites,
permissions, profile, benchmarks
)
modules = (
logs, addons, comments, folders, links, watch, email_invites,
permissions, profile, benchmarks
)
for module in modules:
module.main()
@task
def clear_sessions(months=1, dry_run=False):
from website.app import init_app
init_app(routes=False, set_backends=True)
from scripts import clear_sessions
clear_sessions.clear_sessions_relative(months=months, dry_run=dry_run)
# Release tasks
@task
def hotfix(name, finish=False, push=False):
"""Rename hotfix branch to hotfix/<next-patch-version> and optionally
finish hotfix.
"""
    print('Checking out master to calculate current version')
run('git checkout master')
latest_version = latest_tag_info()['current_version']
print('Current version is: {}'.format(latest_version))
major, minor, patch = latest_version.split('.')
next_patch_version = '.'.join([major, minor, str(int(patch) + 1)])
print('Bumping to next patch version: {}'.format(next_patch_version))
print('Renaming branch...')
new_branch_name = 'hotfix/{}'.format(next_patch_version)
run('git checkout {}'.format(name), echo=True)
run('git branch -m {}'.format(new_branch_name), echo=True)
if finish:
run('git flow hotfix finish {}'.format(next_patch_version), echo=True, pty=True)
if push:
run('git push origin master', echo=True)
run('git push --tags', echo=True)
run('git push origin develop', echo=True)
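# Worked example (hypothetical versions): with the latest tag at v0.46.2,
# `invoke hotfix my-fix` renames my-fix to hotfix/0.46.3; adding --finish
# also runs `git flow hotfix finish 0.46.3`.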
@task
def feature(name, finish=False, push=False):
"""Rename the current branch to a feature branch and optionally finish it."""
print('Renaming branch...')
run('git branch -m feature/{}'.format(name), echo=True)
if finish:
run('git flow feature finish {}'.format(name), echo=True)
if push:
run('git push origin develop', echo=True)
# Adapted from bumpversion
def latest_tag_info():
try:
        # git-describe doesn't update the git-index; refreshing it first
        # (currently disabled) would be:
        # subprocess.check_output(["git", "update-index", "--refresh"])
# get info about the latest tag in git
describe_out = subprocess.check_output([
"git",
"describe",
"--dirty",
"--tags",
"--long",
"--abbrev=40"
], stderr=subprocess.STDOUT
).decode().split("-")
    except subprocess.CalledProcessError as err:
        # logger.warn("Error when running git describe")
        raise err
info = {}
if describe_out[-1].strip() == "dirty":
info["dirty"] = True
describe_out.pop()
info["commit_sha"] = describe_out.pop().lstrip("g")
info["distance_to_latest_tag"] = int(describe_out.pop())
info["current_version"] = describe_out.pop().lstrip("v")
# assert type(info["current_version"]) == str
assert 0 == len(describe_out)
return info
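# Illustrative parse (tag and sha are made up): if `git describe --dirty
# --tags --long --abbrev=40` prints
#   v0.46.2-14-g0123456789abcdef0123456789abcdef01234567-dirty
# this returns
#   {'dirty': True,
#    'commit_sha': '0123456789abcdef0123456789abcdef01234567',
#    'distance_to_latest_tag': 14,
#    'current_version': '0.46.2'}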
# Tasks for generating and bundling SSL certificates
# See http://cosdev.readthedocs.org/en/latest/osf/ops.html for details
@task
def generate_key(domain, bits=2048):
cmd = 'openssl genrsa -des3 -out {0}.key {1}'.format(domain, bits)
run(cmd)
@task
def generate_key_nopass(domain):
cmd = 'openssl rsa -in {domain}.key -out {domain}.key.nopass'.format(
domain=domain
)
run(cmd)
@task
def generate_csr(domain):
cmd = 'openssl req -new -key {domain}.key.nopass -out {domain}.csr'.format(
domain=domain
)
run(cmd)
@task
def request_ssl_cert(domain):
"""Generate a key, a key with password removed, and a signing request for
the specified domain.
Usage:
> invoke request_ssl_cert pizza.osf.io
"""
generate_key(domain)
generate_key_nopass(domain)
generate_csr(domain)
@task
def bundle_certs(domain, cert_path):
"""Concatenate certificates from NameCheap in the correct order. Certificate
files must be in the same directory.
"""
cert_files = [
'{0}.crt'.format(domain),
'COMODORSADomainValidationSecureServerCA.crt',
'COMODORSAAddTrustCA.crt',
'AddTrustExternalCARoot.crt',
]
certs = ' '.join(
os.path.join(cert_path, cert_file)
for cert_file in cert_files
)
cmd = 'cat {certs} > {domain}.bundle.crt'.format(
certs=certs,
domain=domain,
)
run(cmd)
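# Illustrative invocation (domain reused from the docstring above):
#   invoke bundle_certs pizza.osf.io /path/to/certs
# concatenates pizza.osf.io.crt followed by the three CA certificates, in the
# order listed, into pizza.osf.io.bundle.crt.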
@task
def clean_assets():
"""Remove built JS files."""
public_path = os.path.join(HERE, 'website', 'static', 'public')
js_path = os.path.join(public_path, 'js')
run('rm -rf {0}'.format(js_path), echo=True)
@task(aliases=['pack'])
def webpack(clean=False, watch=False, dev=False):
"""Build static assets with webpack."""
if clean:
clean_assets()
webpack_bin = os.path.join(HERE, 'node_modules', 'webpack', 'bin', 'webpack.js')
args = [webpack_bin]
if settings.DEBUG_MODE and dev:
args += ['--colors']
else:
args += ['--progress']
if watch:
args += ['--watch']
config_file = 'webpack.dev.config.js' if dev else 'webpack.prod.config.js'
args += ['--config {0}'.format(config_file)]
command = ' '.join(args)
run(command, echo=True)
@task()
def build_js_config_files():
from website import settings
from website.app import build_js_config_files as _build_js_config_files
print('Building JS config files...')
_build_js_config_files(settings)
print("...Done.")
@task()
def assets(dev=False, watch=False):
"""Install and build static assets."""
npm = 'npm install'
if not dev:
npm += ' --production'
run(npm, echo=True)
bower_install()
build_js_config_files()
# Always set clean=False to prevent possible mistakes
# on prod
webpack(clean=False, watch=watch, dev=dev)
@task
def generate_self_signed(domain):
"""Generate self-signed SSL key and certificate.
"""
cmd = (
'openssl req -x509 -nodes -days 365 -newkey rsa:2048'
' -keyout {0}.key -out {0}.crt'
).format(domain)
run(cmd)
@task
def update_citation_styles():
from scripts import parse_citation_styles
total = parse_citation_styles.main()
print("Parsed {} styles".format(total))
| {
"content_hash": "dfcb555247fb4b17032b364b6cd316dd",
"timestamp": "",
"source": "github",
"line_count": 876,
"max_line_length": 125,
"avg_line_length": 29.269406392694062,
"alnum_prop": 0.6354524180967238,
"repo_name": "cosenal/osf.io",
"id": "9b3b0e6dc13792cae18da767f3abe289e84a4601",
"size": "25686",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tasks.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "119459"
},
{
"name": "HTML",
"bytes": "31299"
},
{
"name": "JavaScript",
"bytes": "1180009"
},
{
"name": "Mako",
"bytes": "538621"
},
{
"name": "Python",
"bytes": "3694878"
},
{
"name": "Shell",
"bytes": "1927"
}
],
"symlink_target": ""
} |
import inspect
from datetime import datetime
from flask import current_app
from notifications_utils.clients.zendesk.zendesk_client import (
NotifySupportTicket,
)
from app import zendesk_client
from app.celery.broadcast_message_tasks import send_broadcast_event
from app.config import QueueNames
from app.dao.dao_utils import dao_save_object
from app.errors import InvalidRequest
from app.models import (
BroadcastEvent,
BroadcastEventMessageType,
BroadcastStatusType,
)
def update_broadcast_message_status(broadcast_message, new_status, updating_user=None, api_key_id=None):
_validate_broadcast_update(broadcast_message, new_status, updating_user)
if new_status == BroadcastStatusType.BROADCASTING:
broadcast_message.approved_at = datetime.utcnow()
broadcast_message.approved_by = updating_user
if new_status == BroadcastStatusType.CANCELLED:
broadcast_message.cancelled_at = datetime.utcnow()
broadcast_message.cancelled_by = updating_user
broadcast_message.cancelled_by_api_key_id = api_key_id
current_app.logger.info(
f"broadcast_message {broadcast_message.id} moving from {broadcast_message.status} to {new_status}"
)
broadcast_message.status = new_status
dao_save_object(broadcast_message)
_create_p1_zendesk_alert(broadcast_message)
if new_status in {BroadcastStatusType.BROADCASTING, BroadcastStatusType.CANCELLED}:
_create_broadcast_event(broadcast_message)
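# Illustrative call (objects are hypothetical): approving a pending alert with
#   update_broadcast_message_status(
#       message, BroadcastStatusType.BROADCASTING, updating_user=approver)
# stamps approved_at/approved_by, persists the new status and, for live
# services, queues the CAP XML broadcast event via the helpers below.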
def _validate_broadcast_update(broadcast_message, new_status, updating_user):
if new_status not in BroadcastStatusType.ALLOWED_STATUS_TRANSITIONS[broadcast_message.status]:
raise InvalidRequest(
f"Cannot move broadcast_message {broadcast_message.id} from {broadcast_message.status} to {new_status}",
status_code=400,
)
if new_status == BroadcastStatusType.BROADCASTING:
# training mode services can approve their own broadcasts
if updating_user == broadcast_message.created_by and not broadcast_message.service.restricted:
raise InvalidRequest(
f"User {updating_user.id} cannot approve their own broadcast_message {broadcast_message.id}",
status_code=400,
)
elif len(broadcast_message.areas["simple_polygons"]) == 0:
raise InvalidRequest(
f"broadcast_message {broadcast_message.id} has no selected areas and so cannot be broadcasted.",
status_code=400,
)
def _create_p1_zendesk_alert(broadcast_message):
if current_app.config["NOTIFY_ENVIRONMENT"] != "live":
return
if broadcast_message.status != BroadcastStatusType.BROADCASTING:
return
if broadcast_message.stubbed:
return
message = inspect.cleandoc(
f"""
Broadcast Sent
https://www.notifications.service.gov.uk/services/{broadcast_message.service_id}/current-alerts/{broadcast_message.id}
Sent on channel {broadcast_message.service.broadcast_channel} to {broadcast_message.areas["names"]}.
Content starts "{broadcast_message.content[:100]}".
Follow the runbook to check the broadcast went out OK:
https://docs.google.com/document/d/1J99yOlfp4nQz6et0w5oJVqi-KywtIXkxrEIyq_g2XUs/edit#heading=h.lzr9aq5b4wg
"""
)
ticket = NotifySupportTicket(
subject="Live broadcast sent",
message=message,
ticket_type=NotifySupportTicket.TYPE_INCIDENT,
technical_ticket=True,
org_id=current_app.config["BROADCAST_ORGANISATION_ID"],
org_type="central",
service_id=str(broadcast_message.service_id),
p1=True,
)
zendesk_client.send_ticket_to_zendesk(ticket)
def _create_broadcast_event(broadcast_message):
"""
If the service is live and the broadcast message is not stubbed, creates a broadcast event, stores it in the
database, and triggers the task to send the CAP XML off.
"""
service = broadcast_message.service
if not broadcast_message.stubbed and not service.restricted:
msg_types = {
BroadcastStatusType.BROADCASTING: BroadcastEventMessageType.ALERT,
BroadcastStatusType.CANCELLED: BroadcastEventMessageType.CANCEL,
}
event = BroadcastEvent(
service=service,
broadcast_message=broadcast_message,
message_type=msg_types[broadcast_message.status],
transmitted_content={"body": broadcast_message.content},
transmitted_areas=broadcast_message.areas,
# TODO: Probably move this somewhere more standalone too and imply that it shouldn't change. Should it
# include a service based identifier too? eg "flood-warnings@notifications.service.gov.uk" or similar
transmitted_sender="notifications.service.gov.uk",
# TODO: Should this be set to now? Or the original starts_at?
transmitted_starts_at=broadcast_message.starts_at,
transmitted_finishes_at=broadcast_message.finishes_at,
)
dao_save_object(event)
send_broadcast_event.apply_async(kwargs={"broadcast_event_id": str(event.id)}, queue=QueueNames.BROADCASTS)
elif broadcast_message.stubbed != service.restricted:
# It's possible for a service to create a broadcast in trial mode, and then approve it after the
# service is live (or vice versa). We don't think it's safe to send such broadcasts, as the service
# has changed since they were created. Log an error instead.
current_app.logger.error(
f"Broadcast event not created. Stubbed status of broadcast message was {broadcast_message.stubbed}"
f' but service was {"in trial mode" if service.restricted else "live"}'
)
| {
"content_hash": "76491497ffaa76e95270cf2417bd7131",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 126,
"avg_line_length": 41.255319148936174,
"alnum_prop": 0.6948598934158501,
"repo_name": "alphagov/notifications-api",
"id": "fead6821ebcf842535ee888dbc243f703925dd31",
"size": "5817",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "app/broadcast_message/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "719"
},
{
"name": "Jinja",
"bytes": "5543"
},
{
"name": "Makefile",
"bytes": "6627"
},
{
"name": "Mako",
"bytes": "361"
},
{
"name": "Procfile",
"bytes": "35"
},
{
"name": "Python",
"bytes": "3506225"
},
{
"name": "Shell",
"bytes": "13179"
}
],
"symlink_target": ""
} |
__author__ = 'Vimalkumar Velayudhan'
__email__ = 'vimalkumarvelayudhan@gmail.com'
__version__ = '1.3.2'
import os.path
pkg_dir = os.path.abspath(os.path.dirname(__file__))
theme_path = os.path.split(pkg_dir)[0]
| {
"content_hash": "53dfedc177d09d4628c81d84bd32bae4",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 52,
"avg_line_length": 26.625,
"alnum_prop": 0.676056338028169,
"repo_name": "aristotle-mdr/user-documentation",
"id": "fdf261144409e54a26130e7c5cf04fecd1f531b9",
"size": "260",
"binary": false,
"copies": "3",
"ref": "refs/heads/draft",
"path": "python/docs_conf_and_theme/aristotle_theme/_themes/aristotle_theme/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10993"
},
{
"name": "HTML",
"bytes": "1739"
},
{
"name": "Python",
"bytes": "11868"
},
{
"name": "Shell",
"bytes": "1480"
}
],
"symlink_target": ""
} |
import datetime
import random
import re
import irc.bot
import irc.strings
from irc.client import ip_quad_to_numstr
from random_replies import hello, thanks, unknown_cmd
from utils import write_to_destination
TEXT = (
"Hi, I am reviewbot. Type something like this: "
"'Pls add to review list <your_patch>' "
"on the channel and I'll add it to the Review list. "
"Or simply msg me something lke: 'review list <your_patch>'."
)
REVIEW_LIST_REGEXP = (
r"(add[\s\S]*(to|in)[\s\S]*review\s*(list|queue)|need[\s\S]review)")
PATCHES_REGEX = r"https?://\S+"
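# Illustrative messages the bot reacts to (links are hypothetical):
#   "Pls add to review list https://review.rdoproject.org/r/12345"
#   "need review https://review.rdoproject.org/r/67890"
# REVIEW_LIST_REGEXP detects the request; PATCHES_REGEX then extracts every
# http(s) link from the message.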
class TestBot(irc.bot.SingleServerIRCBot):
def __init__(self, channel, nickname, server, port=6667):
irc.bot.SingleServerIRCBot.__init__(
self, [(server, port)], nickname, nickname)
self.channel = "#" + channel
def on_nicknameinuse(self, c, e):
c.nick(c.get_nickname() + "_")
def on_welcome(self, c, e):
c.join(self.channel)
print("Joined ", self.channel)
def on_privmsg(self, c, e):
self.do_command(e, e.arguments[0])
def on_pubmsg(self, c, e):
matches = re.search(REVIEW_LIST_REGEXP, e.arguments[0], re.IGNORECASE)
if matches:
links_found = re.findall(
PATCHES_REGEX, e.arguments[0], re.IGNORECASE)
for link in links_found:
print(str(datetime.datetime.now()), " : ", link)
# Send to hackmd
result = write_to_destination(link)
if result is None:
result = "I could not add the review to Review List"
self.connection.privmsg(self.channel, result)
# For nick mentions
elif self.connection.get_nickname() in e.arguments[0]:
if any(s in e.arguments[0].lower() for s in thanks):
self.connection.privmsg(self.channel, "You are welcome :)")
elif " reviews" in e.arguments[0]:
self.connection.privmsg(self.channel,
"Do you want me to add your patch to the "
"Review list? Please type something like "
"add to review list <your_patch> so that "
"I can understand. Thanks.")
def do_command(self, e, cmd):
nick = e.source.nick
c = self.connection
if cmd == "disconnect":
self.disconnect()
elif cmd == "die":
self.die()
elif cmd == "stats":
for chname, chobj in self.channels.items():
c.notice(nick, "--- Channel statistics ---")
c.notice(nick, "Channel: " + chname)
users = sorted(chobj.users())
c.notice(nick, "Users: " + ", ".join(users))
opers = sorted(chobj.opers())
c.notice(nick, "Opers: " + ", ".join(opers))
voiced = sorted(chobj.voiced())
c.notice(nick, "Voiced: " + ", ".join(voiced))
elif cmd == "dcc":
dcc = self.dcc_listen()
c.ctcp(
"DCC",
nick,
"CHAT chat %s %d"
% (ip_quad_to_numstr(dcc.localaddress), dcc.localport),
)
elif any(s in cmd.lower() for s in hello):
c.privmsg(nick, TEXT)
elif any(s in cmd.lower() for s in thanks):
c.privmsg(nick, "You are welcome :)")
        # TODO: remove duplication with the link handling in on_pubmsg
elif re.search(r"review\s*list", cmd, re.IGNORECASE):
patches_regex = r"https?://\S+"
links_found = re.findall(patches_regex, e.arguments[0],
re.IGNORECASE)
for link in links_found:
print(link)
# Send to hackmd
result = write_to_destination(link)
if result is None:
result = "I could not add the review to Review List"
c.privmsg(nick, result)
else:
            c.notice(nick, random.choice(unknown_cmd))
def main():
import sys
if len(sys.argv) != 4:
print("Usage: testbot <server[:port]> <channel> <nickname>")
sys.exit(1)
s = sys.argv[1].split(":", 1)
server = s[0]
if len(s) == 2:
try:
port = int(s[1])
except ValueError:
print("Error: Erroneous port.")
sys.exit(1)
else:
port = 6667
channel = sys.argv[2]
nickname = sys.argv[3]
bot = TestBot(channel, nickname, server, port)
bot.start()
if __name__ == "__main__":
main()
| {
"content_hash": "357fdec323d6416730e8fdf7bd596792",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 78,
"avg_line_length": 35.05925925925926,
"alnum_prop": 0.521233889710543,
"repo_name": "redhat-openstack/rdo-infra",
"id": "cfc48b1cd045da36a471d0722dbb696be1dda918",
"size": "4756",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ci-scripts/reviewbot/ircbot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "261"
},
{
"name": "Dockerfile",
"bytes": "2412"
},
{
"name": "Go",
"bytes": "29397"
},
{
"name": "HTML",
"bytes": "31437"
},
{
"name": "Jinja",
"bytes": "12823"
},
{
"name": "Python",
"bytes": "730685"
},
{
"name": "Shell",
"bytes": "65445"
}
],
"symlink_target": ""
} |
import sys
major = sys.version_info.major
minor = sys.version_info.minor
python_version = (major, minor)
def is_python_2():
return python_version[0] == 2
def is_python_3():
return python_version[0] == 3
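# Minimal usage sketch (import path assumed from this repo's layout):
#   from wikiquotes.managers import python_version_manager
#   if python_version_manager.is_python_3():
#       print("running on Python 3")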
| {
"content_hash": "0f33e9d89f218353c8c0e1498c9fa431",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 33,
"avg_line_length": 19.454545454545453,
"alnum_prop": 0.6822429906542056,
"repo_name": "FranDepascuali/wikiquotes-python-api",
"id": "b90a89dc3916d0755614e395ca36b1ff1a1b2813",
"size": "214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wikiquotes/managers/python_version_manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26798"
}
],
"symlink_target": ""
} |
from scenario_one import scenario_one
from scheduler import scheduler
from simulation import run_scenario
# Scenario 3: Same as scenario 2, but now we let the leases
# expire before the server comes back.
def scenario_three(reporter):
job = scenario_one(reporter)
scheduler.add_relative(120, lambda: job.lose_master())
scheduler.add_relative(190, lambda: job.trigger_master_election())
reporter.set_filename('scenario_three')
if __name__ == '__main__':
run_scenario(lambda reporter: scenario_three(reporter))
| {
"content_hash": "6e584603cd677ddb67de3101a420bbb4",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 68,
"avg_line_length": 32.6875,
"alnum_prop": 0.7533460803059273,
"repo_name": "youtube/doorman",
"id": "529f57f9b9827ff7c12d7ef29b11331fa9ccae37",
"size": "1123",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "simulation/scenario_three.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "219292"
},
{
"name": "Makefile",
"bytes": "188"
},
{
"name": "Protocol Buffer",
"bytes": "13775"
},
{
"name": "Python",
"bytes": "90295"
},
{
"name": "Shell",
"bytes": "883"
}
],
"symlink_target": ""
} |
"""
The temp module provides a NamedTemporaryFile that can be reopened in the same
process on any platform. Most platforms use the standard Python
tempfile.NamedTemporaryFile class, but Windows users are given a custom class.
This is needed because the Python implementation of NamedTemporaryFile uses the
O_TEMPORARY flag under Windows, which prevents the file from being reopened
if the same flag is not provided [1][2]. Note that this does not address the
more general issue of opening a file for writing and reading in multiple
processes in a manner that works across platforms.
The custom version of NamedTemporaryFile doesn't support the same keyword
arguments available in tempfile.NamedTemporaryFile.
1: https://mail.python.org/pipermail/python-list/2005-December/336957.html
2: http://bugs.python.org/issue14243
"""
import os
import tempfile
from django.core.files.utils import FileProxyMixin
__all__ = ('NamedTemporaryFile', 'gettempdir',)
if os.name == 'nt':
class TemporaryFile(FileProxyMixin):
"""
Temporary file object constructor that supports reopening of the
temporary file in Windows.
Unlike tempfile.NamedTemporaryFile from the standard library,
__init__() doesn't support the 'delete', 'buffering', 'encoding', or
'newline' keyword arguments.
"""
def __init__(self, mode='w+b', bufsize=-1, suffix='', prefix='', dir=None):
fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
self.name = name
self.file = os.fdopen(fd, mode, bufsize)
self.close_called = False
# Because close can be called during shutdown
# we need to cache os.unlink and access it
# as self.unlink only
unlink = os.unlink
def close(self):
if not self.close_called:
self.close_called = True
try:
self.file.close()
except (OSError, IOError):
pass
try:
self.unlink(self.name)
except OSError:
pass
def __del__(self):
self.close()
def __enter__(self):
self.file.__enter__()
return self
def __exit__(self, exc, value, tb):
self.file.__exit__(exc, value, tb)
NamedTemporaryFile = TemporaryFile
else:
NamedTemporaryFile = tempfile.NamedTemporaryFile
gettempdir = tempfile.gettempdir
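# A minimal usage sketch (illustrative, not part of Django's public API):
# the file can be reopened by name on any platform, which the stdlib class
# cannot guarantee on Windows.
#
#   from django.core.files.temp import NamedTemporaryFile
#   tmp = NamedTemporaryFile(suffix='.txt')
#   tmp.write(b'reopen me')
#   tmp.flush()
#   with open(tmp.name, 'rb') as f:
#       assert f.read() == b'reopen me'
#   tmp.close()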
| {
"content_hash": "d61ecb13c4779fc74f4985d0d2d3a665",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 83,
"avg_line_length": 33.7972972972973,
"alnum_prop": 0.6397441023590564,
"repo_name": "reinout/django",
"id": "5fbb91b9ee92e032d232e51befed3caedd71e829",
"size": "2501",
"binary": false,
"copies": "27",
"ref": "refs/heads/master",
"path": "django/core/files/temp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53138"
},
{
"name": "HTML",
"bytes": "172977"
},
{
"name": "JavaScript",
"bytes": "448151"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12147106"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django import forms
from mezzanine.blog.models import BlogPost
from mezzanine.core.forms import TinyMceWidget
class PostForm(forms.ModelForm):
class Meta:
model = BlogPost
fields = ('title', 'content', 'allow_comments', 'categories', 'related_posts')
widgets = {
'categories': forms.SelectMultiple(),
'content': TinyMceWidget(),
}
class EditPostForm(PostForm):
    """Identical to PostForm for now; kept as a separate class (Meta is
    inherited) so the edit form can diverge from the create form later."""
| {
"content_hash": "b19b2dcb9e811623f002cd2486587306",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 86,
"avg_line_length": 29.03448275862069,
"alnum_prop": 0.6389548693586699,
"repo_name": "SISTEMAsw/TAMP",
"id": "b2c5ac8dd890017da72466f042d937955d1fc2f2",
"size": "842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gui/blogFront/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158211"
},
{
"name": "CSS",
"bytes": "28165"
},
{
"name": "HTML",
"bytes": "284694"
},
{
"name": "JavaScript",
"bytes": "9602"
},
{
"name": "Makefile",
"bytes": "772"
},
{
"name": "Python",
"bytes": "389072"
},
{
"name": "Shell",
"bytes": "4390"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import re
import logging
import sys
import os
import codecs
from ycmd.completers.completer import Completer
from ycmd import responses
from ycmd import utils
from ycmd import identifier_utils
identifier_utils.FILETYPE_TO_IDENTIFIER_REGEX['tex'] = re.compile( r"(?:\\[@a-zA-Z]+)|(?:\{[_\w:-]*\}?)|(?:\[[_\w:-]*\]?)")
def Update_RegEx( new ):
    identifier_utils.FILETYPE_TO_IDENTIFIER_REGEX['tex'] = re.compile(new, re.U)
return True
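# Illustrative identifiers matched by the default 'tex' pattern above:
#   \section   \@ifstar   {fig:intro}   [width
# i.e. LaTeX commands plus (possibly unclosed) curly/square bracket arguments.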
# To handle BibTeX properly
nobibparser = False
try:
import bibtexparser
from bibtexparser.bparser import BibTexParser
from bibtexparser.customization import convert_to_unicode, author
except ImportError:
nobibparser = True
def smart_truncate(content, length=30, suffix='...'):
if len(content) <= length:
return content
else:
return ' '.join(content[:length+1-len(suffix)].split(' ')[0:-1]) + suffix
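# Worked example: smart_truncate('A fairly long example title here', length=20)
# returns 'A fairly long...' -- the cut lands on a word boundary and the
# suffix keeps the result within the limit.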
def bib_customizations(record):
def truncate_title(record):
title = record['title'] if 'title' in record else ''
title = smart_truncate(title)
record['title'] = title
return record
def et_al(record):
author = record['author'] if 'author' in record else []
author = [a.replace(', ', ' ').replace(',', ' ') for a in author]
if len(author) == 0:
record['author'] = ''
elif len(author) == 1:
record['author'] = author[0]
else:
record['author'] = author[0] + ' et al.'
return record
record = convert_to_unicode(record)
record = author(record)
record = et_al(record)
record = truncate_title(record)
return record
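# Roughly, a record like (hypothetical entry)
#   {'title': 'The Art of Computer Programming',
#    'author': 'Knuth, Donald E. and Lamport, Leslie'}
# comes out as
#   {'title': 'The Art of Computer...', 'author': 'Knuth Donald E. et al.'}
# after unicode conversion, author splitting, et-al collapsing and truncation.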
LOG = logging.getLogger(__name__)
class GenericSlave (object):
"""
    A generic completion slave. Maintains its own notion of the
    main directory and its own response cache.
"""
def __init__(self, output):
self._completion_target = 'none'
self._main_directory = None
self.completion_wanted = False
self._files = {}
self._cached_data = {}
self._d_cache_hits = 0
self._goto_labels = {}
self.extensions = ''
self.output_regex = re.compile(output + "$", re.UNICODE)
def ShouldUse(self, target, request_data):
"""
        Return True if this slave should provide a completion for
        ``target``, and record the decision in ``self.completion_wanted``.
"""
if self._main_directory is None:
self._ComputeMainDirectory(request_data)
match = self.output_regex.search(target)
if match is not None:
self.completion_wanted = True
else:
self.completion_wanted = False
return self.completion_wanted
def _Walk( self, path, what ):
for root, dirs, files in os.walk(path):
for file in files:
if file.endswith(what):
yield os.path.join(root, file)
def _ComputeMainDirectory( self, request_data ):
def FindMain(path, what):
found = False
for file in os.listdir(path):
if file.endswith(what):
self._main_directory = path
found = True
break
if not found:
new_path = os.path.dirname(os.path.normpath(path))
if new_path == path or new_path == "":
return False
else:
return FindMain(new_path, what)
return True
filepath = request_data['filepath']
path = os.path.dirname(filepath)
if not FindMain(path, self.extensions):
self._main_directory = filepath
print("Unable to set the main directory...", sys.stderr)
else:
print("Main directory successfully found at {}".format(self._main_directory),
sys.stderr)
def _CacheDataAndSkip(self, filename):
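        # Cache scan results keyed by file mtime: callers re-scan a file only
        # when it has been modified since it was last parsed.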
last_modification = os.path.getmtime(filename)
if filename not in self._files:
self._files[filename] = last_modification
return False, []
if last_modification <= self._files[filename]:
self._d_cache_hits += 1
return True, self._cached_data[filename]
self._files[filename] = last_modification
return False, []
def ProduceTargets(self):
"""
        Return the completion candidates for this completer.
"""
if self.completion_wanted:
return self._FindTarget()
else:
return []
def _FindTarget(self):
pass
class LatexSlave (GenericSlave):
def __init__(self, arguments):
super( LatexSlave , self).__init__(arguments['output'])
self.collect_regex = arguments['collect']
self.extensions = ".latexmain"
self._completion_target = arguments['target']
def BuildOurCompletes(self, name):
"""
Surround the response value with brackets.
TODO- perhaps add a square bracket option
"""
return responses.BuildCompletionData("{" + name + "}",
self._completion_target, None, name)
def _FindTarget(self):
"""
        Find LaTeX labels for the configured completion target.
        We scan every .tex file under the main directory and extract
        the contents of all matching commands as sources for
        completion.
"""
ret = []
for filename in self._Walk(self._main_directory, ".tex"):
skip, cache = self._CacheDataAndSkip(filename)
if skip:
ret.extend(cache)
continue
resp = []
for i, line in enumerate(codecs.open(filename, 'r', 'utf-8')):
line = line.rstrip()
match = re.search(self.collect_regex, line)
if match is not None:
lid = re.sub(".*" + self.collect_regex + ".*", r"\1", line)
                if lid not in ret and lid not in resp:
resp.append( lid )
#TODO- make it an option if we want gotos for
#this completion
self._goto_labels[lid] = (filename, i+1, match.start(1))
self._cached_data[filename] = resp
ret.extend(resp)
"""
we moved the building of completes to here so we can
share a cache between square and curly brackets
"""
temp = []
for i in ret:
tempo = self.BuildOurCompletes(i)
temp.append( tempo )
return temp
class BibTexSlave (GenericSlave):
def __init__(self):
super( BibTexSlave , self).__init__(r"\\[a-zA-Z]*cite[a-zA-Z]*\*?")
self.extensions = ".bib"
self._completion_target = 'Bib'
def _FindBibEntriesRegex(self):
"""
"""
ret = []
for filename in self._Walk(self._main_directory, ".bib"):
skip, cache = self._CacheDataAndSkip(filename)
if skip:
ret.extend(cache)
continue
resp = []
for line in codecs.open(filename, 'r', 'utf-8'):
line = line.rstrip()
found = re.search(r"@(.*){([^,]*).*", line)
if found is not None:
if found.group(1) != "string":
resp.append(responses.BuildCompletionData(
re.sub(r"@(.*){([^,]*).*", r"\2", line))
)
ret.extend(resp)
self._cached_data[filename] = resp
return ret
def _FindBibEntriesParser(self):
"""
"""
ret = []
parser = BibTexParser()
parser.customization = bib_customizations
for filename in self._Walk(self._main_directory, ".bib"):
skip, cache = self._CacheDataAndSkip(filename)
if skip:
ret.extend(cache)
continue
resp = []
with open(filename) as bibtex_file:
bib_database = bibtexparser.load(bibtex_file, parser=parser)
for entry in bib_database.entries:
if 'ID' not in entry:
continue
title = entry['title']
author = entry['author']
resp.append(responses.BuildCompletionData(
entry['ID'],
"%s (%s)" % (title, author)
))
ret.extend(resp)
self._cached_data[filename] = resp
return ret
def _FindTarget(self):
"""
        Find BibTeX entries.
        Uses a proper BibTeX parser so that fields from each entry can be
        shown as help text in the YCM popup.
        If the bibtexparser module is not available, falls back to regexes
        that only extract the entry key.
"""
if nobibparser:
return self._FindBibEntriesRegex()
else:
return self._FindBibEntriesParser()
class LatexCompleter( Completer ):
"""
Completer for LaTeX that takes into account BibTex entries
for completion.
"""
def __init__( self, user_options ):
super( LatexCompleter, self ).__init__( user_options )
self.environment_completer = LatexSlave({'output' : r"\\(begin|end)",
'collect': r"\\begin\{(.*?)\}",
'target': "Env"})
self.ref_completer = LatexSlave({'output':r"\\[a-zA-Z]*ref",
'collect': r"\\\w*(?<!contents)label\{(.*?)\}",
'target': "Ref"})
self.bib_completer = BibTexSlave()
self.completers = [self.environment_completer, self.ref_completer,
self.bib_completer]
#self.logfile = open("/home/veesh/latexlog", "w")
def ShouldUseNowInner( self, request_data ):
cursor = request_data["column_codepoint"] - 1
match_start = request_data["start_codepoint"] - 1
line = request_data["line_value"]
should_use = False
line_splitted = line[ : match_start ]
line_left = line[ match_start : cursor ]
if match_start:
if line[match_start] == '\\':
return should_use
"""
self.logfile.write("line split: " + line_splitted + "\n")
self.logfile.write("line left: " + line_left + "\n")
self.logfile.write("full line: " + line + "\n")
self.logfile.write("\n")
"""
for x in self.completers:
if not should_use:
should_use = x.ShouldUse(line_splitted, request_data)
else:
x.ShouldUse(line_splitted, request_data)
#self.logfile.flush()
return should_use
def SupportedFiletypes( self ):
"""
Determines which vim filetypes we support
"""
return ['plaintex', 'tex']
def _GoToDefinition(self, request_data):
def find_end_of_command(line, match):
if match is None:
return -1
for i in range(match.start(), len(line)):
e = line[i]
if e == "}":
return i
return -1
line = utils.ToUnicode(request_data["line_value"])
        # Assumed fix: the original referenced an undefined self._ref_reg, so
        # match the \ref-style command (including its opening brace) inline.
        match = re.search(r"\\[a-zA-Z]*ref\{", line)
end_of_command = find_end_of_command(line, match)
if end_of_command == -1:
raise RuntimeError( 'Can\'t jump to definition or declaration: not implemented yet' )
else:
ref = line[match.end():end_of_command]
            # The goto labels are collected by the ref slave, not by us.
            if ref not in self.ref_completer._goto_labels:
                raise RuntimeError( 'Can\'t jump to definition or declaration: not implemented yet' )
            filename, line, col = self.ref_completer._goto_labels[ref]
return responses.BuildGoToResponse( filename, line, col )
def GetSubcommandsMap( self ):
return {
'GoToDefinition' : ( lambda self, request_data, args:
self._GoToDefinition( request_data ) ),
'GoToDeclaration' : ( lambda self, request_data, args:
self._GoToDefinition( request_data ) ),
'GoTo' : ( lambda self, request_data, args:
self._GoToDefinition( request_data ) ),
}
def GetDetailedDiagnostic( self, request_data ):
return responses.BuildDisplayMessageResponse(
self.DebugInfo(request_data))
def DebugInfo( self, request_data ):
"""
bib_dir = "Looking for *.bib in %s" % self._main_directory
cache = "Number of cached files: %i" % len(self._files)
hits = "Number of cache hits: %i" % self._d_cache_hits
return "%s\n%s\n%s" % (bib_dir, cache, hits)
"""
pass
def ComputeCandidatesInner( self, request_data ):
"""
Worker function executed by the asynchronous
completion thread.
"""
candidates = []
for i in self.completers:
candidates.extend(i.ProduceTargets())
        print(request_data['query'], file=sys.stderr)
return candidates
| {
"content_hash": "82bff70fa99b124d7fda9c23f497d843",
"timestamp": "",
"source": "github",
"line_count": 397,
"max_line_length": 123,
"avg_line_length": 33.36272040302267,
"alnum_prop": 0.5385428463571159,
"repo_name": "Cocophotos/vim-ycm-latex-semantic-completer",
"id": "5bd2fd668dc353682cf75f1f263277f2cee0464b",
"size": "13267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "latex_completer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14191"
}
],
"symlink_target": ""
} |
from os import getpid
from os.path import isfile, isdir, join
from static_aid import config, utils
from static_aid.DataExtractor import DataExtractor
def test_is_running():
extractor = DataExtractor()
utils.remove_file_or_dir(config.PID_FILE_PATH)
    assert not extractor.is_running(), "No PID file"
with open(config.PID_FILE_PATH, "w") as pid_file:
pid_file.write("")
    assert not extractor.is_running(), "Invalid PID number"
with open(config.PID_FILE_PATH, "w") as pid_file:
pid_file.write(str(getpid()))
    assert extractor.is_running(), "Current process"
def test_register_pid():
extractor = DataExtractor()
extractor.register_pid()
assert isfile(config.PID_FILE_PATH)
with open(config.PID_FILE_PATH, "r") as pid_file:
assert str(getpid()) in [l for l in pid_file]
def test_make_destinations():
for k in config.destinations:
utils.remove_file_or_dir(join(config.DATA_DIR, config.destinations[k]))
DataExtractor().make_destinations()
for k in config.destinations:
assert isdir(join(config.DATA_DIR, config.destinations[k]))
def test_get_last_export_time():
extractor = DataExtractor()
extractor.update = False
assert extractor.get_last_export_time() == 0
extractor.update = True
extractor.set_last_export_time(12345)
assert extractor.get_last_export_time() == 12345
def test_set_last_export_time():
extractor = DataExtractor(update=True)
extractor.set_last_export_time(54321)
assert extractor.get_last_export_time() == 54321
def teardown():
utils.remove_file_or_dir(config.PID_FILE_PATH)
| {
"content_hash": "65f13fb0a88a0aaa6cf7f0c9d8cf8edf",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 79,
"avg_line_length": 33.61224489795919,
"alnum_prop": 0.6939890710382514,
"repo_name": "helrond/staticAid",
"id": "9a32b7fd264fb37391694400d9b816c431cba8d3",
"size": "1671",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "static_aid/tests/test_data_extractor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "299498"
},
{
"name": "HTML",
"bytes": "35310"
},
{
"name": "JavaScript",
"bytes": "3149"
},
{
"name": "Python",
"bytes": "50413"
},
{
"name": "Shell",
"bytes": "2242"
}
],
"symlink_target": ""
} |
"""An I/O event loop for non-blocking sockets.
Typical applications will use a single `IOLoop` object, in the
`IOLoop.instance` singleton. The `IOLoop.start` method should usually
be called at the end of the ``main()`` function. Atypical applications may
use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
case.
In addition to I/O events, the `IOLoop` can also schedule time-based events.
`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import errno
import functools
import heapq
import itertools
import logging
import numbers
import os
import select
import sys
import threading
import time
import traceback
import math
from tornado.concurrent import TracebackFuture, is_future
from tornado.log import app_log, gen_log
from tornado.platform.auto import set_close_exec, Waker
from tornado import stack_context
from tornado.util import PY3, Configurable, errno_from_exception, timedelta_to_seconds
try:
import signal
except ImportError:
signal = None
if PY3:
import _thread as thread
else:
import thread
_POLL_TIMEOUT = 3600.0
class TimeoutError(Exception):
pass
class IOLoop(Configurable):
"""A level-triggered I/O loop.
We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
are available, or else we fall back on select(). If you are
implementing a system that needs to handle thousands of
simultaneous connections, you should use a system that supports
either ``epoll`` or ``kqueue``.
Example usage for a simple TCP server:
.. testcode::
import errno
import functools
import tornado.ioloop
import socket
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error as e:
if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
return
connection.setblocking(0)
handle_connection(connection, address)
if __name__ == '__main__':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", port))
sock.listen(128)
io_loop = tornado.ioloop.IOLoop.current()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
.. testoutput::
:hide:
By default, a newly-constructed `IOLoop` becomes the thread's current
`IOLoop`, unless there already is a current `IOLoop`. This behavior
can be controlled with the ``make_current`` argument to the `IOLoop`
constructor: if ``make_current=True``, the new `IOLoop` will always
try to become current and it raises an error if there is already a
current instance. If ``make_current=False``, the new `IOLoop` will
not try to become current.
.. versionchanged:: 4.2
Added the ``make_current`` keyword argument to the `IOLoop`
constructor.
"""
# Constants from the epoll module
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
# Our events map exactly to the epoll events
NONE = 0
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
# Global lock for creating global IOLoop instance
_instance_lock = threading.Lock()
_current = threading.local()
@staticmethod
def instance():
"""Returns a global `IOLoop` instance.
Most applications have a single, global `IOLoop` running on the
main thread. Use this method to get this instance from
another thread. In most other cases, it is better to use `current()`
to get the current thread's `IOLoop`.
"""
if not hasattr(IOLoop, "_instance"):
with IOLoop._instance_lock:
if not hasattr(IOLoop, "_instance"):
# New instance after double check
IOLoop._instance = IOLoop()
return IOLoop._instance
@staticmethod
def initialized():
"""Returns true if the singleton instance has been created."""
return hasattr(IOLoop, "_instance")
def install(self):
"""Installs this `IOLoop` object as the singleton instance.
This is normally not necessary as `instance()` will create
an `IOLoop` on demand, but you may want to call `install` to use
a custom subclass of `IOLoop`.
When using an `IOLoop` subclass, `install` must be called prior
to creating any objects that implicitly create their own
`IOLoop` (e.g., :class:`tornado.httpclient.AsyncHTTPClient`).
"""
assert not IOLoop.initialized()
IOLoop._instance = self
@staticmethod
def clear_instance():
"""Clear the global `IOLoop` instance.
.. versionadded:: 4.0
"""
if hasattr(IOLoop, "_instance"):
del IOLoop._instance
@staticmethod
def current(instance=True):
"""Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as
current by `make_current`, returns that instance. If there is
no current `IOLoop`, returns `IOLoop.instance()` (i.e. the
main thread's `IOLoop`, creating one if necessary) if ``instance``
is true.
In general you should use `IOLoop.current` as the default when
constructing an asynchronous object, and use `IOLoop.instance`
when you mean to communicate to the main thread from a different
one.
.. versionchanged:: 4.1
Added ``instance`` argument to control the fallback to
`IOLoop.instance()`.
"""
current = getattr(IOLoop._current, "instance", None)
if current is None and instance:
return IOLoop.instance()
return current
def make_current(self):
"""Makes this the `IOLoop` for the current thread.
An `IOLoop` automatically becomes current for its thread
when it is started, but it is sometimes useful to call
`make_current` explicitly before starting the `IOLoop`,
so that code run at startup time can find the right
instance.
.. versionchanged:: 4.1
An `IOLoop` created while there is no current `IOLoop`
will automatically become current.
"""
IOLoop._current.instance = self
@staticmethod
def clear_current():
IOLoop._current.instance = None
@classmethod
def configurable_base(cls):
return IOLoop
@classmethod
def configurable_default(cls):
if hasattr(select, "epoll"):
from tornado.platform.epoll import EPollIOLoop
return EPollIOLoop
if hasattr(select, "kqueue"):
# Python 2.6+ on BSD or Mac
from tornado.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from tornado.platform.select import SelectIOLoop
return SelectIOLoop
def initialize(self, make_current=None):
if make_current is None:
if IOLoop.current(instance=False) is None:
self.make_current()
elif make_current:
if IOLoop.current(instance=False) is not None:
raise RuntimeError("current IOLoop already exists")
self.make_current()
def close(self, all_fds=False):
"""Closes the `IOLoop`, freeing any resources used.
If ``all_fds`` is true, all file descriptors registered on the
IOLoop will be closed (not just the ones created by the
`IOLoop` itself).
Many applications will only use a single `IOLoop` that runs for the
entire lifetime of the process. In that case closing the `IOLoop`
is not necessary since everything will be cleaned up when the
process exits. `IOLoop.close` is provided mainly for scenarios
such as unit tests, which create and destroy a large number of
``IOLoops``.
An `IOLoop` must be completely stopped before it can be closed. This
means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
be allowed to return before attempting to call `IOLoop.close()`.
Therefore the call to `close` will usually appear just after
the call to `start` rather than near the call to `stop`.
.. versionchanged:: 3.1
If the `IOLoop` implementation supports non-integer objects
for "file descriptors", those objects will have their
           ``close`` method called when ``all_fds`` is true.
"""
raise NotImplementedError()
def add_handler(self, fd, handler, events):
"""Registers the given handler to receive the given events for ``fd``.
The ``fd`` argument may either be an integer file descriptor or
a file-like object with a ``fileno()`` method (and optionally a
``close()`` method, which may be called when the `IOLoop` is shut
down).
The ``events`` argument is a bitwise or of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def update_handler(self, fd, events):
"""Changes the events we listen for ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def remove_handler(self, fd):
"""Stop listening for events on ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def set_blocking_signal_threshold(self, seconds, action):
"""Sends a signal if the `IOLoop` is blocked for more than
        ``seconds`` seconds.
Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
platform.
The action parameter is a Python signal handler. Read the
documentation for the `signal` module for more information.
If ``action`` is None, the process will be killed if it is
blocked for too long.
"""
raise NotImplementedError()
def set_blocking_log_threshold(self, seconds):
"""Logs a stack trace if the `IOLoop` is blocked for more than
        ``seconds`` seconds.
Equivalent to ``set_blocking_signal_threshold(seconds,
self.log_stack)``
"""
self.set_blocking_signal_threshold(seconds, self.log_stack)
def log_stack(self, signal, frame):
"""Signal handler to log the stack trace of the current thread.
For use with `set_blocking_signal_threshold`.
"""
gen_log.warning('IOLoop blocked for %f seconds in\n%s',
self._blocking_signal_threshold,
''.join(traceback.format_stack(frame)))
def start(self):
"""Starts the I/O loop.
The loop will run until one of the callbacks calls `stop()`, which
will make the loop stop after the current event iteration completes.
"""
raise NotImplementedError()
def _setup_logging(self):
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any([logging.getLogger().handlers,
logging.getLogger('tornado').handlers,
logging.getLogger('tornado.application').handlers]):
logging.basicConfig()
def stop(self):
"""Stop the I/O loop.
If the event loop is not currently running, the next call to `start()`
will return immediately.
To use asynchronous methods from otherwise-synchronous code (such as
unit tests), you can start and stop the event loop like this::
ioloop = IOLoop()
async_method(ioloop=ioloop, callback=ioloop.stop)
ioloop.start()
``ioloop.start()`` will return after ``async_method`` has run
its callback, whether that callback was invoked before or
after ``ioloop.start``.
Note that even after `stop` has been called, the `IOLoop` is not
completely stopped until `IOLoop.start` has also returned.
Some work that was scheduled before the call to `stop` may still
be run before the `IOLoop` shuts down.
"""
raise NotImplementedError()
def run_sync(self, func, timeout=None):
"""Starts the `IOLoop`, runs the given function, and stops the loop.
The function must return either a yieldable object or
``None``. If the function returns a yieldable object, the
`IOLoop` will run until the yieldable is resolved (and
`run_sync()` will return the yieldable's result). If it raises
an exception, the `IOLoop` will stop and the exception will be
re-raised to the caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `TimeoutError` is raised.
This method is useful in conjunction with `tornado.gen.coroutine`
to allow asynchronous calls in a ``main()`` function::
@gen.coroutine
def main():
# do stuff...
if __name__ == '__main__':
IOLoop.current().run_sync(main)
.. versionchanged:: 4.3
Returning a non-``None``, non-yieldable value is now an error.
"""
future_cell = [None]
def run():
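            # Run func and capture its outcome in future_cell[0] as a Future:
            # yieldables are converted, None becomes an immediate result, and
            # exceptions (including non-yieldable returns) are stored as errors.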
try:
result = func()
if result is not None:
from tornado.gen import convert_yielded
result = convert_yielded(result)
except Exception:
future_cell[0] = TracebackFuture()
future_cell[0].set_exc_info(sys.exc_info())
else:
if is_future(result):
future_cell[0] = result
else:
future_cell[0] = TracebackFuture()
future_cell[0].set_result(result)
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
if not future_cell[0].done():
raise TimeoutError('Operation timed out after %s seconds' % timeout)
return future_cell[0].result()
def time(self):
"""Returns the current time according to the `IOLoop`'s clock.
The return value is a floating-point number relative to an
unspecified time in the past.
By default, the `IOLoop`'s time function is `time.time`. However,
it may be configured to use e.g. `time.monotonic` instead.
Calls to `add_timeout` that pass a number instead of a
`datetime.timedelta` should use this function to compute the
appropriate time, so they can work no matter what time function
is chosen.
"""
return time.time()
def add_timeout(self, deadline, callback, *args, **kwargs):
"""Runs the ``callback`` at the time ``deadline`` from the I/O loop.
Returns an opaque handle that may be passed to
`remove_timeout` to cancel.
``deadline`` may be a number denoting a time (on the same
scale as `IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time. Since Tornado 4.0, `call_later` is a more
convenient alternative for the relative case since it does not
require a timedelta object.
Note that it is not safe to call `add_timeout` from other threads.
Instead, you must use `add_callback` to transfer control to the
`IOLoop`'s thread, and then call `add_timeout` from there.
Subclasses of IOLoop must implement either `add_timeout` or
`call_at`; the default implementations of each will call
the other. `call_at` is usually easier to implement, but
subclasses that wish to maintain compatibility with Tornado
versions prior to 4.0 must use `add_timeout` instead.
.. versionchanged:: 4.0
Now passes through ``*args`` and ``**kwargs`` to the callback.
"""
if isinstance(deadline, numbers.Real):
return self.call_at(deadline, callback, *args, **kwargs)
elif isinstance(deadline, datetime.timedelta):
return self.call_at(self.time() + timedelta_to_seconds(deadline),
callback, *args, **kwargs)
else:
raise TypeError("Unsupported deadline %r" % deadline)
def call_later(self, delay, callback, *args, **kwargs):
"""Runs the ``callback`` after ``delay`` seconds have passed.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.call_at(self.time() + delay, callback, *args, **kwargs)
def call_at(self, when, callback, *args, **kwargs):
"""Runs the ``callback`` at the absolute time designated by ``when``.
``when`` must be a number using the same reference point as
`IOLoop.time`.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.add_timeout(when, callback, *args, **kwargs)
def remove_timeout(self, timeout):
"""Cancels a pending timeout.
The argument is a handle as returned by `add_timeout`. It is
safe to call `remove_timeout` even if the callback has already
been run.
"""
raise NotImplementedError()
def add_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
It is safe to call this method from any thread at any time,
except from a signal handler. Note that this is the **only**
method in `IOLoop` that makes this thread-safety guarantee; all
other interaction with the `IOLoop` must be done from that
`IOLoop`'s thread. `add_callback()` may be used to transfer
control from other threads to the `IOLoop`'s thread.
To add a callback from a signal handler, see
`add_callback_from_signal`.
"""
raise NotImplementedError()
def add_callback_from_signal(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
Safe for use from a Python signal handler; should not be used
otherwise.
Callbacks added with this method will be run without any
`.stack_context`, to avoid picking up the context of the function
that was interrupted by the signal.
"""
raise NotImplementedError()
def spawn_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next IOLoop iteration.
Unlike all other callback-related methods on IOLoop,
``spawn_callback`` does not associate the callback with its caller's
``stack_context``, so it is suitable for fire-and-forget callbacks
that should not interfere with the caller.
.. versionadded:: 4.0
"""
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
def add_future(self, future, callback):
"""Schedules a callback on the ``IOLoop`` when the given
`.Future` is finished.
The callback is invoked with one argument, the
`.Future`.
"""
assert is_future(future)
callback = stack_context.wrap(callback)
future.add_done_callback(
lambda future: self.add_callback(callback, future))
def _run_callback(self, callback):
"""Runs a callback with error handling.
For use in subclasses.
"""
try:
ret = callback()
if ret is not None:
from tornado import gen
# Functions that return Futures typically swallow all
# exceptions and store them in the Future. If a Future
# makes it out to the IOLoop, ensure its exception (if any)
# gets logged too.
try:
ret = gen.convert_yielded(ret)
except gen.BadYieldError:
# It's not unusual for add_callback to be used with
# methods returning a non-None and non-yieldable
# result, which should just be ignored.
pass
else:
self.add_future(ret, self._discard_future_result)
except Exception:
self.handle_callback_exception(callback)
def _discard_future_result(self, future):
"""Avoid unhandled-exception warnings from spawned coroutines."""
future.result()
def handle_callback_exception(self, callback):
"""This method is called whenever a callback run by the `IOLoop`
throws an exception.
By default simply logs the exception as an error. Subclasses
may override this method to customize reporting of exceptions.
The exception itself is not passed explicitly, but is available
in `sys.exc_info`.
"""
app_log.error("Exception in callback %r", callback, exc_info=True)
def split_fd(self, fd):
"""Returns an (fd, obj) pair from an ``fd`` parameter.
We accept both raw file descriptors and file-like objects as
input to `add_handler` and related methods. When a file-like
object is passed, we must retain the object itself so we can
close it correctly when the `IOLoop` shuts down, but the
poller interfaces favor file descriptors (they will accept
file-like objects and call ``fileno()`` for you, but they
always return the descriptor itself).
This method is provided for use by `IOLoop` subclasses and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
return fd.fileno(), fd
except AttributeError:
return fd, fd
def close_fd(self, fd):
"""Utility method to close an ``fd``.
If ``fd`` is a file-like object, we close it directly; otherwise
we use `os.close`.
This method is provided for use by `IOLoop` subclasses (in
implementations of ``IOLoop.close(all_fds=True)`` and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
try:
fd.close()
except AttributeError:
os.close(fd)
except OSError:
pass
class PollIOLoop(IOLoop):
"""Base class for IOLoops built around a select-like function.
For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
(Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
`tornado.platform.select.SelectIOLoop` (all platforms).
"""
def initialize(self, impl, time_func=None, **kwargs):
super(PollIOLoop, self).initialize(**kwargs)
self._impl = impl
if hasattr(self._impl, 'fileno'):
set_close_exec(self._impl.fileno())
self.time_func = time_func or time.time
self._handlers = {}
self._events = {}
self._callbacks = []
self._callback_lock = threading.Lock()
self._timeouts = []
self._cancellations = 0
self._running = False
self._stopped = False
self._closing = False
self._thread_ident = None
self._blocking_signal_threshold = None
self._timeout_counter = itertools.count()
# Create a pipe that we send bogus data to when we want to wake
# the I/O loop when it is idle
self._waker = Waker()
self.add_handler(self._waker.fileno(),
lambda fd, events: self._waker.consume(),
self.READ)
def close(self, all_fds=False):
with self._callback_lock:
self._closing = True
self.remove_handler(self._waker.fileno())
if all_fds:
for fd, handler in self._handlers.values():
self.close_fd(fd)
self._waker.close()
self._impl.close()
self._callbacks = None
self._timeouts = None
def add_handler(self, fd, handler, events):
fd, obj = self.split_fd(fd)
self._handlers[fd] = (obj, stack_context.wrap(handler))
self._impl.register(fd, events | self.ERROR)
def update_handler(self, fd, events):
fd, obj = self.split_fd(fd)
self._impl.modify(fd, events | self.ERROR)
def remove_handler(self, fd):
fd, obj = self.split_fd(fd)
self._handlers.pop(fd, None)
self._events.pop(fd, None)
try:
self._impl.unregister(fd)
except Exception:
gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
def set_blocking_signal_threshold(self, seconds, action):
if not hasattr(signal, "setitimer"):
gen_log.error("set_blocking_signal_threshold requires a signal module "
"with the setitimer method")
return
self._blocking_signal_threshold = seconds
if seconds is not None:
signal.signal(signal.SIGALRM,
action if action is not None else signal.SIG_DFL)
def start(self):
if self._running:
raise RuntimeError("IOLoop is already running")
self._setup_logging()
if self._stopped:
self._stopped = False
return
old_current = getattr(IOLoop._current, "instance", None)
IOLoop._current.instance = self
self._thread_ident = thread.get_ident()
self._running = True
# signal.set_wakeup_fd closes a race condition in event loops:
# a signal may arrive at the beginning of select/poll/etc
# before it goes into its interruptible sleep, so the signal
# will be consumed without waking the select. The solution is
# for the (C, synchronous) signal handler to write to a pipe,
# which will then be seen by select.
#
# In python's signal handling semantics, this only matters on the
# main thread (fortunately, set_wakeup_fd only works on the main
# thread and will raise a ValueError otherwise).
#
# If someone has already set a wakeup fd, we don't want to
# disturb it. This is an issue for twisted, which does its
# SIGCHLD processing in response to its own wakeup fd being
# written to. As long as the wakeup fd is registered on the IOLoop,
# the loop will still wake up and everything should work.
old_wakeup_fd = None
if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
# requires python 2.6+, unix. set_wakeup_fd exists but crashes
# the python process on windows.
try:
old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
if old_wakeup_fd != -1:
# Already set, restore previous value. This is a little racy,
# but there's no clean get_wakeup_fd and in real use the
# IOLoop is just started once at the beginning.
signal.set_wakeup_fd(old_wakeup_fd)
old_wakeup_fd = None
except ValueError:
# Non-main thread, or the previous value of wakeup_fd
# is no longer valid.
old_wakeup_fd = None
try:
while True:
# Prevent IO event starvation by delaying new callbacks
# to the next iteration of the event loop.
with self._callback_lock:
callbacks = self._callbacks
self._callbacks = []
# Add any timeouts that have come due to the callback list.
# Do not run anything until we have determined which ones
# are ready, so timeouts that call add_timeout cannot
# schedule anything in this iteration.
due_timeouts = []
if self._timeouts:
now = self.time()
while self._timeouts:
if self._timeouts[0].callback is None:
# The timeout was cancelled. Note that the
# cancellation check is repeated below for timeouts
# that are cancelled by another timeout or callback.
heapq.heappop(self._timeouts)
self._cancellations -= 1
elif self._timeouts[0].deadline <= now:
due_timeouts.append(heapq.heappop(self._timeouts))
else:
break
if (self._cancellations > 512 and
self._cancellations > (len(self._timeouts) >> 1)):
# Clean up the timeout queue when it gets large and it's
# more than half cancellations.
self._cancellations = 0
self._timeouts = [x for x in self._timeouts
if x.callback is not None]
heapq.heapify(self._timeouts)
for callback in callbacks:
self._run_callback(callback)
for timeout in due_timeouts:
if timeout.callback is not None:
self._run_callback(timeout.callback)
# Closures may be holding on to a lot of memory, so allow
# them to be freed before we go into our poll wait.
callbacks = callback = due_timeouts = timeout = None
if self._callbacks:
# If any callbacks or timeouts called add_callback,
# we don't want to wait in poll() before we run them.
poll_timeout = 0.0
elif self._timeouts:
# If there are any timeouts, schedule the first one.
# Use self.time() instead of 'now' to account for time
# spent running callbacks.
poll_timeout = self._timeouts[0].deadline - self.time()
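                    # Clamp to [0, _POLL_TIMEOUT]: a deadline already in the
                    # past polls without blocking, while a distant one still
                    # wakes the loop within the hour.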
poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT))
else:
# No timeouts and no callbacks, so use the default.
poll_timeout = _POLL_TIMEOUT
if not self._running:
break
if self._blocking_signal_threshold is not None:
# clear alarm so it doesn't fire while poll is waiting for
# events.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
try:
event_pairs = self._impl.poll(poll_timeout)
except Exception as e:
# Depending on python version and IOLoop implementation,
# different exception types may be thrown and there are
# two ways EINTR might be signaled:
# * e.errno == errno.EINTR
# * e.args is like (errno.EINTR, 'Interrupted system call')
if errno_from_exception(e) == errno.EINTR:
continue
else:
raise
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL,
self._blocking_signal_threshold, 0)
# Pop one fd at a time from the set of pending fds and run
# its handler. Since that handler may perform actions on
# other file descriptors, there may be reentrant calls to
# this IOLoop that modify self._events
self._events.update(event_pairs)
while self._events:
fd, events = self._events.popitem()
try:
fd_obj, handler_func = self._handlers[fd]
handler_func(fd_obj, events)
except (OSError, IOError) as e:
if errno_from_exception(e) == errno.EPIPE:
# Happens when the client closes the connection
pass
else:
self.handle_callback_exception(self._handlers.get(fd))
except Exception:
self.handle_callback_exception(self._handlers.get(fd))
fd_obj = handler_func = None
finally:
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
IOLoop._current.instance = old_current
if old_wakeup_fd is not None:
signal.set_wakeup_fd(old_wakeup_fd)
def stop(self):
self._running = False
self._stopped = True
self._waker.wake()
def time(self):
return self.time_func()
def call_at(self, deadline, callback, *args, **kwargs):
timeout = _Timeout(
deadline,
functools.partial(stack_context.wrap(callback), *args, **kwargs),
self)
heapq.heappush(self._timeouts, timeout)
return timeout
def remove_timeout(self, timeout):
# Removing from a heap is complicated, so just leave the defunct
# timeout object in the queue (see discussion in
# http://docs.python.org/library/heapq.html).
# If this turns out to be a problem, we could add a garbage
# collection pass whenever there are too many dead timeouts.
timeout.callback = None
self._cancellations += 1
def add_callback(self, callback, *args, **kwargs):
if thread.get_ident() != self._thread_ident:
# If we're not on the IOLoop's thread, we need to synchronize
# with other threads, or waking logic will induce a race.
with self._callback_lock:
if self._closing:
return
list_empty = not self._callbacks
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
if list_empty:
# If we're not in the IOLoop's thread, and we added the
# first callback to an empty list, we may need to wake it
# up (it may wake up on its own, but an occasional extra
# wake is harmless). Waking up a polling IOLoop is
# relatively expensive, so we try to avoid it when we can.
self._waker.wake()
else:
if self._closing:
return
# If we're on the IOLoop's thread, we don't need the lock,
# since we don't need to wake anyone, just add the
# callback. Blindly insert into self._callbacks. This is
# safe even from signal handlers because the GIL makes
# list.append atomic. One subtlety is that if the signal
# is interrupting another thread holding the
# _callback_lock block in IOLoop.start, we may modify
# either the old or new version of self._callbacks, but
# either way will work.
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
def add_callback_from_signal(self, callback, *args, **kwargs):
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ['deadline', 'callback', 'tdeadline']
def __init__(self, deadline, callback, io_loop):
if not isinstance(deadline, numbers.Real):
raise TypeError("Unsupported deadline %r" % deadline)
self.deadline = deadline
self.callback = callback
self.tdeadline = (deadline, next(io_loop._timeout_counter))
    # Comparison methods to sort by deadline, with a per-loop counter as
    # a tiebreaker to guarantee a consistent ordering among equal
    # deadlines. The heapq module uses __le__ in python2.5, and __lt__
    # in 2.6+ (sort() and most other comparisons use __lt__).
def __lt__(self, other):
return self.tdeadline < other.tdeadline
def __le__(self, other):
return self.tdeadline <= other.tdeadline
class PeriodicCallback(object):
"""Schedules the given callback to be called periodically.
The callback is called every ``callback_time`` milliseconds.
Note that the timeout is given in milliseconds, while most other
time-related functions in Tornado use seconds.
If the callback runs for longer than ``callback_time`` milliseconds,
subsequent invocations will be skipped to get back on schedule.
`start` must be called after the `PeriodicCallback` is created.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def __init__(self, callback, callback_time, io_loop=None):
self.callback = callback
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self.io_loop = io_loop or IOLoop.current()
self._running = False
self._timeout = None
def start(self):
"""Starts the timer."""
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer."""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def is_running(self):
"""Return True if this `.PeriodicCallback` has been started.
.. versionadded:: 4.1
"""
return self._running
def _run(self):
if not self._running:
return
try:
return self.callback()
except Exception:
self.io_loop.handle_callback_exception(self.callback)
finally:
self._schedule_next()
def _schedule_next(self):
if self._running:
current_time = self.io_loop.time()
if self._next_timeout <= current_time:
callback_time_sec = self.callback_time / 1000.0
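                # Skip forward to the next whole multiple of the period after
                # current_time; any missed invocations in between are dropped.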
self._next_timeout += (math.floor((current_time - self._next_timeout) /
callback_time_sec) + 1) * callback_time_sec
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
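# A minimal usage sketch (assuming a running IOLoop and a callable `poll`):
#   cb = PeriodicCallback(poll, 500)   # fire roughly every 500 ms
#   cb.start()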
| {
"content_hash": "cf74d9d82f12372774369b6b946c1b86",
"timestamp": "",
"source": "github",
"line_count": 1044,
"max_line_length": 93,
"avg_line_length": 39.23180076628353,
"alnum_prop": 0.5916304507056008,
"repo_name": "zguangyu/tornado",
"id": "d61831766586cebe2c7a3e228caf73816050cae1",
"size": "41555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tornado/ioloop.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1078"
},
{
"name": "CSS",
"bytes": "7736"
},
{
"name": "HTML",
"bytes": "12434"
},
{
"name": "JavaScript",
"bytes": "6088"
},
{
"name": "Python",
"bytes": "1538229"
},
{
"name": "Ruby",
"bytes": "1428"
},
{
"name": "Shell",
"bytes": "4070"
}
],
"symlink_target": ""
} |
import functools
import warnings
import paddle
from paddle.distribution.beta import Beta
from paddle.distribution.categorical import Categorical
from paddle.distribution.dirichlet import Dirichlet
from paddle.distribution.distribution import Distribution
from paddle.distribution.exponential_family import ExponentialFamily
from paddle.distribution.normal import Normal
from paddle.distribution.lognormal import LogNormal
from paddle.distribution.uniform import Uniform
from paddle.distribution.laplace import Laplace
from paddle.fluid.framework import _non_static_mode
__all__ = ["register_kl", "kl_divergence"]
_REGISTER_TABLE = {}
def kl_divergence(p, q):
r"""
    Kullback-Leibler divergence between distributions p and q.
.. math::
KL(p||q) = \int p(x)log\frac{p(x)}{q(x)} \mathrm{d}x
Args:
p (Distribution): ``Distribution`` object. Inherits from the Distribution Base class.
q (Distribution): ``Distribution`` object. Inherits from the Distribution Base class.
Returns:
        Tensor, Batchwise KL-divergence between distributions p and q.
Examples:
.. code-block:: python
import paddle
p = paddle.distribution.Beta(alpha=0.5, beta=0.5)
q = paddle.distribution.Beta(alpha=0.3, beta=0.7)
print(paddle.distribution.kl_divergence(p, q))
# Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [0.21193528])
"""
return _dispatch(type(p), type(q))(p, q)
def register_kl(cls_p, cls_q):
"""Decorator for register a KL divergence implemention function.
The ``kl_divergence(p, q)`` function will search concrete implemention
functions registered by ``register_kl``, according to multi-dispatch pattern.
If an implemention function is found, it will return the result, otherwise,
it will raise ``NotImplementError`` exception. Users can register
implemention funciton by the decorator.
Args:
cls_p (Distribution): The Distribution type of Instance p. Subclass derived from ``Distribution``.
cls_q (Distribution): The Distribution type of Instance q. Subclass derived from ``Distribution``.
Examples:
.. code-block:: python
import paddle
@paddle.distribution.register_kl(paddle.distribution.Beta, paddle.distribution.Beta)
def kl_beta_beta():
pass # insert implementation here
"""
if not issubclass(cls_p, Distribution) or not issubclass(
cls_q, Distribution
):
raise TypeError('cls_p and cls_q must be subclass of Distribution')
def decorator(f):
_REGISTER_TABLE[cls_p, cls_q] = f
return f
return decorator
def _dispatch(cls_p, cls_q):
"""Multiple dispatch into concrete implement function."""
    # find all matching (superclass of p, superclass of q) pairs
matchs = [
(super_p, super_q)
for super_p, super_q in _REGISTER_TABLE
if issubclass(cls_p, super_p) and issubclass(cls_q, super_q)
]
if not matchs:
raise NotImplementedError
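    # Disambiguate by specificity: take the most specific registered pair
    # when ordered by p first, and again when ordered by q first; warn if
    # the two orderings pick different implementations.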
left_p, left_q = min(_Compare(*m) for m in matchs).classes
right_p, right_q = min(_Compare(*reversed(m)) for m in matchs).classes
if _REGISTER_TABLE[left_p, left_q] is not _REGISTER_TABLE[right_p, right_q]:
warnings.warn(
'Ambiguous kl_divergence({}, {}). Please register_kl({}, {})'.format(
cls_p.__name__,
cls_q.__name__,
left_p.__name__,
right_q.__name__,
),
RuntimeWarning,
)
return _REGISTER_TABLE[left_p, left_q]
@functools.total_ordering
class _Compare:
def __init__(self, *classes):
self.classes = classes
def __eq__(self, other):
return self.classes == other.classes
def __le__(self, other):
for cls_x, cls_y in zip(self.classes, other.classes):
if not issubclass(cls_x, cls_y):
return False
if cls_x is not cls_y:
break
return True
@register_kl(Beta, Beta)
def _kl_beta_beta(p, q):
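    # Closed form, with B the beta function and psi the digamma function:
    #   KL(p||q) = ln B(a_q, b_q) - ln B(a_p, b_p)
    #              + (a_p - a_q) psi(a_p) + (b_p - b_q) psi(b_p)
    #              + ((a_q + b_q) - (a_p + b_p)) psi(a_p + b_p)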
return (
(q.alpha.lgamma() + q.beta.lgamma() + (p.alpha + p.beta).lgamma())
- (p.alpha.lgamma() + p.beta.lgamma() + (q.alpha + q.beta).lgamma())
+ ((p.alpha - q.alpha) * p.alpha.digamma())
+ ((p.beta - q.beta) * p.beta.digamma())
+ (
((q.alpha + q.beta) - (p.alpha + p.beta))
* (p.alpha + p.beta).digamma()
)
)
@register_kl(Dirichlet, Dirichlet)
def _kl_dirichlet_dirichlet(p, q):
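    # Closed form, with psi the digamma function and a the concentrations:
    #   KL(p||q) = ln Gamma(sum a_p) - ln Gamma(sum a_q)
    #              - sum_i [ln Gamma(a_p_i) - ln Gamma(a_q_i)]
    #              + sum_i (a_p_i - a_q_i) (psi(a_p_i) - psi(sum a_p))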
return (
(p.concentration.sum(-1).lgamma() - q.concentration.sum(-1).lgamma())
- ((p.concentration.lgamma() - q.concentration.lgamma()).sum(-1))
+ (
(
(p.concentration - q.concentration)
* (
p.concentration.digamma()
- p.concentration.sum(-1).digamma().unsqueeze(-1)
)
).sum(-1)
)
)
@register_kl(Categorical, Categorical)
def _kl_categorical_categorical(p, q):
return p.kl_divergence(q)
@register_kl(Normal, Normal)
def _kl_normal_normal(p, q):
return p.kl_divergence(q)
@register_kl(Uniform, Uniform)
def _kl_uniform_uniform(p, q):
return p.kl_divergence(q)
@register_kl(Laplace, Laplace)
def _kl_laplace_laplace(p, q):
return p.kl_divergence(q)
@register_kl(ExponentialFamily, ExponentialFamily)
def _kl_expfamily_expfamily(p, q):
"""Compute kl-divergence using `Bregman divergences <https://www.lix.polytechnique.fr/~nielsen/EntropyEF-ICIP2010.pdf>`_"""
    if type(p) is not type(q):
raise NotImplementedError
p_natural_params = []
for param in p._natural_parameters:
param = param.detach()
param.stop_gradient = False
p_natural_params.append(param)
q_natural_params = q._natural_parameters
p_log_norm = p._log_normalizer(*p_natural_params)
try:
if _non_static_mode():
p_grads = paddle.grad(
p_log_norm, p_natural_params, create_graph=True
)
else:
p_grads = paddle.static.gradients(p_log_norm, p_natural_params)
except RuntimeError as e:
raise TypeError(
"Cann't compute kl_divergence({cls_p}, {cls_q}) use bregman divergence. Please register_kl({cls_p}, {cls_q}).".format(
cls_p=type(p).__name__, cls_q=type(q).__name__
)
) from e
kl = q._log_normalizer(*q_natural_params) - p_log_norm
for p_param, q_param, p_grad in zip(
p_natural_params, q_natural_params, p_grads
):
term = (q_param - p_param) * p_grad
kl -= _sum_rightmost(term, len(q.event_shape))
return kl
@register_kl(LogNormal, LogNormal)
def _kl_lognormal_lognormal(p, q):
return p._base.kl_divergence(q._base)
def _sum_rightmost(value, n):
return value.sum(list(range(-n, 0))) if n > 0 else value
| {
"content_hash": "7faf615ec1e9deb1469c0cc0fb3ee502",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 130,
"avg_line_length": 30.38695652173913,
"alnum_prop": 0.6126770639576478,
"repo_name": "PaddlePaddle/Paddle",
"id": "cf8857629893becfaee8cb69136cdd8142fb3132",
"size": "7599",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/distribution/kl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36848680"
},
{
"name": "CMake",
"bytes": "902619"
},
{
"name": "Cuda",
"bytes": "5227207"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36203874"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553177"
}
],
"symlink_target": ""
} |
from filer.test_settings import *
| {
"content_hash": "778f546c9f71ed9491db5843892cc670",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 33,
"avg_line_length": 34,
"alnum_prop": 0.7941176470588235,
"repo_name": "pbs/django-filer",
"id": "781dbc486d25114e22e96a972d2b8ba8b13ec1ac",
"size": "57",
"binary": false,
"copies": "1",
"ref": "refs/heads/master_pbs",
"path": "test_settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8665"
},
{
"name": "HTML",
"bytes": "76095"
},
{
"name": "JavaScript",
"bytes": "47992"
},
{
"name": "Python",
"bytes": "757954"
},
{
"name": "Ruby",
"bytes": "799"
},
{
"name": "SCSS",
"bytes": "204269"
},
{
"name": "Shell",
"bytes": "1462"
}
],
"symlink_target": ""
} |
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
# Nelson Liu <nelson@nelsonliu.me>
#
# License: BSD 3 clause
import numbers
import warnings
import copy
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import clone
from ..base import RegressorMixin
from ..base import is_classifier
from ..base import MultiOutputMixin
from ..utils import Bunch
from ..utils import check_random_state
from ..utils.validation import _check_sample_weight
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
from ..utils.validation import _deprecate_positional_args
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from ._tree import _build_pruned_tree_ccp
from ._tree import ccp_pruning_path
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini,
"entropy": _criterion.Entropy}
# TODO: Remove "mse" in version 1.2.
CRITERIA_REG = {"squared_error": _criterion.MSE,
"mse": _criterion.MSE,
"friedman_mse": _criterion.FriedmanMSE,
"mae": _criterion.MAE,
"poisson": _criterion.Poisson}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(MultiOutputMixin, BaseEstimator, metaclass=ABCMeta):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
@_deprecate_positional_args
def __init__(self, *,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
min_impurity_decrease,
min_impurity_split,
class_weight=None,
ccp_alpha=0.0):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.random_state = random_state
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
self.class_weight = class_weight
self.ccp_alpha = ccp_alpha
def get_depth(self):
"""Return the depth of the decision tree.
The depth of a tree is the maximum distance between the root
and any leaf.
Returns
-------
self.tree_.max_depth : int
The maximum depth of the tree.
"""
check_is_fitted(self)
return self.tree_.max_depth
def get_n_leaves(self):
"""Return the number of leaves of the decision tree.
Returns
-------
self.tree_.n_leaves : int
Number of leaves.
"""
check_is_fitted(self)
return self.tree_.n_leaves
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted="deprecated"):
random_state = check_random_state(self.random_state)
if self.ccp_alpha < 0.0:
raise ValueError("ccp_alpha must be greater than or equal to 0")
if check_input:
# Need to validate separately here.
            # We can't pass multi_output=True because that would allow y to be
# csr.
check_X_params = dict(dtype=DTYPE, accept_sparse="csc")
check_y_params = dict(ensure_2d=False, dtype=None)
X, y = self._validate_data(X, y,
validate_separately=(check_X_params,
check_y_params))
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
if self.criterion == "poisson":
if np.any(y < 0):
raise ValueError("Some value(s) of y are negative which is"
" not allowed for Poisson regression.")
if np.sum(y) <= 0:
raise ValueError("Sum of y is not positive which is "
"necessary for Poisson regression.")
# Determine output settings
n_samples, self.n_features_ = X.shape
self.n_features_in_ = self.n_features_
is_classification = is_classifier(self)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve data contiguity, as opposed to
            # y[:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = (np.iinfo(np.int32).max if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, numbers.Integral):
if not 1 <= self.min_samples_leaf:
raise ValueError("min_samples_leaf must be at least 1 "
"or in (0, 0.5], got %s"
% self.min_samples_leaf)
min_samples_leaf = self.min_samples_leaf
else: # float
if not 0. < self.min_samples_leaf <= 0.5:
raise ValueError("min_samples_leaf must be at least 1 "
"or in (0, 0.5], got %s"
% self.min_samples_leaf)
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, numbers.Integral):
if not 2 <= self.min_samples_split:
raise ValueError("min_samples_split must be an integer "
"greater than 1 or a float in (0.0, 1.0]; "
"got the integer %s"
% self.min_samples_split)
min_samples_split = self.min_samples_split
else: # float
if not 0. < self.min_samples_split <= 1.:
raise ValueError("min_samples_split must be an integer "
"greater than 1 or a float in (0.0, 1.0]; "
"got the float %s"
% self.min_samples_split)
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, str):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError("Invalid value for max_features. "
"Allowed string values are 'auto', "
"'sqrt' or 'log2'.")
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, numbers.Integral):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, numbers.Integral):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either None "
"or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if sample_weight is None:
min_weight_leaf = (self.min_weight_fraction_leaf *
n_samples)
else:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
min_impurity_split = self.min_impurity_split
if min_impurity_split is not None:
warnings.warn(
"The min_impurity_split parameter is deprecated. Its default "
"value has changed from 1e-7 to 0 in version 0.23, and it "
"will be removed in 1.0 (renaming of 0.25). Use the "
"min_impurity_decrease parameter instead.",
FutureWarning
)
if min_impurity_split < 0.:
raise ValueError("min_impurity_split must be greater than "
"or equal to 0")
else:
min_impurity_split = 0
if self.min_impurity_decrease < 0.:
raise ValueError("min_impurity_decrease must be greater than "
"or equal to 0")
# TODO: Remove in 1.1
if X_idx_sorted != "deprecated":
warnings.warn(
"The parameter 'X_idx_sorted' is deprecated and has no "
"effect. It will be removed in 1.1 (renaming of 0.26). You "
"can suppress this warning by not passing any value to the "
"'X_idx_sorted' parameter.",
FutureWarning
)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_,
n_samples)
# TODO: Remove in v1.2
if self.criterion == "mse":
warnings.warn(
"Criterion 'mse' was deprecated in v1.0 and will be "
"removed in version 1.2. Use `criterion='squared_error'` "
"which is equivalent.",
FutureWarning
)
else:
# Make a deepcopy in case the criterion has mutable attributes that
# might be shared and modified concurrently during parallel fitting
criterion = copy.deepcopy(criterion)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state)
if is_classifier(self):
self.tree_ = Tree(self.n_features_,
self.n_classes_, self.n_outputs_)
else:
self.tree_ = Tree(self.n_features_,
                              # TODO: tree shouldn't need this in this case
np.array([1] * self.n_outputs_, dtype=np.intp),
self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
self.min_impurity_decrease,
min_impurity_split)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes,
self.min_impurity_decrease,
min_impurity_split)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1 and is_classifier(self):
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
self._prune_tree()
return self
def _validate_X_predict(self, X, check_input):
"""Validate the training data on predict (probabilities)."""
if check_input:
X = self._validate_data(X, dtype=DTYPE, accept_sparse="csr",
reset=False)
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
else:
# The number of features is checked regardless of `check_input`
self._check_n_features(X, reset=False)
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            The predicted classes, or the predicted values.
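        Examples
        --------
        A minimal illustrative sketch; the exact output depends on the
        fitted tree and is not verified here.
        >>> from sklearn.tree import DecisionTreeClassifier
        >>> clf = DecisionTreeClassifier(random_state=0).fit([[0], [1]], [0, 1])
        >>> clf.predict([[0.2]])  # doctest: +SKIP
        array([0])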
"""
check_is_fitted(self)
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if is_classifier(self):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
class_type = self.classes_[0].dtype
predictions = np.zeros((n_samples, self.n_outputs_),
dtype=class_type)
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""Return the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array-like of shape (n_samples,)
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
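        Examples
        --------
        An illustrative sketch; leaf indices depend on the learned tree
        structure, so the output shown is indicative only.
        >>> from sklearn.tree import DecisionTreeClassifier
        >>> clf = DecisionTreeClassifier(random_state=0).fit([[0], [1]], [0, 1])
        >>> clf.apply([[0], [1]])  # doctest: +SKIP
        array([1, 2])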
"""
check_is_fitted(self)
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree.
.. versionadded:: 0.18
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
indicator : sparse matrix of shape (n_samples, n_nodes)
            Return a node indicator CSR matrix in which non-zero elements
            indicate that the corresponding sample passes through the node.
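        Examples
        --------
        A sketch of how the indicator matrix is typically inspected; node
        indices are specific to the fitted tree and indicative only.
        >>> from sklearn.tree import DecisionTreeClassifier
        >>> clf = DecisionTreeClassifier(random_state=0).fit([[0], [1]], [0, 1])
        >>> clf.decision_path([[0]]).toarray()  # doctest: +SKIP
        array([[1, 1, 0]])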
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
def _prune_tree(self):
"""Prune tree using Minimal Cost-Complexity Pruning."""
check_is_fitted(self)
if self.ccp_alpha < 0.0:
raise ValueError("ccp_alpha must be greater than or equal to 0")
if self.ccp_alpha == 0.0:
return
# build pruned tree
if is_classifier(self):
n_classes = np.atleast_1d(self.n_classes_)
pruned_tree = Tree(self.n_features_, n_classes, self.n_outputs_)
else:
pruned_tree = Tree(self.n_features_,
# TODO: the tree shouldn't need this param
np.array([1] * self.n_outputs_, dtype=np.intp),
self.n_outputs_)
_build_pruned_tree_ccp(pruned_tree, self.tree_, self.ccp_alpha)
self.tree_ = pruned_tree
def cost_complexity_pruning_path(self, X, y, sample_weight=None):
"""Compute the pruning path during Minimal Cost-Complexity Pruning.
See :ref:`minimal_cost_complexity_pruning` for details on the pruning
process.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels) as integers or strings.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. Splits are also
ignored if they would result in any single class carrying a
negative weight in either child node.
Returns
-------
ccp_path : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
ccp_alphas : ndarray
                Effective alphas of subtrees during pruning.
impurities : ndarray
Sum of the impurities of the subtree leaves for the
corresponding alpha value in ``ccp_alphas``.
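        Examples
        --------
        A sketch of the usual workflow, assuming training data ``X`` and
        ``y`` are already defined; array contents are indicative only.
        >>> from sklearn.tree import DecisionTreeClassifier
        >>> clf = DecisionTreeClassifier(random_state=0)
        >>> path = clf.cost_complexity_pruning_path(X, y)  # doctest: +SKIP
        >>> clf.set_params(ccp_alpha=path.ccp_alphas[-2]).fit(X, y)  # doctest: +SKIP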
"""
est = clone(self).set_params(ccp_alpha=0.0)
est.fit(X, y, sample_weight=sample_weight)
return Bunch(**ccp_pruning_path(est.tree_))
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
Returns
-------
feature_importances_ : ndarray of shape (n_features,)
Normalized total reduction of criteria by feature
(Gini importance).
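        Examples
        --------
        A short sketch, assuming an already fitted estimator ``clf``; the
        values are indicative only.
        >>> import numpy as np
        >>> importances = clf.feature_importances_  # doctest: +SKIP
        >>> ranking = np.argsort(importances)[::-1]  # doctest: +SKIP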
"""
check_is_fitted(self)
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(ClassifierMixin, BaseDecisionTree):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : {"gini", "entropy"}, default="gini"
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : {"best", "random"}, default="best"
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float or {"auto", "sqrt", "log2"}, default=None
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the estimator. The features are always
randomly permuted at each split, even if ``splitter`` is set to
``"best"``. When ``max_features < n_features``, the algorithm will
select ``max_features`` at random at each split before finding the best
split among them. But the best found split may vary across different
runs, even if ``max_features=n_features``. That is the case, if the
improvement of the criterion is identical for several splits and one
split has to be selected at random. To obtain a deterministic behaviour
during fitting, ``random_state`` has to be fixed to an integer.
See :term:`Glossary <random_state>` for details.
max_leaf_nodes : int, default=None
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
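        For example, with ``N = 100``, ``N_t = 50``, ``N_t_L = N_t_R = 25``,
        a node impurity of 0.5 and child impurities of 0.25 each, the
        weighted decrease is ``50/100 * (0.5 - 0.5*0.25 - 0.5*0.25) = 0.125``.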
.. versionadded:: 0.19
    min_impurity_split : float, default=None
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19. The default value of
``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it
will be removed in 1.0 (renaming of 0.25).
Use ``min_impurity_decrease`` instead.
class_weight : dict, list of dict or "balanced", default=None
Weights associated with classes in the form ``{class_label: weight}``.
If None, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
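        For instance, with ``y = [0, 0, 0, 1]``, the "balanced" mode gives
        weight ``4 / (2 * 3)`` (roughly 0.67) to class 0 and
        ``4 / (2 * 1) = 2.0`` to class 1.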
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details.
.. versionadded:: 0.22
Attributes
----------
classes_ : ndarray of shape (n_classes,) or list of ndarray
        The class labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
max_features_ : int
The inferred value of max_features.
n_classes_ : int or list of int
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree instance
The underlying Tree object. Please refer to
``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and
:ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`
for basic usage of these attributes.
See Also
--------
DecisionTreeRegressor : A decision tree regressor.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The :meth:`predict` method operates using the :func:`numpy.argmax`
function on the outputs of :meth:`predict_proba`. This means that in
case the highest predicted probabilities are tied, the classifier will
predict the tied class with the lowest index in :term:`classes_`.
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
@_deprecate_positional_args
def __init__(self, *,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
class_weight=None,
ccp_alpha=0.0):
super().__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
min_impurity_decrease=min_impurity_decrease,
min_impurity_split=min_impurity_split,
ccp_alpha=ccp_alpha)
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted="deprecated"):
"""Build a decision tree classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels) as integers or strings.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. Splits are also
ignored if they would result in any single class carrying a
negative weight in either child node.
check_input : bool, default=True
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
X_idx_sorted : deprecated, default="deprecated"
This parameter is deprecated and has no effect.
It will be removed in 1.1 (renaming of 0.26).
.. deprecated :: 0.24
Returns
-------
self : DecisionTreeClassifier
Fitted estimator.
"""
super().fit(
X, y,
sample_weight=sample_weight,
check_input=check_input,
X_idx_sorted=X_idx_sorted)
return self
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \
such arrays if n_outputs > 1
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
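        Examples
        --------
        An illustrative sketch: each row holds per-class leaf fractions and
        sums to 1. Output values are indicative only.
        >>> from sklearn.tree import DecisionTreeClassifier
        >>> clf = DecisionTreeClassifier(random_state=0).fit([[0], [1]], [0, 1])
        >>> clf.predict_proba([[0.2]])  # doctest: +SKIP
        array([[1., 0.]])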
"""
check_is_fitted(self)
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \
such arrays if n_outputs > 1
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(RegressorMixin, BaseDecisionTree):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : {"squared_error", "mse", "friedman_mse", "mae", "poisson"}, \
default="squared_error"
The function to measure the quality of a split. Supported criteria
are "squared_error" for the mean squared error, which is equal to
variance reduction as feature selection criterion and minimizes the L2
loss using the mean of each terminal node, "friedman_mse", which uses
mean squared error with Friedman's improvement score for potential
splits, "mae" for the mean absolute error, which minimizes the L1 loss
using the median of each terminal node, and "poisson" which uses
reduction in Poisson deviance to find splits.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
.. versionadded:: 0.24
Poisson deviance criterion.
.. deprecated:: 1.0
Criterion "mse" was deprecated in v1.0 and will be removed in
version 1.2. Use `criterion="squared_error"` which is equivalent.
splitter : {"best", "random"}, default="best"
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float or {"auto", "sqrt", "log2"}, default=None
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the estimator. The features are always
randomly permuted at each split, even if ``splitter`` is set to
``"best"``. When ``max_features < n_features``, the algorithm will
select ``max_features`` at random at each split before finding the best
split among them. But the best found split may vary across different
runs, even if ``max_features=n_features``. That is the case, if the
improvement of the criterion is identical for several splits and one
split has to be selected at random. To obtain a deterministic behaviour
during fitting, ``random_state`` has to be fixed to an integer.
See :term:`Glossary <random_state>` for details.
max_leaf_nodes : int, default=None
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
    min_impurity_split : float, default=None
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19. The default value of
``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it
will be removed in 1.0 (renaming of 0.25).
Use ``min_impurity_decrease`` instead.
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details.
.. versionadded:: 0.22
Attributes
----------
feature_importances_ : ndarray of shape (n_features,)
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
max_features_ : int
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree instance
The underlying Tree object. Please refer to
``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and
:ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`
for basic usage of these attributes.
See Also
--------
DecisionTreeClassifier : A decision tree classifier.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_diabetes
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> X, y = load_diabetes(return_X_y=True)
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, X, y, cv=10)
... # doctest: +SKIP
...
array([-0.39..., -0.46..., 0.02..., 0.06..., -0.50...,
0.16..., 0.11..., -0.73..., -0.30..., -0.00...])
"""
@_deprecate_positional_args
def __init__(self, *,
criterion="squared_error",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
ccp_alpha=0.0):
super().__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
min_impurity_decrease=min_impurity_decrease,
min_impurity_split=min_impurity_split,
ccp_alpha=ccp_alpha)
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted="deprecated"):
"""Build a decision tree regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (real numbers). Use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node.
check_input : bool, default=True
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
X_idx_sorted : deprecated, default="deprecated"
This parameter is deprecated and has no effect.
It will be removed in 1.1 (renaming of 0.26).
.. deprecated :: 0.24
Returns
-------
self : DecisionTreeRegressor
Fitted estimator.
"""
super().fit(
X, y,
sample_weight=sample_weight,
check_input=check_input,
X_idx_sorted=X_idx_sorted)
return self
def _compute_partial_dependence_recursion(self, grid, target_features):
"""Fast partial dependence computation.
Parameters
----------
grid : ndarray of shape (n_samples, n_target_features)
The grid points on which the partial dependence should be
evaluated.
target_features : ndarray of shape (n_target_features)
The set of target features for which the partial dependence
should be evaluated.
Returns
-------
averaged_predictions : ndarray of shape (n_samples,)
The value of the partial dependence function on each grid point.
"""
grid = np.asarray(grid, dtype=DTYPE, order='C')
averaged_predictions = np.zeros(shape=grid.shape[0],
dtype=np.float64, order='C')
self.tree_.compute_partial_dependence(
grid, target_features, averaged_predictions)
return averaged_predictions
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : {"gini", "entropy"}, default="gini"
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : {"random", "best"}, default="random"
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float, {"auto", "sqrt", "log2"} or None, default="auto"
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
random_state : int, RandomState instance or None, default=None
Used to pick randomly the `max_features` used at each split.
See :term:`Glossary <random_state>` for details.
max_leaf_nodes : int, default=None
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float, default=None
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19. The default value of
``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it
will be removed in 1.0 (renaming of 0.25).
Use ``min_impurity_decrease`` instead.
class_weight : dict, list of dict or "balanced", default=None
Weights associated with classes in the form ``{class_label: weight}``.
If None, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details.
.. versionadded:: 0.22
Attributes
----------
classes_ : ndarray of shape (n_classes,) or list of ndarray
        The class labels (single output problem),
or a list of arrays of class labels (multi-output problem).
max_features_ : int
The inferred value of max_features.
n_classes_ : int or list of int
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree instance
The underlying Tree object. Please refer to
``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and
:ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`
for basic usage of these attributes.
See Also
--------
ExtraTreeRegressor : An extremely randomized tree regressor.
sklearn.ensemble.ExtraTreesClassifier : An extra-trees classifier.
sklearn.ensemble.ExtraTreesRegressor : An extra-trees regressor.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.ensemble import BaggingClassifier
>>> from sklearn.tree import ExtraTreeClassifier
>>> X, y = load_iris(return_X_y=True)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> extra_tree = ExtraTreeClassifier(random_state=0)
>>> cls = BaggingClassifier(extra_tree, random_state=0).fit(
... X_train, y_train)
>>> cls.score(X_test, y_test)
0.8947...
"""
@_deprecate_positional_args
def __init__(self, *,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
class_weight=None,
ccp_alpha=0.0):
super().__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
min_impurity_decrease=min_impurity_decrease,
min_impurity_split=min_impurity_split,
random_state=random_state,
ccp_alpha=ccp_alpha)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : {"squared_error", "mse", "friedman_mse", "mae"}, \
default="squared_error"
The function to measure the quality of a split. Supported criteria
are "squared_error" for the mean squared error, which is equal to
variance reduction as feature selection criterion and "mae" for the
mean absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
.. deprecated:: 1.0
Criterion "mse" was deprecated in v1.0 and will be removed in
version 1.2. Use `criterion="squared_error"` which is equivalent.
splitter : {"random", "best"}, default="random"
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float, {"auto", "sqrt", "log2"} or None, default="auto"
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
random_state : int, RandomState instance or None, default=None
Used to pick randomly the `max_features` used at each split.
See :term:`Glossary <random_state>` for details.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float, default=None
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19. The default value of
``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it
will be removed in 1.0 (renaming of 0.25).
Use ``min_impurity_decrease`` instead.
max_leaf_nodes : int, default=None
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details.
.. versionadded:: 0.22
Attributes
----------
max_features_ : int
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
feature_importances_ : ndarray of shape (n_features,)
Return impurity-based feature importances (the higher, the more
important the feature).
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree instance
The underlying Tree object. Please refer to
``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and
:ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`
for basic usage of these attributes.
See Also
--------
ExtraTreeClassifier : An extremely randomized tree classifier.
sklearn.ensemble.ExtraTreesClassifier : An extra-trees classifier.
sklearn.ensemble.ExtraTreesRegressor : An extra-trees regressor.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
Examples
--------
>>> from sklearn.datasets import load_diabetes
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.ensemble import BaggingRegressor
>>> from sklearn.tree import ExtraTreeRegressor
>>> X, y = load_diabetes(return_X_y=True)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> extra_tree = ExtraTreeRegressor(random_state=0)
>>> reg = BaggingRegressor(extra_tree, random_state=0).fit(
... X_train, y_train)
>>> reg.score(X_test, y_test)
0.33...
"""
@_deprecate_positional_args
def __init__(self, *,
criterion="squared_error",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
min_impurity_decrease=0.,
min_impurity_split=None,
max_leaf_nodes=None,
ccp_alpha=0.0):
super().__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=min_impurity_decrease,
min_impurity_split=min_impurity_split,
random_state=random_state,
ccp_alpha=ccp_alpha)
| {
"content_hash": "0fd5e1b240656a78a5d64d638993fa89",
"timestamp": "",
"source": "github",
"line_count": 1769,
"max_line_length": 79,
"avg_line_length": 41.23289994347089,
"alnum_prop": 0.592424013929066,
"repo_name": "glemaitre/scikit-learn",
"id": "420292881f7db8b73ea57419bc2b1df96693f909",
"size": "72941",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sklearn/tree/_classes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2232"
},
{
"name": "C",
"bytes": "41025"
},
{
"name": "C++",
"bytes": "146835"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "Python",
"bytes": "10011694"
},
{
"name": "Shell",
"bytes": "44168"
}
],
"symlink_target": ""
} |
from zerver.lib.test_classes import WebhookTestCase
class SlackWebhookTests(WebhookTestCase):
STREAM_NAME = "slack"
URL_TEMPLATE = "/api/v1/external/slack?stream={stream}&api_key={api_key}"
WEBHOOK_DIR_NAME = "slack"
def test_slack_channel_to_topic(self) -> None:
expected_topic = "channel: general"
expected_message = "**slack_user**: `test\n`"
self.check_webhook(
"message_info",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_slack_channel_to_stream(self) -> None:
self.STREAM_NAME = "general"
self.url = "{}{}".format(self.url, "&channels_map_to_topics=0")
expected_topic = "Message from Slack"
expected_message = "**slack_user**: `test\n`"
self.check_webhook(
"message_info",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_missing_data_user_name(self) -> None:
payload = self.get_body("message_info_missing_user_name")
url = self.build_webhook_url()
result = self.client_post(url, payload, content_type="application/x-www-form-urlencoded")
self.assert_json_error(result, "Missing 'user_name' argument")
def test_missing_data_channel_name(self) -> None:
payload = self.get_body("message_info_missing_channel_name")
url = self.build_webhook_url()
result = self.client_post(url, payload, content_type="application/x-www-form-urlencoded")
self.assert_json_error(result, "Missing 'channel_name' argument")
def test_missing_data_text(self) -> None:
payload = self.get_body("message_info_missing_text")
url = self.build_webhook_url()
result = self.client_post(url, payload, content_type="application/x-www-form-urlencoded")
self.assert_json_error(result, "Missing 'text' argument")
def test_invalid_channels_map_to_topics(self) -> None:
payload = self.get_body("message_info")
url = "{}{}".format(self.url, "&channels_map_to_topics=abc")
result = self.client_post(url, payload, content_type="application/x-www-form-urlencoded")
self.assert_json_error(result, "Error: channels_map_to_topics parameter other than 0 or 1")
def get_body(self, fixture_name: str) -> str:
return self.webhook_fixture_data("slack", fixture_name, file_type="txt")
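# A hypothetical sketch of the form-encoded payload these tests exercise;
# the real fixture contents live in this webhook's fixtures directory and
# are not reproduced here:
#
#     user_name=slack_user&channel_name=general&text=test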
| {
"content_hash": "fc60f18f23bb1a169b1f58011df71290",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 99,
"avg_line_length": 39.698412698412696,
"alnum_prop": 0.6353458616553379,
"repo_name": "hackerkid/zulip",
"id": "e86dfe5f8068c19a93cc38468437e0d9f549a234",
"size": "2501",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/webhooks/slack/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "397271"
},
{
"name": "Dockerfile",
"bytes": "2939"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "717106"
},
{
"name": "JavaScript",
"bytes": "3079595"
},
{
"name": "Perl",
"bytes": "398763"
},
{
"name": "Puppet",
"bytes": "71210"
},
{
"name": "Python",
"bytes": "6876664"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "119833"
},
{
"name": "TypeScript",
"bytes": "14645"
}
],
"symlink_target": ""
} |
"""Tests for the Open-Meteo integration."""
from unittest.mock import AsyncMock, MagicMock, patch
from open_meteo import OpenMeteoConnectionError
from pytest import LogCaptureFixture
from homeassistant.components.open_meteo.const import DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import CONF_ZONE
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry
async def test_load_unload_config_entry(
hass: HomeAssistant,
mock_config_entry: MockConfigEntry,
mock_open_meteo: AsyncMock,
) -> None:
"""Test the Open-Meteo configuration entry loading/unloading."""
mock_config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_config_entry.entry_id)
await hass.async_block_till_done()
assert mock_config_entry.state is ConfigEntryState.LOADED
await hass.config_entries.async_unload(mock_config_entry.entry_id)
await hass.async_block_till_done()
assert not hass.data.get(DOMAIN)
assert mock_config_entry.state is ConfigEntryState.NOT_LOADED
@patch(
"homeassistant.components.open_meteo.OpenMeteo.forecast",
side_effect=OpenMeteoConnectionError,
)
async def test_config_entry_not_ready(
mock_forecast: MagicMock,
hass: HomeAssistant,
mock_config_entry: MockConfigEntry,
) -> None:
"""Test the Open-Meteo configuration entry not ready."""
mock_config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_config_entry.entry_id)
await hass.async_block_till_done()
assert mock_forecast.call_count == 1
assert mock_config_entry.state is ConfigEntryState.SETUP_RETRY
async def test_config_entry_zone_removed(
hass: HomeAssistant,
caplog: LogCaptureFixture,
) -> None:
"""Test the Open-Meteo configuration entry not ready."""
mock_config_entry = MockConfigEntry(
title="My Castle",
domain=DOMAIN,
data={CONF_ZONE: "zone.castle"},
unique_id="zone.castle",
)
mock_config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_config_entry.entry_id)
await hass.async_block_till_done()
assert mock_config_entry.state is ConfigEntryState.SETUP_RETRY
assert "Zone 'zone.castle' not found" in caplog.text
| {
"content_hash": "3c4dfdba8bcc582bbd9e88a7cb14007f",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 70,
"avg_line_length": 33.48529411764706,
"alnum_prop": 0.7408871321914801,
"repo_name": "w1ll1am23/home-assistant",
"id": "38619bc09db592e6bb4902e1ba4ea21459982f20",
"size": "2277",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "tests/components/open_meteo/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
"""Module containing data providers for TextProperty.
"""
from soc.modules.seeder.logic.providers.string import StringProvider
from soc.modules.seeder.logic.providers.string import RandomPhraseProvider
import random
__authors__ = [
'"Felix Kerekes" <sttwister@gmail.com>',
]
# pylint: disable=W0223
class TextProvider(StringProvider):
"""Base class for all data providers that return text.
"""
pass
class RandomParagraphProvider(TextProvider, RandomPhraseProvider):
"""Data provider that returns a random paragraph.
"""
def getValue(self):
return ' '.join(RandomPhraseProvider.getValue(self)
for _ in range(random.randint(5, 10)))
class RandomPlainTextDocumentProvider(RandomParagraphProvider):
"""Data provider that returns a random plain text document.
"""
def getValue(self):
return '\n\n'.join(RandomParagraphProvider.getValue(self)
for _ in range(random.randint(5, 10)))
class RandomHtmlDocumentProvider(RandomParagraphProvider):
"""Data provider that returns a random HTML document.
"""
def getValue(self):
        # TODO(sttwister): This could be improved
html = '<html><body>'
for _ in range(random.randint(5, 10)):
html += '<h1>' + RandomPhraseProvider.getValue(self) + '</h1>'
html += '<p>' + RandomParagraphProvider.getValue(self) + '</p>'
html += '</body></html>'
return html
class RandomMarkdownDocumentProvider(RandomParagraphProvider):
"""Data provider that returns a random Markdown document.
"""
def getValue(self):
        # TODO(sttwister): This could be improved
markdown = ''
for _ in range(random.randint(5, 10)):
markdown += RandomPhraseProvider.getValue(self) + '\n===\n\n'
markdown += RandomParagraphProvider.getValue(self) + '\n\n'
return markdown
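# Illustrative usage sketch (not part of the module; assumes the seeder
# framework allows these providers to be instantiated directly with no
# arguments):
#
#   provider = RandomHtmlDocumentProvider()
#   document = provider.getValue()   # e.g. '<html><body><h1>...</h1>...'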
| {
"content_hash": "29e6de542e323a1f17468d59c70bc6ed",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 74,
"avg_line_length": 28.390625,
"alnum_prop": 0.693450742982939,
"repo_name": "SRabbelier/Melange",
"id": "191ab39936bb1821da03bd675ad9a3c3fc8a95ff",
"size": "2422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/soc/modules/seeder/logic/providers/text.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400472"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "Java",
"bytes": "1496"
},
{
"name": "JavaScript",
"bytes": "1623582"
},
{
"name": "PHP",
"bytes": "1032"
},
{
"name": "Perl",
"bytes": "177565"
},
{
"name": "Python",
"bytes": "15317793"
},
{
"name": "Ruby",
"bytes": "59"
},
{
"name": "Shell",
"bytes": "15303"
}
],
"symlink_target": ""
} |
"""
The Pipelines module contains the base definition for
a generic Pipelines commands.
"""
from qds_sdk.actions import *

# Explicit imports for names used below (logging, re and time); relying on
# the wildcard import above to provide them is fragile.
import json
import logging
import re
import time
from qds_sdk.qubole import Qubole
from qds_sdk.resource import Resource
from argparse import ArgumentParser
log = logging.getLogger("qds_quest")
# Pattern matcher for s3 path
_URI_RE = re.compile(r's3://([^/]+)/?(.*)')
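# For example (illustrative):
#   _URI_RE.match('s3://my-bucket/path/to/file').groups()
#   -> ('my-bucket', 'path/to/file')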
class PipelinesCmdLine:
"""qds_sdk.PipelinesCmdLine is the interface used by qds.py."""
@staticmethod
def parsers():
argparser = ArgumentParser(prog="qds.py pipelines",
description="Pipelines client for Qubole Data Service.")
subparsers = argparser.add_subparsers()
# Create
create = subparsers.add_parser("create", help="Create a new pipeline")
create.add_argument("--create-type", dest="create_type", required=True,
help="create_type=1 for assisted, "
"create_type=2 for jar, create_type=3 for code")
create.add_argument("--pipeline-name", dest="name", required=True,
help="Name of pipeline")
create.add_argument("--description", dest="description", default=None,
help="Pipeline description"),
create.add_argument("--cluster-label", dest="cluster_label",
default="default", help="Cluster label")
create.add_argument("-c", "--code", dest="code", help="query string")
create.add_argument("-f", "--script-location", dest="script_location",
help="Path where code to run is stored. "
"local file path")
create.add_argument("-l", "--language", dest="language",
help="Language for bring your own code, "
"valid values are python and scala")
create.add_argument("--jar-path", dest="jar_path",
help="Location of Jar")
create.add_argument("--user-arguments", dest="user_arguments",
help="Additional user arguments")
create.add_argument("--main-class-name", dest="main_class_name",
help="class name of your jar file. "
"Required for create_type=2(BYOJ)")
create.add_argument("--command-line-options",
dest="command_line_options",
help="command line options on property page.")
create.set_defaults(func=PipelinesCmdLine.create)
# Update/Edit
        update_properties = subparsers.add_parser(
            "update-property",
            help="Update properties of an existing pipeline")
        update_properties.add_argument("--pipeline-id",
                                       dest="pipeline_id",
                                       required=True,
                                       help='Id of the pipeline to update')
        update_properties.add_argument("--cluster-label", dest="cluster_label",
                                       help="Update cluster label.")
        update_properties.add_argument("--command-line-options",
                                       dest="command_line_options",
                                       help="command line options on property page.")
        update_properties.add_argument("--can-retry", dest="can_retry",
                                       help="can retry true or false")
        update_properties.set_defaults(func=PipelinesCmdLine.update_properties)
        update_code = subparsers.add_parser(
            "update-code", help="Update code of an existing pipeline")
        update_code.add_argument(
            "-c", "--code", dest="code", help="query string")
        update_code.add_argument("-f", "--script-location", dest="script_location",
                                 help="Path where code to run is stored. "
                                      "local file path")
        update_code.add_argument("--jar-path", dest="jar_path",
                                 help="Location of Jar")
        update_code.add_argument("--user-arguments", dest="user_arguments",
                                 help="Additional user arguments")
        update_code.add_argument("--main-class-name", dest="main_class_name",
                                 help="class name of your jar file. "
                                      "Required for create_type=2(BYOJ)")
        update_code.add_argument("--language", dest="language",
                                 help="language of code scala or python")
        update_code.add_argument("--pipeline-id", dest="pipeline_id", required=True,
                                 help='Id of the pipeline to update')
        update_code.set_defaults(func=PipelinesCmdLine.update_code)
# Pipeline Util (Utility for start, pause, clone, edit, delete,
# archive)
        delete = subparsers.add_parser("delete", help="Delete Pipeline")
        delete.add_argument("--pipeline-id", dest="pipeline_id", required=True,
                            help='Id of the pipeline to delete')
        delete.set_defaults(func=PipelinesCmdLine.delete)

        status = subparsers.add_parser("status", help="Status of Pipeline")
        status.add_argument("--pipeline-id", dest="pipeline_id", required=True,
                            help='Id of the pipeline whose status to fetch')
        status.set_defaults(func=PipelinesCmdLine.status)

        start = subparsers.add_parser("start", help="Start Pipeline")
        start.add_argument("--pipeline-id", dest="pipeline_id", required=True,
                           help='Id of the pipeline to start')
        start.set_defaults(func=PipelinesCmdLine.start)

        pause = subparsers.add_parser("pause", help="Pause Pipeline")
        pause.add_argument("--pipeline-id", dest="pipeline_id", required=True,
                           help='Id of the pipeline to pause')
        pause.set_defaults(func=PipelinesCmdLine.pause)

        clone = subparsers.add_parser("clone", help="Clone Pipeline")
        clone.add_argument("--pipeline-id", dest="pipeline_id", required=True,
                           help='Id of the pipeline to clone')
        clone.set_defaults(func=PipelinesCmdLine.clone)

        archive = subparsers.add_parser("archive", help="Archive Pipeline")
        archive.add_argument("--pipeline-id", dest="pipeline_id", required=True,
                             help='Id of the pipeline to archive')
        archive.set_defaults(func=PipelinesCmdLine.archive)

        health = subparsers.add_parser("health", help="Health of Pipeline")
        health.add_argument("--pipeline-id", dest="pipeline_id", required=True,
                            help='Id of the pipeline whose health to fetch')
        health.set_defaults(func=PipelinesCmdLine.health)

        # list
        index = subparsers.add_parser("list", help="List pipelines")
        index.add_argument("--pipeline-status", dest="status", required=True,
                           help='Status filter for the pipelines to list. '
                                'Valid values = [active, archive, all, draft]')
        index.set_defaults(func=PipelinesCmdLine.index)
return argparser
@staticmethod
def run(args):
"""
Commandline method to run pipeline.
:param args:
:return:
"""
parser = PipelinesCmdLine.parsers()
parsed = parser.parse_args(args)
return parsed.func(parsed)
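    # Illustrative invocation through qds.py (flag names come from the
    # parsers above; all values are made up):
    #   qds.py pipelines create --create-type 3 --pipeline-name demo \
    #       --cluster-label spark --language scala -c "<streaming code>"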
@staticmethod
def delete(args):
"""
Commandline method to delete pipeline.
:param args:
:return:
"""
response = Pipelines.delete(args.pipeline_id)
return json.dumps(
response, default=lambda o: o.attributes, sort_keys=True, indent=4)
@staticmethod
def pause(args):
"""
Commandline method to pause pipeline.
:param args:
:return:
"""
response = Pipelines.pause(args.pipeline_id)
return json.dumps(
response, default=lambda o: o.attributes, sort_keys=True, indent=4)
@staticmethod
def archive(args):
"""
        Commandline method to archive an active pipeline.
:param args:
:return:
"""
response = Pipelines.archive(args.pipeline_id)
return json.dumps(
response, default=lambda o: o.attributes, sort_keys=True, indent=4)
@staticmethod
def clone(args):
"""
Commandline method to clone pipeline
:param args:
:return:
"""
response = Pipelines.clone(args.pipeline_id)
return json.dumps(response, default=lambda o: o.attributes, sort_keys=True, indent=4)
@staticmethod
def status(args):
"""
CommandLine method to get pipeline status
:param args:
:return:
"""
response = Pipelines.get_status(args.pipeline_id)
return json.dumps(
response, default=lambda o: o.attributes, sort_keys=True, indent=4)
@staticmethod
def health(args):
"""
Commandline method to get health of pipeline.
:param args:
:return:
"""
response = Pipelines.get_health(args.pipeline_id)
return json.dumps(
response, default=lambda o: o.attributes, sort_keys=True, indent=4)
@staticmethod
def start(args):
"""
Commandline method to start pipeline.
:param args:
:return:
"""
response = Pipelines.start(args.pipeline_id)
return json.dumps(response, sort_keys=True, indent=4)
@staticmethod
def index(args):
"""
Commandline method to list pipeline.
:param args:
:return:
"""
pipelinelist = Pipelines.list(args.status)
return json.dumps(
pipelinelist, default=lambda o: o.attributes, sort_keys=True, indent=4)
@staticmethod
def create(args):
"""
Commandline method to create pipeline.
:param args:
:return:
"""
pipeline = None
if int(args.create_type) == 2:
pipeline = PipelinesJar.create_pipeline(pipeline_name=args.name,
jar_path=args.jar_path,
main_class_name=args.main_class_name,
cluster_label=args.cluster_label,
user_arguments=args.user_arguments,
command_line_options=args.command_line_options)
        elif int(args.create_type) == 3:
            # Inline code and script-location creates take the same
            # arguments; save_code later decides which one to read.
            if args.code or args.script_location:
                pipeline = PipelinesCode.create_pipeline(
                    pipeline_name=args.name,
                    cluster_label=args.cluster_label,
                    code=args.code,
                    file_path=args.script_location,
                    language=args.language,
                    user_arguments=args.user_arguments,
                    command_line_options=args.command_line_options)
return json.dumps(pipeline)
@staticmethod
def update_properties(args):
"""
Commandline method to update pipeline properties.
:param args:
:return:
"""
params = args.__dict__
log.debug(params)
Pipelines.add_property(pipeline_id=args.pipeline_id,
cluster_label=args.cluster_label,
can_retry=args.can_retry,
command_line_options=args.command_line_options)
@staticmethod
def update_code(args):
"""
Commandline method to update code/Jar_Path
:param args:
:return:
"""
        response = None
        if args.jar_path or args.main_class_name:
response = PipelinesJar.save_code(pipeline_id=args.pipeline_id,
code=args.code,
file_path=args.script_location,
language=args.language,
jar_path=args.jar_path,
user_arguments=args.user_arguments,
main_class_name=args.main_class_name)
elif args.code or args.script_location:
response = PipelinesCode.save_code(pipeline_id=args.pipeline_id,
code=args.code,
file_path=args.script_location,
language=args.language,
jar_path=args.jar_path,
user_arguments=args.user_arguments,
main_class_name=args.main_class_name)
return json.dumps(response, sort_keys=True, indent=4)
class Pipelines(Resource):
"""qds_sdk.Pipelines is the base Qubole Pipelines class."""
""" all commands use the /pipelines endpoint"""
rest_entity_path = "pipelines"
pipeline_id = None
pipeline_name = None
pipeline_code = None
jar_path = None
@staticmethod
    def get_pipeline_id(response):
return str(response.get('data').get('id'))
@staticmethod
def list(status=None):
"""
Method to list pipeline on the basis of status.
:param status: Valid values - all, draft, archive, active.
:return: List of pipeline in json format.
"""
if status is None or status.lower() == 'all':
params = {"filter": "draft,archive,active"}
else:
params = {"filter": status.lower()}
conn = Qubole.agent()
url_path = Pipelines.rest_entity_path
pipeline_list = conn.get(url_path, params)
return pipeline_list
@classmethod
def create(cls, pipeline_name, create_type, **kwargs):
"""
Create a pipeline object by issuing a POST
request to the /pipeline?mode=wizard endpoint
Note - this creates pipeline in draft mode
Args:
pipeline_name: Name to be given.
create_type: 1->Assisted, 2->Jar, 3->Code
**kwargs: keyword arguments specific to create type
Returns:
response
"""
conn = Qubole.agent()
url = Pipelines.rest_entity_path
if create_type is None:
raise ParseError("Provide create_type for Pipeline.", None)
if not kwargs or create_type == 1:
data = {
"data": {
"attributes": {
"name": pipeline_name,
"status": "DRAFT",
"create_type": create_type
},
"type": "pipeline"
}
}
url = url + "?mode=wizard"
else:
data = {
"data": {
"type": "pipeline",
"attributes": {
"name": pipeline_name,
"create_type": create_type,
"properties": {
"cluster_label": kwargs.get('cluster_label'),
"can_retry": kwargs.get('can_retry'),
"command_line_options": kwargs.get('command_line_options'),
"user_arguments": kwargs.get('user_arguments')
}
},
"relationships": {
"alerts": {
"data": {
"type": "pipeline/alerts",
"attributes": {
"can_notify": kwargs.get('can_notify'),
"notification_channels": kwargs.get('channel_ids')
}
}
}
}
}
}
if create_type == 2:
data['data']['attributes']['properties']['jar_path'] = \
kwargs.get('jar_path')
data['data']['attributes']['properties']['main_class_name'] = \
kwargs.get('main_class_name')
elif create_type == 3:
data['data']['attributes']['properties']['code'] = \
kwargs.get('code')
data['data']['attributes']['properties']['language'] = \
kwargs.get('language')
response = conn.post(url, data)
        cls.pipeline_id = Pipelines.get_pipeline_id(response)
cls.pipeline_name = pipeline_name
return response
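    # Illustrative call (values are made up): creates a BYOC pipeline with
    # properties and alerts in a single request.
    #   Pipelines.create("demo", 3, cluster_label="spark", code="...",
    #                    language="scala", can_retry=True,
    #                    command_line_options=None, can_notify=False,
    #                    channel_ids=None)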
@staticmethod
def start(pipeline_id):
"""
Method to start Pipeline
:param pipeline_id: id of pipeline to be deleted
:return: response
"""
conn = Qubole.agent()
url = Pipelines.rest_entity_path + "/" + pipeline_id + "/start"
response = conn.put(url)
        pipeline_status = Pipelines.get_status(pipeline_id)
        while pipeline_status == 'waiting':
            log.info("Pipeline is in waiting state....")
            time.sleep(10)
            # Re-poll the API: re-reading the stale start() response would
            # never observe a status change.
            pipeline_status = Pipelines.get_status(pipeline_id)
        log.debug("State of pipeline is %s", pipeline_status)
return response
@staticmethod
def add_property(pipeline_id,
cluster_label,
checkpoint_location=None,
output_mode=None,
trigger_interval=None,
can_retry=True,
command_line_options=None):
"""
Method to add properties in pipeline
:param can_retry:
:param pipeline_id:
:param cluster_label:
:param checkpoint_location:
:param trigger_interval:
:param output_mode:
:param command_line_options:
:return:
"""
conn = Qubole.agent()
if command_line_options is None:
command_line_options = """--conf spark.driver.extraLibraryPath=/usr/lib/hadoop2/lib/native\n--conf spark.eventLog.compress=true\n--conf spark.eventLog.enabled=true\n--conf spark.sql.streaming.qubole.enableStreamingEvents=true\n--conf spark.qubole.event.enabled=true"""
data = {"data": {"attributes": {
"cluster_label": cluster_label,
"can_retry": can_retry,
"checkpoint_location": checkpoint_location,
"trigger_interval": trigger_interval,
"output_mode": output_mode,
"command_line_options": command_line_options
},
"type": "pipeline/properties"
}
}
log.info("Data {}".format(data))
url = Pipelines.rest_entity_path + "/" + pipeline_id + "/properties"
response = conn.put(url, data)
log.debug(response)
return response
@classmethod
def save_code(cls, pipeline_id,
code=None,
file_path=None,
language=None,
jar_path=None,
main_class_name=None,
user_arguments=None):
"""
:param file_path:
:param code:
:param language:
:param user_arguments:
:param pipeline_id:
:param jar_path:
:param main_class_name:
:return:
"""
data = None
if cls.create_type == 2:
if jar_path is None or main_class_name is None:
raise ParseError(
"Provide Jar path for BYOJ mode.")
else:
cls.jar_path = jar_path
data = {"data": {
"attributes": {"create_type": cls.create_type,
"user_arguments": str(user_arguments),
"jar_path": str(jar_path),
"main_class_name": str(main_class_name)}}}
elif cls.create_type == 3:
            if code or file_path:
                try:
                    # Prefer the script file when given; otherwise keep the
                    # inline code string as-is.
                    if file_path:
                        with open(file_path, 'r') as f:
                            code = f.read()
                except IOError as e:
                    raise ParseError(
                        "Unable to open script location, or both script "
                        "location and code are empty.", str(e))
cls.pipeline_code = code
data = {"data": {
"attributes": {"create_type": cls.create_type, "user_arguments": str(user_arguments),
"code": str(code), "language": str(language)}}}
else:
raise ParseError(
"Provide code or file location for BYOC mode.")
conn = Qubole.agent()
url = cls.rest_entity_path + "/" + str(pipeline_id) + "/save_code"
response = conn.put(url, data)
log.debug(response)
return response
@staticmethod
def get_health(pipeline_id):
"""
Get Pipeline Health
:param pipeline_id:
:return:
"""
conn = Qubole.agent()
url = Pipelines.rest_entity_path + "/" + pipeline_id
response = conn.get(url)
log.info(response)
return response.get("data").get("attributes").get("health")
@staticmethod
def clone(pipeline_id):
"""
Method to clone pipeline
:param pipeline_id:
:return:
"""
url = Pipelines.rest_entity_path + "/" + pipeline_id + "/duplicate"
log.info("Cloning pipeline with id {}".format(pipeline_id))
conn = Qubole.agent()
return conn.post(url)
@staticmethod
def pause(pipeline_id):
"""
Method to pause pipeline
:param pipeline_id:
:return:
"""
url = Pipelines.rest_entity_path + "/" + pipeline_id + "/pause"
log.info("Pausing pipeline with id {}".format(pipeline_id))
conn = Qubole.agent()
return conn.put(url)
@staticmethod
def archive(pipeline_id):
"""
Method to Archive pipeline
:param pipeline_id:
:return:
"""
url = Pipelines.rest_entity_path + "/" + pipeline_id + "/archive"
log.info("Archiving pipeline with id {}".format(pipeline_id))
conn = Qubole.agent()
return conn.put(url)
@staticmethod
def get_status(pipeline_id):
"""
Get pipeline status
:param pipeline_id:
:return:
"""
conn = Qubole.agent()
url = Pipelines.rest_entity_path + "/" + pipeline_id
response = conn.get(url)
log.debug(response)
return response.get("data").get(
"attributes").get("pipeline_instance_status")
@staticmethod
def delete(pipeline_id):
"""
Method to delete pipeline
:param pipeline_id:
:return:
"""
conn = Qubole.agent()
url = Pipelines.rest_entity_path + "/" + pipeline_id + "/delete"
log.info("Deleting Pipeline with id: {}".format(pipeline_id))
response = conn.put(url)
log.info(response)
return response
@staticmethod
def edit_pipeline_name(pipeline_id, pipeline_name):
"""
Method to edit pipeline name (Required in case of cloning)
:param pipeline_id:
:param pipeline_name:
:return:
"""
conn = Qubole.agent()
url = Pipelines.rest_entity_path + "/" + pipeline_id
data = {
"data": {
"attributes": {
"name": pipeline_name},
"type": "pipelines"}}
return conn.put(url, data)
@staticmethod
def set_alert(pipeline_id, channel_id):
"""
:param pipeline_id:
:param channel_id: List of channel's id
:return:
"""
data = {
"data": {"attributes": {
"event_type": "error",
"notification_channels": [channel_id],
"can_notify": True},
"type": "pipeline/alerts"
}
}
conn = Qubole.agent()
url = Pipelines.rest_entity_path + "/" + pipeline_id + "/alerts"
return conn.put(url, data)
@staticmethod
def get_code(pipeline_id):
"""
Get pipeline code
:param pipeline_id:
:return:
"""
url = Pipelines.rest_entity_path + "/" + pipeline_id
conn = Qubole.agent()
        response = conn.get(url)
        code = response.get("meta")["command_details"]["code"]
return code
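# Typical BYOC flow stitched together from the methods above (illustrative;
# values are made up, mirroring PipelinesCode.create_pipeline below):
#
#   resp = Pipelines.create("demo", 3)          # draft pipeline
#   pid = Pipelines.get_pipeline_id(resp)
#   Pipelines.add_property(pid, "spark")        # attach cluster label etc.
#   PipelinesCode.save_code(pid, code="...", language="scala")
#   Pipelines.start(pid)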
class PipelinesCode(Pipelines):
create_type = 3
@staticmethod
def create_pipeline(pipeline_name,
cluster_label,
code=None,
file_path=None,
language=None,
can_retry=True,
channel_id=None,
command_line_options=None,
user_arguments=None):
"""
Method to create pipeline in BYOC mode in one go.
:param file_path:
:param code:
:param command_line_options:
:param user_arguments:
:param pipeline_name:
:param cluster_label:
:param language:
:param can_retry:
:param channel_id:
:return:
"""
PipelinesCode.create(pipeline_name, PipelinesCode.create_type)
pipeline_id = PipelinesCode.pipeline_id
response = PipelinesCode.add_property(pipeline_id, cluster_label,
can_retry=can_retry,
command_line_options=command_line_options)
log.debug(response)
response = PipelinesCode.save_code(pipeline_id,
code=code,
file_path=file_path,
language=language,
user_arguments=user_arguments)
if channel_id:
response = Pipelines.set_alert(pipeline_id, channel_id)
log.info(response)
return response
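# Illustrative one-shot creation (values are made up):
#   PipelinesCode.create_pipeline("demo", "spark",
#                                 code="<streaming code>", language="scala")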
class PipelinesJar(Pipelines):
create_type = 2
@staticmethod
def create_pipeline(pipeline_name,
jar_path,
cluster_label,
main_class_name,
channel_id=None,
can_retry=True,
command_line_options=None,
user_arguments=None):
"""
Method to create pipeline in BYOJ mode
:param pipeline_name:
:param jar_path:
:param cluster_label:
:param main_class_name:
:param channel_id:
:param can_retry:
:param command_line_options:
:param user_arguments:
:return:
"""
PipelinesJar.create(pipeline_name, PipelinesJar.create_type)
pipeline_id = PipelinesJar.pipeline_id
PipelinesJar.add_property(pipeline_id,
cluster_label,
can_retry=can_retry,
command_line_options=command_line_options)
        response = PipelinesJar.save_code(pipeline_id,
                                          jar_path=jar_path,
                                          main_class_name=main_class_name,
                                          user_arguments=user_arguments)
        PipelinesJar.jar_path = jar_path
        if channel_id:
            response = Pipelines.set_alert(pipeline_id, channel_id)
            log.info(response)
        # Return the API response; returning the class object itself would
        # make json.dumps() in PipelinesCmdLine.create fail.
        return response
class PipelinesAssisted(Pipelines):
create_type = 1
@staticmethod
def add_source():
"""Method to add source."""
pass
@staticmethod
def add_sink():
"""Method to add sink."""
pass
@staticmethod
def create_pipeline():
"""Parent Method to create end to end pipeline."""
pass
@staticmethod
def add_operator():
"""Parent method to add operator"""
pass
@staticmethod
def _select_operator():
"""Method to add select operator."""
pass
@staticmethod
def _filter_operator():
"""Method to add filter operator."""
pass
@staticmethod
def _watermark_operator():
"""Method to add watermark operator"""
pass
@staticmethod
def _window_group_operator():
"""Method to add window group operator"""
pass
@staticmethod
def _source_kafka():
"""Method to as kafka as source."""
pass
@staticmethod
def _source_kinesis():
"""Method to add kinesis as source."""
pass
@staticmethod
def _source_s3():
"""Method to add s3 as source."""
pass
@staticmethod
def _source_google_storage():
"""Method to add google storage as source."""
pass
@staticmethod
def _sink_kafka():
"""Method to add kafka as sink."""
pass
@staticmethod
def _sink_s3():
"""Method to add s3 as sink."""
pass
@staticmethod
def _sink_hive():
"""method to add hive as sink."""
pass
@staticmethod
def _sink_snowflake():
"""Method to add Snowflake as sink"""
pass
@staticmethod
def _sink_google_storage():
"""Method to add google storage as sink"""
pass
@staticmethod
def _sink_BigQuery():
"""Method to add BigQuery as sink."""
pass
@staticmethod
def add_registry():
"""Method to add registry."""
pass
@staticmethod
def switch_from_assisted():
"""Method to switch to Assisted from BYOC or BYOJ mode."""
pass
| {
"content_hash": "9ff23a7599b08afba14a5ec1ce6da908",
"timestamp": "",
"source": "github",
"line_count": 836,
"max_line_length": 280,
"avg_line_length": 37.29545454545455,
"alnum_prop": 0.5093813143461946,
"repo_name": "qubole/qds-sdk-py",
"id": "ffe6f64d31816c40b93eed239603e49225c67840",
"size": "31179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qds_sdk/pipelines.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "839004"
}
],
"symlink_target": ""
} |