| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|

ensime/ensime-vim | ensime_shared/ticker.py | Python | mit | 956 | 0.001046

REFRESH_TIMER = 1000
class Ticker(object):
def __init__(self, _vim):
self._vim = _vim
self.has_timers = bool(int(self._vim.eval("has('timers')")))
if self.has_timers:
self._timer = None
self._start_refresh_timer()
def tick(self, client):
filename = client.editor.path()
# XXX is this necessary ?
if not client.editor.is_buffer_ensime_compatible():
return
client.tick(filename)
if not self.has_timers:
self._repeat_cursor_hold()
def _repeat_cursor_hold(self):
self._vim.options['updatetime'] = REFRESH_TIMER
self._vim.command('call feedkeys("f\e")')
def _start_refresh_timer(self):
"""Start the Vim timer. """
if not self._timer:
self._timer = self._vim.eval(
"timer_start({}, 'EnTick', {{'repeat': -1}})"
.format(REFRESH_TIMER)
)

deepjets/deepjets | deepjets/testdata/__init__.py | Python | bsd-3-clause | 207 | 0

import os
from pkg_resources import resource_filename
__all__ = [
'get_filepath',
]
def get_filepath(name='sherpa_wz.hepmc'):
return resource_filename('deepjets', os.path.join('testdata', name))

leighpauls/k2cro4 | third_party/python_26/Lib/site-packages/win32comext/axscript/client/framework.py | Python | bsd-3-clause | 36,696 | 0.030494

""
|
"AXScript Client Framework
This module provides a core framework for an ActiveX Scripting client.
Derived classes actually implement the AX Client itself, including the
scoping rules, etc.
There are classes defined for the engine itself, and for ScriptItems
"""
import sys
from win32com.axscript import axscript
import win32com.server.util
import win32com.client.connect # Need simple connection point support
import win32api, winerror
import pythoncom
import types
import re
def RemoveCR(text):
# No longer just "RemoveCR" - should be renamed to
# FixNewlines, or something. Idea is to fix arbitrary newlines into
# something Python can compile...
return re.sub('(\r\n)|\r|(\n\r)','\n',text)
SCRIPTTEXT_FORCEEXECUTION = -2147483648 # 0x80000000
SCRIPTTEXT_ISEXPRESSION = 0x00000020
SCRIPTTEXT_ISPERSISTENT = 0x00000040
from win32com.server.exception import Exception, IsCOMServerException
import error # ax.client.error
state_map = {
axscript.SCRIPTSTATE_UNINITIALIZED: "SCRIPTSTATE_UNINITIALIZED",
axscript.SCRIPTSTATE_INITIALIZED: "SCRIPTSTATE_INITIALIZED",
axscript.SCRIPTSTATE_STARTED: "SCRIPTSTATE_STARTED",
axscript.SCRIPTSTATE_CONNECTED: "SCRIPTSTATE_CONNECTED",
axscript.SCRIPTSTATE_DISCONNECTED: "SCRIPTSTATE_DISCONNECTED",
axscript.SCRIPTSTATE_CLOSED: "SCRIPTSTATE_CLOSED",
}
def profile(fn, *args):
import profile
prof = profile.Profile()
try:
# roll on 1.6 :-)
# return prof.runcall(fn, *args)
return apply(prof.runcall, (fn,) + args)
finally:
import pstats
# Damn - really want to send this to Excel!
# width, list = pstats.Stats(prof).strip_dirs().get_print_list([])
pstats.Stats(prof).strip_dirs().sort_stats("time").print_stats()
class SafeOutput:
softspace=1
def __init__(self, redir=None):
if redir is None: redir = sys.stdout
self.redir=redir
def write(self,message):
try:
self.redir.write(message)
except:
win32api.OutputDebugString(message.encode('mbcs'))
def flush(self):
pass
def close(self):
pass
# Make sure we have a valid sys.stdout/stderr, otherwise our
# print and trace statements may raise an exception
def MakeValidSysOuts():
if not isinstance(sys.stdout, SafeOutput):
sys.stdout = sys.stderr = SafeOutput()
# and for the sake of working around something I can't understand...
# prevent keyboard interrupts from killing IIS
import signal
def noOp(a,b):
# it would be nice to get to the bottom of this, so a warning to
# the debug console can't hurt.
print "WARNING: Ignoring keyboard interrupt from ActiveScripting engine"
# If someone else has already redirected, then assume they know what they are doing!
if signal.getsignal(signal.SIGINT) == signal.default_int_handler:
try:
signal.signal(signal.SIGINT, noOp)
except ValueError:
# Not the main thread - can't do much.
pass
def trace(*args):
"""A function used instead of "print" for debugging output.
"""
for arg in args:
print arg,
print
def RaiseAssert(scode, desc):
"""A debugging function that raises an exception considered an "Assertion".
"""
print "**************** ASSERTION FAILED *******************"
print desc
raise Exception(scode, desc)
class AXScriptCodeBlock:
"""An object which represents a chunk of code in an AX Script
"""
def __init__(self, name, codeText, sourceContextCookie, startLineNumber, flags):
self.name = name
self.codeText = codeText
self.codeObject = None
self.sourceContextCookie = sourceContextCookie
self.startLineNumber = startLineNumber
self.flags = flags
self.beenExecuted = 0
def GetFileName(self):
# Gets the "file name" for Python - uses <...> so Python doesn't think
# it is a real file.
return "<%s>" % self.name
def GetDisplayName(self):
return self.name
def GetLineNo(self, no):
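# Return the stripped source text of (1-based) line `no` within this code block.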
pos = -1
for i in range(no-1):
pos = self.codeText.find('\n', pos+1)
if pos==-1: pos=len(self.codeText)
epos = self.codeText.find('\n', pos+1)
if epos==-1:
epos=len(self.codeText)
return self.codeText[pos+1:epos].strip()
class Event:
"""A single event for a ActiveX named object.
"""
def __init__(self):
self.name = "<None>"
def __repr__(self):
return "<%s at %d: %s>" % (self.__class__.__name__, id(self), self.name)
def Reset(self):
pass
def Close(self):
pass
def Build(self, typeinfo, funcdesc):
self.dispid = funcdesc[0]
self.name = typeinfo.GetNames(self.dispid)[0]
# print "Event.Build() - Event Name is ", self.name
class EventSink:
"""A set of events against an item. Note this is a COM client for connection points.
"""
_public_methods_ = []
def __init__(self, myItem, coDispatch):
self.events = {}
self.connection = None
self.coDispatch = coDispatch
self.myScriptItem = myItem
self.myInvokeMethod = myItem.GetEngine().ProcessScriptItemEvent
self.iid = None
def Reset(self):
self.Disconnect()
def Close(self):
self.iid = None
self.myScriptItem = None
self.myInvokeMethod = None
self.coDispatch = None
for event in self.events.values():
event.Reset()
self.events = {}
self.Disconnect()
# COM Connection point methods.
def _query_interface_(self, iid):
if iid==self.iid:
return win32com.server.util.wrap(self)
def _invoke_(self, dispid, lcid, wFlags, args):
try:
event = self.events[dispid]
except:
raise Exception(scode=winerror.DISP_E_MEMBERNOTFOUND)
#print "Invoke for ", event, "on", self.myScriptItem, " - calling", self.myInvokeMethod
return self.myInvokeMethod(self.myScriptItem, event, lcid, wFlags, args)
def GetSourceTypeInfo(self, typeinfo):
"""Gets the typeinfo for the Source Events for the passed typeinfo"""
attr = typeinfo.GetTypeAttr()
cFuncs = attr[6]
typeKind = attr[5]
if typeKind not in [pythoncom.TKIND_COCLASS, pythoncom.TKIND_INTERFACE]:
RaiseAssert(winerror.E_UNEXPECTED, "The typeKind of the object is unexpected")
cImplType = attr[8]
for i in xrange(cImplType):
# Look for the [source, default] interface on the coclass
# that isn't marked as restricted.
flags = typeinfo.GetImplTypeFlags(i)
flagsNeeded = pythoncom.IMPLTYPEFLAG_FDEFAULT | pythoncom.IMPLTYPEFLAG_FSOURCE
if (flags & ( flagsNeeded | pythoncom.IMPLTYPEFLAG_FRESTRICTED))==(flagsNeeded):
# Get the handle to the implemented interface.
href = typeinfo.GetRefTypeOfImplType(i)
return typeinfo.GetRefTypeInfo(href)
def BuildEvents(self):
# See if it is an extender object.
try:
mainTypeInfo = self.coDispatch.QueryInterface(axscript.IID_IProvideMultipleClassInfo)
isMulti = 1
numTypeInfos = mainTypeInfo.GetMultiTypeInfoCount()
except pythoncom.com_error:
isMulti = 0
numTypeInfos = 1
try:
mainTypeInfo = self.coDispatch.QueryInterface(pythoncom.IID_IProvideClassInfo)
except pythoncom.com_error:
numTypeInfos = 0
# Create an event handler for the item.
for item in xrange(numTypeInfos):
if isMulti:
typeinfo, flags = mainTypeInfo.GetInfoOfIndex(item, axscript.MULTICLASSINFO_GETTYPEINFO)
else:
typeinfo = mainTypeInfo.GetClassInfo()
sourceType = self.GetSourceTypeInfo(typeinfo)
cFuncs = 0
if sourceType:
attr = sourceType.GetTypeAttr()
self.iid = attr[0]
cFuncs = attr[6]
for i in xrange(cFuncs):
funcdesc = sourceType.GetFuncDesc(i)
event = Event()
event.Build(sourceType, funcdesc)
self.events[event.dispid] = event
def Connect(self):
if self.connection is not None or self.iid is None: return
# trace("Connect for sink item", self.myScriptItem.name, "with IID",str(self.iid))
self.connection = win32com.client.connect.SimpleConnection(self.coDispatch, self, self.iid)
def Disconnect(self):
if self.connection:
try:
self.connection.Disconnect()
except pythoncom.com_error:
pass # Ignore disconnection errors.
self.connection = None
class ScriptItem:
"""An item (or subitem) that is exposed to the ActiveX script
"""
def __init__(self, parentItem, name, dispatch, flags):
self.parentItem = parentItem
self.dispatch = dispatch
self.name = name
self.flags = flags
self.eventSink = None
self.subItems = {}
self.createdConnections = 0
self.isRegistered = 0
# trace("Creating ScriptItem", name, "of parent", parentItem,"wi

openrural/open-data-nc | opendata/requests/forms.py | Python | mit | 1,046 | 0

from django import forms
from selectable.forms import AutoCompleteSelectField
from selectable.forms import AutoCompleteSelectWidget
from opendata.catalog.lookups import CityLookup, CountyLookup
from .models import Request
class SearchForm(forms.Form):
text = forms.CharField(required=False)
class RequestForm(forms.ModelForm):
county = AutoCompleteSelectField(
lookup_class=CountyLookup,
required=False,
widget=AutoCompleteSelectWidget(
lookup_class=CountyLookup,
attrs={"class": "suggestions-hidden suggestions-county"},
)
)
city = AutoCompleteSelectField(
lookup_class=CityLookup,
required=False,
widget=AutoCompleteSelectWidget(
lookup_class=CityLookup,
attrs={"class": "suggestions-hidden suggestions-city"},
)
)
class Meta:
model = Request
exclude = ('suggested_by', 'resources', 'rating', 'status', )
class Media:
js = (
"suggestions/js/form.js",
)

chenmoshushi/libsvm | python_old/svm.py | Python | bsd-3-clause | 8,155 | 0.045984

import svmc
from svmc import C_SVC, NU_SVC, ONE_CLASS, EPSILON_SVR, NU_SVR
from svmc import LINEAR, POLY, RBF, SIGMOID, PRECOMPUTED
from math import exp, fabs
def _int_array(seq):
size = len(seq)
array = svmc.new_int(size)
i = 0
for item in seq:
svmc.int_setitem(array,i,item)
i = i + 1
return array
def _double_array(seq):
size = len(seq)
array = svmc.new_double(size)
i = 0
for item in seq:
svmc.double_setitem(array,i,item)
i = i + 1
return array
def _free_int_array(x):
if x != 'NULL' and x != None:
svmc.delete_int(x)
def _free_double_array(x):
if x != 'NULL' and x != None:
svmc.delete_double(x)
def _int_array_to_list(x,n):
return map(svmc.int_getitem,[x]*n,range(n))
def _double_array_to_list(x,n):
return map(svmc.double_getitem,[x]*n,range(n))
class svm_parameter:
# default values
default_parameters = {
'svm_type' : C_SVC,
'kernel_type' : RBF,
'degree' : 3,
'gamma' : 0, # 1/num_features
'coef0' : 0,
'nu' : 0.5,
'cache_size' : 100,
'C' : 1,
'eps' : 1e-3,
'p' : 0.1,
'shrinking' : 1,
'nr_weight' : 0,
'weight_label' : [],
'weight' : [],
'probability' : 0
}
def __init__(self,**kw):
self.__dict__['param'] = svmc.new_svm_parameter()
for attr,val in self.default_parameters.items():
setattr(self,attr,val)
for attr,val in kw.items():
setattr(self,attr,val)
def __getattr__(self,attr):
get_func = getattr(svmc,'svm_parameter_%s_get' % (attr))
return get_func(self.param)
def __setattr__(self,attr,val):
if attr == 'weight_label':
self.__dict__['weight_label_len'] = len(val)
val = _int_array(val)
_free_int_array(self.weight_label)
elif attr == 'weight':
self.__dict__['weight_len'] = len(val)
val = _double_array(val)
_free_double_array(self.weight)
set_func = getattr(svmc,'svm_parameter_%s_set' % (attr))
set_func(self.param,val)
def __repr__(self):
ret = '<svm_parameter:'
for name in dir(svmc):
if name[:len('svm_parameter_')] == 'svm_parameter_' and name[-len('_set'):] == '_set':
attr = name[len('svm_parameter_'):-len('_set')]
if attr == 'weight_label':
ret = ret+' weight_label = %s,' % _int_array_to_list(self.weight_label,self.weight_label_len)
elif attr == 'weight':
ret = ret+' weight = %s,' % _double_array_to_list(self.weight,self.weight_len)
else:
ret = ret+' %s = %s,' % (attr,getattr(self,attr))
return ret+'>'
def __del__(self):
_free_int_array(self.weight_label)
_free_double_array(self.weight)
svmc.delete_svm_parameter(self.param)
def _convert_to_svm_node_array(x):
""" convert a sequence or mapping to an svm_node array """
import operator
# Find non zero elements
iter_range = []
if isinstance(x, dict):
for k, v in x.iteritems():
# all zeros kept due to the precomputed kernel; no good solution yet
# if v != 0:
iter_range.append( k )
elif operator.isSequenceType(x):
for j in range(len(x)):
# if x[j] != 0:
iter_range.append( j )
else:
raise TypeError,"data must be a mapping or a sequence"
iter_range.sort()
data = svmc.svm_node_array(len(iter_range)+1)
svmc.svm_node_array_set(data,len(iter_range),-1,0)
j = 0
for k in iter_range:
svmc.svm_node_array_set(data,j,k,x[k])
j = j + 1
return data
class svm_problem:
def __init__(self,y,x):
assert len(y) == len(x)
self.prob = prob = svmc.new_svm_problem()
self.size = size = len(y)
self.y_array = y_array = svmc.new_double(size)
for i in range(size):
svmc.double_setitem(y_array,i,y[i])
self.x_matrix = x_matrix = svmc.svm_node_matrix(size)
self.data = []
self.maxlen = 0;
for i in range(size):
data = _convert_to_svm_node_array(x[i])
self.data.append(data);
svmc.svm_node_matrix_set(x_matrix,i,data)
if type(x[i]) == dict:
if (len(x[i]) > 0):
self.maxlen = max(self.maxlen,max(x[i].keys()))
else:
self.maxlen = max(self.maxlen,len(x[i]))
svmc.svm_problem_l_set(prob,size)
svmc.svm_problem_y_set(prob,y_array)
svmc.svm_problem_x_set(prob,x_matrix)
def __repr__(self):
return "<svm_problem: size = %s>" % (self.size)
def __del__(self):
svmc.delete_svm_problem(self.prob)
svmc.delete_double(self.y_array)
for i in range(self.size):
svmc.svm_node_array_destroy(self.data[i])
svmc.svm_node_matrix_destroy(self.x_matrix)
class svm_model:
def __init__(self,arg1,arg2=None):
if arg2 == None:
# create model from file
filename = arg1
self.model = svmc.svm_load_model(filename)
else:
# create model from problem and parameter
prob,param = arg1,arg2
self.prob = prob
if param.gamma == 0:
param.gamma = 1.0/prob.maxlen
msg = svmc.svm_check_parameter(prob.prob,param.param)
if msg: raise ValueError, msg
self.model = svmc.svm_train(prob.prob,param.param)
#setup some classwide variables
self.nr_class = svmc.svm_get_nr_class(self.model)
self.svm_type = svmc.svm_get_svm_type(self.model)
#create labels(classes)
intarr = svmc.new_int(self.nr_class)
svmc.svm_get_labels(self.model,intarr)
self.labels = _int_array_to_list(intarr, self.nr_class)
svmc.delete_int(intarr)
#check if valid probability model
self.probability = svmc.svm_check_probability_model(self.model)
def predict(self,x):
data = _convert_to_svm_node_array(x)
ret = svmc.svm_predict(self.model,data)
svmc.svm_node_array_destroy(data)
return ret
def get_nr_class(self):
return self.nr_class
def get_labels(self):
if self.svm_type == NU_SVR or self.svm_type == EPSILON_SVR or self.svm_type == ONE_CLASS:
raise TypeError, "Unable to get label from a SVR/ONE_CLASS model"
return self.labels
def predict_values_raw(self,x):
#convert x into svm_node, allocate a double array for return
n = self.nr_class*(self.nr_class-1)//2
data = _convert_to_svm_node_array(x)
dblarr = svmc.new_double(n)
svmc.svm_predict_values(self.model, data, dblarr)
ret = _double_array_to_list(dblarr, n)
svmc.delete_double(dblarr)
svmc.svm_node_array_destroy(data)
return ret
def predict_values(self,x):
v=self.predict_values_raw(x)
if self.svm_type == NU_SVR or self.svm_type == EPSILON_SVR or self.svm_type == ONE_CLASS:
return v[0]
else: #self.svm_type == C_SVC or self.svm_type == NU_SVC
count = 0
d = {}
for i in range(len(self.labels)):
for j in range(i+1, len(self.labels)):
d[self.labels[i],self.labels[j]] = v[count]
d[self.labels[j],self.labels[i]] = -v[count]
count += 1
return d
def predict_probability(self,x):
#c code will do nothing on wrong type, so we have to check ourself
if self.svm_type == NU_SVR or self.svm_type == EPSILON_SVR:
raise TypeError, "call get_svr_probability or get_svr_pdf for probability output of regression"
elif self.svm_type == ONE_CLASS:
raise TypeError, "probability not supported yet for one-class problem"
#only C_SVC,NU_SVC goes in
if not self.probability:
raise TypeError, "model does not support probability estimates"
#convert x into svm_node, alloc a double array to receive probabilities
data = _convert_to_svm_node_array(x)
dblarr = svmc.new_double(self.nr_class)
pred = svmc.svm_predict_probability(self.model, data, dblarr)
pv = _double_array_to_list(dblarr, self.nr_class)
svmc.delete_double(dblarr)
svmc.svm_node_array_destroy(data)
p = {}
for i in range(len(self.labels)):
p[self.labels[i]] = pv[i]
return pred, p
def get_svr_probability(self):
#leave the Error checking to svm.cpp code
ret = svmc.svm_get_svr_probability(self.model)
if ret == 0:
raise TypeError, "not a regression model or probability information not available"
return ret
def get_svr_pdf(self):
#get_svr_probability will handle error checking
sigma = self.get_svr_probability()
return lambda z: exp(-fabs(z)/sigma)/(2*sigma)
def save(self,filename):
svmc.svm_save_model(filename,self.model)
def __del__(self):
svmc.svm_destroy_model(self.model)
def cross_validation(prob, param, fold):
if param.gamma == 0:
param.gamma = 1.0/prob.maxlen
dblarr = svmc.new_double(prob.size)
svmc.svm_cross_validation(prob.prob, param.param, fold, dblarr)
ret = _double_array_to_list(dblarr, prob.size)
svmc.delete_double(dblarr)
return ret
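# A minimal usage sketch of the bindings above (illustrative only, not part of
# the original module; assumes the compiled `svmc` extension is importable and
# uses toy data):
#     param = svm_parameter(kernel_type=LINEAR, C=10)
#     prob = svm_problem([1, -1], [{1: 1, 3: 1}, {1: -1, 3: -1}])
#     model = svm_model(prob, param)
#     model.predict({1: 1, 3: 1})  # expected to return 1.0 for this separable toy set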

TheAnosmic/cheetahs_byte | compile/break_to_atoms.py | Python | gpl-2.0 | 980 | 0

from node import NodeChain
from opcode_ import OPCode
def node_to_atom(node, iterator):
arg_size = node.get_arg_size()
atom = NodeChain(node)
while arg_size > 0:
try:
arg = iterator.next()
except StopIteration:
raise ValueError("Not enough arguments")
if isinstance(arg, OPCode):
raise ValueError("Currently not supporting"
" OPCode args")
atom.append(arg)
arg_size -= arg.get_size()
if arg_size < 0:
original_arg_size = node.get_arg_size()
real_arg_size = original_arg_size + abs(arg_size)
raise ValueError("Argument size mismatch,"
"Expecting: %s, Got %s"
"" % (original_arg_size, real_arg_size))
return atom
def break_to_atoms(node_chain):
i = iter(node_chain)
atoms = NodeChain()
for node in i:
atoms.append(node_to_atom(node, i))
return atoms

aeklant/scipy | scipy/stats/tests/test_kdeoth.py | Python | bsd-3-clause | 14,694 | 0.001157

from scipy import stats
import numpy as np
from numpy.testing import (assert_almost_equal, assert_,
assert_array_almost_equal, assert_array_almost_equal_nulp, assert_allclose)
import pytest
from pytest import raises as assert_raises
def test_kde_1d():
#some basic tests comparing to normal distribution
np.random.seed(8765678)
n_basesample = 500
xn = np.random.randn(n_basesample)
xnmean = xn.mean()
xnstd = xn.std(ddof=1)
# get kde for original sample
gkde = stats.gaussian_kde(xn)
# evaluate the density function for the kde for some points
xs = np.linspace(-7,7,501)
kdepdf = gkde.evaluate(xs)
normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd)
intervall = xs[1] - xs[0]
assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01)
prob1 = gkde.integrate_box_1d(xnmean, np.inf)
prob2 = gkde.integrate_box_1d(-np.inf, xnmean)
assert_almost_equal(prob1, 0.5, decimal=1)
assert_almost_equal(prob2, 0.5, decimal=1)
assert_almost_equal(gkde.integrate_box(xnmean, np.inf), prob1, decimal=13)
assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), prob2, decimal=13)
assert_almost_equal(gkde.integrate_kde(gkde),
(kdepdf**2).sum()*intervall, decimal=2)
assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2),
(kdepdf*normpdf).sum()*intervall, decimal=2)
def test_kde_1d_weighted():
#some basic tests comparing to normal distribution
np.random.seed(8765678)
n_basesample = 500
xn = np.random.randn(n_basesample)
wn = np.random.rand(n_basesample)
xnmean = np.average(xn, weights=wn)
xnstd = np.sqrt(np.average((xn-xnmean)**2, weights=wn))
# get kde for original sample
gkde = stats.gaussian_kde(xn, weights=wn)
# evaluate the density function for the kde for some points
xs = np.linspace(-7,7,501)
kdepdf = gkde.evaluate(xs)
normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd)
intervall = xs[1] - xs[0]
assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01)
prob1 = gkde.integrate_box_1d(xnmean, np.inf)
prob2 = gkde.integrate_box_1d(-np.inf, xnmean)
assert_almost_equal(prob1, 0.5, decimal=1)
assert_almost_equal(prob2, 0.5, decimal=1)
assert_almost_equal(gkde.integrate_box(xnmean, np.inf), prob1, decimal=13)
assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), prob2, decimal=13)
assert_almost_equal(gkde.integrate_kde(gkde),
(kdepdf**2).sum()*intervall, decimal=2)
assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2),
(kdepdf*normpdf).sum()*intervall, decimal=2)
@pytest.mark.slow
def test_kde_2d():
#some basic tests comparing to normal distribution
np.random.seed(8765678)
n_basesample = 500
mean = np.array([1.0, 3.0])
covariance = np.array([[1.0, 2.0], [2.0, 6.0]])
# Need transpose (shape (2, 500)) for kde
xn = np.random.multivariate_normal(mean, covariance, size=n_basesample).T
# get kde for original sample
gkde = stats.gaussian_kde(xn)
# evaluate the density function for the kde for some points
x, y = np.mgrid[-7:7:500j, -7:7:500j]
grid_coords = np.vstack([x.ravel(), y.ravel()])
kdepdf = gkde.evaluate(grid_coords)
kdepdf = kdepdf.reshape(500, 500)
normpdf = stats.multivariate_normal.pdf(np.dstack([x, y]), mean=mean, cov=covariance)
intervall = y.ravel()[1] - y.ravel()[0]
assert_(np.sum((kdepdf - normpdf)**2) * (intervall**2) < 0.01)
small = -1e100
large = 1e100
prob1 = gkde.integrate_box([small, mean[1]], [large, large])
prob2 = gkde.integrate_box([small, small], [large, mean[1]])
assert_almost_equal(prob1, 0.5, decimal=1)
assert_almost_equal(prob2, 0.5, decimal=1)
assert_almost_equal(gkde.integrate_kde(gkde),
(kdepdf**2).sum()*(intervall**2), decimal=2)
assert_almost_equal(gkde.integrate_gaussian(mean, covariance),
(kdepdf*normpdf).sum()*(intervall**2), decimal=2)
@pytest.mark.slow
def test_kde_2d_weighted():
#some basic tests comparing to normal distribution
np.random.seed(8765678)
n_basesample = 500
mean = np.array([1.0, 3.0])
covariance = np.array([[1.0, 2.0], [2.0, 6.0]])
# Need transpose (shape (2, 500)) for kde
xn = np.random.multivariate_normal(mean, covariance, size=n_basesample).T
wn = np.random.rand(n_basesample)
# get kde for original sample
gkde = stats.gaussian_kde(xn, weights=wn)
# evaluate the density function for the kde for some points
x, y = np.mgrid[-7:7:500j, -7:7:500j]
grid_coords = np.vstack([x.ravel(), y.ravel()])
kdepdf = gkde.evaluate(grid_coords)
kdepdf = kdepdf.reshape(500, 500)
normpdf = stats.multivariate_normal.pdf(np.dstack([x, y]), mean=mean, cov=covariance)
intervall = y.ravel()[1] - y.ravel()[0]
assert_(np.sum((kdepdf - normpdf)**2) * (intervall**2) < 0.01)
small = -1e100
large = 1e100
prob1 = gkde.integrate_box([small, mean[1]], [large, large])
prob2 = gkde.integrate_box([small, small], [large, mean[1]])
assert_almost_equal(prob1, 0.5, decimal=1)
assert_almost_equal(prob2, 0.5, decimal=1)
assert_almost_equal(gkde.integrate_kde(gkde),
(kdepdf**2).sum()*(intervall**2), decimal=2)
assert_almost_equal(gkde.integrate_gaussian(mean, covariance),
(kdepdf*normpdf).sum()*(intervall**2), decimal=2)
def test_kde_bandwidth_method():
def scotts_factor(kde_obj):
"""Same as default, just check that it works."""
return np.power(kde_obj.n, -1./(kde_obj.d+4))
np.random.seed(8765678)
n_basesample = 50
xn = np.random.randn(n_basesample)
# Default
gkde = stats.gaussian_kde(xn)
# Supply a callable
gkde2 = stats.gaussian_kde(xn, bw_method=scotts_factor)
# Supply a scalar
gkde3 = stats.gaussian_kde(xn, bw_method=gkde.factor)
xs = np.linspace(-7,7,51)
kdepdf = gkde.evaluate(xs)
kdepdf2 = gkde2.evaluate(xs)
assert_almost_equal(kdepdf, kdepdf2)
kdepdf3 = gkde3.evaluate(xs)
assert_almost_equal(kdepdf, kdepdf3)
assert_raises(ValueError, stats.gaussian_kde, xn, bw_method='wrongstring')
def test_kde_bandwidth_method_weighted():
def scotts_factor(kde_obj):
"""Same as default, just check that it works."""
return np.power(kde_obj.neff, -1./(kde_obj.d+4))
np.random.seed(8765678)
n_basesample = 50
xn = np.random.randn(n_basesample)
# Default
gkde = stats.gaussian_kde(xn)
# Supply a callable
gkde2 = stats.gaussian_kde(xn, bw_method=scotts_factor)
# Supply a scalar
gkde3 = stats.gaussian_kde(xn, bw_method=gkde.factor)
xs = np.linspace(-7,7,51)
kdepdf = gkde.evaluate(xs)
kdepdf2 = gkde2.evaluate(xs)
assert_almost_equal(kdepdf, kdepdf2)
kdepdf3 = gkde3.evaluate(xs)
assert_almost_equal(kdepdf, kdepdf3)
assert_raises(ValueError, stats.gaussian_kde, xn, bw_method='wrongstring')
# Subclasses that should stay working (extracted from various sources).
# Unfortunately the earlier design of gaussian_kde made it necessary for users
# to create these kinds of subclasses, or call _compute_covariance() directly.
class _kde_subclass1(stats.gaussian_kde):
def __init__(self, dataset):
self.dataset = np.atleast_2d(dataset)
self.d, self.n = self.dataset.shape
self.covariance_factor = self.scotts_factor
self._compute_covariance()
class _kde_subclass2(stats.gaussian_kde):
def __init__(self, dataset):
self.covariance_factor = self.scotts_factor
super(_kde_subclass2, self).__init__(dataset)
class _kde_subclass3(stats.gaussian_kde):
def __init__(self, dataset, covariance):
self.covariance = covariance
stats.gaussian_kde.__init__(self, dataset)
def _compute_covariance(self):
self.inv_cov = np.linalg.inv(self.covariance)
self._norm_factor = np.sqrt(np.linalg.det(2 * np.pi * self.covariance))
class _kde_subclass4(stats.gaussian_kde):
def covariance_factor(self):
return 0.5 * se

imperodesign/paas-tools | deis/stg/controller/api/south_migrations/0024_auto__chg_field_key_fingerprint__del_unique_key_owner_id__add_unique_k.py | Python | mit | 12,806 | 0.008199

# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'Key', fields ['owner', 'id']
db.delete_unique(u'api_key', ['owner_id', 'id'])
# Changing field 'Key.fingerprint'
db.alter_column(u'api_key', 'fingerprint', self.gf('django.db.models.fields.CharField')(default='', max_length=128))
# Adding unique constraint on 'Key', fields ['owner', 'fingerprint']
db.create_unique(u'api_key', ['owner_id', 'fingerprint'])
def backwards(self, orm):
# Removing unique constraint on 'Key', fields ['owner', 'fingerprint']
db.delete_unique(u'api_key', ['owner_id', 'fingerprint'])
# Changing field 'Key.fingerprint'
db.alter_column(u'api_key', 'fingerprint', self.gf('django.db.models.fields.CharField')(max_length=128, null=True))
# Adding unique constraint on 'Key', fields ['owner', 'id']
db.create_unique(u'api_key', ['owner_id', 'id'])
models = {
u'api.app': {
'Meta': {'object_name': 'App'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.SlugField', [], {'default': "'grassy-kerchief'", 'unique': 'True', 'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'structure': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'})
},
u'api.build': {
'Meta': {'ordering': "[u'-created']", 'unique_together': "((u'app', u'uuid'),)", 'object_name': 'Build'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.App']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dockerfile': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'procfile': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'sha': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'})
},
u'api.certificate': {
'Meta': {'object_name': 'Certificate'},
'certificate': ('django.db.models.fields.TextField', [], {}),
'common_name': ('django.db.models.fields.TextField', [], {'unique': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.TextField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'api.config': {
'Meta': {'ordering': "[u'-created']", 'unique_together': "((u'app', u'uuid'),)", 'object_name': 'Config'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.App']"}),
'cpu': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'memory': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'tags': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'}),
'values': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'})
},
u'api.container': {
'Meta': {'ordering': "[u'created']", 'object_name': 'Container'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.App']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.Release']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'})
},
u'api.domain': {
'Meta': {'object_name': 'Domain'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.App']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'domain': ('django.db.models.fields.TextField', [], {'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'api.key': {
'Meta': {'unique_together': "((u'owner', u'fingerprint'),)", 'object_name': 'Key'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fingerprint': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'public': ('django.db.models.fields.TextField', [], {'unique': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'})
},
u'api.push': {
'Meta': {'ordering': "[u'-created']", 'unique_together': "((u'app', u'uuid'),)", 'object_name': 'Push'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.App']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fingerprint': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'receive_repo': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'receive_user': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sha': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'ssh_connection': ('django.db.models.fields.CharField', [], {'max_length': '255'}),

dreamhost/ceilometer | tests/api/v2/test_max_project_volume.py | Python | apache-2.0 | 5,448 | 0

# -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Author: Steven Berler <steven.berler@dreamhost.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test getting the max resource volume.
"""
import datetime
from oslo.config import cfg
from ceilometer.collector import meter
from ceilometer import counter
from ceilometer.storage.impl_mongodb import require_map_reduce
from .base import FunctionalTest
class TestMaxProjectVolume(FunctionalTest):
PATH = '/meters/volume.size/statistics'
def setUp(self):
super(TestMaxProjectVolume, self).setUp()
require_map_reduce(self.conn)
self.counters = []
for i in range(3):
c = counter.Counter(
'volume.size',
'gauge',
'GiB',
5 + i,
'user-id',
'project1',
'resource-id-%s' % i,
timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i),
resource_metadata={'display_name': 'test-volume',
'tag': 'self.counter',
}
)
self.counters.append(c)
msg = meter.meter_message_from_counter(c,
cfg.CONF.metering_secret,
'source1',
)
self.conn.record_metering_data(msg)
def test_no_time_bounds(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
}])
self.assertEqual(data[0]['max'], 7)
self.assertEqual(data[0]['count'], 3)
def test_start_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T11:30:00',
},
])
self.assertEqual(data[0]['max'], 7)
self.assertEqual(data[0]['count'], 2)
def test_start_timestamp_after(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T12:34:00',
},
])
self.assertEqual(data, [])
def test_end_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'le',
'value': '2012-09-25T11:30:00',
},
])
self.assertEqual(data[0]['max'], 5)
self.assertEqual(data[0]['count'], 1)
def test_end_timestamp_before(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'le',
'value': '2012-09-25T09:54:00',
},
])
self.assertEqual(data, [])
def test_start_end_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T11:30:00',
},
{'field': 'timestamp',
'op': 'le',
'value': '2012-09-25T11:32:00',
},
])
self.assertEqual(data[0]['max'], 6)
self.assertEqual(data[0]['count'], 1)

apache/libcloud | libcloud/compute/drivers/gce.py | Python | apache-2.0 | 389,823 | 0.000441

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for Google Compute Engine Driver.
"""
from __future__ import with_statement
import datetime
import time
import itertools
import sys
from libcloud.common.base import LazyObject
from libcloud.common.google import GoogleOAuth2Credential
from libcloud.common.google import GoogleResponse
from libcloud.common.google import GoogleBaseConnection
from libcloud.common.google import GoogleBaseError
from libcloud.common.google import ResourceNotFoundError
from libcloud.common.google import ResourceExistsError
from libcloud.common.types import LibcloudError
from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeLocation
from libcloud.compute.base import NodeSize, StorageVolume, VolumeSnapshot
from libcloud.compute.base import UuidMixin
from libcloud.compute.providers import Provider
from libcloud.compute.types import NodeState
from libcloud.utils.iso8601 import parse_date
from libcloud.pricing import get_pricing
API_VERSION = "v1"
DEFAULT_TASK_COMPLETION_TIMEOUT = 180
def timestamp_to_datetime(timestamp):
"""
Return a datetime object that corresponds to the time in an RFC3339
timestamp.
:param timestamp: RFC3339 timestamp string
:type timestamp: ``str``
:return: Datetime object corresponding to timestamp
:rtype: :class:`datetime.datetime`
"""
# We remove timezone offset and microseconds (Python 2.5 strptime doesn't
# support %f)
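# Worked example (illustrative): for '2013-06-26T10:05:19.340-07:00' the slice
# [:-10] leaves '2013-06-26T10:05:19', tz_hours is 7 and tz_mins is 0, so the
# result is datetime(2013, 6, 26, 17, 5, 19), the UTC wall-clock equivalent.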
ts = datetime.datetime.strptime(timestamp[:-10], "%Y-%m-%dT%H:%M:%S")
tz_hours = int(timestamp[-5:-3])
tz_mins = int(timestamp[-2:]) * int(timestamp[-6:-5] + "1")
tz_delta = datetime.timedelta(hours=tz_hours, minutes=tz_mins)
return ts + tz_delta
class GCEResponse(GoogleResponse):
pass
class GCEConnection(GoogleBaseConnection):
"""
Connection class for the GCE driver.
GCEConnection extends :class:`google.GoogleBaseConnection` for 3 reasons:
1. modify request_path for GCE URI.
2. Implement gce_params functionality described below.
3. Add request_aggregated_items method for making aggregated API calls.
If the parameter gce_params is set to a dict prior to calling request(),
the URL parameters will be updated to include those key/values FOR A
SINGLE REQUEST. If the response contains a nextPageToken,
gce_params['pageToken'] will be set to its value. This can be used to
implement paging in list:
>>> params, more_results = {'maxResults': 2}, True
>>> while more_results:
... driver.connection.gce_params=params
... driver.ex_list_urlmaps()
... more_results = 'pageToken' in params
...
[<GCEUrlMap id="..." name="cli-map">, <GCEUrlMap id="..." name="lc-map">]
[<GCEUrlMap id="..." name="web-map">]
"""
host = "www.googleapis.com"
responseCls = GCEResponse
def __init__(
self,
user_id,
key,
secure,
auth_type=None,
credential_file=None,
project=None,
**kwargs,
):
super(GCEConnection, self).__init__(
user_id,
key,
secure=secure,
auth_type=auth_type,
credential_file=credential_file,
**kwargs,
)
self.request_path = "/compute/%s/projects/%s" % (API_VERSION, project)
self.gce_params = None
def pre_connect_hook(self, params, headers):
"""
Update URL parameters with values from self.gce_params.
@inherits: :class:`GoogleBaseConnection.pre_connect_hook`
"""
params, headers = super(GCEConnection, self).pre_connect_hook(params, headers)
if self.gce_params:
params.update(self.gce_params)
return params, headers
def paginated_request(self, *args, **kwargs):
"""
Generic function to create a paginated request to any API call
not only aggregated or zone ones as request_aggregated_items.
@inherits: :class:`GoogleBaseConnection.request`
"""
more_results = True
items = []
max_results = kwargs["max_results"] if "max_results" in kwargs else 500
params = {"maxResults": max_results}
while more_results:
self.gce_params = params
response = self.request(*args, **kwargs)
items.extend(response.object.get("items", []))
more_results = "pageToken" in params
return {"items": items}
def request(self, *args, **kwargs):
"""
Perform request then do GCE-specific processing of URL params.
@inherits: :class:`GoogleBaseConnection.request`
"""
response = super(GCEConnection, self).request(*args, **kwargs)
# If gce_params has been set, then update the pageToken with the
# nextPageToken so it can be used in the next request.
if self.gce_params:
if "nextPageToken" in response.object:
self.gce_params["pageToken"] = response.object["nextPageToken"]
elif "pageToken" in self.gce_params:
del self.gce_params["pageToken"]
self.gce_params = None
return response
def request_aggregated_items(self, api_name, zone=None):
"""
Perform request(s) to obtain all results from 'api_name'.
This method will make requests to the aggregated 'api_name' until
all results are received. It will then, through a helper function,
combine all results and return a single 'items' dictionary.
:param api_name: Name of API to call. Consult API docs
for valid names.
:type api_name: ``str``
:param zone: Optional zone to use.
:type zone: :class:`GCEZone`
:return: dict in the format of the API response.
format: { 'items': {'key': {api_name: []}} }
ex: { 'items': {'zones/us-central1-a': {disks: []}} }
:rtype: ``dict``
"""
if zone:
request_path = "/zones/%s/%s" % (zone.name, api_name)
else:
request_path = "/aggregated/%s" % (api_name)
api_responses = []
params = {"maxResults": 500}
more_results = True
while more_results:
self.gce_params = params
response = self.request(request_path, method="GET").object
if "items" in response:
if zone:
# Special case when we are handling pagination for a
# specific zone
items = response["items"]
response["items"] = {"zones/%s" % (zone): {api_name: items}}
api_responses.append(response)
more_results = "pageToken" in params
return self._merge_response_items(api_name, api_responses)
def _merge_response_items(self, list_name, response_list):
"""
Take a list of API responses ("item"-portion only) and combine them.
Helper function to combine multiple aggregated responses into a single
dictionary that resembles an API response.
Note: keys that don't have a 'list_name' key (including warnings)
are omitted.
:param list_name: Name of list in dict. Practically, this is
the name of the API called (e.g. 'disks').
:type list_name: ``str``
:param response_list: list

tomevans/utils | constants.py | Python | gpl-2.0 | 370 | 0.008108

import scipy
# Units in SI, i.e. not cgs
RSUN = 6.955e8
MSUN = 1.9889e30
MJUP = 1.8986e27
RJUP = 7.149e7
REARTH = 6.371e6
DAY2S = 86400.0
DEG2RAD = scipy.pi/180.
AU = 1.496e11
PLANCKH = 6.626e-34
BOLTZK = 1.38065e-23
C = 2.9979e8 # speed of light in vacuum in m s^-1
G = 6.673e-11 # gravitational constant in m^3 kg^-1 s^-2
RGAS = 8.314 # gas constant in J mol^-1 K^-1
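# Example use of the values above (illustrative): Jupiter's surface gravity,
# G*MJUP/RJUP**2 = 6.673e-11 * 1.8986e27 / (7.149e7)**2, is roughly 24.8 m s^-2.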

8u1a/my_matasano_crypto_challenges | set1/challenge7.py | Python | unlicense | 4,632 | 0.002375

__author__ = 'christianbuia'
from Crypto.Cipher import AES
import base64
#-----------------------------------------------------------------------------------------------------------------------
def solve_challenge(b64_crypt):
ciphertext = base64.decodebytes(bytes(b64_crypt, "ascii"))
key="YELLOW SUBMARINE"
#http://bityard.blogspot.com/2010/10/symmetric-encryption-with-pycrypto-part.html
decobj = AES.new(key, AES.MODE_ECB)
plaintext = decobj.decrypt(ciphertext)
print(plaintext.decode("utf-8"))
return True
#=======================================================================================================================
b64_crypt = """CRIwqt4+szDbqkNY+I0qbDe3LQz0wiw0SuxBQtAM5TDdMbjCMD/venUDW9BL
PEXODbk6a48oMbAY6DDZsuLbc0uR9cp9hQ0QQGATyyCESq2NSsvhx5zKlLtz
dsnfK5ED5srKjK7Fz4Q38/ttd+stL/9WnDzlJvAo7WBsjI5YJc2gmAYayNfm
CW2lhZE/ZLG0CBD2aPw0W417QYb4cAIOW92jYRiJ4PTsBBHDe8o4JwqaUac6
rqdi833kbyAOV/Y2RMbN0oDb9Rq8uRHvbrqQJaJieaswEtMkgUt3P5Ttgeh7
J+hE6TR0uHot8WzHyAKNbUWHoi/5zcRCUipvVOYLoBZXlNu4qnwoCZRSBgvC
wTdz3Cbsp/P2wXB8tiz6l9rL2bLhBt13Qxyhhu0H0+JKj6soSeX5ZD1Rpilp
9ncR1tHW8+uurQKyXN4xKeGjaKLOejr2xDIw+aWF7GszU4qJhXBnXTIUUNUf
RlwEpS6FZcsMzemQF30ezSJHfpW7DVHzwiLyeiTJRKoVUwo43PXupnJXDmUy
sCa2nQz/iEwyor6kPekLv1csm1Pa2LZmbA9Ujzz8zb/gFXtQqBAN4zA8/wt0
VfoOsEZwcsaLOWUPtF/Ry3VhlKwXE7gGH/bbShAIKQqMqqUkEucZ3HPHAVp7
ZCn3Ox6+c5QJ3Uv8V7L7SprofPFN6F+kfDM4zAc59do5twgDoClCbxxG0L19
TBGHiYP3CygeY1HLMrX6KqypJfFJW5O9wNIF0qfOC2lWFgwayOwq41xdFSCW
0/EBSc7cJw3N06WThrW5LimAOt5L9c7Ik4YIxu0K9JZwAxfcU4ShYu6euYmW
LP98+qvRnIrXkePugS9TSOJOHzKUoOcb1/KYd9NZFHEcp58Df6rXFiz9DSq8
0rR5Kfs+M+Vuq5Z6zY98/SP0A6URIr9NFu+Cs9/gf+q4TRwsOzRMjMQzJL8f
7TXPEHH2+qEcpDKz/5pE0cvrgHr63XKu4XbzLCOBz0DoFAw3vkuxGwJq4Cpx
kt+eCtxSKUzNtXMn/mbPqPl4NZNJ8yzMqTFSODS4bYTBaN/uQYcOAF3NBYFd
5x9TzIAoW6ai13a8h/s9i5FlVRJDe2cetQhArrIVBquF0L0mUXMWNPFKkaQE
BsxpMCYh7pp7YlyCNode12k5jY1/lc8jQLQJ+EJHdCdM5t3emRzkPgND4a7O
NhoIkUUS2R1oEV1toDj9iDzGVFwOvWyt4GzA9XdxT333JU/n8m+N6hs23MBc
Z086kp9rJGVxZ5f80jRz3ZcjU6zWjR9ucRyjbsuVn1t4EJEm6A7KaHm13m0v
wN/O4KYTiiY3aO3siayjNrrNBpn1OeLv9UUneLSCdxcUqjRvOrdA5NYv25Hb
4wkFCIhC/Y2ze/kNyis6FrXtStcjKC1w9Kg8O25VXB1Fmpu+4nzpbNdJ9LXa
hF7wjOPXN6dixVKpzwTYjEFDSMaMhaTOTCaqJig97624wv79URbCgsyzwaC7
YXRtbTstbFuEFBee3uW7B3xXw72mymM2BS2uPQ5NIwmacbhta8aCRQEGqIZ0
78YrrOlZIjar3lbTCo5o6nbbDq9bvilirWG/SgWINuc3pWl5CscRcgQQNp7o
LBgrSkQkv9AjZYcvisnr89TxjoxBO0Y93jgp4T14LnVwWQVx3l3d6S1wlsci
dVeaM24E/JtS8k9XAvgSoKCjyiqsawBMzScXCIRCk6nqX8ZaJU3rZ0LeOMTU
w6MC4dC+aY9SrCvNQub19mBdtJUwOBOqGdfd5IoqQkaL6DfOkmpnsCs5PuLb
GZBVhah5L87IY7r6TB1V7KboXH8PZIYc1zlemMZGU0o7+etxZWHgpdeX6JbJ
Is3ilAzYqw/Hz65no7eUxcDg1aOaxemuPqnYRGhW6PvjZbwAtfQPlofhB0jT
Ht5bRlzF17rn9q/6wzlc1ssp2xmeFzXoxffpELABV6+yj3gfQ/bxIB9NWjdZ
K08RX9rjm9CcBlRQeTZrD67SYQWqRpT5t7zcVDnx1s7ZffLBWm/vXLfPzMaQ
YEJ4EfoduSutjshXvR+VQRPs2TWcF7OsaE4csedKUGFuo9DYfFIHFDNg+1Py
rlWJ0J/X0PduAuCZ+uQSsM/ex/vfXp6Z39ngq4exUXoPtAIqafrDMd8SuAty
EZhyY9V9Lp2qNQDbl6JI39bDz+6pDmjJ2jlnpMCezRK89cG11IqiUWvIPxHj
oiT1guH1uk4sQ2Pc1J4zjJNsZgoJDcPBbfss4kAqUJvQyFbzWshhtVeAv3dm
gwUENIhNK/erjpgw2BIRayzYw001jAIF5c7rYg38o6x3YdAtU3d3QpuwG5xD
fODxzfL3yEKQr48C/KqxI87uGwyg6H5gc2AcLU9JYt5QoDFoC7PFxcE3RVqc
7/Um9Js9X9UyriEjftWt86/tEyG7F9tWGxGNEZo3MOydwX/7jtwoxQE5ybFj
WndqLp8DV3naLQsh/Fz8JnTYHvOR72vuiw/x5D5PFuXV0aSVvmw5Wnb09q/B
owS14WzoHH6ekaWbh78xlypn/L/M+nIIEX1Ol3TaVOqIxvXZ2sjm86xRz0Ed
oHFfupSekdBULCqptxpFpBshZFvauUH8Ez7wA7wjL65GVlZ0f74U7MJVu9Sw
sZdgsLmnsQvr5n2ojNNBEv+qKG2wpUYTmWRaRc5EClUNfhzh8iDdHIsl6edO
ewORRrNiBay1NCzlfz1cj6VlYYQUM9bDEyqrwO400XQNpoFOxo4fxUdd+AHm
CBhHbyCR81/C6LQTG2JQBvjykG4pmoqnYPxDyeiCEG+JFHmP1IL+jggdjWhL
WQatslrWxuESEl3PEsrAkMF7gt0dBLgnWsc1cmzntG1rlXVi/Hs2TAU3RxEm
MSWDFubSivLWSqZj/XfGWwVpP6fsnsfxpY3d3h/fTxDu7U8GddaFRQhJ+0ZO
dx6nRJUW3u6xnhH3mYVRk88EMtpEpKrSIWfXphgDUPZ0f4agRzehkn9vtzCm
NjFnQb0/shnqTh4Mo/8oommbsBTUKPYS7/1oQCi12QABjJDt+LyUan+4iwvC
i0k0IUIHvk21381vC0ixYDZxzY64+xx/RNID+iplgzq9PDZgjc8L7jMg+2+m
rxPS56e71m5E2zufZ4d+nFjIg+dHD/ShNPzVpXizRVUERztLuak8Asah3/yv
wOrH1mKEMMGC1/6qfvZUgFLJH5V0Ep0n2K/Fbs0VljENIN8cjkCKdG8aBnef
EhITdV7CVjXcivQ6efkbOQCfkfcwWpaBFC8tD/zebXFE+JshW16D4EWXMnSm
/9HcGwHvtlAj04rwrZ5tRvAgf1IR83kqqiTvqfENcj7ddCFwtNZrQK7EJhgB
5Tr1tBFcb9InPRtS3KYteYHl3HWR9t8E2YGE8IGrS1sQibxaK/C0kKbqIrKp
npwtoOLsZPNbPw6K2jpko9NeZAx7PYFmamR4D50KtzgELQcaEsi5aCztMg7f
p1mK6ijyMKIRKwNKIYHagRRVLNgQLg/WTKzGVbWwq6kQaQyArwQCUXo4uRty
zGMaKbTG4dns1OFB1g7NCiPb6s1lv0/lHFAF6HwoYV/FPSL/pirxyDSBb/FR
RA3PIfmvGfMUGFVWlyS7+O73l5oIJHxuaJrR4EenzAu4Avpa5d+VuiYbM10a
LaVegVPvFn4pCP4U/Nbbw4OTCFX2HKmWEiVBB0O3J9xwXWpxN1Vr5CDi75Fq
NhxYCjgSJzWOUD34Y1dAfcj57VINmQVEWyc8Tch8vg9MnHGCOfOjRqp0VGyA
S15AVD2QS1V6fhRimJSVyT6QuGb8tKRsl2N+a2Xze36vgMhw7XK7zh//jC2H""".replace("\n", "")
solve_challenge(b64_crypt)

popravich/elasticmagic | tests/test_codec.py | Python | apache-2.0 | 1,724 | 0.00174

import unittest
from elasticmagic.types import Integer, Float, Boolean
from elasticmagic.ext.queryfilter.codec import SimpleCodec
class SimpleCodecTest(unittest.TestCase):
def test_decode(self):
codec = SimpleCodec()
self.assertEqual(
codec.decode({'country': ['ru', 'ua', 'null']}),
{
'country': {
'exact': [['ru'], ['ua'], [None]],
}
}
)
self.assertEqual(
codec.decode({'category': ['5', '6:a', 'b:c', 'null']}, {'category': [Integer]}),
{
'category': {
'exact': [[5], [6, 'a'], [None]]
}
}
)
self.assertEqual(
codec.decode({'manu': ['1:nokia:true', '2:samsung:false']}, {'manu': [Integer, None, Boolean]}),
{
'manu': {
'exact': [[1, 'nokia', True], [2, 'samsung', False]],
}
}
)
self.assertEqual(
codec.decode({'is_active': ['true']}, {'is_active': Boolean}),
{
'is_active': {
'exact': [[True]],
}
}
)
self.assertEqual(
codec.decode([('price__gte', ['100.1', '101.0'])
, ('price__lte', ['200'])], {'price': Float}),
{
'price': {
'gte': [[100.1], [101.0]],
'lte': [[200.0]],
}
}
)
self.assertEqual(
codec.decode({'price__lte': '123a:bc'}, {'price': [Float]}),
{}
)
self.assertRaises(TypeError, lambda: codec.decode(''))

ssic7i/rpi_weather_app | form_ui.py | Python | mit | 8,911 | 0.001131

# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'form_ui.ui'
#
# Created: Fri Apr 01 21:42:03 2016
# by: PyQt4 UI code generator 4.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(500, 350)
self.label = QtGui.QLabel(Form)
self.label.setGeometry(QtCore.QRect(40, 40, 121, 16))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName(_fromUtf8("label"))
self.lcdNumber_temp = QtGui.QLCDNumber(Form)
self.lcdNumber_temp.setGeometry(QtCore.QRect(10, 60, 181, 61))
font = QtGui.QFont()
font.setFamily(_fromUtf8("MS Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.lcdNumber_temp.setFont(font)
self.lcdNumber_temp.setFrameShape(QtGui.QFrame.Panel)
self.lcdNumber_temp.setLineWidth(1)
self.lcdNumber_temp.setNumDigits(7)
self.lcdNumber_temp.setSegmentStyle(QtGui.QLCDNumber.Flat)
self.lcdNumber_temp.setProperty("value", 0.0)
self.lcdNumber_temp.setObjectName(_fromUtf8("lcdNumber_temp"))
self.label_2 = QtGui.QLabel(Form)
self.label_2.setGeometry(QtCore.QRect(210, 40, 101, 16))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.lcdNumber_hum = QtGui.QLCDNumber(Form)
self.lcdNumber_hum.setGeometry(QtCore.QRect(210, 60, 101, 61))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.lcdNumber_hum.setFont(font)
self.lcdNumber_hum.setFrameShape(QtGui.QFrame.Panel)
self.lcdNumber_hum.setNumDigits(4)
self.lcdNumber_hum.setSegmentStyle(QtGui.QLCDNumber.Flat)
self.lcdNumber_hum.setProperty("value", 0.0)
self.lcdNumber_hum.setObjectName(_fromUtf8("lcdNumber_hum"))
self.label_3 = QtGui.QLabel(Form)
self.label_3.setGeometry(QtCore.QRect(10, 10, 141, 16))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.dateTimeEdit_time = QtGui.QDateTimeEdit(Form)
self.dateTimeEdit_time.setEnabled(True)
self.dateTimeEdit_time.setGeometry(QtCore.QRect(150, 10, 201, 22))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.dateTimeEdit_time.setFont(font)
self.dateTimeEdit_time.setFocusPolicy(QtCore.Qt.NoFocus)
self.dateTimeEdit_time.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.dateTimeEdit_time.setFrame(True)
self.dateTimeEdit_time.setReadOnly(True)
self.dateTimeEdit_time.setKeyboardTracking(False)
self.dateTimeEdit_time.setObjectName(_fromUtf8("dateTimeEdit_time"))
self.label_4 = QtGui.QLabel(Form)
self.label_4.setGeometry(QtCore.QRect(10, 200, 71, 20))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_4.setFont(font)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.label_5 = QtGui.QLabel(Form)
self.label_5.setGeometry(QtCore.QRect(10, 265, 71, 21))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_5.setFont(font)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.label_6 = QtGui.QLabel(Form)
self.label_6.setGeometry(QtCore.QRect(10, 236, 111, 20))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_6.setFont(font)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.label_cloud_text = QtGui.QLabel(Form)
self.label_cloud_text.setGeometry(QtCore.QRect(130, 235, 111, 21))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_cloud_text.setFont(font)
self.label_cloud_text.setObjectName(_fromUtf8("label_cloud_text"))
self.label_9 = QtGui.QLabel(Form)
self.label_9.setGeometry(QtCore.QRect(360, 40, 101, 16))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_9.setFont(font)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.lcdNumber_pres = QtGui.QLCDNumber(Form)
self.lcdNumber_pres.setGeometry(QtCore.QRect(330, 60, 141, 61))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.lcdNumber_pres.setFont(font)
self.lcdNumber_pres.setFrameShape(QtGui.QFrame.Panel)
self.lcdNumber_pres.setNumDigits(5)
self.lcdNumber_pres.setSegmentStyle(QtGui.QLCDNumber.Flat)
self.lcdNumber_pres.setProperty("value", 0.0)
self.lcdNumber_pres.setObjectName(_fromUtf8("lcdNumber_pres"))
self.label_wind_speed_text = QtGui.QLabel(Form)
self.label_wind_speed_text.setGeometry(QtCore.QRect(80, 265, 71, 21))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_wind_speed_text.setFont(font)
self.label_wind_speed_text.setObjectName(_fromUtf8("label_wind_speed_text"))
self.label_wind_dest_text = QtGui.QLabel(Form)
self.label_wind_dest_text.setGeometry(QtCore.QRect(150, 266, 71, 20))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_wind_dest_text.setFont(font)
self.label_wind_dest_text.setObjectName(_fromUtf8("label_wind_dest_text"))
self.label_weather_descr_text = QtGui.QLabel(Form)
self.label_weather_descr_text.setGeometry(QtCore.QRect(90, 200, 101, 21))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_weather_descr_text.setFont(font)
self.label_weather_descr_text.setObjectName(_fromUtf8("label_weather_descr_text"))
self.label_7 = QtGui.QLabel(Form)
self.label_7.setGeometry(QtCore.QRect(10, 320, 141, 21))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_7.setFont(font)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.label_time_date = QtGui.QLabel(Form)
self.label_time_date.setGeometry(QtCore.QRect(150, 320, 341, 19))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_time_date.setFont(font)
self.label_time_date.setObjectName(_fromUtf8("label_time_date"))
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "rpi weather", None))
self.label.setText(_translate("Form", "Температура", None))
self.label_2.setText(_translate("Form", "Влажность", None))
self.label_3.setText(_translate("Form", "Текущее время", None))
self.label_4.setText(_translate("Form", "Погода:", None))
self.label_5.setText

HybridF5/jacket | jacket/compute/image/download/file.py | Python | apache-2.0 | 7,388 | 0.000677

# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from jacket.compute import exception
from jacket.i18n import _, _LI
import jacket.compute.image.download.base as xfer_base
import jacket.compute.virt.libvirt.utils as lv_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
opt_group = cfg.ListOpt(name='filesystems', default=[],
help=_('List of file systems that are configured '
'in this file in the '
'image_file_url:<list entry name> '
'sections'))
CONF.register_opt(opt_group, group="image_file_url")
# This module extends the configuration options for compute.conf. If the user
# wishes to use the specific configuration settings the following needs to
# be added to compute.conf:
# [image_file_url]
# filesystems = <a list of strings referencing a config section>
#
# For each entry in the filesystem list a new configuration section must be
# added with the following format:
# [image_file_url:<list entry>]
# id = <string>
# mountpoint = <string>
#
# id:
# An opaque string. In order for this module to know that the remote
# FS is the same one that is mounted locally it must share information
# with the glance deployment. Both glance and compute-compute must be
# configured with a unique matching string. This ensures that the
# file:// advertised URL is describing a file system that is known
# to compute-compute
# mountpoint:
# The location at which the file system is locally mounted. Glance
# may mount a shared file system on a different path than compute-compute.
# This value will be compared against the metadata advertised with
# glance and paths will be adjusted to ensure that the correct file
#     is copied.
#
# If these values are not added to compute.conf and the file module is in the
# allowed_direct_url_schemes list, then the legacy behavior will occur such
# that a copy will be attempted assuming that the glance and compute file systems
# are the same.
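#
# Purely as an illustration (the section name and values below are hypothetical,
# not taken from the original source), a compute.conf fragment following the
# scheme described above could look like:
#
#     [image_file_url]
#     filesystems = shared_fs
#
#     [image_file_url:shared_fs]
#     id = b7c4a1f0-example-store
#     mountpoint = /mnt/shared_images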
class FileTransfer(xfer_base.TransferBase):
desc_required_keys = ['id', 'mountpoint']
# NOTE(jbresnah) because the group under which these options are added is
    # dynamically determined these options need to stay out of global space
# or they will confuse generate_sample.sh
filesystem_opts = [
        cfg.StrOpt('id',
help=_('A unique ID given to each file system. This is '
'value is set in Glance and agreed upon here so '
'that the operator knowns they are dealing with '
'the same file system.')),
cfg.StrOpt('mountpoint',
help=_('The path at which the file system is mounted.')),
]
def _get_options(self):
fs_dict = {}
for fs in CONF.image_file_url.filesystems:
group_name = 'image_file_url:' + fs
conf_group = CONF[group_name]
if conf_group.id is None:
msg = _('The group %(group_name)s must be configured with '
'an id.') % {'group_name': group_name}
raise exception.ImageDownloadModuleConfigurationError(
module=str(self), reason=msg)
fs_dict[CONF[group_name].id] = CONF[group_name]
return fs_dict
def __init__(self):
# create the needed options
for fs in CONF.image_file_url.filesystems:
group_name = 'image_file_url:' + fs
CONF.register_opts(self.filesystem_opts, group=group_name)
def _verify_config(self):
for fs_key in self.filesystems:
for r in self.desc_required_keys:
fs_ent = self.filesystems[fs_key]
if fs_ent[r] is None:
msg = _('The key %s is required in all file system '
'descriptions.')
LOG.error(msg)
raise exception.ImageDownloadModuleConfigurationError(
module=str(self), reason=msg)
def _file_system_lookup(self, metadata, url_parts):
for r in self.desc_required_keys:
if r not in metadata:
url = url_parts.geturl()
msg = _('The key %(r)s is required in the location metadata '
'to access the url %(url)s.') % {'r': r, 'url': url}
LOG.info(msg)
raise exception.ImageDownloadModuleMetaDataError(
module=str(self), reason=msg)
id = metadata['id']
if id not in self.filesystems:
msg = _('The ID %(id)s is unknown.') % {'id': id}
LOG.info(msg)
return
fs_descriptor = self.filesystems[id]
return fs_descriptor
def _normalize_destination(self, nova_mount, glance_mount, path):
if not path.startswith(glance_mount):
msg = (_('The mount point advertised by glance: %(glance_mount)s, '
'does not match the URL path: %(path)s') %
{'glance_mount': glance_mount, 'path': path})
raise exception.ImageDownloadModuleMetaDataError(
module=str(self), reason=msg)
new_path = path.replace(glance_mount, nova_mount, 1)
return new_path
def download(self, context, url_parts, dst_file, metadata, **kwargs):
self.filesystems = self._get_options()
if not self.filesystems:
# NOTE(jbresnah) when nothing is configured assume legacy behavior
nova_mountpoint = '/'
glance_mountpoint = '/'
else:
self._verify_config()
fs_descriptor = self._file_system_lookup(metadata, url_parts)
if fs_descriptor is None:
msg = (_('No matching ID for the URL %s was found.') %
url_parts.geturl())
raise exception.ImageDownloadModuleError(reason=msg,
module=str(self))
nova_mountpoint = fs_descriptor['mountpoint']
glance_mountpoint = metadata['mountpoint']
source_file = self._normalize_destination(nova_mountpoint,
glance_mountpoint,
url_parts.path)
lv_utils.copy_image(source_file, dst_file)
LOG.info(_LI('Copied %(source_file)s using %(module_str)s'),
{'source_file': source_file, 'module_str': str(self)})
def get_download_handler(**kwargs):
return FileTransfer()
def get_schemes():
return ['file', 'filesystem']
|
Wang-Sen/nqzx-backend
|
bootcamp/app/models.py
|
Python
|
gpl-3.0
| 1,418
| 0.009873
|
# -*- encoding: utf-8 -*-
import re
from django.contrib.auth.models import User
class LoginBackend(object):
def authenticate(self, username=None, password=None):
if username:
#email
if re.match("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$", username) != None:
try:
                    user = User.objects.get(email=username)
if user.check_password(password):
return user
except User.DoesNotExist:
return None
#mobile
elif len(username)==11 and re.match("^(1[3458]\d{9})$", username) != None:
try:
user = User.objects.get(mobile=username)
if user.check_password(password):
return user
except User.DoesNotExist:
return None
else:
#nick
try:
user = User.objects.get(username=username)
if user.check_password(password):
return user
except User.DoesNotExist:
return None
else:
return None
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
|
msincenselee/vnpy
|
vnpy/gateway/tora/terminal_info.py
|
Python
|
mit
| 1,108
| 0
|
import wmi
import requests
import pythoncom
def get_iip():
""""""
f = requests.get("http://myip.dnsomatic.com")
iip = f.text
return iip
def get_lip():
""""""
c = wmi.WMI()
lip = ""
for interface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1):
lip = interface.IPAddress[0]
return lip
def get_mac():
""""""
c = wmi.WMI()
mac = ""
    for interface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1):
mac = interface.MACAddress
return mac
def get_hd():
""""""
c = wmi.WMI()
hd = "disk01"
# for disk in c.Win32_DiskDrive():
# hd = disk.SerialNumber.strip()
return hd
def get_terminal_info():
""""""
    # Initialize COM object in this thread.
pythoncom.CoInitialize()
iip = ""
iport = ""
lip = get_lip()
mac = get_mac()
hd = get_hd()
terminal_info = ";".join([
"PC",
f"IIP={iip}",
f"IPORT={iport}",
f"LIP={lip}",
f"MAC={mac}",
f"HD={hd}",
"PCN=NA;CPU=NA;PI=NA;VOL=NA@NA"
])
return terminal_info
|
wkentaro/fcn
|
examples/apc2016/datasets/mit_benchmark.py
|
Python
|
mit
| 3,797
| 0
|
import itertools
import os
import os.path as osp
import chainer
import numpy as np
import scipy.misc
from sklearn.model_selection import train_test_split
from base import APC2016DatasetBase
def ids_from_scene_dir(scene_dir, empty_scene_dir):
for i_frame in itertools.count():
        empty_file = osp.join(
empty_scene_dir, 'frame-{:06}.color.png'.format(i_frame))
rgb_file = osp.join(
scene_dir, 'frame-{:06}.color.png'.format(i_frame))
segm_file = osp.join(
            scene_dir, 'segm/frame-{:06}.segm.png'.format(i_frame))
if not (osp.exists(rgb_file) and osp.exists(segm_file)):
break
data_id = (empty_file, rgb_file, segm_file)
yield data_id
def bin_id_from_scene_dir(scene_dir):
caminfo = open(osp.join(scene_dir, 'cam.info.txt')).read()
loc = caminfo.splitlines()[0].split(': ')[-1]
if loc == 'shelf':
bin_id = caminfo.splitlines()[1][-1]
else:
bin_id = 'tote'
return bin_id
class APC2016mit_benchmarkDataset(APC2016DatasetBase):
def __init__(self, data_type):
assert data_type in ('train', 'val')
self.dataset_dir = chainer.dataset.get_dataset_directory(
'apc2016/benchmark')
data_ids = self._get_ids()
ids_train, ids_val = train_test_split(
data_ids, test_size=0.25, random_state=1234)
if data_type == 'train':
self._ids = ids_train
else:
self._ids = ids_val
def __len__(self):
return len(self._ids)
def _get_ids_from_loc_dir(self, env, loc_dir):
assert env in ('office', 'warehouse')
loc = osp.basename(loc_dir)
data_ids = []
for scene_dir in os.listdir(loc_dir):
scene_dir = osp.join(loc_dir, scene_dir)
bin_id = bin_id_from_scene_dir(scene_dir)
empty_dir = osp.join(
self.dataset_dir, env, 'empty', loc, 'scene-{}'.format(bin_id))
data_ids += list(ids_from_scene_dir(scene_dir, empty_dir))
return data_ids
def _get_ids(self):
data_ids = []
# office
contain_dir = osp.join(self.dataset_dir, 'office/test')
for loc in ['shelf', 'tote']:
loc_dir = osp.join(contain_dir, loc)
data_ids += self._get_ids_from_loc_dir('office', loc_dir)
# warehouse
contain_dir = osp.join(self.dataset_dir, 'warehouse')
for sub in ['practice', 'competition']:
sub_contain_dir = osp.join(contain_dir, sub)
for loc in ['shelf', 'tote']:
loc_dir = osp.join(sub_contain_dir, loc)
data_ids += self._get_ids_from_loc_dir('warehouse', loc_dir)
return data_ids
def _load_from_id(self, data_id):
empty_file, rgb_file, segm_file = data_id
img = scipy.misc.imread(rgb_file, mode='RGB')
img_empty = scipy.misc.imread(empty_file, mode='RGB')
        # Label value is multiplied by 6:
# ex) 0: 0/6=0 (background), 54: 54/6=9 (dasani_bottle_water)
lbl = scipy.misc.imread(segm_file, mode='L') / 6
lbl = lbl.astype(np.int32)
img_empty[lbl > 0] = img[lbl > 0]
return img_empty, lbl
def get_example(self, i):
data_id = self._ids[i]
img, lbl = self._load_from_id(data_id)
datum = self.img_to_datum(img)
return datum, lbl
if __name__ == '__main__':
import matplotlib.pyplot as plt
import six
dataset_train = APC2016mit_benchmarkDataset('train')
dataset_val = APC2016mit_benchmarkDataset('val')
print('train: %d, val: %d' % (len(dataset_train), len(dataset_val)))
for i in six.moves.range(len(dataset_val)):
viz = dataset_val.visualize_example(i)
plt.imshow(viz)
plt.show()
|
tpltnt/SimpleCV
|
SimpleCV/examples/detection/face-substition.py
|
Python
|
bsd-3-clause
| 1,253
| 0.003192
|
#!/usr/bin/env python
#
# Released under the BSD license. See LICENSE file for details.
"""
All this example does is find a face and replace it with another image. The
image should auto scale to match the size of the face.
"""
from __future__ import print_function
print(__doc__)
from SimpleCV import Camera, Display, HaarCascade, Image
#initialize the camera
cam = Camera()
# Create the display to show the image
display = Display()
# Load the new face image
troll_face = Image('troll_face.png', sample=True)
# Haar Cascade face detection, only faces
haarcascade = HaarCascade("face")
# Loop forever
while display.isNotDone():
# Get image, flip it so it looks mirrored, scale to speed things up
img = cam.getImage().flipHorizontal().scale(0.5)
# load in trained face file
faces = img.findHaarFeatures(haarcascade)
# If there were faces found do something
if faces:
face = faces[-1]
# Load the image to super impose and scale it correctly
troll = troll_face.scale(face.height(), face.width())
mymask = troll.invert()
# Super impose the new face on the existing face
img = img.blit(troll, face.topLeftCorner(), alphaMask=mymask)
# Display the image
img.save(display)
|
Insanityandme/dotfiles
|
vim/bundle/ultisnips/test/test_Folding.py
|
Python
|
unlicense
| 1,594
| 0.001255
|
from test.vim_test_case import VimTestCase as _VimTest
from test.constant import *
# Folding Interaction {{{#
class FoldingEnabled_SnippetWithFold_ExpectNoFolding(_VimTest):
def _extra_vim_config(self, vim_config):
vim_config.append('set foldlevel=0')
vim_config.append('set foldmethod=marker')
snippets = ('test', r"""Hello {{{
${1:Welt} }}}""")
keys = 'test' + EX + 'Ball'
wanted = """Hello {{{
Ball }}}"""
class FoldOverwrite_Simple_ECR(_VimTest):
snippets = ('fold',
"""# ${1:Description} `!p snip.rv = vim.eval("&
|
foldmarker").split(",")[0]`
# End: $1 `!p snip.rv = vim.eval("&foldmarker").split(",")[1]`""")
    keys = 'fold' + EX + 'hi'
wanted = '# hi {{{\n\n# End: hi }}}'
class Fold_DeleteMiddleLine_ECR(_VimTest):
snippets = ('fold',
"""# ${1:Description} `!p snip.rv = vim.eval("&foldmarker").split(",")[0]`
# End: $1 `!p snip.rv = vim.eval("&foldmarker").split(",")[1]`""")
keys = 'fold' + EX + 'hi' + ESC + 'jdd'
wanted = '# hi {{{\n\n# End: hi }}}'
class PerlSyntaxFold(_VimTest):
def _extra_vim_config(self, vim_config):
vim_config.append('set foldlevel=0')
vim_config.append('syntax enable')
vim_config.append('set foldmethod=syntax')
vim_config.append('let g:perl_fold = 1')
vim_config.append('so $VIMRUNTIME/syntax/perl.vim')
snippets = ('test', r"""package ${1:`!v printf('c%02d', 3)`};
${0}
1;""")
keys = 'test' + EX + JF + 'sub junk {}'
wanted = 'package c03;\nsub junk {}\n1;'
# End: Folding Interaction #}}}
|
demisto/content
|
Packs/Intezer/Integrations/IntezerV2/IntezerV2.py
|
Python
|
mit
| 16,706
| 0.002454
|
from http import HTTPStatus
from typing import Callable
from typing import Dict
from typing import List
from typing import Union
import demistomock as demisto
import requests
from CommonServerPython import *
from CommonServerUserPython import *
from intezer_sdk import consts
from intezer_sdk.analysis import Analysis
from intezer_sdk.analysis import get_analysis_by_id
from intezer_sdk.analysis import get_latest_analysis
from intezer_sdk.api import IntezerApi
from intezer_sdk.errors import AnalysisIsAlreadyRunning
from intezer_sdk.errors import AnalysisIsStillRunning
from intezer_sdk.errors import FamilyNotFoundError
from intezer_sdk.errors import HashDoesNotExistError
from intezer_sdk.errors import InvalidApiKey
from intezer_sdk.family import Family
from intezer_sdk.sub_analysis import SubAnalysis
from requests import HTTPError
''' CONSTS '''
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
IS_AVAILABLE_URL = 'is-available'
dbot_score_by_verdict = {
'malicious': 3,
'suspicious': 2,
'trusted': 1,
'neutral': 1,
'no_threats': 1
}
''' HELPER FUNCTIONS '''
def _get_missing_file_result(file_hash: str) -> CommandResults:
dbot = {
'Vendor': 'Intezer',
'Type': 'hash',
'Indicator': file_hash,
'Score': 0
}
return CommandResults(
readable_output=f'The Hash {file_hash} was not found on Intezer genome database',
outputs={
outputPaths['dbotscore']: dbot
}
)
def _get_missing_analysis_result(analysis_id: str, sub_analysis_id: str = None) -> CommandResults:
if not sub_analysis_id:
output = f'The Analysis {analysis_id} was not found on Intezer Analyze'
else:
output = f'Could not find the analysis \'{analysis_id}\' or the sub analysis \'{sub_analysis_id}\''
return CommandResults(
readable_output=output
)
def _get_missing_family_result(family_id: str) -> CommandResults:
return CommandResults(
readable_output=f'The Family {family_id} was not found on Intezer Analyze'
)
def _get_analysis_running_result(analysis_id: str = None, response: requests.Response = None) -> CommandResults:
if response:
analysis_id = response.json()['result_url'].split('/')[2]
context_json = {
'ID': analysis_id,
'Status': 'InProgress'
}
return CommandResults(
outputs_prefix='Intezer.Analysis',
outputs_key_field='ID',
readable_output='Analysis is still in progress',
outputs=context_json
)
''' COMMANDS '''
def check_is_available(intezer_api: IntezerApi, args: dict) -> str:
try:
response = intezer_api.get_url_result(f'/{IS_AVAILABLE_URL}')
return 'ok' if response else 'Empty response from intezer service'
except InvalidApiKey as error:
return f'Invalid API key received.\n{error}'
except HTTPError as error:
return f'Error occurred when reaching Intezer Analyze. Please check Analyze Base URL. \n{error}'
except ConnectionError as error:
return f'Error connecting to Analyze Base url.\n{error}'
def analyze_by_hash_command(intezer_api: IntezerApi, args: Dict[str, str]) -> CommandResults:
file_hash = args.get('file_hash')
if not file_hash:
raise ValueError('Missing file hash')
analysis = Analysis(file_hash=file_hash, api=intezer_api)
try:
analysis.send()
analysis_id = analysis.analysis_id
context_json = {
'ID': analysis.analysis_id,
'Status': 'Created',
'type': 'File'
}
return CommandResults(
outputs_prefix='Intezer.Analysis',
outputs_key_field='ID',
outputs=context_json,
readable_output='Analysis created successfully: {}'.format(analysis_id)
)
except HashDoesNotExistError:
return _get_missing_file_result(file_hash)
except AnalysisIsAlreadyRunning as error:
return _get_analysis_running_result(response=error.response)
def get_latest_result_command(intezer_api: IntezerApi, args: Dict[str, str]) -> CommandResults:
file_hash = args.get('file_hash')
if not file_hash:
raise ValueError('Missing file hash')
    latest_analysis = get_latest_analysis(file_hash=file_hash, api=intezer_api)
if not latest_analysis:
return _get_missing_file_result(file_hash)
return enrich_dbot_and_display_file_analysis_results(latest_analysis.result())
def analyze_by_uploaded_file_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
file_id = args.get('file_entry_id')
file_data = demisto.getFilePath(file_id)
try:
analysis = Analysis(file_path=file_data['path'], api=intezer_api)
analysis.send()
context_json = {
'ID': analysis.analysis_id,
'Status': 'Created',
'type': 'File'
}
return CommandResults(
outputs_prefix='Intezer.Analysis',
outputs_key_field='ID',
outputs=context_json,
readable_output='Analysis created successfully: {}'.format(analysis.analysis_id)
)
except AnalysisIsAlreadyRunning as error:
return _get_analysis_running_result(response=error.response)
def check_analysis_status_and_get_results_command(intezer_api: IntezerApi, args: dict) -> List[CommandResults]:
analysis_type = args.get('analysis_type', 'File')
analysis_ids = argToList(args.get('analysis_id'))
indicator_name = args.get('indicator_name')
command_results = []
for analysis_id in analysis_ids:
try:
if analysis_type == 'Endpoint':
response = intezer_api.get_url_result(f'/endpoint-analyses/{analysis_id}')
analysis_result = response.json()['result']
else:
analysis = get_analysis_by_id(analysis_id, api=intezer_api)
analysis_result = analysis.result()
if analysis_result and analysis_type == 'Endpoint':
command_results.append(
enrich_dbot_and_display_endpoint_analysis_results(analysis_result, indicator_name))
else:
command_results.append(enrich_dbot_and_display_file_analysis_results(analysis_result))
except HTTPError as http_error:
if http_error.response.status_code == HTTPStatus.CONFLICT:
command_results.append(_get_analysis_running_result(analysis_id=analysis_id))
elif http_error.response.status_code == HTTPStatus.NOT_FOUND:
command_results.append(_get_missing_analysis_result(analysis_id))
else:
raise http_error
except AnalysisIsStillRunning:
command_results.append(_get_analysis_running_result(analysis_id=analysis_id))
return command_results
def get_analysis_sub_analyses_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
analysis_id = args.get('analysis_id')
try:
analysis = get_analysis_by_id(analysis_id, api=intezer_api)
except HTTPError as error:
if error.response.status_code == HTTPStatus.NOT_FOUND:
return _get_missing_analysis_result(analysis_id=str(analysis_id))
except AnalysisIsStillRunning:
return _get_analysis_running_result(analysis_id=str(analysis_id))
sub_analyses: List[SubAnalysis] = analysis.get_sub_analyses()
all_sub_analyses_ids = [sub.analysis_id for sub in sub_analyses]
sub_analyses_table = tableToMarkdown('Sub Analyses', all_sub_analyses_ids, headers=['Analysis IDs'])
context_json = {
'ID': analysis.analysis_id,
'SubAnalysesIDs': all_sub_analyses_ids
}
return CommandResults(
outputs_prefix='Intezer.Analysis',
outputs_key_field='ID',
readable_output=sub_analyses_table,
outputs=context_json,
raw_response=all_sub_analyses_ids
)
def get_analysis_code_reuse_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
analysis_id = args.get('analysis_id')
sub_analysis_id = args.get('sub_analysis_id', 'root')
try:
sub_analysis: SubA
|
nemesisdesign/openwisp2
|
openwisp_controller/config/migrations/0023_update_context.py
|
Python
|
gpl-3.0
| 888
| 0
|
# Generated by Django 3.0.3 on 2020-02-26 19:58
import collections
import jsonfield.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [('config', '0022_vpn_format_dh')]
operations = [
migrations.AlterField(
model_name='config',
name='context',
field=jsonfield.fields.JSONField(
blank=True,
default=dict,
dump_kwargs={'ensure_ascii': False, 'indent': 4},
help_text=(
                    'Additional <a href="http://netjsonconfig.openwisp.org'
'/en/stable/general/basics.html#context" target="_blank">'
'context (configuration variables)</a> in JSON format'
),
                load_kwargs={'object_pairs_hook': collections.OrderedDict},
),
)
]
|
ph1l/halo_radio
|
rename.py
|
Python
|
gpl-2.0
| 1,285
| 0.044358
|
#!/usr/bin/python
#
# simple hack to rename files in the db.
#
# pass it a list of song ids (separated by commas), a search string and a replacement
#
#
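#
# example invocation (hypothetical ids and paths, shown only to illustrate the
# flags parsed below):
#
#   ./rename.py --ids=101,102 --search=/old/music --replace=/srv/music
#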
import string,os,sys,time,getopt
def usage():
print "
|
%s [--ids=<songid1>,<songid2>[,...]] --search=<old_str> --replace=<new_str>" % (sys.argv[0])
sys.exit(2)
try:
opts, args = getopt.getopt( sys.argv[1:], "hi:s:r:", [ "help", "ids=", "search=", "replace=" ])
except getopt.GetoptError:
usage()
sys.exit(2)
ids = None
search = None
replace = None
for o, a in opts:
if o in ("-h", "--help"):
        usage()
sys.exit()
if o in ("-i", "--ids"):
ids = a.split(",")
if o in ( "-s", "--search"):
search = a
if o in ( "-r", "--replace"):
replace = a
print "%s %s %s" % (ids,search,replace)
if ( search == None ) or ( replace == None ):
usage()
import HaloRadio
if ids == None:
import HaloRadio.SongListMaker as SongListMaker
slm = SongListMaker.SongListMaker()
slm.GetAll()
ids = slm.list
import HaloRadio.Song as Song
for sid in ids:
s = Song.Song(sid)
pos = s.path.find(search)
if pos == -1:
continue
print "Matched %s." % (s.GetDisplayName())
s.UpdatePath(s.path.replace(search,replace))
print "Updated to %s." % (s.path)
print " done."
|
skylian/XWorld
|
games/xworld/maps/XWorldDialogMap.py
|
Python
|
apache-2.0
| 3,644
| 0.006861
|
import random
from xworld_env import XWorldEnv
from py_util import overrides
class XWorldDialogMap(XWorldEnv):
def __init__(self, item_path, start_level=0):
super(XWorldDialogMap, self).__init__(
item_path=item_path,
max_height=1,
max_width=1,
start_level=start_level,
maze_generation=False)
self.class_per_session = 2 # max number of classes in a session
        # value <= 1 denotes all classes are used
self.sel_classes = {} # selected classes for a session
self.learned_classes = {} # classes that has been interacted with
self.img_var_ratio = 0.5 # the chance of using a new image instance
self.shuffle = True # shuffle classes
def _configure(self, select_class=True):
self.set_goal_subtrees(["animal"])
self.set_entity(type="agent", loc=(0, 0, 0), force_occupy=True)
self.set_entity(type="goal", loc=(0, 0, 0), force_occupy=True)
if select_class:
self.select_goal_classes() # re-select goal class for a new session
self.learned_classes = {} # clear the learned classes at the beginning of each session
if self.shuffle:
self.shuffle_classes("goal")
@overrides(XWorldEnv)
def get_all_possible_names(self, type):
"""
Return all possible names for type
'goal' - all unique object names
'block' - all block names
'agent' - all agent names
"""
if type == "goal":
return self.get_selected_goal_classes()
else:
return self.items[type].keys()
def shuffle_classes(self, type):
K = self.items[type].keys()
V = self.items[type].values()
random.shuffle(V)
self.items[type].update(dict(zip(K, V)))
def select_goal_classes(self):
"""
Sample a number of classes (class_per_session) for interaction within a session
"""
if self.class_per_session > 1:
self.sel_classes = random.sample(self.items["goal"].keys(), self.class_per_session)
else:
self.sel_classes = self.items["goal"].keys()
def get_selected_goal_classes(self):
"""
Get the selected classes for a session
"""
if not self.sel_classes:
            self.select_goal_classes()
return self.sel_classes
def within_session_reinstantiation(self):
# re-instantiate within the same session
        # re-load from map config with the same set of sampled classes
for e in self.get_goals():
# store what has been learned
self.learned_classes[e.name] = e.asset_path
if random.uniform(0,1) > self.img_var_ratio: # no var
# change name without changing the asset_path
goals = self.get_selected_goal_classes()
random.shuffle(goals)
#name = random.choice(goals)
name = goals[0]
# check whether this class has been learned before or not
if name not in self.learned_classes.keys():
self.set_property(e, property_value_dict={"name" : name, "asset_path" : None})
self.learned_classes[name] = e.asset_path
else:
self.set_property(e, property_value_dict={"asset_path" : self.learned_classes[name],
"name" : None})
else:
self.set_property(e, property_value_dict={"name" : None, "asset_path" : None})
|
yacoob/blitzloop
|
blitzloop/util.py
|
Python
|
gpl-2.0
| 1,930
| 0.005181
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2013 Hector Martin "marcan" <hector@marcansoft.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 or version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import os
import sys
import configargparse
RESDIR = os.path.join(os.path.dirname(sys.modules[__name__].__file__), 'res')
CFG = {
'fontdir': os.path.join(RESDIR, 'fonts'),
'gfxdir': os.path.join(RESDIR, 'gfx'),
'webdir': os.path.join(RESDIR, 'web'),
}
def init_argparser():
configargparse.init_argument_parser(
default_config_files=['/etc/blitzloop/cfg', '~/.blitzloop/cfg'])
parser = configargparse.get_argument_parser()
parser.add_argument(
'--fullscreen', default=False, action='store_true',
help='run blitzloop fullscreen')
def get_argparser():
return configargparse.get_argument_parser()
def get_opts():
opts, unknown = get_argparser().parse_known_args()
return opts
def get_res_path(t, fp):
return os.path.join(CFG[t], fp)
def get_resfont_path(fp):
return get_res_path('fontdir', fp)
def get_resgfx_path(fp):
    return get_res_path('gfxdir', fp)
def get_webres_path(fp):
return get_res_path('webdir', fp)
def map_from(x, min, max):
return (x-min) / (max-min)
def map_to(x, min, max):
return min + x * (max - min)
init_argparser()
|
senthil10/scilifelab
|
scripts/RNA_analysis/quantify_rRNA.py
|
Python
|
mit
| 1,425
| 0.032281
|
import os
import sys
from string import *
import math
import string
import re
import commands
import operator
if len(sys.argv) < 2:
print "USAGE: python quantify_rRNA.py <gff file>"
sys.exit(0)
gffFile=sys.argv[1]
#rRNAgeneList=commands.getoutput("grep 'rRNA' "+gffFile+" |awk '{print $10}'").replace('"','').replace(";","").split("\n")
rRNAgeneList=commands.getoutput("grep 'rRNA' "+gffFile+" |awk -F ';' '{for (i=1; i<=NF; i++) {if ($i~"+"/"+"gene_id"+"/"+") print $i}}' |cut -d "+"'"+'"'+"' "+"-f 2 |sort |uniq").split('\n')
names=commands.getoutput("ls -d tophat_out_*|sed 's/tophat_out_//g'").split('\n')
outList=[]
for name in names:
DIR = str('tophat_out_'+name)
try:
countFile=commands.getoutput("ls "+DIR+"/"+name+".counts")
        totNum=commands.getoutput("awk '{SUM+=$2} END {print SUM}' "+countFile)
if totNum != '':
rRNAnum=0
Lines=open(countFile).readlines()
n=0
for line in Lines:
geneID=line.split()[0]
if geneID in rRNAgeneList:
n=n+1
num=int(line.split()[1])
rRNAnum=rRNAnum+num
percent=round((float(rRNAnum)/int(totNum))*100,2)
outLine=countFile.split('/')[-1].split('.')[0]+'\t'+str(percent)+'%'+'\n'
outList.append(outLine)
except:
print "could not handle " + DIR
pass
if outList==[]:
print 'No data found. Check count tables!'
outF=open("rRNA.quantification",'w')
outF.writelines(outList)
outF.close()
|
yanheven/glance
|
glance/image_cache/client.py
|
Python
|
apache-2.0
| 4,184
| 0
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo.serialization import jsonutils as json
from glance.common import client as base_client
from glance.common import exception
from glance import i18n
_ = i18n._
class CacheClient(base_client.BaseClient):
DEFAULT_PORT = 9292
DEFAULT_DOC_ROOT = '/v1'
def delete_cached_image(self, image_id):
"""
Delete a specified image from the cache
"""
self.do_request("DELETE", "/cached_images/%s" % image_id)
return True
def get_cached_images(self, **kwargs):
"""
Returns a list of images stored in the image cache.
"""
res = self.do_request("GET", "/cached_images")
data = json.loads(res.read())['cached_images']
return data
    def get_queued_images(self, **kwargs):
"""
Returns a list of images queued for caching
"""
res = self.do_request("GET", "/queued_images")
data = json.loads(res.read())['queued_images']
return data
    def delete_all_cached_images(self):
"""
Delete all cached images
"""
res = self.do_request("DELETE", "/cached_images")
data = json.loads(res.read())
num_deleted = data['num_deleted']
return num_deleted
def queue_image_for_caching(self, image_id):
"""
Queue an image for prefetching into cache
"""
self.do_request("PUT", "/queued_images/%s" % image_id)
return True
def delete_queued_image(self, image_id):
"""
Delete a specified image from the cache queue
"""
self.do_request("DELETE", "/queued_images/%s" % image_id)
return True
def delete_all_queued_images(self):
"""
Delete all queued images
"""
res = self.do_request("DELETE", "/queued_images")
data = json.loads(res.read())
num_deleted = data['num_deleted']
return num_deleted
def get_client(host, port=None, timeout=None, use_ssl=False, username=None,
password=None, tenant=None,
auth_url=None, auth_strategy=None,
auth_token=None, region=None,
is_silent_upload=False, insecure=False):
"""
    Returns a new Glance client object based on common kwargs.
    If an option isn't specified, it falls back to common environment variable
defaults.
"""
if auth_url or os.getenv('OS_AUTH_URL'):
force_strategy = 'keystone'
else:
force_strategy = None
creds = {
'username': username or
os.getenv('OS_AUTH_USER', os.getenv('OS_USERNAME')),
'password': password or
os.getenv('OS_AUTH_KEY', os.getenv('OS_PASSWORD')),
'tenant': tenant or
os.getenv('OS_AUTH_TENANT', os.getenv('OS_TENANT_NAME')),
'auth_url': auth_url or
os.getenv('OS_AUTH_URL'),
'strategy': force_strategy or
auth_strategy or
os.getenv('OS_AUTH_STRATEGY', 'noauth'),
'region': region or
os.getenv('OS_REGION_NAME'),
}
if creds['strategy'] == 'keystone' and not creds['auth_url']:
msg = _("--os_auth_url option or OS_AUTH_URL environment variable "
"required when keystone authentication strategy is enabled\n")
raise exception.ClientConfigurationError(msg)
return CacheClient(
host=host,
port=port,
timeout=timeout,
use_ssl=use_ssl,
auth_token=auth_token or
os.getenv('OS_TOKEN'),
creds=creds,
insecure=insecure)
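# A minimal usage sketch (not part of the original module; the host value and
# auth strategy below are placeholders): build a CacheClient via get_client()
# and list the currently cached images.
#
#   client = get_client(host='127.0.0.1', port=9292, auth_strategy='noauth')
#   for image in client.get_cached_images():
#       print(image)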
|
escamilla/MultipartPostHandler
|
MultipartPostHandler.py
|
Python
|
lgpl-3.0
| 3,692
| 0.002167
|
#!/usr/bin/python
# Copyright 2013 Joshua Escamilla <jescamilla@hushmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# The latest version of this program can always be found at:
# https://github.com/escamilla/MultipartPostHandler
#
# This program is a modified version of the MultipartPostHandler module by
# Will Holcomb <wholcomb@gmail.com>, which is available at:
# https://pypi.python.org/pypi/MultipartPostHandler/
__all__ = ["MultipartPostHandler"]
from email.generator import _make_boundary
from mimetypes import guess_type
from os.path import basename
import sys
PY3K = sys.version > "3"
if PY3K:
from io import IOBase as FILE_TYPE
from urllib.parse import urlencode
from urllib.request import BaseHandler
else:
FILE_TYPE = file
from urllib import urlencode
from urllib2 import BaseHandler
try:
bytes
except NameError:
bytes = str
def b(str_or_bytes):
if not isinstance(str_or_bytes, bytes):
return str_or_bytes.encode("ascii")
else:
return str_or_bytes
NEWLINE = "\r\n"
def _get_content_type(filename):
return guess_type(filename)[0] or "application/octet-stream"
class MultipartPostHandler(BaseHandler):
handler_order = BaseHandler.handler_order - 10
def _encode_form_data(self, fields, files):
boundary = _make_boundary()
parts = []
for name, value in fields:
parts.append(b("--%s" % boundary))
parts.append(b("Content-Disposition: form-data; name=\"%s\""
% name))
parts.append(b("Content-Type: text/plain"))
parts.append(b(""))
parts.append(b(value))
for name, fp in files:
filename = basename(fp.name)
mimetype = _get_content_type(filename)
fp.seek(0)
parts.append(b("--%s" % boundary))
parts.append(b("Content-Disposition: file; name=\"%s\"; " \
"filename=\"%s\"" % (name, filename)))
parts.append(b("Content-Type: %s" % mimetype))
parts.append(b(""))
parts.append(fp.read())
parts.append(b("--%s--" % boundary))
data = b(NEWLINE).join(parts)
return boundary, data
def http_request(self, req):
data = req.data
if data and isinstance(data, dict):
fields = []
files = []
for key, value in data.items():
if isinstance(value, FILE_TYPE):
files.append((key, value))
else:
fields.append((key, value))
if files:
boundary, data = self._encode_form_data(fields, files)
req.add_header("Content-Type", "multipart/form-data; " \
"boundary=\"%s\"" % boundary)
req.add_header("Content-Length", len(data))
else:
data = urlencode(fields, doseq=True)
req.data = data
return req
https_request = http_request
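# A minimal usage sketch (not part of the original module; the URL and file
# name are placeholders): install the handler into an opener so that dict data
# containing open file objects is POSTed as multipart/form-data.
#
#   from urllib2 import build_opener   # urllib.request.build_opener on Python 3
#   opener = build_opener(MultipartPostHandler())
#   params = {"name": "example", "upload": open("example.txt", "rb")}
#   opener.open("http://example.com/upload", params)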
|
gwpy/seismon
|
utils/version.py
|
Python
|
gpl-3.0
| 5,922
| 0.000675
|
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2013)
#
# This file is part of SeisMon
#
# SeisMon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SeisMon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SeisMon. If not, see <http://www.gnu.org/licenses/>
"""Git version generator
"""
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
__credits__ = 'Adam Mercer <adam.mercer@ligo.org>'
import os
import subprocess
import time
import six
class GitStatus(object):
"""Git repository version information
"""
def __init__(self):
self._bin = self._find_git()
self.id = None
self.date = None
self.branch = None
self.tag = None
self.author = None
self.committer = None
self.status = None
# ------------------------------------------------------------------------
# Core methods
@staticmethod
def _find_git():
"""Determine the full path of the git binary on this
host
"""
for path in os.environ['PATH'].split(os.pathsep):
gitbin = os.path.join(path, 'git')
if os.path.isfile(gitbin) and os.access(gitbin, os.X_OK):
return gitbin
raise ValueError("Git binary not found on this host")
def git(self, *args):
"""Executable a command with arguments in a sub-process
"""
cmdargs = [self._bin] + list(args)
p = subprocess.Popen(cmdargs,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=isinstance(args, six.string_types))
out, err = p.communicate()
if p.returncode != 0:
raise subprocess.CalledProcessError(
                p.returncode,
' '.join(cmdargs))
return out.strip()
# ------------------------------------------------------------------------
# Git communication methods
def get_commit_info(self):
"""Determine basic info about the latest commit
"""
a, b, c, d, e, f = self.git(
            'log', '-1', '--pretty=format:%H,%ct,%an,%ae,%cn,%ce').decode('utf-8').split(',')
self.id = a
self.udate = b
author = c
author_email = d
committer = e
committer_email = f
self.date = time.strftime('%Y-%m-%d %H:%M:%S +0000',
time.gmtime(float(self.udate)))
self.author = '%s <%s>' % (author, author_email)
self.committer = '%s <%s>' % (committer, committer_email)
def get_branch(self):
branch = self.git('rev-parse', '--symbolic-full-name', 'HEAD')
if branch == 'HEAD':
self.branch = None
else:
self.branch = os.path.basename(branch)
return self.branch
def get_status(self):
"""Determine modification status of working tree
"""
try:
status = self.git('diff-files', '--quiet')
except subprocess.CalledProcessError:
            self.status = 'UNCLEAN: Modified working tree'
else:
try:
                status = self.git('diff-index', '--cached', '--quiet',
'HEAD')
except subprocess.CalledProcessError:
self.status = 'UNCLEAN: Modified working tree'
else:
self.status = 'CLEAN: All modifications committed'
return self.status
def get_tag(self):
"""Determine name of the current tag
"""
if not self.id:
self.get_commit_info()
try:
self.tag = self.git('describe', '--exact-match', '--tags',
self.id)
except subprocess.CalledProcessError:
self.tag = None
return self.tag
# ------------------------------------------------------------------------
# Write
def write(self, fobj):
"""Write the contents of this `GitStatus` to a version.py format
file object
"""
# write file header
fobj.write("# -*- coding: utf-8 -*-\n"
"# Copyright (C) Duncan Macleod (2013)\n\n"
"\"\"\"Versioning record for CIS\n\"\"\"\n\n")
# write standard pythonic metadata
fobj.write("__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'\n"
"__version__ = '%s'\n"
"__date__ = '%s'\n\n" % (self.version, self.date))
# write git information
for attr in ['id', 'branch', 'tag', 'author', 'committer', 'status']:
val = getattr(self, attr)
if val:
fobj.write("git_%s = '%s'\n" % (attr, val))
else:
fobj.write("git_%s = None\n" % attr)
def __call__(self, outputfile='version.py'):
"""Process the version information into a new file
Parameters
----------
outputfile : `str`
path to output python file in which to write version info
Returns
-------
info : `str`
returns a string dump of the contents of the outputfile
"""
self.get_commit_info()
self.get_branch()
self.get_tag()
self.get_status()
self.version = self.tag or self.id
with open(outputfile, 'w') as fobj:
self.write(fobj)
with open(outputfile, 'r') as fobj:
return fobj.read()
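# A minimal usage sketch (not part of the original module): run from inside a
# git checkout, write the version metadata to version.py and echo it back.
#
#   if __name__ == '__main__':
#       status = GitStatus()
#       print(status('version.py'))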
|
scotartt/commentarius
|
decommentariis/manage.py
|
Python
|
gpl-2.0
| 256
| 0.003906
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "decommentariis.setting
|
s")
from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
hivam/l10n_co_doctor
|
doctor_attentions_diseases_inherit.py
|
Python
|
agpl-3.0
| 2,526
| 0.009925
|
# -*- coding: utf-8 -*-
# #############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from dateutil.relativedelta import *
from datetime import datetime, date
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
class doctor_attentions_diseases(osv.osv):
_name = "doctor.attentions.diseases"
_inherit = 'doctor.attentions.diseases'
_columns = {
}
def _check_main_disease(self, cr, uid, ids, context=None):
'''
verify there's only one main disease
'''
for r in self.browse(cr, uid, ids, context=context):
diseases_ids = self.search(cr,uid,[('attentiont_id','=',r.attentiont_id.id),('diseases_type','=','main')])
if len(diseases_ids) > 1:
return False
return True
def _check_duplicated_disease(self, cr, uid, ids, context=None):
'''
        verify duplicated disease
'''
for r in self.browse(cr, uid, ids, context=context):
diseases_ids = self.search(cr,uid,[('attentiont_id','=',r.attentiont_id.id),('diseases_id','=',r.diseases_id.id)])
if len(diseases_ids) > 1:
return False
return True
_constraints = [
#(_check_main_disease, u'Hay más de un diagnóstico seleccionado como Principal. Por favor seleccione uno como Principal y los demás como Relacionados.', [u'\n\nTipo de Diagnóstico\n\n']),
#(_check_duplicated_disease, u'Hay uno o más diagnósticos duplicados.', [u'\n\nDiagnósticos\n\n'])
]
doctor_attentions_diseases()
|
gjlawran/ckanext-bcgov
|
ckanext/bcgov/util/__init__.py
|
Python
|
agpl-3.0
| 965
| 0.014508
|
# Copyright 2015, Province of British Columbia
# License: https://github.com/bcgov/ckanext-bcgov/blob/master/license
import ckan.plugins.toolkit as toolkit
from ckan.logic import get_action, NotFound
def get_tag_name(vocab_id, tag_id):
'''Returns the name of a tag for a given vocabulary and tag id.
Each EDC tag is a combination of three digits as tag id and tag name which are separated by '__'
'''
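    # For example (hypothetical tag, for illustration only): a vocabulary tag
    # stored as "001__Freshwater" has the 3-digit tag id "001", the "__"
    # separator, and the tag name "Freshwater" returned via tag[5:].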
try:
#First get the list of all tags for the given vocabulary.
tags = toolkit.get_action('tag_list')(
data_dict={'vocabulary_id': vocab_id})
#For each tag extract the 3-digit tag id and compare it with the given tag id.
for tag in tags :
            if (tag[:3] == tag_id) :
return tag[5:]
#No tags exist with the given tag id.
return None
except toolkit.ObjectNotFound:
#No vocabulary exist with the given vocabulary id.
return None
|
xianc78/Asteroids
|
player.py
|
Python
|
unlicense
| 2,498
| 0.03763
|
import pygame, random, sys, math
import constants
from bullet import Bullet
pygame.init()
laserSound = pygame.mixer.Sound("resources/laser.wav")
class Player:
def __init__(self, x, y):
self.facing = "up"
try:
image = pygame.image.load("resources/ship.png")
image = pygame.transform.scale(image, (40, 40))
except pygame.error:
print "ship.png has been deleted."
raw_input("<press enter to continue>")
pygame.quit()
sys.exit()
self.images = []
self.images.append(image)
self.images.append(pygame.transform.rotate(image, 90))
self.images.append(pygame.transform.rotate(image, 180))
self.images.append(pygame.transform.rotate(image, 270))
self.image = self.images[0]
self.rect = self.image.get_rect()
'''
self.speed = 0.0
self.rotspeed = 0
self.direction = 270
'''
self.rect.x = x
self.rect.y = y
self.change_x = 0
self.change_y = 0
self.lives = 3
self.score = 0
def update(self):
if self.facing == "down":
self.image = self.images[2]
elif self.facing == "up":
self.image = self.images[0]
elif self.facing == "right":
self.image = self.images[3]
elif self.facing == "left":
self.image = self.images[1]
self.rect.x += self.change_x
if self.rect.x < 0:
self.rect.x = 0
elif self.rect.right > constants.SCREEN_WIDTH:
self.rect.right = constants.SCREEN_WIDTH
self.rect.y += self.change_y
if self.rect.y < 0:
self.rect.y = 0
elif self.rect.bottom > constants.SCREEN_HEIGHT:
            self.rect.bottom = constants.SCREEN_HEIGHT
for asteroid in self.level.asteroid_list:
if self.rect.colliderect(asteroid.rect):
self.lives -= 1
self.jump()
if self.lives <= 0:
pygame.quit()
sys.exit()
def change_speed(self, x, y):
self.change_x += x
self.change_y += y
def shoot(self):
if self.facing == "up":
change_x = 0
change_y = -8
elif self.facing == "down":
change_x = 0
change_y = 8
elif self.facing == "left":
change_x = -8
change_y = 0
elif self.facing == "right":
change_x = 8
change_y = 0
laserSound.play()
self.level.bullet_list.append(Bullet(self.rect.centerx, self.rect.centery, change_x, change_y, self.level))
def jump(self):
while True:
self.rect.x = random.randint(0, constants.SCREEN_WIDTH - self.rect.width)
self.rect.y = random.randint(0, constants.SCREEN_HEIGHT - self.rect.height)
for asteroid in self.level.asteroid_list:
if self.rect.colliderect(asteroid.rect):
break
else:
return None
continue
|
dragondjf/cqssl
|
app/cqsscworker.py
|
Python
|
apache-2.0
| 3,057
| 0.001308
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from PyQt5.QtCore import *
from collections import OrderedDict
from signalmanager import signalManager
class CqsscWorker(QObject):
def __init__(self, parent=None):
super(CqsscWorker, self).__init__(parent)
self._rawDataLines = []
self._rawData = OrderedDict()
self._rawData_number = OrderedDict()
self._rawData_ballone = OrderedDict()
self._rawData_balltwo = OrderedDict()
self._rawData_ballthree = OrderedDict()
self._rawData_ballfour = OrderedDict()
self._rawData_ballfive = OrderedDict()
self._rawData_sumsize = OrderedDict()
self._rawData_sumparity = OrderedDict()
def analyzeData(self, filename="cqssc_data.txt"):
signalManager.statusTextChanged.emit("reading data")
self.readData(filename)
values = self._rawData_number.values()
print len(values), len(set(values)), len(values) - len(set(values))
result = CqsscWorker.searchPattern(self._rawData_sumsize.values(), mode=0, count=13)
print result
def readData(self, filename):
with open(filename, "r") as f:
self._rawDataLines = f.readlines()
# print self._rawDataLines[0:100]
for line in self._rawDataLines[0:200]:
line = repr(line)
keyStart = line.find("20")
keyEnd = line.find("\\t")
key = line[keyStart:keyEnd]
valueStart = keyEnd + 2
valueEnd = -5
rawData_string = line[valueStart:valueEnd].split(",")
value = [int(item) for item in
line[valueStart:valueEnd].split(",")]
self._rawData.update({key: value})
self._rawData_number.update({key: int("".join(rawData_string))})
self._rawData_ballone.update({key: value[0]})
self._rawData_balltwo.update({key: value[1]})
            self._rawData_ballthree.update({key: value[2]})
self._rawData_ballfour.update({key: value[3]})
self._rawData_ballfive.update({key: value[4]})
if sum(value) >= 23:
self._rawData_sumsize.update({key: 1})
else:
self._rawData_sumsize.update({key: 0})
if sum(value) % 2 == 0:
self._rawData_sumparity.update({key: 1})
else:
                self._rawData_sumparity.update({key: 0})
if key <= "20150913-101":
print self._rawData_sumsize[key], value, sum(value)
signalManager.statusTextChanged.emit("reading data finished")
@staticmethod
def searchPattern(source, mode=0,count=4):
pattern = []
pattern.append(1-mode)
for i in range(count):
pattern.append(mode)
pattern.append(1-mode)
print pattern, source
result = []
for start in range(len(source)):
end = start + len(pattern)
if source[start:end] == pattern:
result.append(start)
return result
|
saltastro/pysalt
|
saltfp/saltfpringfilter.py
|
Python
|
bsd-3-clause
| 7,647
| 0.0136
|
################################# LICENSE ##################################
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer #
# in the documentation and/or other materials provided with the #
# distribution. #
# * Neither the name of the South African Astronomical Observatory #
# (SAAO) nor the names of its contributors may be used to endorse #
# or promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE SAAO ''AS IS'' AND ANY EXPRESS OR #
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE SAAO BE LIABLE FOR ANY #
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS #
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
"""
RINGFILTER determines the center coordinates of a ring, bins the ring radially and computes its power spectrum, and allows the user to select a smoothing filter for the ring. It uses T. Williams code. The code assumes all the files are in the same directory. Also assumes that if there is a config file, it is also in the same directory as the data. Note that this config file is in the original FORTRAN code format so that the user does not have to write another file.
Updates:
20100706
* First wrote the code
"""
# Ensure python 2.5 compatibility
from __future__ import with_statement
import os
import sys
import numpy as np
#import pyfits
from pyraf import iraf
from pyraf.iraf import pysalt
import saltsafekey
import saltsafeio
import fpsafeio
from saltsafelog import logging
from salterror import SaltIOError
# This reads the FORTRAN config file if it exists
from fortranfp import ringfilter_wrapper
from fortranfp.ringfilter_wrapper import getpfp
debug=True
def saltfpringfilter(axc,ayc,arad,rxc,ryc,filterfreq,filterwidth,itmax,conv, fitwidth,image,logfile,useconfig,configfile,verbose):
""" Determines the center coordinates of a ring, bins the ring radially and computes its power spectrum, and allows the user to select a smoothing filter for the ring. """
# default parameter values are set up in the pyraf .par file. The values used are then changed if a FORTRAN config file exists and the user elects to override the pyraf .par file.
# Is the input FORTRAN config file specified?
# If it is blank, then it will be ignored.
if useconfig:
configfile = configfile.strip()
if len(configfile) > 0:
#check exists
saltsafeio.fileexists(configfile)
# read updated parameters from the file
array=getpfp(configfile,"axc")
s=len(array)
flag = array[s-1]
if flag == 1:
axc=float(array[0])
array=getpfp(configfile,"ayc")
s=len(array)
flag = array[s-1]
if flag == 1:
ayc=float(array[0])
array=getpfp(configfile,"arad")
s=len(array)
flag = array[s-1]
if flag == 1:
arad=float(array[0])
array=getpfp(configfile,"rxc")
s=len(array)
flag = array[s-1]
if flag == 1:
rxc=float(array[0])
array=getpfp(configfile,"ryc")
s=len(array)
flag = array[s-1]
if flag == 1:
ryc=float(array[0])
array=getpfp(configfile,"calring_filter_width")
s=len(array)
flag = array[s-1]
if flag == 1:
filterwidth=int(array[0])
array=getpfp(configfile,"calring_filter_freq")
s=len(array)
flag = array[s-1]
if flag == 1:
filterfreq=int(array[0])
array=getpfp(configfile,"calring_itmax")
s=len(array)
flag = array[s-1]
if flag == 1:
itmax=int(array[0])
array=getpfp(configfile,"calring_conv")
s=len(array)
flag = array[s-1]
if flag == 1:
conv=float(array[0])
array=getpfp(configfile,"calring_fitwidth")
s=len(array)
flag = array[s-1]
if flag == 1:
fitwidth=float(array[0])
# getting paths for filenames
pathin = os.path.dirname(image)
basein = os.path.basename(image)
pathlog = os.path.dirname(logfile)
baselog = os.path.basename(logfile)
# forcing logfiles to be created in the same directory as the input data
# (we change to this directory once starting the fortran code)
if len(pathin) > 0:
logfile = baselog
# start log now that all parameter are set up
with logging(logfile, debug) as log:
# Some basic checks, many tests are done in the FORTRAN code itself
# is the input file specified?
saltsafeio.filedefined('Input',image)
# if the input file is a file, does it exist?
if basein[0] != '@':
saltsafeio.fileexists(image)
infile = image
# if the input file is a list, throw an error
if basein[0] == '@':
raise SaltIOError(basein + ' list input instead of a file' )
# optionally update the FORTRAN config file with new values - not implemented currently
# If all looks OK, run the FORTRAN code
if len(pathin) > 0:
dir = pathin
else:
dir = './'
infile = basein
print dir, infile, 'input directory and input file'
# Get current working directory as the Fortran code changes dir
startdir = os.getcwd()
ringfilter_wrapper.ringfilter(dir,axc, ayc,arad, rxc,ryc,filterfreq,filterwidth,itmax,conv,fitwidth,infile)
# go back to starting directory
os.chdir(startdir)
# -----------------------------------------------------------
# main code
parfile = iraf.osfn("saltfp$saltfpringfilter.par")
t = iraf.IrafTaskFactory(taskname="saltfpringfilter",value=parfile,function=saltfpringfilter,pkgname='saltfp')
|
Brett777/Predict-Churn
|
Deploy Persisted Scores.py
|
Python
|
mit
| 2,784
| 0.020474
|
import os
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import h2o
import numpy as np
import pandas as pd
from tabulate import tabulate
from sqlalchemy import create_engine
# initialize the model scoring server
h2o.init(nthreads=1,max_mem_size=1, start_h2o=True, strict_version_check = False)
def predict_churn(State,AccountLength,AreaCode,Phone,IntlPlan,VMailPlan,VMailMessage,DayMins,DayCalls,DayCharge,EveMins,EveCalls,EveCharge,NightMins,NightCalls,NightCharge,IntlMins,IntlCalls,IntlCharge,CustServCalls):
# connect to the model scoring service
h2o.init(nthreads=1,max_mem_size=1, start_h2o=True, strict_version_check = False)
    # open the downloaded model
ChurnPredictor = h2o.load_model(path='AutoML-leader')
# define a feature vector to evaluate with the model
newData = pd.DataFrame({'State' : State,
'Account Length' : AccountLength,
'Area Code' : AreaCode,
'Phone' : Phone,
'Int\'l Plan' : IntlPlan,
'VMail Plan' : VMailPlan,
'VMail Message' : VMailMessage,
'Day Mins' : DayMins,
'Day Calls' : DayCalls,
'Day Charge' : DayCharge,
'Eve Mins' : EveMins,
'Eve Calls' : EveCalls,
'Eve Charge' : EveCharge,
'Night Mins' : NightMins,
'Night Calls' : NightCalls,
'Night Charge' : NightCharge,
'Intl Mins' :IntlMins,
'Intl Calls' : IntlCalls,
'Intl Charge' : IntlCharge,
'CustServ Calls' : CustServCalls}, index=[0])
# evaluate the feature vector using the model
predictions = ChurnPredictor.predict(h2o.H2OFrame(newData))
predictionsOut = h2o.as_list(predictions, use_pandas=False)
prediction = predictionsOut[1][0]
probabilityChurn = predictionsOut[1][1]
probabilityRetain = predictionsOut[1][2]
mySQL_Username = os.environ['BRETT_MYSQL_USERNAME']
mySQL_Password = os.environ['BRETT_MYSQL_PASSWORD']
mySQL_IP = os.environ['BRETT_MYSQL_IP']
engine = create_engine("mysql+mysqldb://"+mySQL_Username+":"+mySQL_Password+"@"+mySQL_IP+"/customers")
predictionsToDB = h2o.as_list(predictions, use_pandas=True)
predictionsToDB.to_sql(con=engine, name='predictions', if_exists='append')
return "Prediction: " + str(prediction) + " |Probability to Churn: " + str(probabilityChurn) + " |Probability to Retain: " + str(probabilityRetain)
|
glenmurphy/dropmocks
|
main.py
|
Python
|
mit
| 16,653
| 0.015613
|
import os, sys
from google.appengine.api.labs import taskqueue
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
from google.appengine.ext import db
from google.appengine.api import images
from google.appengine.api import users
from google.appengine.api import memcache
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
from django.utils import simplejson
import random
import datetime
import urllib
# FUNCTIONS -------------------------------------------------------------------
import string
ALPHABET = string.ascii_uppercase + string.ascii_lowercase + \
string.digits + '-_'
ALPHABET_REVERSE = dict((c, i) for (i, c) in enumerate(ALPHABET))
BASE = len(ALPHABET)
SIGN_CHARACTER = '$'
def id_encode(n):
if n < 0:
        return SIGN_CHARACTER + id_encode(-n)
s = []
while True:
n, r = divmod(n, BASE)
s.append(ALPHABET[r])
if n == 0: break
return ''.join(reversed(s))
def id_decode(s):
if s[0] == SIGN_CHARACTER:
        return -id_decode(s[1:])
n = 0
for c in s:
n = n * BASE + ALPHABET_REVERSE[c]
return n
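# Illustrative round trip for the URL-safe integer codec above (an aside for
# clarity, not part of the original file; the sample values are assumptions):
#   id_encode(0)      -> 'A'
#   id_encode(12345)  -> 'DA5'
#   id_decode(id_encode(12345)) == 12345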
def dbMockList(id):
return MockList.get_by_id(int(id_decode(id)))
def dbMock(id):
return Mock.get_by_id(int(id_decode(id)))
# MODELS ----------------------------------------------------------------------
class Owner(db.Model):
name = db.StringProperty(multiline=False)
email = db.StringProperty(multiline=False)
lastseen = db.DateTimeProperty(auto_now=True)
# Stores the signed-in-user ID.
user_id = db.StringProperty(multiline=False)
# Stores the not-signed-in-user ID.
hobo_id = db.StringProperty(multiline=False)
class MockList(db.Model):
owner = db.ReferenceProperty(Owner)
edit_key = db.StringProperty(multiline=False)
name = db.StringProperty(multiline=False)
description = db.StringProperty(multiline=False)
mocks = db.ListProperty(str, default=[])
mock_names_cache = db.ListProperty(str, default=[])
date = db.DateTimeProperty(auto_now_add=True)
views = db.IntegerProperty(default=0)
last_viewed = db.DateTimeProperty()
last_processed = db.DateTimeProperty(default=0)
def get_id(self):
return id_encode(self.key().id())
class MockListView(db.Model):
mocklist = db.ReferenceProperty(MockList)
date = db.DateTimeProperty(auto_now_add=True)
class Mock(db.Model):
name = db.StringProperty(multiline=False)
data = db.BlobProperty()
mimetype = db.StringProperty(multiline=False)
mocklist = db.ReferenceProperty(MockList)
date = db.DateTimeProperty(auto_now_add=True)
blob_key = blobstore.BlobReferenceProperty()
serving_url = db.StringProperty()
def get_id(self):
return id_encode(self.key().id())
def generateRandomKey():
return str(random.randint(0, sys.maxint))
def setCookie(handler, name, value):
expires = datetime.datetime.now() + datetime.timedelta(weeks=52)
expires_rfc822 = expires.strftime('%a, %d %b %Y %H:%M:%S GMT')
cookie = "%s=%s;expires=%s;path=/" % (name, value, expires_rfc822)
handler.response.headers.add_header('Set-Cookie', cookie)
def getOwner(handler, generate=False):
# Generate user if none exists.
user = users.get_current_user()
owner = None
if user:
owner = Owner.gql("WHERE user_id = :1", user.user_id()).get()
if not owner and 'hobo_id' in handler.request.cookies and handler.request.cookies['hobo_id'] != '':
owner = Owner.gql("WHERE hobo_id = :1", handler.request.cookies['hobo_id']).get()
if not owner and generate:
hobo_id = generateRandomKey()
owner = Owner()
owner.hobo_id = hobo_id
owner.put()
setCookie(handler, 'hobo_id', hobo_id)
return owner
def generateSignIn(handler, owner):
if owner and owner.user_id:
return "%s (<a href=\"%s\">Sign out</a>)" % (owner.name, users.create_logout_url("/"))
else:
return "<a href=\"%s\">Sign in</a>" % users.create_login_url("/signin/?d=" + handler.request.path)
def getOwnerDetails(handler, owner):
mocklists = []
if owner:
q = MockList.gql("WHERE owner = :1", owner).fetch(1000)
for mocklist in q:
mocklists.append({
'id' : mocklist.get_id(),
'name' : mocklist.name
})
return {
'mocklists' : mocklists,
'name' : str(owner.email),
'sign_in_url' : users.create_login_url("/signin/?d=" + handler.request.path),
'sign_out_url' : users.create_logout_url("/"),
'signed_in' : bool(users.get_current_user()),
}
else:
return {
'mocklists' : [],
'name' : '',
'sign_in_url' : users.create_login_url("/signin/?d=" + handler.request.path),
'sign_out_url' : users.create_logout_url("/"),
'signed_in' : False,
}
def cacheMockNames(mocklist):
mock_names = []
for mid in mocklist.mocks:
mock_names.append(dbMock(mid).name)
mocklist.mock_names_cache = mock_names
# MAIN ------------------------------------------------------------------------
class MainPage(webapp.RequestHandler):
def get(self):
path = os.path.join(os.path.dirname(__file__), "index.html")
# Get owner details
owner = getOwner(self, False)
self.response.out.write(template.render(path, {
'signin' : generateSignIn(self, owner),
'signed_in' : bool(users.get_current_user()),
'user' : simplejson.dumps(getOwnerDetails(self, owner))
}))
class View(webapp.RequestHandler):
def get(self, id):
# Get mocklist.
mocklist = dbMockList(id)
if not mocklist:
self.response.out.write("MockList not found")
return
if len(mocklist.mock_names_cache) != len(mocklist.mocks):
cacheMockNames(mocklist)
mocklist.put()
mocks = []
i = 0
for mock_id in mocklist.mocks:
mocks.append({
'id' : mock_id,
'name' : mocklist.mock_names_cache[i]
})
i = i
|
+ 1
if mocklist.name:
name = mocklist.name.encode('utf-8')
else:
name = ''
mocklistdata = {
'name' : name,
'id' : str(mocklist.get_id()),
'description' : str(mocklist.description),
'mocks' : mock
|
s,
}
# Check if current user owns it.
owner = getOwner(self, False)
if owner and mocklist.owner and mocklist.owner.key() == owner.key():
mocklistdata['key'] = mocklist.edit_key
json = simplejson.dumps(mocklistdata)
path = os.path.join(os.path.dirname(__file__), "viewer.html")
self.response.out.write(template.render(path, {
'json': json,
'user' : simplejson.dumps(getOwnerDetails(self, owner))
}))
view = MockListView()
view.mocklist = mocklist
view.put()
class ViewMock(webapp.RequestHandler):
def get(self, id):
mock = dbMock(id)
if mock.mimetype:
self.response.headers['Content-Type'] = mock.mimetype
else:
self.response.headers['Content-Type'] = 'image/png'
if mock.data:
self.response.out.write(mock.data)
elif mock.blob_key:
blob_reader = blobstore.BlobReader(mock.blob_key)
self.response.out.write(str(blob_reader.read()))
# self.redirect(mock.serving_url)
# self.redirect(mock.serving_url = images.get_serving_url(str(blob_info.key()), 800))
class UploadMock(blobstore_handlers.BlobstoreUploadHandler):
def post(self):
id = self.request.get('id')
edit_key = self.request.get('key')
filename = self.request.get('filename')
import logging
logging.info(filename)
mocklist = dbMockList(id)
if mocklist.edit_key != edit_key:
self.response.out.write('invalid edit key')
return
mock = Mock()
mock.name = filename
if self.request.POST:
# FormData
upload_files = self.get_uploads('file') # 'file' is file upload field in the form
blob_info = upload_files[0]
mock.blob_key = blob_info.key()
image = images.Image(blob_key=str(blob_info.key()))
image.horizontal_flip()
image.execute_transforms()
if image.width > 1280:
mock.serving_url = images.get_serving_url(str(blob_info.key()), 1280)
else:
mock.serving_url = images.get_serving_url(str(blob_info.key()))
else:
# Firefo
|
fishel/yarnnlm
|
sample.py
|
Python
|
mit
| 644
| 0.041925
|
#!/usr/bin/env python3
i
|
mport sys
import rnnlm
import txt
import pickle
from keras.models import load_model
if __name__ == "__main__":
modelInFile = sys.argv[1]
dictInFile = sys.argv[2]
try:
catSpec = sys.argv[3]
except IndexError:
catSpec = None
numToSample = 1
(mdl, params) = rnnlm.loadModels(modelInFile, dictInFile)
for _ in range(numToSample):
if catSpec:
specVec = txt.spec2vec(params, catSpec)
else:
spec, specVec = txt.rndCatVec(params)
print(spec)
raw, prob = rnnlm.sample(mdl
|
, params, specVec)
decoded = [str(params.i2w[i]) for i in raw]
print("".join(decoded) + " (" + str(prob) + ")")
|
dkarakats/edx-platform
|
lms/djangoapps/certificates/queue.py
|
Python
|
agpl-3.0
| 19,745
| 0.001722
|
"""Interface for adding certificate generation tasks to the XQueue. """
import json
import random
import logging
import lxml.html
from lxml.etree import XMLSyntaxError, ParserError # pylint:disable=no-name-in-module
from django.test.client import RequestFactory
from django.conf import settings
from django.core.urlresolvers import reverse
from requests.auth import HTTPBasicAuth
from courseware import grades
from xmodule.modulestore.django import modulestore
from capa.xqueue_interface import XQueueInterface
from capa.xqueue_interface import make_xheader, make_hashkey
from student.models import UserProfile, CourseEnrollment
from verify_student.models import SoftwareSecurePhotoVerification
from certificates.models import (
GeneratedCertificate,
certificate_status_for_student,
CertificateStatuses as status,
CertificateWhitelist,
ExampleCertificate
)
LOGGER = logging.getLogger(__name__)
class XQueueAddToQueueError(Exception):
"""An error occurred when adding a certificate task to the queue. """
def __init__(self, error_code, error_msg):
self.error_code = error_code
self.error_msg = error_msg
super(XQueueAddToQueueError, self).__init__(unicode(self))
def __unicode__(self):
return (
u"Could not add certificate to the XQueue. "
u"The error code was '{code}' and the message was '{msg}'."
).format(
code=self.error_code,
msg=self.error_msg
)
class XQueueCertInterface(object):
"""
XQueueCertificateInterface provides an
interface to the xqueue server for
managing student certificates.
Instantiating an object will create a new
connection to the queue server.
See models.py for valid state transitions,
summary of methods:
add_cert: Add a new certificate. Puts a single
request on the queue for the student/course.
Once the certificate is generated a post
will be made to the update_certificate
view which will save the certificate
download URL.
regen_cert: Regenerate an existing certificate.
For a user that already has a certificate
this will delete the existing one and
generate a new cert.
del_cert: Delete an existing certificate
For a user that already has a certificate
this will delete his cert.
"""
def __init__(self, request=None):
# Get basic auth (username/password) for
# xqueue connection if it's in the settings
if settings.XQUEUE_INTERFACE.get('basic_auth') is not None:
requests_auth = HTTPBasicAuth(
*settings.XQUEUE_INTERFACE['basic_auth'])
else:
requests_auth = None
if request is None:
factory = RequestFactory()
self.request = factory.get('/')
else:
self.request = request
self.xqueue_interface = XQueueInterface(
settings.XQUEUE_INTERFACE['url'],
settings.XQUEUE_INTERFACE['django_auth'],
requests_auth,
)
self.whitelist = CertificateWhitelist.objects.all()
self.restricted = UserProfile.objects.filter(allow_certificate=False)
self.use_https = True
def regen_cert(self, student, course_id, course=None, forced_grade=None, template_file=None):
"""(Re-)Make certificate for a particular student in a particular course
Arguments:
student - User.object
course_id - courseenrollment.course_id (string)
WARNING: this command will leave the old certificate, if one exists,
laying around in AWS taking up space. If this is a problem,
take pains to clean up storage before running this command.
Change the certificate status to unavailable (if it exists) and request
grading. Passing grades will put a certificate request on the queue.
Return the status object.
"""
# TODO: when del_cert is implemented and plumbed through certificates
# repo also, do a deletion followed by a creation r/t a simple
# recreation. XXX: this leaves orphan cert files laying around in
# AWS. See note in the docstring too.
try:
certificate = GeneratedCertificate.objects.get(user=student, course_id=course_id)
LOGGER.info(
(
u"Found an existing certificate entry for student %s "
u"in course '%s' "
u"with status '%s' while regenerating certificates. "
),
student.id,
unicode(course_id),
certificate.status
)
certificate.status = status.unavailable
certificate.save()
LOGGER.info(
(
u"The certificate status for student %s "
u"in course '%s' has been changed to '%s'."
),
student.id,
unicode(course_id),
certificate.status
)
except GeneratedCertificate.DoesNotExist:
pass
return self.add_cert(student, course_id, course, forced_grade, template_file)
def del_cert(self, student, course_id):
"""
Arguments:
student - User.object
course_id - courseenrollment.course_id (string)
Removes certificate for a student, will change
the certificate status to 'deleting'.
Certificate must be in the 'error' or 'downloadable' state
otherwise it will return the current state
"""
raise NotImplementedError
def add_cert(self, student, course_id, course=None, forced_grade=None, template_file=None, title='None'):
"""
Request a new certificate for a student.
Arguments:
student - User.object
course_id - courseenrollment.course_id (CourseKey)
force
|
d_grade - a string indicating a grade parameter to pass with
the certificate request. If this is given, grading
will be skipped.
Will change the certificate status to 'generating'.
Certificate must be in the 'unavailable', 'error',
'deleted' or 'generating' state.
If a student has a passing grade or is in the whitelist
table for the course
|
a request will be made for a new cert.
If a student has allow_certificate set to False in the
userprofile table the status will change to 'restricted'
If a student does not have a passing grade the status
will change to status.notpassing
Returns the student's status
"""
valid_statuses = [
status.generating,
status.unavailable,
status.deleted,
status.error,
status.notpassing,
status.downloadable
]
cert_status = certificate_status_for_student(student, course_id)['status']
new_status = cert_status
if cert_status not in valid_statuses:
LOGGER.warning(
(
u"Cannot create certificate generation task for user %s "
u"in the course '%s'; "
u"the certificate status '%s' is not one of %s."
),
student.id,
unicode(course_id),
cert_status,
unicode(valid_statuses)
)
else:
# grade the student
# re-use the course passed in optionally so we don't have to re-fetch everything
# for every student
if course is None:
course = modulestore().get_course(course_id, depth=0)
profile = UserProfile.objects.get(user=student)
profile_name = profile.name
# Needed
self.request.user = student
self.request.session = {}
|
comprobo-final-project/comprobo_final_project
|
comprobo_final_project/scripts/gene_alg_v1/chromosome.py
|
Python
|
mit
| 3,598
| 0.003057
|
#!usr/bin/env python
"""
Basic class that represents the chomosomes of our genetic algorithm.
"""
import random
import numpy as np
# The number of genes that each organism has
NUM_GENES = 4
# Boundary values for genes
GENE_MAX = 10000
GENE_MIN = -10000
class Chromosome:
"""
Holds the genes and fitness of an organism.
"""
def __init__(self, genes=None, supervisor=None):
"""
Initializes the gene and fitness of an organism.
"""
# Generate a random set of genes if genes are not already defined
        if genes is None:
genes = []
for x in range(NUM_GENES):
genes.append(round(random.uniform(-5, 5), 3))
# Define the chromosome's genes and fitness
self.genes = genes
self.supervisor = supervisor
self.fitness = self.get_fitness()
def crossover(self, other):
"""
Mixes the two specified chromosomes, returning two new chromosomes
that are a result of a crossover of the two original chromosomes.
other: second chromosome to crossover
return: two chromosomes that are crossovers between self and other
"""
# Define the genes that will be crossovered
g1 = self.genes
g2 = other.genes
# Define a random pivot point around which the crossover will occur
crossover_point = random.randint(0, NUM_GENES-1)
# Create the new crossovered genes and chromosome
new_genes_1 = g1[:crossover_point] + g2[crossover_point:]
new_genes_2 = g2[:crossover_point] + g1[crossover_point:]
new_chromosome_1 = Chromosome(new_genes_1, self.supervisor)
new_chromosome_2 = Chromosome(new_genes_2, self.supervisor)
return new_chromosome_1, new_chromosome_2
def mutate(self):
"""
Mutates a single random gene of the specified chromosome.
"""
# Initialize what will be the final list of mutated genes
mutated_genes = self.genes
# Select a random gene and multiply it with a random value
index_to_mutate = random.randint(0, len(self.genes) - 1)
mutated_genes[index_to_mutate] *= random.uniform(0
|
.5, 2)
# Clip and round all genes
mutated_genes[index_to_mutate] = np.clip(mutated_genes[index_to_mutate],
GENE_MIN, GENE_MAX)
mutated_genes = [round(gene, 3) for gene in mutated_genes]
# Create new chromosome with genes from the mutated gen
|
es
return Chromosome(mutated_genes, self.supervisor)
def get_fitness(self):
"""
Calculate the fitness of a specified chromosome.
"""
# Apply current chromosome's genes to the supervisor
self.supervisor.use_genes(self.genes)
# Calculate fitness
positions = self.supervisor.run() # all positions
distances = [np.sqrt(position.x**2 + position.y**2) \
for position in positions] # all distances from goal
fitness = np.mean(distances) # average distance from goal
# Reset the supervisor to accept new genes
self.supervisor.reset()
return fitness
if __name__ == '__main__':
# Test basic functionality
c1 = Chromosome()
c2 = Chromosome()
print "First generation:"
print c1.genes
print c2.genes, "\n"
c3, c4 = c1.crossover(c2)
print "Second generation (after crossover):"
print c3.genes
print c4.genes, "\n"
c3.mutate()
c4.mutate()
print "Second generation (after mutation):"
print c3.genes
print c4.genes
|
sammypg/youtube_downloader
|
setup.py
|
Python
|
mit
| 470
| 0.008511
|
# For building youtube_downloader on windows
from distutils.core import setup
impor
|
t py2exe
# Define wh
|
ere you want youtube_downloader to be built to below
build_dir =
data_files = [('',['settings.ini',
'LICENSE',
'README.md']),
('sessions',[])]
options = {'py2exe': {
'dist_dir': build_dir}}
setup(
windows=['youtube_downloader.py'],
data_files=data_files,
options=options)
|
HewlettPackard/python-hpOneView
|
examples/ethernet_networks.py
|
Python
|
mit
| 5,900
| 0.001525
|
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from pprint import pprint
from config_loader import try_load_from_file
from hpOneView.oneview_client import OneViewClient
confi
|
g = {
"ip": "<oneview_ip>",
"credentials": {
"userName": "<username>",
"password": "<password>"
}
}
options = {
"name": "OneViewSDK Test Ethernet Network",
"vlanId": 200,
"ethernetNetworkType": "Tagged",
"purpose": "General",
"smartLink": False,
"privateNetwork": False,
"connectionTemplateUri": None
}
options_bulk = {
"vlanIdRange": "1-5,7",
"purpose": "General",
"namePrefix": "TestNetwork",
"smartLink": False,
"privateNetwork": False,
"bandwidth": {
"maximumBandwidth": 10000,
"typicalBandwidth": 2000
}
}
# Scope name to perform the patch operation
scope_name = ""
# Try load config from a file (if there is a config file)
config = try_load_from_file(config)
oneview_client = OneViewClient(config)
# Create an ethernet Network
print("\nCreate an ethernet network")
ethernet_network = oneview_client.ethernet_networks.create(options)
print("Created ethernet-network '{name}' successfully.\n uri = '{uri}'" .format(**ethernet_network))
# Find recently created network by name
print("\nFind recently created network by name")
ethernet_network = oneview_client.ethernet_networks.get_by(
'name', 'OneViewSDK Test Ethernet Network')[0]
print("Found ethernet-network by name: '{name}'.\n uri = '{uri}'" .format(**ethernet_network))
# Update purpose recently created network
print("\nUpdate the purpose attribute from the recently created network")
ethernet_network['purpose'] = 'Management'
ethernet_network = oneview_client.ethernet_networks.update(ethernet_network)
print("Updated ethernet-network '{name}' successfully.\n uri = '{uri}'\n with attribute ['purpose': {purpose}]"
.format(**ethernet_network))
# Get all, with defaults
print("\nGet all ethernet-networks")
ethernet_nets = oneview_client.ethernet_networks.get_all()
for net in ethernet_nets:
print(" '{name}' at uri: '{uri}'".format(**net))
# Create bulk ethernet networks
print("\nCreate bulk ethernet networks")
ethernet_nets_bulk = oneview_client.ethernet_networks.create_bulk(options_bulk)
pprint(ethernet_nets_bulk)
# Filter by name
print("\nGet all ethernet-networks filtering by name")
ethernet_nets_filtered = oneview_client.ethernet_networks.get_all(
filter="\"'name'='OneViewSDK Test Ethernet Network'\"")
for net in ethernet_nets_filtered:
print(" '{name}' at uri: '{uri}'".format(**net))
# Get all sorting by name descending
print("\nGet all ethernet-networks sorting by name")
ethernet_nets_sorted = oneview_client.ethernet_networks.get_all(sort='name:descending')
for net in ethernet_nets_sorted:
print(" '{name}' at uri: '{uri}'".format(**net))
# Get the first 10 records
print("\nGet the first ten ethernet-networks")
ethernet_nets_limited = oneview_client.ethernet_networks.get_all(0, 10)
for net in ethernet_nets_limited:
print(" '{name}' at uri: '{uri}'".format(**net))
ethernet_network_uri = ethernet_network['uri']
# Get by Uri
print("\nGet an ethernet-network by uri")
ethernet_nets_by_uri = oneview_client.ethernet_networks.get(ethernet_network_uri)
pprint(ethernet_nets_by_uri)
# Get URIs of associated profiles
print("\nGet associated profiles uri(s)")
associated_profiles = oneview_client.ethernet_networks.get_associated_profiles(ethernet_network_uri)
pprint(associated_profiles)
# Get URIs of uplink port group
print("\nGet uplink port group uri(s)")
uplink_group_uris = oneview_client.ethernet_networks.get_associated_uplink_groups(ethernet_network_uri)
pprint(uplink_group_uris)
# Get the associated uplink set resources
print("\nGet uplink port group uri(s)")
uplink_groups = []
for uri in uplink_group_uris:
uplink_groups.append(oneview_client.uplink_sets.get(uri))
pprint(uplink_groups)
# Adds ethernet to scope defined
if scope_name:
print("\nGet scope then add the network to it")
scope = oneview_client.scopes.get_by_name(scope_name)
ethernet_with_scope = oneview_client.ethernet_networks.patch(ethernet_network_uri,
'replace',
'/scopeUris',
[scope['uri']])
pprint(ethernet_with_scope)
# Delete bulk ethernet networks
print("\nDelete bulk ethernet networks")
for net in ethernet_nets_bulk:
oneview_client.ethernet_networks.delete(net)
print(" Done.")
# Delete the created network
print("\nDelete the ethernet network")
oneview_client.ethernet_networks.delete(ethernet_network)
print("Successfully deleted ethernet-network")
|
vrsys/avangong
|
examples/inspector/inspector_qt.py
|
Python
|
lgpl-3.0
| 13,470
| 0.0049
|
# -*- Mode:Python -*-
##########################################################################
# #
# This file is part of AVANGO. #
# #
# Copyright 1997 - 2009 Fraunhofer-Gesellschaft zur Foerderung der #
# angewandten Forschung (FhG), Munich, Germany. #
# #
# AVANGO is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Lesser General Public License as #
# published by the Free Software Foundation, version 3. #
# #
# AVANGO is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. #
#
|
#
##########################################################################
import avango.osg
import avango.osg.simpleviewer
import avango.script
from elasticnodes import *
import sys
import random
from PySide import QtCore, QtGui
#from PyQt4 import QtCore, QtGui
class TreeItem:
def __init__(self, data, parent=None):
self.parentItem = parent
s
|
elf.itemData = data
self.childItems = []
def appendChild(self, item):
self.childItems.append(item)
def child(self, row):
return self.childItems[row]
def childCount(self):
return len(self.childItems)
def columnCount(self):
return len(self.itemData)
def data(self, column):
return self.itemData[column]
def parent(self):
return self.parentItem
def row(self):
if self.parentItem:
return self.parentItem.childItems.index(self)
return 0
class TreeModel(QtCore.QAbstractItemModel):
def __init__(self, columnDesc, parent=None):
QtCore.QAbstractItemModel.__init__(self, parent)
self.columnDesc = []
for desc in columnDesc:
self.columnDesc.append(QtCore.QVariant(desc))
self.rootItem = TreeItem(self.columnDesc)
def clear(self):
self.rootItem = TreeItem(self.columnDesc)
def columnCount(self, parent):
if parent.isValid():
return parent.internalPointer().columnCount()
else:
return self.rootItem.columnCount()
def data(self, index, role):
if not index.isValid():
return QtCore.QVariant()
if role != QtCore.Qt.DisplayRole:
return QtCore.QVariant()
item = index.internalPointer()
return QtCore.QVariant(item.data(index.column()))
def flags(self, index):
if not index.isValid():
return QtCore.Qt.ItemIsEnabled
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
def headerData(self, section, orientation, role):
if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
return self.rootItem.data(section)
return QtCore.QVariant()
def index(self, row, column, parent):
if row < 0 or column < 0 or row >= self.rowCount(parent) or column >= self.columnCount(parent):
return QtCore.QModelIndex()
if not parent.isValid():
parentItem = self.rootItem
else:
parentItem = parent.internalPointer()
childItem = parentItem.child(row)
if childItem:
return self.createIndex(row, column, childItem)
else:
return QtCore.QModelIndex()
def parent(self, index):
if not index.isValid():
return QtCore.QModelIndex()
childItem = index.internalPointer()
parentItem = childItem.parent()
if parentItem == self.rootItem:
return QtCore.QModelIndex()
return self.createIndex(parentItem.row(), 0, parentItem)
def rowCount(self, parent):
if parent.column() > 0:
return 0
if not parent.isValid():
parentItem = self.rootItem
else:
parentItem = parent.internalPointer()
return parentItem.childCount()
# def setupModelData(self, rootNode):
# print "setupModelData"
#
# def update_tree_model(node,tree_item_parent):
# for i in xrange(node._get_num_fields()):
# name = node._get_field_name(i)
# field = node._get_field(i)
# value = field.value
# if name != "Children":
# continue
#
# for child in value:
# new_parent_node = TreeItem([child.Name.value, name],tree_item_parent)
# tree_item_parent.appendChild(new_parent_node)
# update_tree_model(child,new_parent_node)
#
#
# for child in rootNode.value:
# new_parent_node = TreeItem([child.Name.value, "RootChild"],self.rootItem)
# self.rootItem.appendChild(new_parent_node)
# update_tree_model(child, new_parent_node)
#
# self.reset()
class NodeTreeModel(TreeModel):
def __init__(self, columnDesc, parent=None):
super(NodeTreeModel,self).__init__(columnDesc,parent)
def setupModelData(self, rootNode):
print "setupModelData"
def update_tree_model(node,tree_item_parent):
for i in xrange(node._get_num_fields()):
name = node._get_field_name(i)
field = node._get_field(i)
value = field.value
if name != "Children":
continue
for child in value:
new_parent_node = TreeItem([child.Name.value, name],tree_item_parent)
tree_item_parent.appendChild(new_parent_node)
update_tree_model(child,new_parent_node)
for child in rootNode.value:
new_parent_node = TreeItem([child.Name.value, "RootChild"],self.rootItem)
self.rootItem.appendChild(new_parent_node)
update_tree_model(child, new_parent_node)
self.reset()
def createGraphNodes(rootNode,graphWidget):
def update_tree_model(node, tree_item_parent, graphWidget, items):
for i in xrange(node._get_num_fields()):
name = node._get_field_name(i)
field = node._get_field(i)
value = field.value
if name != "Children":
continue
for child in value:
#create new node
actual_node = Node(graphWidget, child.Name.value, child)
random.seed(42)
x = random.randint(0,75)
y = random.randint(0,75)
actual_node.setPos(x, y)
#add to list
items.append(actual_node)
items.append(Edge(tree_item_parent, actual_node))
update_tree_model(child,actual_node,graphWidget,items)
items = []
parent_node = Node(graphWidget,"Root",rootNode)
parent_node.setPos(50, 50)
items.append(parent_node)
for child in rootNode.value:
update_tree_model(child, parent_node, graphWidget, items)
return items
class ScenegraphTreeNodeWidget(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.createNodeWidgets()
self.fieldsWidget = QtGui.QWidget()
self.fieldsLayout = QtGui.QVBoxLayout()
self.fieldsWidget.setLayout(self.fieldsLayout)
self.createFieldWidgets()
self.globalLayout = QtGui.QHBoxLayout()
self.globalLayout.addWid
|
weiweihuanghuang/Glyphs-Scripts
|
Masters/Show next instance.py
|
Python
|
apache-2.0
| 623
| 0.035313
|
#MenuTitle: Show next instance
# -*- coding: utf-8 -*-
__doc__="""
Jumps to next inst
|
ance shown in the preview field of the current Edit tab.
"""
import GlyphsApp
Doc = Glyphs.currentDocument
numberOfInstances = len( Glyphs.font.instances )
try:
currentInstanceNumber = Doc.windowController().activeEditViewController().selectedInstance()
if currentInstanceNumber < numberOfInstances:
Doc.windowController().activeEditViewController().setSelectedInstance_( currentInstanceNumber + 1 )
else:
Doc.windowController().activeEditViewController().
|
setSelectedInstance_( 1 )
except Exception, e:
print "Error:", e
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/grant_access_data.py
|
Python
|
mit
| 1,336
| 0
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license infor
|
mation.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class GrantAccessData(Model):
"""Data used for requesting a SAS.
:param access: Possible values include: 'None', 'Read'
:type access: str or
~azure.mgmt.compute.v2016_04_30
|
_preview.models.AccessLevel
:param duration_in_seconds: Time duration in seconds until the SAS access
expires.
:type duration_in_seconds: int
"""
_validation = {
'access': {'required': True},
'duration_in_seconds': {'required': True},
}
_attribute_map = {
'access': {'key': 'access', 'type': 'AccessLevel'},
'duration_in_seconds': {'key': 'durationInSeconds', 'type': 'int'},
}
def __init__(self, access, duration_in_seconds):
super(GrantAccessData, self).__init__()
self.access = access
self.duration_in_seconds = duration_in_seconds
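# Illustrative construction (an aside, not part of the generated file): request
# one hour of read access, e.g. GrantAccessData(access='Read', duration_in_seconds=3600).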
|
UNICT-DMI/Telegram-DMI-Bot
|
module/data/vars.py
|
Python
|
gpl-3.0
| 2,524
| 0.003223
|
""" aulario.py """
BACK_BUTTON_TEXT = "Indietro ❌"
DAY_SELECTION = "Seleziona la data della lezione che ti interessa."
AULARIO_WARNING = "⚠️ Aulario non ancora pronto, riprova fra qualche minuto ⚠️"
LESSON_SELECTION = "Quale lezione devi seguire?"
NO_LESSON_WARNING = "Nessuna lezione programmata per questo giorno"
""" drive_contribute.py """
NO_USERNAME_WARNING = "Nessuno username"
USE_TEXT = "USO: /drive_contribute [e-mail] [motivazione]\n\nESEMPIO: /drive_contribute mario.rossi@gmail.com Vorrei caricare i miei appunti di Fondamenti di Informatica"
CONFIRM_ACCESS = "Hai ottenuto l'accesso in scrittura alla cartella Drive! \n\nPresto ti arriverà un'email di conferma per gli accessi in scrittura e potrai aggiungere appunti nella cartella mediante questo link https://cutt.ly/unict-dmi-drive"
VALIDATION_ERROR = "Si é verificato un errore durante la validazione dell'email, riprova più tardi o verifica se hai già gli accessi in scrittura alla cartella mediante questo link https://cutt.ly/unict-dmi-drive"
""" esami.py """
PRIVATE_USE_WARNING = "Questo comando è utilizzabile solo in privato"
GROUP_WARNING = "Dal comando /esami che hai eseguito in un gruppo"
""" gdrive.py """
NO_GROUP_WARNING = "La funzione /drive non è ammessa nei gruppi"
ERROR_DEVS = "Si è verificato un errore, ci scusiamo per il disagio. Contatta i devs. /help"
""" help.py """
DIPARTIMENTO_CDL = "🏢 Dipartimento e CdL"
REGOLAMENTO_DIDATTICO = "🪧 Regolamento Didattico"
SEGRETERIA_CONTATTI = "🕐 Segreteria orar
|
i e contatti"
ERSU_ORARI = "🍽 ERSU orari e cont
|
atti"
APPUNTI_CLOUD = "☁️ Appunti & Cloud"
PROGETTI_RICONOSCIMENTI = "🏅 Progetti e Riconoscimenti"
ALL_COMMANDS = "Tutti i comandi"
CLOSE = "❌ Chiudi"
BACK_TO_MENU = "🔙 Torna al menu"
""" lezioni.py """
LE_USE_WARNING = "Questo comando è utilizzabile solo in privato"
LE_GROUP_WARNING = "Dal comando lezioni che hai eseguito in un gruppo"
""" regolmento_didattico """
GRAD_SELECT = "Scegliere uno dei corsi di laurea:"
YEAR_SELECT = "Scegliere il regolamento in base al proprio anno di immatricolazione:"
INF_COURSE = "Scegliere uno dei seguenti corsi (Informatica):"
MAT_COURSE = "Scegliere uno dei seguenti corsi (Matematica):"
RET_FILE = "Ecco il file richiesto:"
""" report.py """
REP_WARNING = "Errore. Inserisci la tua segnalazione dopo /report (Ad esempio /report Invasione ingegneri in corso.)"
""" stats.py """
EASTER_EGG = ("leiCheNePensaSignorina", "smonta_portoni", "santino", "bladrim", "prof_sticker")
|
slozier/ironpython2
|
Tests/Tools/modulediff.py
|
Python
|
apache-2.0
| 10,604
| 0.006978
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import logmodule
import sys
from System.IO import File, Directory
from System.Diagnostics import Process
BUG_REPORT_PRE = """Implement rest of %s module
IP VERSION AFFECTED: %s
FLAGS PASSED TO IPY.EXE: None
OPERATING SYSTEMS AFFECTED: All
DESCRIPTION
"""
def generate(exe, mod_name):
proc = Process()
proc.StartInfo.FileName = exe
proc.StartInfo.Arguments = "logmodule.py " + mod_name
proc.StartInfo.UseShellExecute = False
proc.StartInfo.RedirectStandardOutput = True
if not proc.Start():
raise Exception("process failed to start")
return proc.StandardOutput.ReadToEnd()
def add_diff(type, diffDict, path, line):
curDict = diffDict
for item in path:
nextDict = curDict.get(item, None)
if nextDict is None:
nextDict = curDict[item] = {}
curDict = nextDict
curList = curDict.get(type, None)
if curList is None:
curDict[type] = curList = []
curList.append(line)
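# Illustrative example (an aside, not part of the original file): add_diff builds
# a nested dict keyed by the path, with '+'/'-' lists at the leaves, e.g.
#   d = {}
#   add_diff('-', d, ['collections', 'deque'], 'rotate')
#   # d == {'collections': {'deque': {'-': ['rotate']}}}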
def first_non_whitespace(s):
spaceCnt = 0
for c in s:
if c != ' ':
break
spaceCnt += 1
return spaceCnt
class ModuleDiffer(object):
def __init__(self, mod_name, cpy_path):
self.mod_name = mod_name
self.cpy_path = cpy_path
self.cpyout = generate(cpy_path, mod_name)
self.ipyout = generate(sys.executable, mod_name)
self.path = []
self.cur_indent = ''
self.next_indent = ' '
self.prevline, self.previpyline = '', ''
self.new_indent = True
self.diffs = {}
self.ipyindex, self.cpyindex = 0, 0
self.ipytext, self.cpytext = self.ipyout.split('\n'), self.cpyout.split('\n')
def process(self):
while self.cpyindex < len(self.cpytext) and self.ipyindex < len(self.ipytext):
self.cpyline, self.ipyline = self.cpytext[self.cpyindex], self.ipytext[self.ipyindex]
self.cspaceCnt = first_non_whitespace(self.cpyline)
self.ispaceCnt = first_non_whitespace(self.ipyline)
# update our path to this line if the indentation level has changed.
if self.cspaceCnt > self.ispaceCnt:
self.basic_line = self.cpyline
else:
self.basic_line = self.ipyline
self.process_indent()
if self.cspaceCnt == self.ispaceCnt:
if self.compare_lines_same_indent():
continue
else:
self.compare_lines_different_indent()
self.prevline, self.previpyline = self.cpyline, self.ipyline
if self.cpyindex != len(self.cpytext):
# we don't hit this, so we don't support this yet
raise Exception()
elif self.ipyindex != len(self.ipytext):
# we don't hit this, so we don't support this yet
raise Exception()
return self.diffs
def process_indent(self):
if self.basic_line.startswith(self.next_indent):
self.cur_indent = self.next_indent
self.next_indent = self.next_indent + ' '
newpath, newipypath = self.prevline.strip(), self.previpyline.strip()
if newpath.startswith('Module: '): newpath = newpath[8:]
if newipypath.startswith('Module: '): newipypath = newipypath[8:]
if newpath != newipypath and self.cspaceCnt == self.ispaceCnt:
self.path.append(newpath + '/' + newipypath)
else:
self.path.append(newpath)
self.new_indent = True
else:
while not self.basic_line.startswith(self.cur_indent):
self.next_indent = self.cur_indent
self.cur_indent = self.cur_indent[:-2]
self.path.pop()
self.new_indent = True
def compare_lines_same_indent(self):
self.basic_line = self.cpyline
if self.cpyline == self.ipyline or self.cspaceCnt == 0:
# no difference
self.ipyindex += 1
self.cpyindex += 1
self.prevline, self.previpyline = self.cpyline, self.ipyline
return True
# same indentation level
if self.cpyline > self.ipyline:
# ipy has an extra attribute
add_diff('+', self.diffs , self.path, self.ipyline)
self.ipyindex += 1
else:
# ipy is missing an attribute
add_diff('-', self.diffs, self.path, self.cpyline)
self.cpyindex += 1
def compare_lines_different_indent(self):
if self.cspaceCnt > self.
|
ispaceCnt:
while first_non_whitespace(self.cpytext[self.cpyindex]) > self.ispaceCnt:
add_diff('-', self.diffs , self.path, self.cpytext[self.cpyindex])
self.cpyindex += 1
else:
while first_non_whitespace(self.ipytext[self.ipyindex]) > self.cspaceCnt:
|
add_diff('+', self.diffs , self.path, self.ipytext[self.ipyindex])
self.ipyindex += 1
def diff_module(mod_name, cpy_path):
return ModuleDiffer(mod_name, cpy_path).process()
def collect_diffs(diffs, type):
res = []
collect_diffs_worker(res, '', diffs, type)
return res
def collect_diffs_worker(res, path, diffs, diffType):
for key in sorted(list(diffs)):
if (key == '+' or key == '-') and key != diffType:
continue
value = diffs[key]
if type(value) is dict:
if path:
newpath = path + '.' + key
else:
newpath = key
collect_diffs_worker(res, newpath, value, diffType)
else:
for name in value:
res.append(path + name)
def gen_bug_report(mod_name, diffs, outdir):
needs_to_be_implemented = collect_diffs(diffs, '-')
needs_to_be_removed = collect_diffs(diffs, '+')
if not needs_to_be_implemented and not needs_to_be_removed:
return
bug_report_name = outdir + "\\%s.log" % mod_name
bug_report = open(bug_report_name, "w")
bug_report.write(BUG_REPORT_PRE % (mod_name, str(sys.winver)))
#--unfiltered list of attributes to be added
if len(needs_to_be_implemented)>0:
bug_report.write("-------------------------------------------------------\n")
bug_report.write("""Complete list of module attributes IronPython is still
missing implementations for:
""")
for x in needs_to_be_implemented:
bug_report.write(" " + x + '\n')
bug_report.write("\n\n\n")
#--unfiltered list of attributes to be removed
if len(needs_to_be_removed)>0:
bug_report.write("-------------------------------------------------------\n")
bug_report.write("""Complete list of module attributes that should be removed
from IronPython:
""")
for x in needs_to_be_removed:
bug_report.write(" " + x + '\n')
bug_report.close()
return bug_report_name
def check_baseline(bugdir, baselinedir, module):
bugfile = file(bugdir + '\\' + module + '.log')
basefile = file(baselinedir + '\\' + module + '.log')
fail = False
for bug, base in zip(bugfile, basefile):
comment = base.find('#')
if comment != -1:
base = base[:comment]
base, bug = base.strip(), bug.strip()
if base != bug:
print 'Differs from baseline!', base, bug
fail = True
return fail
def gen_one_report(module, cpy_path, outdir = 'baselines'):
if not Directory.Exists(outdir):
Directory.CreateDirectory(outdir)
print 'processing', module
diffs = diff_module(module, cpy_path)
return gen_bug_report(module, diffs, outdir)
def gen_all(cpy_path):
for module in ['types_only'] + logmodule.BUILTIN_MODULES:
gen_one_report
|
adewynter/Tools
|
Algorithms/stringOps.py
|
Python
|
mit
| 4,317
| 0.046792
|
# Adrian deWynter, 2017
# Implementation of various algorithms
# applied to strings
# Given a long string find the greater
# number that is also a palindrome.
def nextPalindrome(S):
def isPalindrome(x): return x == x[::-1]
	n = int(S)
	while True:
		n = n + 1
		if isPalindrome(str(n)): return str(n)
# Given two words A,B find if A = rot(B)
def isRotation(A,B):
return B in A+A
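# e.g. (illustrative) isRotation("waterbottle", "erbottlewat") -> True, because
# "erbottlewat" occurs inside "waterbottle" + "waterbottle"; note this check
# implicitly assumes len(A) == len(B).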
# Print all possible combinations of a certain
# s \in {0,1}^* for a given wildcard (*)
def wS(s,i):
if i == len(s):
print "".join(s)
else:
if s[i] == "*":
s[i] = "1"
wS(s,i+1)
s[i] = "0"
wS(s,i+1)
else:
wS(s,i+1)
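# e.g. (illustrative) wS(list("1*0"), 0) prints "110" then "100"; the argument
# must be a mutable list of characters because s[i] is assigned in place.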
def allNonRepeatingWordsInTwoSentences(a,b):
# Hash a, hash b, print differences. O(a + b)
d = {}
def insertHash(x):
if x not in d:
d[x] = 1
else:
d[x] = d[x] + 1
	for w in a.split(" "):
		insertHash(w)
	for w in b.split(" "):
		insertHash(w)
	ans = []
	for k, v in d.items():
		if v == 1:
			ans.append(k)
ans.append(" ")
print "".join(ans[:-1])
# Split a string into the minimum number of substrings
# such that each substring is a palindrome.
# This doesn't really work.
# Instead maintain an array:
# mincuts[i] = min cuts until i in S
# ispalindrome[i][j]
def minSubPalindromes(S):
p = []
M = [[None for _ in range(len(S))] for _ in range(len(S))]
	for i in range(1,len(S)):
		for j in range(1,len(S)):
			if S[i] == S[j]:
				M[i][j] = max(M[i-1][j-1], M[i-1][j], M[i][j - 1]) + 1
			else:
				M[i][j] = max(M[i-1][j-1], M[i-1][j], M[i][j - 1]) + 1
print M[-1][-1]
# Longest word made of words.
# I have no idea what it does.
def longestWordsMadeOfWords(W):
# First method
W.sort()
W=W[::-1]
i = 0
def splitWord(w):
ans = []
for i in range(1,len(w)):
ans.append( (w[:i], w[i:] ))
return ans
while i < len(W):
w = W[i]
for a,b in splitWord(w):
if a not in W or b not in W:
i = i + 1
break
return w
# Find smallest window if a string A containing all
# characters of another string B
def smallestWindow(A,B):
	M = [[0 for _ in range(len(B))] for _ in range(len(A))]
	M[0] = [1 if A[0] == B[j] else 0 for j in range(len(B))]
	for i in range(len(A)): M[i][0] = 1 if A[i] == B[0] else 0
	for i in range(1,len(A)):
		for j in range(1,len(B)):
			if A[i] == B[j]:
				M[i][j] = max(1, M[i-1][j-1],M[i-1][j],M[i][j-1])
if M[-1][-1] == len(B): return 1
# Alphabetical order:
def alienAlphabet(A):
node = None
def insertNode(node,v):
node_ = Node()
node_.value = v
node_.next = None
node.next = node_
for k,v in A:
node = Node()
node.value = k[0]
for c in range(1,len(k)):
if node.value != k[c]:
node_ = node
while node.next is not None:
if node.value == k[c]:
break
else:
if node.next.value != k[c]:
insertNode(node,k[c])
node = node.next
if node.next is None and node.value != k[c]:
insertNode(node,k[c])
while node.next is not None: print node.value
# Find minimum nnumber of operations that can
# be performed to turn s1 into s2
def minNum(s1,s2):
def levensheinDistance(s1,s2,ls1=len(s1),ls2=len(s2)):
if ls1 == 0: return ls2
if ls2 == 0: return ls1
if s1[ls1-1] == s2[ls2-1]:
cost = 0
else:
cost = 1
return min(
levensheinDistance(s1,s2,ls1-1,ls2) + 1,
levensheinDistance(s1,s2,ls1,ls2-1) + 1,
levensheinDistance(s1,s2,ls1-1,ls2-1) + cost)
return levensheinDistance(s1,s2)
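	# e.g. (illustrative) minNum("kitten", "sitting") -> 3: substitute k->s,
	# substitute e->i, insert g -- the classic edit-distance example.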
# Dynamic pro
|
gramming approach:
	M = [[0 for _ in range(len(s2) + 1)] for _ in range(len(s1) + 1)]
	for i in range(len(s1) + 1): M[i][0] = i
	for j in range(len(s2) + 1): M[0][j] = j
	for i in range(1, len(s1) + 1):
		for j in range(1, len(s2) + 1):
			cost = 0 if s1[i-1] == s2[j-1] else 1
			M[i][j] = min(M[i-1][j] + 1, M[i][j-1] + 1, M[i-1][j-1] + cost)
	print M[-1][-1]
# Find all positions where the anagram
|
of a substring
# S exists in A
# Complexity: O(A + S)
def needleHaystack(S,A):
indexes = []
T = sufixTree(A)
i = 0
while i < len(S):
k = T.findSubstring(S)
		if k == len(S): indexes.append(k)
S = getNextAnagram(S)
return indexes
left,right = 0,0
count = len(S)
indexes = []
dic = {}
for c in S:
if c in S:
dic[c] = dic[c] + 1
else:
dic[c] = 0
while right < len(A):
right = right + 1
if A[right] in dic and A[right] >= 0:
A[right] = A[right] - 1
count = count -1
if count == 0: indexes.append(left)
left = left + 1
if right - left == len(S) and left in A and A[left] >= 0:
A[left] = A[left] + 1
count = count + 1
return indexes
|
enova/pgl_ddl_deploy
|
generate_new_native_tests.py
|
Python
|
mit
| 3,132
| 0.003831
|
#!/usr/bin/env python3
from shutil import copyfile
import glob
import os
sql = './sql'
expected = './expected'
NEW_FILES = ['native_features']
for file in NEW_FILES:
filelist = glob.glob(f"{sql}/*{file}.sql")
for path in filelist:
try:
os.remove(path)
except:
print("Error while deleting file : ", path)
filelist = glob.glob(f"{expected}/*{file}.out")
for path in filelist:
try:
os.remove(path)
except:
print("Error while deleting file : ", path)
files = {}
for filename in os.listdir(sql):
s
|
plit_filename = filename.split("_", 1)
number = int(split_filename[0])
files[number] = split_filename[1]
max_file_num = max(files.keys())
def construct_filename(n, name):
return f"{str(n).zfill(2)}_{name}"
contents = """
SET client_min_messages = warning;
DO $$
BEGIN
IF current_setting('server_version_num')::INT >= 100000 THEN
SET session_replication_role TO replica;
ELSE
CREATE EXTENSION pglogical
|
;
END IF;
END$$;
CREATE EXTENSION pgl_ddl_deploy;
CREATE OR REPLACE FUNCTION pgl_ddl_deploy.override() RETURNS BOOLEAN AS $BODY$
BEGIN
RETURN TRUE;
END;
$BODY$
LANGUAGE plpgsql IMMUTABLE;
INSERT INTO pgl_ddl_deploy.queue (queued_at,role,pubnames,message_type,message)
VALUES (now(),current_role,'{mock}'::TEXT[],pgl_ddl_deploy.queue_ddl_message_type(),'CREATE TABLE nativerox(id int)');
INSERT INTO pgl_ddl_deploy.queue (queued_at,role,pubnames,message_type,message)
VALUES (now(),current_role,'{mock}'::TEXT[],pgl_ddl_deploy.queue_ddl_message_type(),'ALTER TABLE nativerox ADD COLUMN bar text;');
INSERT INTO pgl_ddl_deploy.queue (queued_at,role,pubnames,message_type,message)
VALUES (now(),current_role,'{mock}'::TEXT[],pgl_ddl_deploy.queue_ddl_message_type(),$$SELECT pgl_ddl_deploy.notify_subscription_refresh('mock', true);$$);
DO $$
DECLARE v_ct INT;
BEGIN
IF current_setting('server_version_num')::INT >= 100000 THEN
SELECT COUNT(1) INTO v_ct FROM information_schema.columns WHERE table_name = 'nativerox';
RAISE LOG 'v_ct: %', v_ct;
IF v_ct != 2 THEN
RAISE EXCEPTION 'Count does not match expected: v_ct: %', v_ct;
END IF;
SELECT COUNT(1) INTO v_ct FROM pgl_ddl_deploy.subscriber_logs;
IF v_ct != 1 THEN
RAISE EXCEPTION 'Count does not match expected: v_ct: %', v_ct;
END IF;
PERFORM pgl_ddl_deploy.retry_all_subscriber_logs();
SELECT (SELECT COUNT(1) FROM pgl_ddl_deploy.subscriber_logs WHERE NOT succeeded) +
(SELECT COUNT(1) FROM pgl_ddl_deploy.subscriber_logs WHERE error_message ~* 'No subscription to publication mock exists') INTO v_ct;
IF v_ct != 3 THEN
RAISE EXCEPTION 'Count does not match expected: v_ct: %', v_ct;
END IF;
ELSE
SELECT COUNT(1) INTO v_ct FROM pgl_ddl_deploy.subscriber_logs;
IF v_ct != 0 THEN
RAISE EXCEPTION 'Count does not match expected: v_ct: %', v_ct;
END IF;
END IF;
END$$;
"""
fname = construct_filename(max_file_num + 1, 'native_features')
with open(f"{sql}/{fname}.sql", "w") as newfile:
newfile.write(contents)
copyfile(f"{sql}/{fname}.sql", f"{expected}/{fname}.out")
|
byt3bl33d3r/mitmproxy
|
libmproxy/protocol/http.py
|
Python
|
mit
| 56,839
| 0.000581
|
from __future__ import absolute_import
import Cookie
import copy
import threading
import time
import urllib
import urlparse
from email.utils import parsedate_tz, formatdate, mktime_tz
import netlib
from netlib import http, tcp, odict, utils
from netlib.http import cookies
from .tcp import TCPHandler
from .primitives import KILL, ProtocolHandler, Flow, Error
from ..proxy.connection import ServerConnection
from .. import encoding, utils, controller, stateobject, proxy
HDR_FORM_URLENCODED = "application/x-www-form-urlencoded"
HDR_FORM_MULTIPART = "multipart/form-data"
CONTENT_MISSING = 0
class KillSignal(Exception):
pass
def send_connect_request(conn, host, port, update_state=True):
upstream_request = HTTPRequest(
"authority",
"CONNECT",
None,
host,
port,
None,
(1, 1),
odict.ODictCaseless(),
""
)
conn.send(upstream_request.assemble())
resp = HTTPResponse.from_stream(conn.rfile, upstream_request.method)
if resp.code != 200:
raise proxy.ProxyError(resp.code,
"Cannot establish SSL " +
"connection with upstream proxy: \r\n" +
str(resp.assemble()))
if update_state:
conn.state.append(("http", {
"state": "connect",
"host": host,
"port": port}
))
return resp
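# Illustrative call (an aside, not part of the original module): tunnel to an
# HTTPS origin through the upstream proxy, then start TLS once it returns, e.g.
#   resp = send_connect_request(upstream_conn, "example.com", 443)
#   # the call raises ProxyError unless the proxy answered 200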
class decoded(object):
"""
A context manager that decodes a request or response, and then
re-encodes it with the same encoding after execution of the block.
Example:
with decoded(request):
request.content = request.content.replace("foo", "bar")
"""
def __init__(self, o):
self.o = o
ce = o.headers.get_first("content-encoding")
if ce in encoding.ENCODINGS:
self.ce = ce
else:
self.ce = None
def __enter__(self):
if self.ce:
self.o.decode()
def __exit__(self, type, value, tb):
if self.ce:
self.o.encode(self.ce)
class HTTPMessage(stateobject.StateObject):
"""
Base class for HTTPRequest and HTTPResponse
"""
def __init__(self, httpversion, headers, content, timestamp_start=None,
timestamp_end=None):
self.httpversion = httpversion
self.headers = headers
"""@type: odict.ODictCaseless"""
self.content = content
self.timestamp_start = timestamp_start
self.timestamp_end = timestamp_end
_stateobject_attributes = dict(
httpversion=tuple,
headers=odict.ODictCaseless,
content=str,
timestamp_start=float,
timestamp_end=float
)
_stateobject_long_attributes = {"content"}
def get_state(self, short=False):
ret = super(HTTPMessage, self).get_state(short)
if short:
if self.content:
ret["contentLength"] = len(self.content)
elif self.content == CONTENT_MISSING:
ret["contentLength"] = None
else:
ret["contentLength"] = 0
return ret
def get_decoded_content(self):
"""
Returns the decoded content based on the current Content-Encoding
header.
        Doesn't change the message itself or its headers.
"""
ce = self.headers.get_first("content-encoding")
if not self.content or ce not in encoding.ENCODINGS:
return self.content
return encoding.decode(ce, self.content)
def decode(self):
"""
Decodes content based on the current Content-Encoding header, then
removes the header. If there is no Content-Encoding header, no
action is taken.
Returns True if decoding succeeded, False otherwise.
"""
ce = self.headers.get_first("content-encoding")
if not self.content or ce not in encoding.ENCODINGS:
return False
data = encoding.decode(ce, self.content)
if data is None:
return False
self.content = data
del self.headers["content-encoding"]
return True
def encode(self, e):
"""
Encodes content with the encoding e, where e is "gzip", "deflate"
or "identity".
"""
# FIXME: Error if there's an existing encoding header?
self.content = encoding.encode(e, self.content)
self.headers["content-encoding"] = [e]
def size(self, **kwargs):
"""
Size in bytes of a fully rendered message, including headers and
HTTP lead-in.
"""
hl = len(self._assemble_head(**kwargs))
if self.content:
return hl + len(self.content)
else:
return hl
def copy(self):
c = copy.copy(self)
c.headers = self.headers.copy()
return c
def replace(self, pattern, repl, *args, **kwargs):
"""
Replaces a regular expression pattern with repl in both the headers
and the body of the message. Encoded content will be decoded
before replacement, and re-encoded afterwards.
Returns the number of replacements made.
"""
with decoded(self):
self.content, c = utils.safe_subn(
pattern, repl, self.content, *args, **kwargs
)
c += self.headers.replace(pattern, repl, *args, **kwargs)
return c
def _assemble_first_line(self):
"""
Returns the assembled request/response line
"""
raise NotImplementedError() # pragma: nocover
def _assemble_headers(self):
"""
Returns the assembled headers
"""
raise NotImplementedError() # pragma: nocover
def _assemble_head(self):
"""
Returns the assembled request/response line plus headers
"""
raise NotImplementedError() # pragma: nocover
def assemble(self):
"""
Returns the assembled request/response
"""
raise NotImplementedError() # pragma: nocover
class HTTPRequest(HTTPMessage):
"""
An HTTP request.
Exposes the following attributes:
method: HTTP method
scheme: URL scheme (http/https)
    host: Target hostname of the request. This is not necessarily the
    direct upstream server (which could be another proxy), but it's always
the target server we want to reach at the end. This attribute is either
inferred from the request itself (absolute-form, authority-form) or from
the connection metadata (e.g. the host in reverse proxy mode).
port: Destination port
path: Path portion of the URL (not present in authority-form)
httpversion: HTTP version tuple, e.g. (1,1)
headers: odict.ODictCaseless object
content: Content of the request, None, or CONTENT_MISSING if there
is content associated, but not present. CONTENT_MISSING evaluates
to False to make checking for the presence of content natural.
form_in: The request form which mitmproxy has received. The following
values are possible:
- relative (GET /index.html, OPTIONS *) (covers origin form and
asterisk form)
- absolute (GET http://example.com:80/index.html)
- authority-form (CONNECT example.com:443)
Details: http://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-25#section-5.3
form_out: The request form which mitmproxy will send out to the
destination
timestamp_start: Timestamp indicating when requ
|
est transmission started
timestamp_end: Timestamp indicating when request transmission ended
"""
def __init__(
self,
form_in,
method,
scheme,
host,
port,
path,
httpversion,
headers,
content,
timestamp_start=None,
timestamp_end=None,
form_out=None
):
assert isinstance(headers, odict.ODic
|
tCaseless) or not headers
HTT
|
axbaretto/beam
|
sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/test/functional/abstract_class_instantiated_in_class.py
|
Python
|
apache-2.0
| 326
| 0
|
"""Don't warn if the class is i
|
nstantiated in its own body.
|
"""
# pylint: disable=missing-docstring
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Ala(object):
@abc.abstractmethod
def bala(self):
pass
@classmethod
def portocala(cls):
instance = cls()
return instance
|
rfhk/tks-custom
|
account_analytic_line_sale/models/__init__.py
|
Python
|
agpl-3.0
| 181
| 0
|
# -*- coding: utf-8 -*
|
-
# Copyright 2017 Rooms For (Hong Kong) Limited T/A OSCG
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
from . import
|
account_analytic_line
|
PyPlanet/PyPlanet
|
pyplanet/apps/core/pyplanet/views/toolbar.py
|
Python
|
gpl-3.0
| 1,256
| 0.027866
|
from pyplanet.views import TemplateView
class ToolbarView(TemplateView):
template_name = 'core.pyplanet/toolbar.xml'
def __init__(s
|
elf, app, *args, **kwargs):
"""
Initiate Player Toolbar
:param app: App instance.
:type app: pyplanet.apps.core.pyplanet.app.PyPlanetConfig
"""
super().__init__(*args, **kwargs)
self.id = 'pyplanet__toolbar'
self.app = app
self.manager = self.app.context.ui
self.commands = {
'bar_button_list': '/list',
'bar_button_mf': '/mf',
'bar_button_jb': '/jukebox list',
'bar_button_skip': '/skip',
'bar_butt
|
on_extend': '/extend',
'bar_button_replay': '/replay',
'bar_button_players': '/players',
'bar_button_topdons': '/topdons',
'bar_button_topsums': '/topsums',
'bar_button_topactive': '/topactive',
'bar_button_mxinfo': '/{} info'.format('tmx' if self.app.instance.game.game == 'tmnext' else 'mx'),
'bar_button_help': '/helpall',
}
async def get_context_data(self):
data = await super().get_context_data()
data['game'] = self.app.instance.game.game
return data
async def handle_catch_all(self, player, action, values, **kwargs):
if action not in self.commands:
return
await self.app.instance.command_manager.execute(player, self.commands[action])
|
MagiChau/Hearthstone-Card-Lookup
|
card_lookup/searcher.py
|
Python
|
mit
| 4,417
| 0.029885
|
"""
Completes Hearthstone Card Lookup through comparing search queries to card names
"""
import copy
class Searcher:
def __init__(self, card_dict):
"""Initializes a Searcher object with a card dictionary provided
Args: card_dict(dict): Card dictionary in which cards are separated
into sub dictionaries by set and names are located in a key named 'name'
"""
self.card_dict = copy.deepcopy(card_dict)
def change_card_dict(self, card_dict):
"""Replaces the currently used card dictionary with a deep copy of another
Args: card_dict(dict): Card dictionary in which cards are separated
into sub dictionaries by set and names are located in a key named 'name'
"""
self.card_dict = copy.deepcopy(card_dict)
def find_card(self, query):
"""Finds the best matching card and returns its information
Args: query(string): Search query to use for lookup
Returns:
dict: Card information of best matching card
None: If no suitable matches were found return None
"""
results = self._find_matches(query, 0.5)
if len(results) > 0:
results.sort(key=lambda result: result[1], reverse=True)
return results[0][0]
else:
return None
def _find_matches(self, query, min_match):
"""Finds all cards matching a query and returns them
Args:
query(string): Search query to use for lookup
min_match(number): Minimum value for a card to be matched.
Value can range from 0 to 1.
Returns:
list: List of unsorted lists containing card information then its match percent
"""
result_list = []
l_query = query.lower()
#The card dictionary main keys are the sets card belongs to
for exp in self.card_dict:
for card in self.card_dict[exp]:
#Change all uppercase letters to lowercase in preparation for string comparisons
l_cardname = card['name'].lower()
percent_match = 0
search_words = {}
#Create a sub dictionary for each search word in the query
for word in l_query.split(' '):
search_words.update({word : {}})
card_words = l_cardname.split(' ')
#Calculate the match percentage between every search word and every card word
for search_word in search_words:
for card_word in card_words:
match = 1 - (Searcher.levenshtein_distance(search_word, card_word) /
max(len(search_word), len(card_word)))
if search_word not in search_words.keys():
search_words[search_word] = {card_word: { 'match' : match } }
else:
search_words[search_word].update( {card_word: { 'match' : match } } )
#Calculates the total match percentage for the entire query and the card name
for search_word in search_words:
max_value_key = list(search_words[search_word].keys())[0]
max_value = search_words[search_word][max_value_key]
for card_word in search_words[search_word]:
if search_words[search_word][card_word]['match'] > max_value['match']:
max_value_key = card_word
max_value = search_words[search_word][card_word]
percent_card_match = len(max_value_key) / len(l_cardname.replace(" ", ""))
percent_query_match = len(search_word) / len(l_query.replace(" ", ""))
#These weights emphasize matching the query more than the entire card
card_match_weight = 0.25
query_match_weight = 1 - card_match_weight
|
percent_match += (percent_query_match * max_value['match'] * query_match_weight +
percent_card_match * max_value['match'] * card_match_weight)
if percent_match >= min_match:
result_list.append([card, percent_match])
return result_list
@staticmethod
def levenshtein_distance(s1, s2):
"""Levenshtein Distance Algorithm taken from Wikibooks
Args:
s1(string): First string for comparisons
s2(string): Second string for comparisons
Returns:
int: The levenshtein distance between two strings
"""
if len(s1) < len(s2):
return Searcher.levenshtein_distance(s2, s1)
# len(s1) >= len(s2)
if len(s2) == 0:
return len(s1)
previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer
deletions = current_row[j] + 1 # than s2
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
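# Usage sketch for the Searcher above (not part of the original file): the card
# dictionary shape -- set name mapping to a list of dicts with a 'name' key -- is
# taken from the docstrings, and the card names are invented placeholders.
cards = {
    "basic": [{"name": "Fireball"}, {"name": "Frostbolt"}],
    "promo": [{"name": "Ragnaros the Firelord"}],
}
searcher = Searcher(cards)
best = searcher.find_card("firebal")  # deliberate typo to exercise the fuzzy match
print(best["name"] if best else "no match")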
|
zero323/spark
|
dev/ansible-for-test-node/roles/jenkins-worker/files/util_scripts/post_github_pr_comment.py
|
Python
|
apache-2.0
| 2,930
| 0.000341
|
#!/usr/bin/env python3
"""Utility program to post a comment to a github PR"""
import argparse
import json
import os
import sys
import urllib.parse
from urllib.error import HTTPError, URLError
from urllib.request import urlopen, Request
def _parse_args():
pr_link_var = "ghprbPullLink"
pr_link_option = "--pr-link"
github_oauth_key_var = "GITHUB_OAUTH_KEY"
github_oauth_key_option = "--github-oauth-key"
parser = argparse.ArgumentParser()
parser.add_argument(
"-pr",
pr_link_option,
default=os.environ.get(pr_link_var, ""),
help="Specify pull request link",
)
parser.add_argument(
github_oauth_key_option,
default=os.environ.get(github_oauth_key_var, ""),
help="Specify github oauth key",
)
args = parser.parse_args()
if not args.pr_link:
parser.error(
"Specify either environment variable {} or option {}".format(
|
pr_link_var, pr_link_option
)
)
if not args.github_oauth_key:
parser.error(
"Specify either environment variable {} or option {}".format(
github_oauth_key_var, github_oauth_key_option
)
)
return args
def post_message_to_github(msg, github_oauth_key, pr_link):
|
print("Attempting to post to Github...")
ghprb_pull_id = os.environ["ghprbPullId"]
api_url = os.getenv("GITHUB_API_BASE", "https://api.github.com/repos/apache/spark")
url = api_url + "/issues/" + ghprb_pull_id + "/comments"
posted_message = json.dumps({"body": msg})
request = Request(
url,
headers={
"Authorization": "token {}".format(github_oauth_key),
"Content-Type": "application/json",
},
data=posted_message.encode("utf-8"),
)
try:
response = urlopen(request)
if response.getcode() == 201:
print(" > Post successful.")
else:
print_err("Surprising post response.")
print_err(" > http_code: {}".format(response.getcode()))
print_err(" > api_response: {}".format(response.read()))
print_err(" > data: {}".format(posted_message))
except HTTPError as http_e:
print_err("Failed to post message to Github.")
print_err(" > http_code: {}".format(http_e.code))
print_err(" > api_response: {}".format(http_e.read()))
print_err(" > data: {}".format(posted_message))
except URLError as url_e:
print_err("Failed to post message to Github.")
print_err(" > urllib_status: {}".format(url_e.reason[1]))
print_err(" > data: {}".format(posted_message))
def print_err(msg):
print(msg, file=sys.stderr)
def _main():
args = _parse_args()
msg = sys.stdin.read()
post_message_to_github(msg, args.github_oauth_key, args.pr_link)
return 0
if __name__ == "__main__":
sys.exit(_main())
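# Invocation sketch (not part of the original script): the comment body is piped in
# on stdin, the PR link and OAuth key placeholders reuse the same environment
# variables the script already reads, and ghprbPullId must also be exported because
# post_message_to_github() reads it directly from the environment.
#   echo "Build finished." | ./post_github_pr_comment.py \
#       --pr-link "$ghprbPullLink" --github-oauth-key "$GITHUB_OAUTH_KEY"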
|
eunchong/build
|
third_party/twisted_10_2/twisted/test/test_strports.py
|
Python
|
bsd-3-clause
| 5,121
| 0.003515
|
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.application.strports}.
"""
from twisted.trial.unittest import TestCase
from twisted.application import strports
from twisted.application import internet
from twisted.internet.test.test_endpoints import ParserTestCase
from twisted.internet.protocol import Factory
from twisted.internet.endpoints import TCP4ServerEndpoint, UNIXServerEndpoint
class DeprecatedParseTestCase(ParserTestCase):
"""
L{strports.parse} is deprecated. It's an alias for a method that is now
private in L{twisted.internet.endpoints}.
"""
def parse(self, *a, **kw):
result = strports.parse(*a, **kw)
warnings = self.flushWarnings([self.parse])
self.assertEquals(len(warnings), 1)
self.assertEquals(
warnings[0]['message'],
"twisted.application.strports.parse was deprecated "
"in Twisted 10.2.0: in favor of twisted.internet.endpoints.serverFromString")
return result
def test_simpleNumeric(self):
"""
Base numeric ports should be parsed as TCP.
"""
self.assertEquals(self.parse('80', self.f),
('TCP', (80, self.f), {'interface':'', 'backlog':50}))
def test_allKeywords(self):
"""
A collection of keyword arguments with no prefixed type, like 'port=80',
will be parsed as keyword arguments to 'tcp'.
"""
self.assertEquals(self.parse('port=80', self.f),
('TCP', (80, self.f), {'interface':'', 'backlog':50}))
class ServiceTestCase(TestCase):
"""
Tests for L{strports.service}.
"""
def test_service(self):
"""
L{strports.service} returns a L{StreamServerEndpointService}
constructed with an endpoint produced from
L{endpoint.serverFromString}, using the same syntax.
"""
reactor = object() # the cake is a lie
aFactory = Factory()
aGoodPort = 1337
svc = strports.service(
'tcp:'+str(aGoodPort), aFactory, reactor=reactor)
self.assertIsInstance(svc, internet.StreamServerEndpointService)
# See twisted.application.test.test_internet.TestEndpointService.
# test_synchronousRaiseRaisesSynchronously
self.assertEquals(svc._raiseSynchronously, True)
self.assertIsInstance(svc.endpoint, TCP4ServerEndpoint)
# Maybe we should implement equality for endpoints.
self.assertEquals(svc.endpoint._port, aGoodPort)
self.assertIdentical(svc.factory, aFactory)
self.assertIdentical(svc.endpoint._reactor, reactor)
def test_serviceDefaultReactor(self):
"""
L{strports.service} will use the default reactor when none is provided
as an argument.
"""
from twisted.internet import reactor as globalReactor
aService = strports.service("tcp:80", None)
self.assertIdentical(aService.endpoint._reactor, globalReactor)
def test_serviceDeprecatedDefault(self):
"""
L{strports.service} still accepts a 'default' argument, which will
affect the parsing of 'default' (i.e. 'not containing a colon')
endpoint descriptions, but this behavior is deprecated.
"""
svc = strports.service("8080", None, "unix")
self.assertIsInstance(svc.endpoint, UNIXServerEndpoint)
warnings = self.flushWarnings([self.test_serviceDeprecatedDefault])
self.assertEquals(warnings[0]['category'], DeprecationWarning)
self.assertEquals(
warnings[0]['message'],
"The 'default' parameter was deprecated in Twisted 10.2.0. "
"Use qualified endpoint descriptions; for example, 'tcp:8080'.")
self.assertEquals(len(warnings), 1)
# Almost the same case, but slightly tricky - explicitly passing the old
# default value, None, also must trigger a deprecation warning.
svc = strports.service("tcp:8080", None, None)
self.assertIsInstance(svc.endpoint, TCP4ServerEndpoint)
warnings = self.flushWarnings([self.test_serviceDeprecatedDefault])
self.assertEquals(warnings[0]['category'], DeprecationWarning)
self.assertEquals(
warnings[0]['message'],
"The 'default' parameter was deprecated in Twisted 10.2.0.")
self.assertEquals(len(warnings), 1)
def test_serviceDeprecatedUnqualified(self):
"""
Unqualified strport descriptions, i.e. "8080", are deprecated.
"""
svc = strports.service("8080", None)
self.assertIsInstance(svc.endpoint, TCP4ServerEndpoint)
warnings = self.flushWarnings(
[self.test_serviceDeprecatedUnqualified])
self.assertEquals(warnings[0]['category'], DeprecationWarning)
self.assertEquals(
warnings[0]['message'],
"Unqualified strport description passed to 'service'."
"Use qualified endpoint descriptions; for example, 'tcp:8080'.")
self.assertEquals(len(warnings), 1)
|
LMML-Team/AlexaSkills
|
GenerateText/Generate_text.py
|
Python
|
mit
| 4,460
| 0.005605
|
import time
import numpy as np
from collections import Counter
from collections import defaultdict
import matplotlib.pyplot as plt
def unzip(pairs):
"""
Splits list of pairs (tuples) into separate lists
Parameter(s)
--------------
pairs(list of tuples):
List of pairs to be split
Return(s)
--------------
list 1:
List containing the first term of all pairs
list 2:
List containing the second term of all pairs
Example(s)
--------------
pairs = [("a", 1), ("b", 2)] --> ["a", "b"] and [1, 2]
"""
return tuple(zip(*pairs))
def normalize(counter):
"""
Convert counter to a list of (letter, frequency) pairs, sorted in descending order of frequency
Parameter(s)
--------------
counter (Counter-instance):
Counter to be converted. Letters and their # of occurrences
Return(s)
--------------
list[tuples]:
List containing tuples of (letter, frequency) pairs
Example(s)
--------------
counter = Counter({'a': 1, 'b': 3})
normalize(counter) = [('b', 0.75), ('a', 0.25)]
"""
total = sum(counter.values())
return [(char, cnt / total) for char, cnt in counter.most_common()]
def train_lm(text, n):
"""
Train character-based n-gram language model.
Given a sequence of n-1 characters, what is the probability distribution
for the n-th character in the sequence.
Parameter(s)
--------------
text(str):
A string (any case)
n(int):
Order of n-gram model
Returns
--------------
dict({ strings of len n-1: list[(char, probability)]:
Maps histories of characters (led by ~ if # of proceeding chars < n-1)
to list of pairs containing chars and the probability of them appearing
after that particular history
Example(s)
--------------
text = "cacao"
n = 3
train_lm(text, n) ={'ac': [('a', 1.0)],
'ca': [('c', 0.5), ('o', 0.5)],
'~c': [('a', 1.0)],
'~~': [('c', 1.0)]}
"""
# Initialize lm and history
raw_lm = defaultdict(Counter)
history = "~" * (n - 1)
# Count number of times characters appear following different histories
for x in text:
raw_lm[history][x] += 1
history = history[1:] + x
# Create final dictionary by normalizing
lm = {history: normalize(counter) for history, counter in raw_lm.items()}
return lm
def generate_letter(lm, history):
"""
Generates a letter according to the probability distribution of the specified history
Parameter(s)
--------------
lm (Dict{str: Tuple(str, float)}):
The n-gram language model relating history to letter probabilities
history (str):
Length n-1 context for use as key in language model
Returns
--------------
str:
The predicted character. '~' if history is not in language model.
"""
# Default behaviour if history is not in the model
if history not in lm:
return "~"
# Uses unzip function to split the letters from their probabilities
letters_probs = unzip(lm[history])
# Randomly selects and returns a letter from letters with its p in probs
i = np.random.choice(letters_probs[0], p=letters_probs[1])
return i
def generate_text(lm, n, nletters=100):
"""
Generates nletters random letters according to n-gram language model, lm
Parameters
--------------
lm (Dict{str: Tuple(str, float)}):
The n-gram language model relating history to letter probabilities
n (int):
Order of n-gram model.
nletters: int
Number of letters to randomly generate.
Returns
--------------
str
Model-generated text.
"""
# Initializes history and text
text = []
history_indices = np.arange(len(lm.keys()))
index = np.random.choice(history_indices)
history = list(lm.keys())
history = history[index]
print(history)
print(type(history))
# uses generate_letter function to generate text that is nletters long
for i in range(nletters):
c = generate_letter(lm, history)
text.append(c)
history = history[1:] + c
# Joins and returns the characters in text
return "".join(text)
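# Usage sketch for the helpers above (not from the original file); the corpus is
# made up and the sampled output differs between runs.
corpus = "the cat sat on the mat and the dog sat on the log"
lm = train_lm(corpus, n=3)            # length-2 histories -> next-letter distribution
print(generate_text(lm, n=3, nletters=60))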
|
getsentry/sentry-teamwork
|
sentry_teamwork/plugin.py
|
Python
|
apache-2.0
| 5,373
| 0.000186
|
from __future__ import absolute_import
import sentry_teamwork
from django import forms
from django.http import HttpResponse  # needed for the 400 response in view() below
from django.utils.translation import ugettext_lazy as _
from requests.exceptions import RequestException
from sentry.plugins.base import JSONResponse
from sentry.plugins.bases.issue import IssuePlugin, NewIssueForm
from sentry.utils.http import absolute_uri
from .client import TeamworkClient
ISSUES_URL = 'https://github.com/getsentry/sentry-teamwork/issues'
class TeamworkSettingsForm(forms.Form):
url = forms.URLField(
label=_('Teamwork URL'),
help_text=('i.e. http://sentry.teamwork.com'),
)
token = forms.CharField(label=_('Teamwork API Token'))
class TeamworkTaskForm(NewIssueForm):
title = forms.CharField(
label=_('Title'), max_length=200,
widget=forms.TextInput(attrs={'class': 'span9'}))
description = forms.CharField(
label=_('Description'),
widget=forms.Textarea(attrs={'class': 'span9'}))
project = forms.ChoiceField(label=_('Project'), choices=())
tasklist = forms.ChoiceField(label=_('Task List'), choices=())
create_issue_template = 'sentry_teamwork/create_issue.html'
def __init__(self, client, data=None, initial=None):
super(TeamworkTaskForm, self).__init__(data=data, initial=initial)
try:
project_list = client.list_projects()
except RequestException as e:
raise forms.ValidationError(
_('Error contacting Teamwork API: %s') % str(e))
self.fields['project'].choices = [
(str(i['id']), i['name']) for i in project_list
]
self.fields['project'].widget.choices = self.fields['project'].choices
if self.data.get('project'):
try:
tasklist_list = client.list_tasklists(data['project'])
except RequestException as e:
raise forms.ValidationError(
_('Error contacting Teamwork API: %s') % str(e))
self.fields['tasklist'].choices = [
(str(i['id']), i['name']) for i in tasklist_list
]
self.fields['tasklist'].widget.choices = self.fields['tasklist'].choices
class TeamworkTaskPlugin(IssuePlugin):
author = "Sentry Team"
author_url = "https://github.com/getsentry/sentry-teamwork"
title = _('Teamwork')
description = _('Create Teamwork Tasks.')
slug = 'teamwork'
resource_links = [
(_('Bug Tracker'), ISSUES_URL),
(_('Source'), 'https://github.com/getsentry/sentry-teamwork'),
]
conf_title = title
conf_key = slug
version = sentry_teamwork.VERSION
project_conf_form = TeamworkSettingsForm
new_issue_form = TeamworkTaskForm
create_issue_template = 'sentry_teamwork/create_issue.html'
def _get_group_description(self, request, group, event):
"""
Return group description in markdown-compatible format.
This overrides an internal method to IssuePlugin.
"""
output = [
absolute_uri(group.get_absolute_url()),
]
body = self._get_group_body(request, group, event)
if body:
output.extend([
'',
'\n'.join(' ' + line for line in body.splitlines()),
])
return '\n'.join(output)
def is_configured(self, request, project, **kwargs):
return all((
self.get_option(key, project)
for key in ('url', 'token')
|
))
def get_client(self, project):
return TeamworkClient(
base_url=self.get_option('url', project),
token=self.get_option('token', project),
)
def get_new_issue_form(self, request, group, event, **kwargs):
"""
Return a Form for the "Create new issue" page.
"""
return self.new_issue_form(
client=self.get_client(group.project),
data=request.POST or None,
initial=self.get_initial_form_data(request, group, event),
)
def get_issue_url(self, group, issue_id, **kwargs):
url = self.get_option('url', group.project)
return '%s/tasks/%s' % (url.rstrip('/'), issue_id)
def get_new_issue_title(self, **kwargs):
return _('Create Teamwork Task')
def create_issue(self, request, group, form_data, **kwargs):
client = self.get_client(group.project)
try:
task_id = client.create_task(
content=form_data['title'],
description=form_data['description'],
tasklist_id=form_data['tasklist'],
)
except RequestException as e:
raise forms.ValidationError(
_('Error creating Teamwork task: %s') % str(e))
return task_id
def view(self, request, group, **kwargs):
op = request.GET.get('op')
# TODO(dcramer): add caching
if op == 'getTaskLists':
project_id = request.GET.get('pid')
if not project_id:
return HttpResponse(status=400)
client = self.get_client(group.project)
task_list = client.list_tasklists(project_id)
return JSONResponse([
{'id': i['id'], 'text': i['name']} for i in task_list
])
return super(TeamworkTaskPlugin, self).view(request, group, **kwargs)
|
Mu5tank05/Walter
|
plugins/tfc.py
|
Python
|
gpl-3.0
| 437
| 0.006865
|
from cloudbot import hook
from cloudbot.util import http
# https://raw.githubusercontent.com/AwesomePowered/CloudBot/e01b2ab41985db8dbd6f6a1501ab9353f326188f/plugins/theyfightcrime.py
@hook.command("tfc")
def plot():
bold = "\x02"
try:
soup = http.get_soup("http://www.theyfightcrime.org")
plot = soup.find('table').find('p').text
return bold + plot + bold
except:
return "Could not get plot."
|
jevinw/rec_utilities
|
babel_util/scripts/ai_to_pajek.py
|
Python
|
agpl-3.0
| 918
| 0.003268
|
#!/usr/bin/env python
from util.misc import open_file, Benchmark
from util.PajekFactory import PajekFactory
import ujson
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Creates Pajek (.net) files from JSON")
parser.add_argument('outfile')
parser.add_argument('--temp-dir', help="Directory to store temporary files in", default=None)
parser.add_argument('infile', nargs='+')
arguments = parser.parse_args()
b = Benchmark()
pjk = PajekFactory(temp_dir=arguments.temp_dir)
for filename in arguments.infile:
with open_file(filename) as f:
for line in f:
entry = ujson.loads(line)
for citation in entry["citedBy"]:
pjk.add_edge(citation, entry["id"])
b.increment()
b.print_freq()
with open_file(arguments.outfile, "w") as f:
pjk.write(f)
|
LibCrowds/libcrowds-analyst
|
libcrowds_analyst/analysis/convert_a_card.py
|
Python
|
mit
| 1,810
| 0
|
# -*- coding: utf8 -*-
"""Convert-a-Card analysis module."""
import time
import enki
from libcrowds_analyst.analysis import helpers
from libcrowds_analyst import object_loader
MATCH_PERCENTAGE = 60
VALID_KEYS = ['oclc', 'shelfmark', 'comments']
def analyse(api_key, endpoint, doi, project_id, result_id, project_short_name,
path, throttle, **kwargs):
"""Analyse Convert-a-Card results."""
e = enki.Enki(api_key, endpoint, project_short_name, all=1)
result = enki.pbclient.find_results(project_id, id=result_id, limit=1,
all=1)[0]
df = helpers.get_task_run_df(e, result.task_id)
|
df = df.loc[:, df.columns.isin(VALID_KEYS)]
df = helpers.drop_empty_rows(df)
n_task_runs = len(df.index)
# Initialise the result
defaults = {k: "" for k in df.keys()}
result.info = helpers.init_result_info(doi, path, defaults)
has_answers = not df.empty
has_matches = helpers.has_n_matches(df, n_task_runs, MATCH_PERCENTAGE)
# Matching answers
if has_answers and has_matches:
for k in df.keys():
result.info[k] = df[k].value_counts().idxmax()
# Varied answers
elif has_answers:
result.info['analysis_complete'] = False
enki.pbclient.update_result(result)
time.sleep(throttle)
def analyse_all(**kwargs):
"""Analyse all Convert-a-Card results."""
print kwargs
e = enki.Enki(kwargs['api_key'], kwargs['endpoint'],
kwargs['project_short_name'], all=1)
results = object_loader.load(enki.pbclient.find_results,
project_id=e.project.id, all=1)
for result in results:
kwargs['project_id'] = e.project.id
kwargs['result_id'] = result.id
print kwargs
analyse(**kwargs.copy())
|
chawk/django-easy-avatar
|
easy_avatar/urls.py
|
Python
|
mit
| 148
| 0.02027
|
from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
# Examples:
url(r'^upload/$', 'easy_avatar.views.upload'),
)
|
DamienIrving/ocean-analysis
|
visualisation/water_cycle/plot_pe_spatial.py
|
Python
|
mit
| 9,330
| 0.005681
|
"""Plot spatial P-E"""
import re
import sys
script_dir = sys.path[0]
import os
import pdb
import argparse
import numpy as np
import matplotlib.pyplot as plt
import iris
from iris.experimental.equalise_cubes import equalise_attributes
import cartopy.crs as ccrs
import cmdline_provenance as cmdprov
repo_dir = '/'.join(script_dir.split('/')[:-2])
module_dir = repo_dir + '/modules'
sys.path.append(module_dir)
try:
import general_io as gio
import timeseries
except ImportError:
raise ImportError('Script and modules in wrong directories')
def regrid(cube):
"""Define the sample points for interpolation"""
lats = list(np.arange(-89, 90, 2))
lons = list(np.arange(1, 360, 2))
sample_points = []
coord_names = [coord.name() for coord in cube.dim_coords]
if 'time' in coord_names:
coord_names.remove('time')
for coord in coord_names:
if 'lat' in coord:
sample_points.append((coord, lats))
elif 'lon' in coord:
sample_points.append((coord, lons))
cube = cube.interpolate(sample_points, iris.analysis.Linear())
cube.coord('latitude').guess_bounds()
cube.coord('longitude').guess_bounds()
cube.coord('latitude').standard_name = 'latitude'
cube.coord('latitude').long_name = 'latitude'
cube.coord('latitude').var_name = 'lat'
cube.coord('latitude').units = 'degrees_north'
cube.coord('latitude').attributes = {}
cube.coord('longitude').standard_name = 'longitude'
cube.coord('longitude').long_name = 'longitude'
cube.coord('longitude').var_name = 'lon'
cube.coord('longitude').units = 'degrees_east'
cube.coord('longitude').circular = True
cube.coord('longitude').attributes = {}
return cube
def get_cube_list(infiles, agg, time_bounds=None, quick=False):
"""Read and process data."""
assert agg in ['clim', 'anom']
ensemble_cube_list = iris.cube.CubeList([])
for ensnum, ensemble_member in enumerate(infiles):
print(ensemble_member)
cube, history = gio.combine_files(ensemble_member,
'precipitation_minus_evaporation_flux',
new_calendar='365_day')
cube = gio.check_time_units(cube)
if time_bounds:
time_constraint = gio.get_time_constraint(time_bounds)
cube = cube.extract(time_constraint)
elif quick:
cube = cube[0:120, ::]
if agg == 'clim':
cube = timeseries.convert_to_annual(cube, aggregation='mean', days_in_month=True)
cube = cube.collapsed('time', iris.analysis.MEAN)
elif agg == 'anom':
start_data = cube.data[0, ::]
cube = cube[-1, ::]
cube.data = cube.data - start_data
cube.remove_coord('time')
cube = regrid(cube)
new_aux_coord = iris.coords.AuxCoord(ensnum, long_name='ensemble_member', units='no_unit')
cube.add_aux_coord(new_aux_coord)
cube.cell_methods = ()
ensemble_cube_list.append(cube)
print("Total number of models:", len(ensemble_cube_list))
return ensemble_cube_list, history
def ensemble_stats(cube_list):
"""Get the ensemble mean and sign agreement"""
equalise_attributes(cube_list)
ensemble_cube = cube_list.merge_cube()
ensemble_mean = ensemble_cube.collapsed('ensemble_member', iris.analysis.MEAN, mdtol=0)
ensemble_mean.remove_coord('ensemble_member')
ensemble_agreement = ensemble_mean.copy()
nmodels = ensemble_cube.shape[0]
pos_data = ensemble_cube.data > 0.0
ensemble_agreement.data = pos_data.sum(axis=0) / nmodels
return ensemble_mean, ensemble_agreement
def plot_data(ax, ensemble_mean, ensemble_agreement, agg, title,
agreement_bounds=None, clim=None):
"""Plot ensemble data"""
assert agg in ['clim', 'anom']
inproj = ccrs.PlateCarree()
plt.sca(ax)
plt.gca().set_global()
if agg == 'clim':
cmap = 'BrBG'
levels = np.arange(-7, 8, 1)
cbar_label = 'Annual mean P-E (mm/day)'
else:
cmap = 'RdBu'
levels = np.arange(-9000, 9100, 1500)
cbar_label = 'Time-integrated P-E anomaly, 1861-2005 (kg m-2)'
x = ensemble_mean.coord('longitude').points
y = ensemble_mean.coord('latitude').points
cf = ax.contourf(x, y, ensemble_mean.data,
transform=inproj,
cmap=cmap,
levels=levels,
extend='both')
if agreement_bounds:
hatch_data = ensemble_agreement.data
ax.contourf(x, y, hatch_data,
transform=inproj,
colors='none',
levels=agreement_bounds,
|
hatches=['\\\\'],) # # '.', '/', '\\', '\\\\', '*'
if clim:
ce = ax.contour(x, y, clim.data,
transform=inproj,
colors=['goldenrod', 'black', 'green'],
levels=np.array([-2, 0, 2]))
cbar = plt.colorbar(cf)
cbar.set_label(cbar_label) #, fontsize=label_size)
# cbar.ax.tick_params(labelsize=number_size)
plt.gca().coastlines()
ax.set_title(title)
if agg == 'clim':
lons = np.arange(-180, 180, 0.5)
lats_sh = np.repeat(-20, len(lons))
lats_nh = np.repeat(20, len(lons))
plt.plot(lons, lats_sh, color='0.5') # linestyle, linewidth
plt.plot(lons, lats_nh, color='0.5')
def main(args):
"""Run the program."""
clim_cube_list, clim_history = get_cube_list(args.clim_files, 'clim', quick=args.quick)
clim_ensemble_mean, clim_ensemble_agreement = ensemble_stats(clim_cube_list)
clim_ensemble_mean.data = clim_ensemble_mean.data * 86400
ghg_cube_list, ghg_history = get_cube_list(args.ghg_files, 'anom', time_bounds=args.time_bounds)
ghg_ensemble_mean, ghg_ensemble_agreement = ensemble_stats(ghg_cube_list)
aa_cube_list, aa_history = get_cube_list(args.aa_files, 'anom', time_bounds=args.time_bounds)
aa_ensemble_mean, aa_ensemble_agreement = ensemble_stats(aa_cube_list)
hist_cube_list, hist_history = get_cube_list(args.hist_files, 'anom', time_bounds=args.time_bounds)
hist_ensemble_mean, hist_ensemble_agreement = ensemble_stats(hist_cube_list)
width = 25
height = 10
fig = plt.figure(figsize=[width, height])
outproj = ccrs.PlateCarree(central_longitude=180.0)
nrows = 2
ncols = 2
ax1 = plt.subplot(nrows, ncols, 1, projection=outproj)
plot_data(ax1,
clim_ensemble_mean,
clim_ensemble_agreement,
'clim',
'(a) piControl',
agreement_bounds=[0.33, 0.66])
ax2 = plt.subplot(nrows, ncols, 2, projection=outproj)
plot_data(ax2,
ghg_ensemble_mean,
ghg_ensemble_agreement,
'anom',
'(b) GHG-only',
agreement_bounds=[0.33, 0.66],
clim=clim_ensemble_mean)
ax3 = plt.subplot(nrows, ncols, 3, projection=outproj)
plot_data(ax3,
aa_ensemble_mean,
aa_ensemble_agreement,
'anom',
'(c) AA-only',
agreement_bounds=[0.33, 0.66],
clim=clim_ensemble_mean)
ax4 = plt.subplot(nrows, ncols, 4, projection=outproj)
plot_data(ax4,
hist_ensemble_mean,
hist_ensemble_agreement,
'anom',
'(d) historical',
agreement_bounds=[0.33, 0.66],
clim=clim_ensemble_mean)
fig.tight_layout()
fig.subplots_adjust(wspace=-0.15, hspace=0.2)
plt.savefig(args.outfile, bbox_inches='tight', dpi=300)
metadata_dict = {args.ghg_files[-1]: ghg_history[-1],
args.clim_files[-1]: clim_history[-1]}
log_text = cmdprov.new_log(infile_history=metadata_dict, git_repo=repo_dir)
log_file = re.sub('.png', '.met', args.outfile)
cmdprov.write_log(log_file, log_text)
if __name__ == '__main__':
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/HMM/DynamicProgramming.py
|
Python
|
gpl-2.0
| 12,644
| 0.001107
|
"""Dynamic Programming algorithms for general usage.
This module contains classes which implement Dynamic Programming
algorithms that can be used generally.
"""
class AbstractDPAlgorithms:
"""An abstract class to calculate forward and backward probabilities.
This class should not be instantiated directly, but should be used
through a derived class which implements proper scaling of variables.
This class is just meant to encapsulate the basic forward and backward
algorithms, and allow derived classes to deal with the problems of
multiplying probabilities.
Derived class of this must implement:
o _forward_recursion -- Calculate the forward values in the recursion
using some kind of technique for preventing underflow errors.
o _backward_recursion -- Calculate the backward values in the recursion
step using some technique to prevent underflow errors.
"""
def __init__(self, markov_model, sequence):
"""Initialize to calculate forward and backward probabilities.
Arguments:
o markov_model -- The current Markov model we are working with.
o sequence -- A training sequence containing a set of emissions.
"""
self._mm = markov_model
self._seq = sequence
def _forward_recursion(self, cur_state, sequence_pos, forward_vars):
"""Calculate the forward recursion value.
"""
raise NotImplementedError("Subclasses must implement")
def forward_algorithm(self):
"""Calculate sequence probability using the forward algorithm.
This implements the foward algorithm, as described on p57-58 of
Durbin et al.
Returns:
o A dictionary containing the forward variables. This has keys of the
form (state letter, position in the training sequence), and values
containing the calculated forward variable.
o The calculated probability of the sequence.
"""
# all of the different letters that the state path can be in
state_letters = self._seq.states.alphabet.letters
# -- initialize the algorithm
#
# NOTE: My index numbers are one less than what is given in Durbin
# et al, since we are indexing the sequence going from 0 to
# (Length - 1) not 1 to Length, like in Durbin et al.
#
forward_var = {}
# f_{0}(0) = 1
forward_var[(state_letters[0], -1)] = 1
# f_{k}(0) = 0, for k > 0
for k in range(1, len(state_letters)):
forward_var[(state_letters[k], -1)] = 0
# -- now do the recursion step
# loop over the training sequence
# Recursion step: (i = 1 .. L)
for i in range(len(self._seq.emissions)):
|
# now loop over the letters in the state path
for main_state in state_letters:
# calculate the forward value using the appropriate
# method to prevent underflow errors
forward_value = self._forward_recursion(main_state, i,
forward_var)
if forward_value is not None:
forward_var[(main_state, i)] = forward_value
# -- termination step - calculate the probability of the sequence
first_state = state_letters[0]
seq_prob = 0
for state_item in state_letters:
# f_{k}(L)
forward_value = forward_var[(state_item,
len(self._seq.emissions) - 1)]
# a_{k0}
transition_value = self._mm.transition_prob[(state_item,
first_state)]
seq_prob += forward_value * transition_value
return forward_var, seq_prob
def _backward_recursion(self, cur_state, sequence_pos, forward_vars):
"""Calculate the backward recursion value.
"""
raise NotImplementedError("Subclasses must implement")
def backward_algorithm(self):
"""Calculate sequence probability using the backward algorithm.
This implements the backward algorithm, as described on p58-59 of
Durbin et al.
Returns:
o A dictionary containing the backwards variables. This has keys
of the form (state letter, position in the training sequence),
and values containing the calculated backward variable.
"""
# all of the different letters that the state path can be in
state_letters = self._seq.states.alphabet.letters
# -- initialize the algorithm
#
# NOTE: My index numbers are one less than what is given in Durbin
# et al, since we are indexing the sequence going from 0 to
# (Length - 1) not 1 to Length, like in Durbin et al.
#
backward_var = {}
first_letter = state_letters[0]
# b_{k}(L) = a_{k0} for all k
for state in state_letters:
backward_var[(state, len(self._seq.emissions) - 1)] = \
self._mm.transition_prob[(state, state_letters[0])]
# -- recursion
# first loop over the training sequence backwards
# Recursion step: (i = L - 1 ... 1)
all_indexes = range(len(self._seq.emissions) - 1)
all_indexes.reverse()
for i in all_indexes:
# now loop over the letters in the state path
for main_state in state_letters:
# calculate the backward value using the appropriate
# method to prevent underflow errors
backward_value = self._backward_recursion(main_state, i,
backward_var)
if backward_value is not None:
backward_var[(main_state, i)] = backward_value
# skip the termination step to avoid recalculations -- you should
# get sequence probabilities using the forward algorithm
return backward_var
class ScaledDPAlgorithms(AbstractDPAlgorithms):
"""Implement forward and backward algorithms using a rescaling approach.
This scales the f and b variables, so that they remain within a
manageable numerical interval during calculations. This approach is
described in Durbin et al. on p 78.
This approach is a little more straightforward than log transformation
but may still give underflow errors for some types of models. In these
cases, the LogDPAlgorithms class should be used.
"""
def __init__(self, markov_model, sequence):
"""Initialize the scaled approach to calculating probabilities.
Arguments:
o markov_model -- The current Markov model we are working with.
o sequence -- A TrainingSequence object that must have a
set of emissions to work with.
"""
AbstractDPAlgorithms.__init__(self, markov_model, sequence)
self._s_values = {}
def _calculate_s_value(self, seq_pos, previous_vars):
"""Calculate the next scaling variable for a sequence position.
This utilizes the approach of choosing s values such that the
sum of all of the scaled f values is equal to 1.
Arguments:
o seq_pos -- The current position we are at in the sequence.
o previous_vars -- All of the forward or backward variables
calculated so far.
Returns:
o The calculated scaling variable for the sequence item.
"""
# all of the different letters the state can have
state_letters = self._seq.states.alphabet.letters
# loop over all of the possible states
s_value = 0
for main_state in state_letters:
emission = self._mm.emission_prob[(main_state,
self._seq.emissions[seq_pos])]
# now sum over all of the previous vars and transitions
trans_and_var_sum = 0
for second_state in self._mm.transitions_from(main_state):
# the value of the previous f or b value
var_value = previou
|
gem/oq-hazardlib
|
openquake/hmtk/strain/regionalisation/__init__.py
|
Python
|
agpl-3.0
| 1,925
| 0.004675
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (c) 2010-2017, GEM Foundation, G. Weatherill, M. Pagani,
# D. Monelli.
#
# The Hazard Modeller's Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
#License as published by the Free Software Foundation, either version
#3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
#DISCLAIMER
#
# The software Hazard Modeller's Toolkit (openquake.hmtk) provided herein
#is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
#Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM's OpenQuake suite
# (http://www.globalquakemodel.org/openquake) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM's OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the hazard scientific staff of the GEM Model Facility
# (hazard@globalquakemodel.org).
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is therefore distributed WITHOUT
#ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
#FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
#for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
|
supriyasawant/gstudio
|
gnowsys-ndf/gnowsys_ndf/ndf/urls/file.py
|
Python
|
agpl-3.0
| 1,918
| 0.009385
|
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
urlpatterns = patterns('gnowsys_ndf.ndf.views.file',
url(r'^[/]$', 'file', name='file'),
# url(r'^/(?P<file_id>[\w-]+)$', 'file', name='file'),
url(r'^/uploadDoc/$', 'uploadDoc', name='uploadDoc'), #Direct ot html template
url(r'^/submitDoc/', 'submitDoc', name='submitDoc'),
url(r'^/submit/', 'submitDoc', name='submitDoc'),
url(r'^/documentList/', 'GetDoc', name='documentList'),
url(r'^/readDoc/(?P<_id>[\w-]+)/(?:(?P<file_name>[^/]+))?$', 'readDoc', name='read_file'),
url(r'^/search/$', 'file_search', name='file_search'),
url(r'^/details/(?P<_id>[\w-]+)$', 'file_detail', name='file_detail'),
url(r'^/(?P<_id>[\w-]+)$', 'file_detail', name='file_detail'),
url(r'^/thumbnail/(?P<_id>[\w-]+)$', 'getFileThumbnail', name='getFileThumbnail'),
#url(r'^/delete_file/(?P<_id>[\w-]+)$', 'delete_file', name='delete_file'),
url(r'^/delete/(?P<_id>[\w-]+)$', 'delete_file', name='delete_file'),
url(r'^/edit_file/(?P<_id>[\w-]+)$', 'file_edit', name='file_edit'),
# url(r'^/data-review/$', 'data_review', name='data_review'),
# url(r'^/data-review/page-no=(?P<page_no>\d+)/$', 'data_review', name='data_review_page'),
# url(r'^/data-review/save/$', 'data_review_save', name='data_review_save'),
url(r'^/edit/(?P<_id>[\w-]+)$', 'file_edit', name='file_edit'),
url(r'^/(?P<filetype>[\w-]+)/page-no=(?P<page_no>\d+)/$', 'paged_file_objs', name='paged_file_objs'),
)
|
shamanu4/netmiko
|
netmiko/hp/hp_procurve_ssh.py
|
Python
|
mit
| 2,061
| 0
|
from __future__ import print_function
from __future__ import unicode_literals
import re
import time
import socket
from netmiko.cisco_base_connection import CiscoSSHConnection
class HPProcurveSSH(CiscoSSHConnection):
def session_preparation(self):
"""
Prepare the session after the connection has been established.
Procurve uses - 'Press any key to continue'
"""
delay_factor = self.select_delay_factor(delay_factor=0)
time.sleep(2 * delay_factor)
self.write_channel("\n")
time.sleep(2 * delay_factor)
self.write_channel("\n")
time.sleep(2 * delay_factor)
# HP output contains VT100 escape codes
self.ansi_escape_codes = True
self.set_base_prompt()
self.disable_paging(command="\nno page\n")
self.set_terminal_width(command='terminal width 511')
def enable(self, cmd='enable', pattern='password', re_flags=re.IGNORECASE,
default_username='manager'):
"""Enter enable mode"""
debug = False
output = self.send_command_timing(cmd)
if 'username' in output.lower():
output += self.send_command_timing(default_username)
if 'password' in output.lower():
output += self.send_command_timing(self.secret)
if debug:
print(output)
self.clear_buffer()
return output
def cleanup(self):
"""Gracefully exit the SSH session."""
self.exit_config_mode()
self.write_channel("logout\n")
count = 0
while count <= 5:
time.sleep(.5)
output = self.read_channel()
if 'Do you want to log out' in output:
self.write_channel("y\n")
# Don't automatically save the config (user's responsibility)
elif 'Do you want to save the current' in output:
self.write_channel("n\n")
try:
self.write_channel("\n")
except socket.error:
break
count += 1
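# Hypothetical connection sketch (not part of the original module); the host and
# credentials are placeholders and a reachable ProCurve switch is assumed, but
# ConnectHandler/send_command/disconnect are standard netmiko calls.
from netmiko import ConnectHandler
conn = ConnectHandler(device_type='hp_procurve', host='192.0.2.10',
                      username='manager', password='secret')
print(conn.send_command('show system'))
conn.disconnect()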
|
cduff4464/2016_summer_XPD
|
out_of_date/matplotlib_demo/Open_New_Plot_Demo.py
|
Python
|
bsd-2-clause
| 646
| 0.009288
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import RadioButtons
t = np.arange(0.0, 2.0, 0.01)
s0 = np.sin(2*np.pi*t)
s1 = np.sin(4*np.pi*t)
s2 = np.sin(8*np.pi*t)
fig = plt.figure(1)
rax = plt.subplot2grid((1,1), (0,0))
radio = RadioButtons(rax, ('2 Hz', '4 Hz', '8 Hz', 'Clear'))
def new_plot(Event):
plt.figure(2)
if Event == '2 Hz':
plt.plot(t, s0)
plt.show()
if Event == 'Clear':
plt.clf()  # figure(2) was made current above; clf() alone clears it
if Event == '8 Hz':
plt.plot(t, s2)
plt.show()
if Event == '4 Hz':
plt.plot(t,s1)
plt.show()
radio.on_clicked(new_plot)
plt.show()
|
pamoller/xmlformatter
|
xmlformatter.py
|
Python
|
mit
| 30,777
| 0.000617
|
"""
Format and compress XML documents
"""
import getopt
import re
import sys
import xml.parsers.expat
__version__ = "0.2.4"
DEFAULT_BLANKS = False
DEFAULT_COMPRESS = False
DEFAULT_SELFCLOSE = False
DEFAULT_CORRECT = True
DEFAULT_INDENT = 2
DEFAULT_INDENT_CHAR = " "
DEFAULT_INLINE = True
DEFAULT_ENCODING_INPUT = None
DEFAULT_ENCODING_OUTPUT = None
DEFAULT_EOF_NEWLINE = False
class Formatter:
# Use internal encoding:
encoding_internal = None
def __init__(
self,
indent=DEFAULT_INDENT,
preserve=[],
blanks=DEFAULT_BLANKS,
compress=DEFAULT_COMPRESS,
selfclose=DEFAULT_SELFCLOSE,
indent_char=DEFAULT_INDENT_CHAR,
encoding_input=DEFAULT_ENCODING_INPUT,
encoding_output=DEFAULT_ENCODING_OUTPUT,
inline=DEFAULT_INLINE,
correct=DEFAULT_CORRECT,
eof_newline=DEFAULT_EOF_NEWLINE,
):
# Minify the XML document:
self.compress = compress
# Use self-closing tags
self.selfclose = selfclose
# Correct text nodes
self.correct = correct
# Decode the XML document:
self.encoding_input = self.enc_normalize(encoding_input)
# Encode ouput by:
self.encoding_output = self.enc_normalize(encoding_output)
# Insert indent = indent*level*indent_char:
self.indent = int(indent)
# Indent by char:
self.indent_char = indent_char
# Format inline objects:
self.inline = inline
# Don't compress this elements and their descendants:
self.preserve = preserve
# Preserve blanks lines (collapse multiple into one)
self.blanks = blanks
# Always add a newline character at EOF
self.eof_newline = eof_newline
@property
def encoding_effective(self, enc=None):
if self.encoding_output:
return self.encoding_output
elif self.encoding_internal:
return self.encoding_internal
elif self.encoding_input:
return self.encoding_input
else:
return "UTF-8"
def enc_normalize(self, string):
""" Format an Encoding identifier to upper case. """
if isinstance(string, str):
return string.upper()
return None
def enc_encode(self, strg):
""" Encode a formatted XML document in target"""
if sys.version_info > (3, 0):
return strg.encode(self.encoding_effective) # v3
return strg.decode("utf-8").encode(self.encoding_effective) # v2
def enc_output(self, path, strg):
""" Output according to encoding """
fh = sys.stdout
if strg is not None:
if path is not None:
open(path, "w+b").write(strg)
elif sys.version_info > (3, 0):
fh.buffer.write(strg)
else:
fh.write(strg)
def format_string(self, xmldoc=""):
""" Format an XML document given by xmldoc """
token_list = Formatter.TokenList(self)
token_list.parser.Parse(xmldoc)
return self.enc_encode(str(token_list))
def format_file(self, file):
""" Format an XML document given by path name """
fh = open(file, "rb")
token_list = Formatter.TokenList(self)
token_list.parser.ParseFile(fh)
fh.close()
return self.enc_encode(str(token_list))
class TokenList:
# Being in a cdata section:
cdata_section = False
# Lock deletion of leading whitespace:
desc_mixed_level = None
# Lock indenting:
indent_level = None
# Reference the Formatter:
formatter = None
# Count levels:
level_counter = 0
# Lock deletion of whitespaces:
preserve_level = None
def __init__(self, formatter):
# Keep tokens in a list:
self._list = []
self.formatter = formatter
self.parser = xml.parsers.expat.ParserCreate(
encoding=self.formatter.encoding_input
)
self.parser.specified_attributes = 1
self.parser.buffer_text = True
# Push tokens to buffer:
for pattern in [
"XmlDecl%s",
"ElementDecl%s",
"AttlistDecl%s",
"EntityDecl%s",
"StartElement%s",
"EndElement%s",
"ProcessingInstruction%s",
"CharacterData%s",
"Comment%s",
"Default%s",
"StartDoctypeDecl%s",
"EndDoctypeDecl%s",
"StartCdataSection%s",
"EndCdataSection%s",
"NotationDecl%s",
]:
setattr(
self.parser, pattern % "Handler", self.xml_handler(pattern % "")
)
def __iter__(self):
return iter(self._list)
def __len__(self):
return len(self._list)
def __getitem__(self, pos):
if 0 <= pos < len(self._list):
return self._list[pos]
else:
raise IndexError
def __setitem__(self, pos, value):
if 0 <= pos < len(self._list):
self._list[pos] = value
else:
raise IndexError
def __str__(self):
""" Returns the formatted XML document in UTF-8. """
for step in ["configure", "pre_operate", "post_operate"]:
for tk in iter(self):
getattr(tk, step)()
result = ""
for tk in iter(self):
result += str(tk)
if self.formatter.eof_newline and not result.endswith("\n"):
result += "\n"
return result
def append(self, tk):
""" Add token to tokenlist. """
tk.pos = len(self._list)
self._list.append(tk)
def level_increment(self):
""" Increment level counter. """
self.level_counter += 1
def level_decrement(self):
""" Decrement level counter. """
self.level_counter -= 1
def token_descendant_mixed(self, tk):
""" Mark descendants of mixed content. """
if tk.name == "StartElement":
# Mark every descendant:
if tk.content_model in [2, 3] and self.desc_mixed_level is None:
self.desc_mixed_level = tk.level
return False
return self.desc_mixed_level is not None
elif tk.name == "EndElement":
# Stop marking every descendant:
if tk.level is self.desc_mixed_level:
self.desc_mixed_level = None
elif self.desc_mixed_level is not None:
return True
return False
elif self.desc_mixed_level is None:
return False
return self.desc_mixed_level >= tk.level - 1
def sequence(self, tk, scheme=None):
"""Returns sublist of token list.
None: next to last
EndElement: first to previous"""
if scheme == "EndElement" or (scheme is None and tk.end):
return reversed(self._list[: tk.pos])
return self._list[(tk.pos + 1) :]
def token_indent(self, tk):
if self.formatter.inline:
return self.token_indent_inline(tk)
""" Indent outside of text of mixed content. """
if tk.name == "StartElement":
# Block indenting for descendants of text and mixed content:
if tk.content_model in [2, 3] and self.indent_level is None:
self.indent_level = tk.level
elif self.indent_level is not None:
return False
return True
elif tk.name == "EndElement":
# Unblock indenting for descendants of text and mixed content:
if tk.level == self.indent_level:
self.indent_level = None
elif se
|
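# Illustrative sketch of the Formatter API shown above (the class listing here is
# cut off mid-method, so the complete implementation is assumed); the XML string is
# invented and format_string() returns bytes in the effective output encoding.
formatter = Formatter(indent=2, indent_char=' ', correct=True)
pretty = formatter.format_string('<root><item>1</item><item/></root>')
print(pretty.decode('utf-8'))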
Errantgod/azaharTEA
|
menubar/menus/filechoosers/__init__.py
|
Python
|
mit
| 60
| 0.016667
|
__all__ = ['savedialog.SaveDialog','opendialog.OpenDialog']
|
|
setaris/django-tesseract2
|
deployment/fabfile.py
|
Python
|
bsd-3-clause
| 2,815
| 0.003908
|
import os
from fabric.api import env, run, cd, sudo, settings
from fabric.contrib.files import upload_template
def get_env_variable(var_name):
""" Get the environment variable or raise an exception """
try:
return os.environ[var_name]
except KeyError:
error_msg = "Variable %s is not set in the environment" % var_name
raise Exception(error_msg)
env.user = "ubuntu"
#env.hosts = [get_env_variable('TESSERACT_HOST')]
#env.key_filename = [get_env_variable('TESSERACT_AWS_KEYFILE')]
env.repo_url = 'https://github.com/setaris/django-tesseract2.git'
env.root = "/home/ubuntu/webapps/djangotesseract2"
env.virtualenv = "/home/ubuntu/envs/djangotesseract2env"
env.project = "%s/djangotesseract2" % env.root
env.servicename = "djangotesseract2"
def deploy():
"Full deploy: push, buildout, and reload."
push()
update_dependencies()
syncdb()
update_services()
reload()
def push():
"Push out new code to the server."
with cd("%(project)s" % env):
run("git pull origin master")
def update_services():
upload_template('./nginx.conf',
'/etc/nginx/sites-enabled/default', use_sudo=True)
upload_template('./service.conf',
'/etc/init/djangotesseract2.conf', use_sudo=True)
with cd("/etc/nginx/sites-enabled"):
sudo('rm *.bak')
def update_dependencies():
run("%(virtualenv)s/bin/pip install -r %(root)s/requirements.txt" % env)
def createsuperuser():
with cd("%(project)s" % env):
run("%(virtualenv)s/bin/python manage.py createsuperuser --settings=settings.production" % env)
def syncdb():
with cd("%(project)s" % env):
run("%(virtualenv)s/bin/python manage.py syncdb --noinput --settings=settings.production" % env)
def collectstatic():
with cd("%(project)s" % env):
run("%(virtualenv)s/bin/python manage.py collectstatic --settings=settings.production" % env)
def reload():
with settings(warn_only=True):
sudo("sudo initctl stop djangotesseract2")
sudo("sudo initctl start djangotesseract2")
sudo('/etc/init.d/nginx reload')
def setup():
run("mkdir -p %(root)s" % env)
sudo("aptitude update")
sudo("aptitude -y install git-core python-dev python-setuptools "
"build-essential subversion mercurial nginx "
"libjpeg62 libjpeg62-dev zlib1g-dev libfreetype6 libfreetype6-dev "
"ghostscript imagemagick "
"tesseract-ocr libtesseract-dev"
|
)
sudo("easy_install virtualenv")
run("virtualenv %(virtualenv)s" % env)
run("%(virtualenv)s/bin/pip install -U pip" % env)
with cd("~/webapps/"):
run("git clone %(repo_url)s djangotesseract2" % env)
with cd("%(project)s" % env):
run('mkdir assets')
run('mkdir media')
run('mkdir static')
deploy()
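# Possible invocation sketch (not from the original fabfile), assuming the
# Fabric 1.x command line; the host is a placeholder because env.hosts is
# commented out in this copy.
#   fab -f deployment/fabfile.py -H ubuntu@example-host setup    # first-time provisioning
#   fab -f deployment/fabfile.py -H ubuntu@example-host deploy   # routine release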
|
dmick/teuthology
|
teuthology/config.py
|
Python
|
mit
| 8,592
| 0.000466
|
import os
import yaml
import logging
import collections
def init_logging():
log = logging.getLogger(__name__)
return log
log = init_logging()
class YamlConfig(collections.MutableMapping):
"""
A configuration object populated by parsing a yaml file, with optional
default values.
Note that modifying the _defaults attribute of an instance can potentially
yield confusing results; if you need to do modify defaults, use the class
variable or create a subclass.
"""
_defaults = dict()
def __init__(self, yaml_path=None):
self.yaml_path = yaml_path
if self.yaml_path:
self.load()
else:
self._conf = dict()
def load(self, conf=None):
if conf:
if isinstance(conf, dict):
self._conf = conf
else:
self._conf = yaml.safe_load(conf)
return
if os.path.exists(self.yaml_path):
with open(self.yaml_path) as f:
self._conf = yaml.safe_load(f)
else:
log.debug("%s not found", self.yaml_path)
self._conf = dict()
def update(self, in_dict):
"""
Update an existing configuration using dict.update()
:param in_dict: The dict to use to update
"""
self._conf.update(in_dict)
@classmethod
def from_dict(cls, in_dict):
"""
Build a config object from a dict.
:param in_dict: The dict to use
:returns: The config object
"""
conf_obj = cls()
conf_obj._conf = in_dict
return conf_obj
def to_dict(self):
"""
:returns: A shallow copy of the configuration as a dict
"""
return dict(self._conf)
@classmethod
def from_str(cls, in_str):
"""
Build a config object from a string or yaml stream.
:param in_str: The stream or string
:returns: The config object
"""
conf_obj = cls()
conf_obj._conf = yaml.safe_load(in_str)
return conf_obj
def to_str(self):
"""
:returns: str(self)
"""
return str(self)
def get(self, key, default=None):
return self._conf.get(key, default)
def __str__(self):
return yaml.safe_dump(self._conf, default_flow_style=False).strip()
def __repr__(self):
return self.__str__()
def __getitem__(self, name):
return self.__getattr__(name)
def __getattr__(self, name):
return self._conf.get(name, self._defaults.get(name))
def __contains__(self, name):
return self._conf.__contains__(name)
def __setattr__(self, name, value):
if name.endswith('_conf') or name in ('yaml_path'):
object.__setattr__(self, name, value)
else:
self._conf[name] = value
def __delattr__(self, name):
del self._conf[name]
def __len__(self):
return self._conf.__len__()
def __iter__(self):
return self._conf.__iter__()
def __setitem__(self, name, value):
self._conf.__setitem__(name, value)
def __delitem__(self, name):
self._conf.__delitem__(name)
class TeuthologyConfig(YamlConfig):
"""
This class is intended to unify teuthology's many configuration files and
objects. Currently it serves as a convenient interface to
~/.teuthology.yaml and nothing else.
"""
yaml_path = os.path.join(os.path.expanduser('~/.teuthology.yaml'))
_defaults = {
'archive_base': '/home/teuthworker/archive',
'archive_upload': None,
'archive_upload_key': None,
'archive_upload_url': None,
'automated_scheduling': False,
'reserve_machines': 5,
'ceph_git_base_url': 'https://github.com/ceph/',
'ceph_git_url': None,
'ceph_qa_suite_git_url': None,
'ceph_cm_ansible_git_url': None,
'use_conserver': False,
'conserver_master': 'conserver.front.sepia.ceph.com',
'conserver_port': 3109,
'gitbuilder_host': 'gitbuilder.ceph.com',
'githelper_base_url': 'http://git.ceph.com:8080',
'check_package_signatures': True,
'lab_domain': 'front.sepia.ceph.com',
'lock_server': 'http://paddles.front.sepia.ceph.com/',
'max_job_time': 259200, # 3 days
'nsupdate_url': 'http://nsupdate.front.sepia.ceph.com/update',
'results_server': 'http://paddles.front.sepia.ceph.com/',
'results_ui_server': 'http://pulpito.ceph.com/',
'results_sending_email': 'teuthology',
'results_timeout': 43200,
'src_base_path': os.path.expanduser('~/src'),
'verify_host_keys': True,
'watchdog_interval': 120,
'kojihub_url': 'http://koji.fedoraproject.org/kojihub',
'kojiroot_url': 'http://kojipkgs.fedoraproject.org/packages',
'koji_task_url': 'https://kojipkgs.fedoraproject.org/work/',
'baseurl_template': 'http://{host}/{proj}-{pkg_type}-{dist}-{arch}-{flavor}/{uri}',
'use_shaman': True,
'shaman_host': 'shaman.ceph.com',
'teuthology_path': None,
'suite_verify_ceph_hash': True,
'suite_allow_missing_packages': False,
'openstack': {
'clone': 'git clone http://github.com/ceph/teuthology',
'user-data': 'teuthology/openstack/openstack-{os_type}-{os_version}-user-data.txt',
'ip': '1.1.1.1',
'machine': {
'disk': 20,
'ram': 8000,
'cpus': 1,
},
'volumes': {
'count': 0,
'size': 1,
},
},
'rocketchat': None,
'sleep_before_teardown': 0,
}
def __init__(self, yaml_path=None):
super(TeuthologyConfig, self).__init__(yaml_path or self.yaml_path)
def get_ceph_cm_ansible_git_url(self):
return (self.ceph_cm_ansible_git_url or
self.ceph_git_base_url + 'ceph-cm-ansible.git')
def get_ceph_qa_suite_git_url(self):
return (self.ceph_qa_suite_git_url or
self.get_ceph_git_url())
def get_ceph_git_url(self):
return (self.ceph_git_url or
self.ceph_git_base_url + 'ceph-ci.git')
class JobConfig(YamlConfig):
pass
class FakeNamespace(YamlConfig):
"""
This class is meant to behave like an argparse Namespace
We'll use this as a stop-gap as we refactor commands but allow the tasks
to still be passed a single namespace object for the time being.
"""
def __init__(self, config_dict=None):
if not config_dict:
config_dict = dict()
self._conf = self._clean_config(config_dict)
set_config_attr(self)
def _clean_config(self, config_dict):
"""
Makes sure that the keys of config_dict are able to be used. For
example the "--" prefix of a docopt dict isn't valid and won't populate
correctly.
"""
result = dict()
for key, value in config_dict.items():
new_key = key
if new_key.startswith("--"):
new_key = new_key[2:]
elif new_key.startswith("<") and new_key.endswith(">"):
new_key = new_key[1:-1]
if "-" in new_key:
new_key = new_key.replace("-", "_")
result[new_key] = value
return result
def __getattr__(self, name):
"""
We need to modify this for FakeNamespace so that getattr() will
work correctly on a FakeNamespace instance.
"""
if name in self._conf:
return self._conf[name]
elif name in self._defaults:
return self._defaults[name]
raise AttributeError(name)
def __setattr__(self, name, value):
if name == 'teuthology_config':
object.__setattr__(self, name, value)
else:
super(FakeNamespace, self).__setattr__(name, value)
def __repr__(self):
return repr(self._conf)
def __str__(self):
return str(self._conf)
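# A small usage sketch (not part of the original module) of the docopt-style
# key clean-up performed by FakeNamespace._clean_config above. It assumes the
# module-level teuthology config object consulted by set_config_attr() below
# is available, as it is in the real module.
def _demo_fake_namespace():
    ns = FakeNamespace({'--verbose': True, '<archive-dir>': '/tmp/archive'})
    assert ns.verbose is True                 # '--verbose'     -> 'verbose'
    assert ns.archive_dir == '/tmp/archive'   # '<archive-dir>' -> 'archive_dir'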
def set_config_attr(obj):
"""
Set obj.teutholo
rveciana/BasemapTutorial | code_examples/backgrounds/shadedrelief.py | Python | cc0-1.0 | 273 | 0.032967
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
map = Basemap(llcrnrlon=-10.5,llcrnrlat=33,urcrnrlon=10.,urcrnrlat=46.,
resolution='i',
projection='cass', lat_0 = 39.5, lon_0 = 0.)
map.shadedrelief()
map.drawcoastlines()
plt.show()
fallen/Pytition | pytition/petition/tests/tests_DelSlugView.py | Python | bsd-3-clause | 1,208 | 0.004139
from django.test import TestCase
from django.urls import reverse
from .utils import add_default_data
from petition.models import PytitionUser, Permission, Organization, Petition
class DelSlugViewTest(TestCase):
"""Test del_slug view"""
@classmethod
def setUpTestData(cls):
add_default_data()
def login(self, name, password=None):
self.client.login(username=name, password=password if password else name)
self.pu = PytitionUser.objects.get(user__username=name)
return self.pu
def logout(self):
self.client.logout()
def test_DelSlugViewOk(self):
john = self.login("john")
john_perms = Permission.objects.get(organization__slugname="attac", user=john)
john_perms.can_modify_petitions = True
john_perms.save()
petition = Petition.objects.filter(org__slugname="attac").first()
slug = petition.slugmodel_set.first()
response = self.client.get(reverse("del_slug", kwargs={'petition_id': petition.id})+"?slugid="+str(slug.id),
follow=True)
self.assertRedirects(response, reverse("edit_petition", args=[petition.id]) + "#tab_social_network_form")
giserh/grab | test/grab_limit_option.py | Python | mit | 719 | 0
# coding: utf-8
from test.util import build_grab
from test.util import BaseGrabTestCase
class TestContentLimit(BaseGrabTestCase):
def setUp(self):
self.server.reset()
def test_nobody(self):
g = build_grab()
g.setup(nobody=True)
self.server.response['get.data'] = 'foo'
g.go(self.server.get_url())
self.assertEqual(b'', g.response.body)
self.assertTrue(len(g.response.head) > 0)
def test_body_maxsize(self):
g = build_grab()
g.setup(body_maxsize=100)
self.server.response['get.data'] = 'x' * 1024 * 1024
g.go(self.server.get_url())
# Should be less than 50kb
self.assertTrue(len(g.response.body) < 50000)
dennerlager/sepibrews | sepibrews/python_utils/converters.py | Python | gpl-3.0 | 6,552 | 0.004731
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import re
import six
import math
def to_int(input_, default=0, exception=(ValueError, TypeError), regexp=None):
'''
Convert the given input to an integer or return default
When trying to convert, the exceptions given in the exception parameter
are automatically caught and the default will be returned.
The regexp parameter allows for a regular expression to find the digits
in a string.
When True it will automatically match any digit in the string.
When a (regexp) object (has a search method) is given, that will be used.
When a string is given, re.compile will be run over it first
The last group of the regexp will be used as value
>>> to_int('abc')
0
>>> to_int('1')
1
>>> to_int('abc123')
0
>>> to_int('123abc')
0
>>> to_int('abc123', regexp=True)
123
>>> to_int('123abc', regexp=True)
123
>>> to_int('abc123abc', regexp=True)
123
>>> to_int('abc123abc456', regexp=True)
123
>>> to_int('abc123', regexp=re.compile('(\d+)'))
123
>>> to_int('123abc', regexp=re.compile('(\d+)'))
123
>>> to_int('abc123abc', regexp=re.compile('(\d+)'))
123
>>> to_int('abc123abc456', regexp=re.compile('(\d+)'))
123
>>> to_int('abc123', regexp='(\d+)')
123
>>> to_int('123abc', regexp='(\d+)')
123
>>> to_int('abc', regexp='(\d+)')
0
>>> to_int('abc123abc', regexp='(\d+)')
123
>>> to_int('abc123abc456', regexp='(\d+)')
123
>>> to_int('1234', default=1)
1234
>>> to_int('abc', default=1)
1
>>> to_int('abc', regexp=123)
Traceback (most recent call last):
...
TypeError: unknown argument for regexp parameter: 123
'''
if regexp is True:
regexp = re.compile('(\d+)')
elif isinstance(regexp, six.string_types):
regexp = re.compile(regexp)
elif hasattr(regexp, 'search'):
pass
elif regexp is not None:
raise TypeError('unknown argument for regexp parameter: %r' % regexp)
try:
if regexp:
match = regexp.search(input_)
if match:
input_ = match.groups()[-1]
return int(input_)
except exception:
return default
def to_float(input_, default=0, exception=(ValueError, TypeError),
regexp=None):
'''
Convert the given `input_` to a float or return default
When trying to convert, the exceptions given in the exception parameter
are automatically caught and the default will be returned.
The regexp parameter allows for a regular expression to find the digits
in a string.
When True it will automatically match any digit in the string.
When a (regexp) object (has a search method) is given, that will be used.
When a string is given, re.compile will be run over it first
The last group of the regexp will be used as value
>>> '%.2f' % to_float('abc')
'0.00'
>>> '%.2f' % to_float('1')
'1.00'
>>> '%.2f' % to_float('abc123.456', regexp=True)
'123.46'
>>> '%.2f' % to_float('abc123', regexp=True)
'123.00'
>>> '%.2f' % to_float('abc0.456', regexp=True)
'0.46'
>>> '%.2f' % to_float('abc123.456', regexp=re.compile('(\d+\.\d+)'))
'123.46'
>>> '%.2f' % to_float('123.456abc', regexp=re.compile('(\d+\.\d+)'))
'123.46'
>>> '%.2f' % to_float('abc123.46abc', regexp=re.compile('(\d+\.\d+)'))
'123.46'
>>> '%.2f' % to_float('abc123abc456', regexp=re.compile('(\d+(\.\d+|))'))
'123.00'
>>> '%.2f' % to_float('abc', regexp='(\d+)')
'0.00'
>>> '%.2f' % to_float('abc123', regexp='(\d+)')
'123.00'
>>> '%.2f' % to_float('123abc', regexp='(\d+)')
'123.00'
>>> '%.2f' % to_float('abc123abc', regexp='(\d+)')
'123.00'
>>> '%.2f' % to_float('abc123abc456', regexp='(\d+)')
'123.00'
>>> '%.2f' % to_float('1234', default=1)
'1234.00'
>>> '%.2f' % to_float('abc', default=1)
'1.00'
>>> '%.2f' % to_float('abc', regexp=123)
Traceback (most recent call last):
...
TypeError: unknown argument for regexp parameter
'''
if regexp is True:
regexp = re.compile('(\d+(\.\d+|))')
elif isinstance(regexp, six.string_types):
regexp = re.compile(regexp)
elif hasattr(regexp, 'search'):
pass
elif regexp is not None:
raise TypeError('unknown argument for regexp parameter')
try:
if regexp:
match = regexp.search(input_)
if match:
input_ = match.group(1)
return float(input_)
except exception:
return default
def to_unicode(input_, encoding='utf-8', errors='replace'):
'''Convert objects to unicode, if needed decodes string with the given
encoding and errors settings.
:rtype: unicode
>>> to_unicode(b'a')
'a'
>>> to_unicode('a')
'a'
>>> to_unicode(u'a')
'a'
>>> class Foo(object): __str__ = lambda s: u'a'
>>> to_unicode(Foo())
'a'
>>> to_unicode(Foo)
"<class 'python_utils.converters.Foo'>"
'''
if isinstance(input_, six.binary_type):
input_ = input_.decode(encoding, errors)
else:
input_ = six.text_type(input_)
return input_
def to_str(input_, encoding='utf-8', errors='replace'):
'''Convert objects to string, encodes to the given encoding
:rtype: str
>>> to_str('a')
b'a'
>>> to_str(u'a')
b'a'
>>> to_str(b'a')
b'a'
>>> class Foo(object): __str__ = lambda s: u'a'
>>> to_str(Foo())
'a'
>>> to_str(Foo)
"<class 'python_utils.converters.Foo'>"
'''
if isinstance(input_, six.binary_type):
pass
else:
if not hasattr(input_, 'encode'):
input_ = six.text_type(input_)
input_ = input_.encode(encoding, errors)
return input_
def scale_1024(x, n_prefixes):
'''Scale a number down to a suitable size, based on powers of 1024.
Returns the scaled number and the power of 1024 used.
Use to format numbers of bytes to KiB, MiB, etc.
>>> scale_1024(310, 3)
(310.0, 0)
>>> scale_1024(2048, 3)
(2.0, 1)
>>> scale_1024(0, 2)
(0.0, 0)
>>> scale_1024(0.5, 2)
(0.5, 0)
>>> scale_1024(1, 2)
(1.0, 0)
'''
if x <= 0:
power = 0
else:
power = min(int(math.log(x, 2) / 10), n_prefixes - 1)
scaled = float(x) / (2 ** (10 * power))
return scaled, power
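# A minimal usage sketch (not part of the original module): pairing scale_1024
# above with a unit table to render human-readable byte counts. The helper
# name format_bytes is illustrative only.
_BYTE_UNITS = ['B', 'KiB', 'MiB', 'GiB', 'TiB']
def format_bytes(n):
    scaled, power = scale_1024(n, len(_BYTE_UNITS))
    return '%.1f %s' % (scaled, _BYTE_UNITS[power])
# format_bytes(2048) -> '2.0 KiB'; format_bytes(3 * 2**20) -> '3.0 MiB'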
zzyyfff/doorbot | doorbot.py | Python | mit | 918 | 0.003268
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 25 19:54:02 2017
@author: jonathan
"""
import os
from flask import Flask, request, Response
from slackclient import SlackClient
from twilio.twiml.messaging_response import MessagingResponse
from twilio.rest import Client
TWILIO_NUMBER = os.environ.get('TWILIO_NUMBER', None)
app = Flask(__name__)
slack_client = SlackClient(os.environ.get('SLACK_TOKEN', None))
twilio_client = Client()
@app.route('/twilio', methods=['POST'])
def twilio_post():
response = MessagingResponse()
message = request.form['Body']
slack_client.api_call("ch
|
at.postMessage", channel="#thedoor",
text=message, username='doorbot',
icon_emoji=':robot_face:')
# message="Recieved!"
# response.message(message)
return str(response)
if __name__ == '__main__':
app.run(port=(os.environ.get('PORT', None)), debug=True)
cfreundl/o3d3xx-python | o3d3xx/__init__.py | Python | mit | 40 | 0.025
from .rpc import *
from .pcic import *
tjyang/vitess | py/vttest/run_local_database.py | Python | bsd-3-clause | 3,537 | 0.007351
#!/usr/bin/env python
"""Command-line tool for starting a local Vitess database for testing.
USAGE:
$ run_local_database --port 12345 \
--topology test_keyspace/-80:test_keyspace_0,test_keyspace/80-:test_keyspace_1 \
--schema_dir /path/to/schema/dir
It will run the tool, logging to stderr. On stdout, a small json structure
can be waited on and then parsed by the caller to figure out how to reach
the vtgate process.
Once done with the test, send an empty line to this process for it to clean-up,
and then just wait for it to exit.
"""
import json
import logging
import optparse
import os
import re
import sys
from vttest import environment
from vttest import local_database
from vttest import mysql_flavor
from vttest import vt_processes
shard_exp = re.compile(r'(.+)/(.+):(.+)')
def main(port, topology, schema_dir, vschema, mysql_only):
shards = []
for shard in topology.split(','):
m = shard_exp.match(shard)
if m:
shards.append(
vt_processes.ShardInfo(m.group(1), m.group(2), m.group(3)))
else:
sys.stderr.write('invalid --shard flag format: %s\n' % shard)
sys.exit(1)
environment.base_port = port
with local_database.LocalDatabase(shards, schema_dir, vschema, mysql_only) as local_db:
print json.dumps(local_db.config())
sys.stdout.flush()
try:
raw_input()
except EOFError:
sys.stderr.write(
'WARNING: %s: No empty line was received on stdin.'
' Instead, stdin was closed and the cluster will be shut down now.'
' Make sure to send the empty line instead to proactively shutdown'
' the local cluster. For example, did you forget the shutdown in'
' your test\'s tearDown()?\n' % os.path.basename(__file__))
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option(
'-p', '--port', type='int',
help='Port to use for vtcombo. If this is 0, a random port '
'will be chosen.')
parser.add_option(
'-t', '--topology',
help='Define which shards exist in the test topology in the'
' form <keyspace>/<shardrange>:<dbname>,... The dbname'
' must be unique among all shards, since they share'
' a MySQL instance in the test environment.')
parser.add_option(
'-s', '--schema_dir',
help='Directory for initial schema files. Within this dir,'
' there should be a subdir for each keyspace. Within'
' each keyspace dir, each file is executed as SQL'
' after the database is created on each shard.'
' If the directory contains a vschema.json file, it'
' will be used as the vschema for the V3 API.')
parser.add_option(
'-e', '--vschema',
help='If this file is specified, it will be used'
' as the vschema for the V3 API.')
parser.add_option(
'-m', '--mysql_only', action='store_true',
help='If this flag is set only mysql is initialized.'
' The rest of the vitess components are not started.'
' Also, the output specifies the mysql unix socket'
' instead of the vtgate port.')
parser.add_option(
'-v', '--verbose', action='store_true',
help='Display extra error messages.')
(options, args) = parser.parse_args()
if options.verbose:
logging.getLogger().setLevel(logging.DEBUG)
# This will set the flavor based on the MYSQL_FLAVOR env var,
# or default to MariaDB.
mysql_flavor.set_mysql_flavor(None)
main(options.port, options.topology, options.schema_dir, options.vschema, options.mysql_only)
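# A small illustration (assumed, not part of the original tool) of how the
# --topology flag is parsed by shard_exp above: each comma-separated entry of
# the form <keyspace>/<shardrange>:<dbname> yields one shard definition.
def _demo_parse_topology(topology='test_keyspace/-80:ks_0,test_keyspace/80-:ks_1'):
    # Returns [('test_keyspace', '-80', 'ks_0'), ('test_keyspace', '80-', 'ks_1')]
    return [shard_exp.match(entry).groups() for entry in topology.split(',')]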
facebookexperimental/eden | eden/hg-server/edenscm/mercurial/match.py | Python | gpl-2.0 | 53,143 | 0.000753
# Portions Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# match.py - filename matching
#
# Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import, print_function
import copy
import os
import re
from bindings import pathmatcher
from . import error, pathutil, pycompat, util
from .i18n import _
from .pycompat import decodeutf8
allpatternkinds = (
"re",
"glob",
"path",
"relglob",
"relpath",
"relre",
"listfile",
"listfile0",
"set",
"include",
"subinclude",
"rootfilesin",
)
cwdrelativepatternkinds = ("relpath", "glob")
propertycache = util.propertycache
def _rematcher(regex):
"""compile the regexp with the best available regexp engine and return a
matcher function"""
m = util.re.compile(regex)
try:
# slightly faster, provided by facebook's re2 bindings
return m.test_match
except AttributeError:
return m.match
def _expandsets(kindpats, ctx):
"""Returns the kindpats list with the 'set' patterns expanded."""
fset = set()
other = []
for kind, pat, source in kindpats:
if kind == "set":
if not ctx:
raise error.ProgrammingError("fileset expression with no " "context")
s = ctx.getfileset(pat)
fset.update(s)
continue
other.append((kind, pat, source))
return fset, other
def _expandsubinclude(kindpats, root):
"""Returns the list of subinclude matcher args and the kindpats without the
subincludes in it."""
relmatchers = []
other = []
for kind, pat, source in kindpats:
if kind == "subinclude":
sourceroot = pathutil.dirname(util.normpath(source))
pat = util.pconvert(pat)
path = pathutil.join(sourceroot, pat)
newroot = pathutil.dirname(path)
matcherargs = (newroot, "", [], ["include:%s" % path])
prefix = pathutil.canonpath(root, root, newroot)
if prefix:
prefix += "/"
relmatchers.append((prefix, matcherargs))
else:
other.append((kind, pat, source))
return relmatchers, other
def _kindpatsalwaysmatch(kindpats):
""" "Checks whether the kindspats match everything, as e.g.
'relpath:.' does.
"""
for kind, pat, source in kindpats:
# TODO: update me?
if pat != "" or kind not in ["relpath", "glob"]:
return False
return True
def match(
root,
cwd,
patterns=None,
include=None,
exclude=None,
default="glob",
exact=False,
auditor=None,
ctx=None,
warn=None,
badfn=None,
icasefs=False,
):
"""build an object to match a set of file patterns
arguments:
root - the canonical root of the tree you're matching against
cwd - the current working directory, if relevant
patterns - patterns to find
include - patterns to include (unless they are excluded)
exclude - patterns to exclude (even if they are included)
default - if a pattern in patterns has no explicit type, assume this one
exact - patterns are actually filenames (include/exclude still apply)
warn - optional function used for printing warnings
badfn - optional bad() callback for this matcher instead of the default
icasefs - make a matcher for wdir on case insensitive filesystems, which
normalizes the given patterns to the case in the filesystem
a pattern is one of:
'glob:<glob>' - a glob relative to cwd
're:<regexp>' - a regular expression
'path:<path>' - a path relative to repository root, which is matched
recursively
'rootfilesin:<path>' - a path relative to repository root, which is
matched non-recursively (will not match subdirectories)
'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
'relpath:<path>' - a path relative to cwd
'relre:<regexp>' - a regexp that needn't match the start of a name
'set:<fileset>' - a fileset expression
'include:<path>' - a file of patterns to read and include
'subinclude:<path>' - a file of patterns to match against files under
the same directory
'<something>' - a pattern of the specified default type
"""
if auditor is None:
auditor = pathutil.pathauditor(root)
normalize = _donormalize
if icasefs:
if exact:
raise error.ProgrammingError(
"a case-insensitive exact matcher " "doesn't make sense"
)
dirstate = ctx.repo().dirstate
dsnormalize = dirstate.normalize
def normalize(patterns, default, root, cwd, auditor, warn):
kp = _donormalize(patterns, default, root, cwd, auditor, warn)
kindpats = []
for kind, pats, source in kp:
if kind not in ("re", "relre"): # regex can't be normalized
p = pats
pats = dsnormalize(pats)
# Preserve the original to handle a case only rename.
if p != pats and p in dirstate:
kindpats.append((kind, p, source))
kindpats.append((kind, pats, source))
return kindpats
if exact:
m = exactmatcher(root, cwd, patterns, badfn)
elif patterns:
kindpats = normalize(patterns, default, root, cwd, auditor, warn)
if _kindpatsalwaysmatch(kindpats):
m = alwaysmatcher(root, cwd, badfn, relativeuipath=True)
else:
m = patternmatcher(root, cwd, kindpats, ctx=ctx, badfn=badfn)
else:
# It's a little strange that no patterns means to match everything.
# Consider changing this to match nothing (probably using nevermatcher).
m = alwaysmatcher(root, cwd, badfn)
if include:
kindpats = normalize(include, "glob", root, cwd, auditor, warn)
im = includematcher(root, cwd, kindpats, ctx=ctx, badfn=None)
m = intersectmatchers(m, im)
if exclude:
kindpats = normalize(exclude, "glob", root, cwd, auditor, warn)
em = includematcher(root, cwd, kindpats, ctx=ctx, badfn=None)
m = differencematcher(m, em)
return m
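# A brief usage sketch (illustrative only, not part of this module): build a
# matcher over a repository root that takes Python files everywhere but drops
# anything under tests/. Paths given to the matcher are repo-relative; the
# expected results are noted as comments rather than asserted.
def _demo_match(repo_root):
    m = match(repo_root, repo_root,
              patterns=['glob:**.py'],
              exclude=['path:tests'])
    return m('setup.py'), m('tests/test_foo.py')  # expected: (True, False)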
def exact(root, cwd, files, badfn=None):
return exactmatcher(root, cwd, files, badfn=badfn)
def always(root, cwd):
return alwaysmatcher(root, cwd)
def never(root, cwd):
return nevermatcher(root, cwd)
def union(matches, root, cwd):
"""Union a list of matchers.
If the list is empty, return nevermatcher.
If the list only contains one non-None value, return that matcher.
Otherwise return a union matcher.
"""
matches = list(filter(None, matches))
if len(matches) == 0:
return nevermatcher(root, cwd)
elif len(matches) == 1:
return matches[0]
else:
return unionmatcher(matches)
def badmatch(match, badfn):
"""Make a copy of the given matcher, replacing its bad method with the given
one.
"""
m = copy.copy(match)
m.bad = badfn
return m
def _donormalize(patterns, default, root, cwd, auditor, warn):
"""Convert 'kind:pat' from the patterns list to tuples with kind and
normalized and rooted patterns and with listfiles expanded."""
kindpats = []
for kind, pat in [_patsplit(p, default) for p in patterns]:
if kind in cwdrelativepatternkinds:
pat = pathutil.canonpath(root, cwd, pat, auditor)
elif kind in ("relglob", "path", "rootfilesin"):
pat = util.normpath(pat)
elif kind in ("listfile", "listfile0"):
try:
files = decodeutf8(util.readfile(pat))
if kind == "listfile0":
files = files.split("\0")
else:
files = files.splitlines()
files = [f for f in file
chrisxue815/leetcode_python | problems/test_1348.py | Python | unlicense | 1,378 | 0.000726
import unittest
from typing import List
import sortedcontainers
import utils
class TweetCounts:
def __init__(self):
self.tweets = {}
def recordTweet(self, tweetName: str, time: int) -> None:
if tweetName in self.tweets:
times = self.tweets[tweetName]
else:
self.tweets[tweetName] = times = sortedcontainers.SortedList()
times.add(time)
def getTweetCountsPerFrequency(self, freq: str, tweetName: str, startTime: int, endTime: int) -> List[int]:
if tweetName not in self.tweets:
return []
if freq == 'minute':
step = 60
elif freq == 'hour':
step = 3600
else:
step = 86400
times = self.tweets[tweetName]
index = times.bisect_left(startTime)
endTime += 1
result = []
for start in range(startTime, endTime, step):
end = min(endTime, start + step)
count = 0
while index < len(times):
time = times[index]
if time >= end:
break
count += 1
index += 1
result.append(count)
return result
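# A small usage sketch (assumed, not part of the original solution) showing
# the expected bucketing behaviour of TweetCounts for a single tweet name.
def _demo_tweet_counts():
    tc = TweetCounts()
    for t in (0, 60, 10):
        tc.recordTweet('tweet3', t)
    assert tc.getTweetCountsPerFrequency('minute', 'tweet3', 0, 59) == [2]
    assert tc.getTweetCountsPerFrequency('minute', 'tweet3', 0, 60) == [2, 1]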
class Test(unittest.TestCase):
def test(self):
utils.test_invocations(self, __file__, TweetCounts)
if __name__ == '__main__':
unittest.main()
axbaretto/beam | sdks/python/.tox/docs/lib/python2.7/site-packages/sphinx/pycode/nodes.py | Python | apache-2.0 | 6,392 | 0
# -*- coding: utf-8 -*-
"""
sphinx.pycode.nodes
~~~~~~~~~~~~~~~~~~~
Parse tree node implementations.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
class BaseNode(object):
"""
Node superclass for both terminal and nonterminal nodes.
"""
parent = None
def _eq(self, other):
raise NotImplementedError
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self._eq(other)
def __ne__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return not self._eq(other)
__hash__ = None
def get_prev_sibling(self):
"""Return previous child in parent's children, or None."""
if self.parent is None:
return None
for i, child in enumerate(self.parent.children):
if child is self:
if i == 0:
return None
return self.parent.children[i - 1]
def get_next_sibling(self):
"""Return next child in parent's children, or None."""
if self.parent is None:
return None
for i, child in enumerate(self.parent.children):
if child is self:
try:
return self.parent.children[i + 1]
except IndexError:
return None
def get_prev_leaf(self):
"""Return the leaf node that precedes this node in the parse tree."""
def last_child(node):
if isinstance(node, Leaf):
return node
elif not node.children:
return None
else:
return last_child(node.children[-1])
if self.parent is None:
return None
prev = self.get_prev_sibling()
if isinstance(prev, Leaf):
return prev
elif prev is not None:
return last_child(prev)
return self.parent.get_prev_leaf()
def get_next_leaf(self):
"""Return self if leaf, otherwise the leaf node that succeeds this
node in the parse tree.
"""
node = self
while not isinstance(node, Leaf):
assert node.children
node = node.children[0]
return node
def get_lineno(self):
"""Return the line number which generated the invocant node."""
return self.get_next_leaf().lineno
def get_prefix(self):
"""Return the prefix of the next leaf node."""
# only leaves carry a prefix
return self.get_next_leaf().prefix
class Node(BaseNode):
"""
Node implementation for nonterminals.
"""
def __init__(self, type, children, context=None):
# type of nonterminals is >= 256
# assert type >= 256, type
self.type = type
self.children = list(children)
for ch in self.children:
# assert ch.parent is None, repr(ch)
ch.parent = self
def __repr__(self):
return '%s(%s, %r)' % (self.__class__.__name__,
self.type, self.children)
def __str__(self):
"""This reproduces the input source exactly."""
return ''.join(map(str, self.children))
def _eq(self, other):
return (self.type, self.children) == (other.type, other.children)
# support indexing the node directly instead of .children
def __getitem__(self, index):
return self.children[index]
def __iter__(self):
return iter(self.children)
def __len__(self):
return len(self.children)
class Leaf(BaseNode):
"""
Node implementation for leaf nodes (terminals).
"""
prefix = '' # Whitespace and comments preceding this token in the input
lineno = 0 # Line where this token starts in the input
column = 0 # Column where this token starts in the input
def __init__(self, type, value, context=None):
# type of terminals is below 256
# assert 0 <= type < 256, type
self.type = type
self.value = value
if context is not None:
self.prefix, (self.lineno, self.column) = context
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__,
self.type, self.value, self.prefix)
def __str__(self):
"""This reproduces the input source exactly."""
return self.prefix + str(self.value)
def _eq(self, other):
"""Compares two nodes for equality."""
return (self.type, self.value) == (other.type, other.value)
def convert(grammar, raw_node):
"""Convert raw node to a Node or Leaf instance."""
type, value, context, children = raw_node
if children or type in grammar.number2symbol:
# If there's exactly one child, return that child instead of
# creating a new node.
if len(children) == 1:
return children[0]
return Node(type, children, context=context)
else:
return Leaf(type, value, context=context)
def nice_repr(node, number2name, prefix=False):
def _repr(node):
if isinstance(node, Leaf):
return "%s(%r)" % (number2name[node.type], node.value)
else:
return "%s(%s)" % (number2name[node.type],
', '.join(map(_repr, node.children)))
def _prepr(node):
if isinstance(node, Leaf):
return "%s(%r, %r)" % (number2name[node.type],
node.prefix, node.value)
else:
return "%s(%s)" % (number2name[node.type],
', '.join(map(_prepr, node.children)))
return (prefix and _prepr or _repr)(node)
class NodeVisitor(object):
def __init__(self, number2name, *args):
self.number2name = number2name
self.init(*args)
def init(self, *args):
pass
def visit(self, node):
"""Visit a node."""
method = 'visit_' + self.number2name[node.type]
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
if isinstance(node, Node):
for child in node:
self.visit(child)
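# A minimal sketch (not part of sphinx) of how NodeVisitor dispatch works: a
# subclass defines visit_<symbol-name> methods, where the symbol names come
# from the number2name mapping passed to the constructor, and anything else
# falls back to generic_visit. The 'NAME' symbol below is only an example.
class _LeafValueCollector(NodeVisitor):
    def init(self):
        self.values = []
    def visit_NAME(self, node):
        # Collect the textual value of every NAME terminal in the tree.
        self.values.append(node.value)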
blueyed/coveragepy | tests/test_summary.py | Python | apache-2.0 | 33,253 | 0.000872
# coding: utf-8
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Test text-based summary reporting for coverage.py"""
import glob
import os
import os.path
import py_compile
import re
import coverage
from coverage import env
from coverage.backward import StringIO
from coverage.config import CoverageConfig
from coverage.control import Coverage
from coverage.data import CoverageData
from coverage.misc import CoverageException, output_encoding
from coverage.summary import SummaryReporter
from tests.coveragetest import CoverageTest, TESTS_DIR, UsingModulesMixin
class SummaryTest(UsingModulesMixin, CoverageTest):
"""Tests of the text summary reporting for coverage.py."""
def make_mycode(self):
"""Make the mycode.py file when needed."""
self.make_file("mycode.py", """\
import covmod1
import covmodzip1
a = 1
print('done')
""")
self.omit_site_packages()
def omit_site_packages(self):
"""Write a .coveragerc file that will omit site-packages from reports."""
self.make_file(".coveragerc", """\
[report]
omit = */site-packages/*
""")
def test_report(self):
self.make_mycode()
out = self.run_command("coverage run mycode.py")
self.assertEqual(out, 'done\n')
report = self.report_from_command("coverage report")
# Name Stmts Miss Cover
# ------------------------------------------------------------------
# c:/ned/coverage/tests/modules/covmod1.py 2 0 100%
# c:/ned/coverage/tests/zipmods.zip/covmodzip1.py 2 0 100%
# mycode.py 4 0 100%
# ------------------------------------------------------------------
# TOTAL 8 0 100%
self.assertNotIn("/coverage/__init__/", report)
self.assertIn("/tests/modules/covmod1.py ", report)
self.assertIn("/tests/zipmods.zip/covmodzip1.py ", report)
self.assertIn("mycode.py ", report)
self.assertEqual(self.last_line_squeezed(report), "TOTAL 8 0 100%")
def test_report_just_one(self):
# Try reporting just one module
self.make_mycode()
self.run_command("coverage run mycode.py")
report = self.report_from_command("coverage report mycode.py")
# Name Stmts Miss Cover
# -------------------------------
# mycode.py 4 0 100%
self.assertEqual(self.line_count(report), 3)
self.assertNotIn("/coverage/", report)
self.assertNotIn("/tests/modules/covmod1.py ", report)
self.assertNotIn("/tests/zipmods.zip/covmodzip1.py ", report)
self.assertIn("mycode.py ", report)
self.assertEqual(self.last_line_squeezed(report), "mycode.py 4 0 100%")
def test_report_wildcard(self):
# Try reporting using wildcards to get the modules.
self.make_mycode()
self.run_command("coverage run mycode.py")
report = self.report_from_command("coverage report my*.py")
# Name Stmts Miss Cover
# -------------------------------
# mycode.py 4 0 100%
self.assertEqual(self.line_count(report), 3)
self.assertNotIn("/coverage/", report)
self.assertNotIn("/tests/modules/covmod1.py ", report)
self.assertNotIn("/tests/zipmods.zip/covmodzip1.py ", report)
self.assertIn("mycode.py ", report)
self.assertEqual(self.last_line_squeezed(report), "mycode.py 4 0 100%")
def test_report_omitting(self):
# Try reporting while omitting some modules
self.make_mycode()
self.run_command("coverage run mycode.py")
omit = '{}/*,*/site-packages/*'.format(TESTS_DIR)
report = self.report_from_command("coverage report --omit '{}'".format(omit))
# Name Stmts Miss Cover
# -------------------------------
# mycode.py 4 0 100%
self.assertEqual(self.line_count(report), 3)
self.assertNotIn("/coverage/", report)
self.assertNotIn("/tests/modules/covmod1.py ", report)
self.assertNotIn("/tests/zipmods.zip/covmodzip1.py ", report)
self.assertIn("mycode.py ", report)
self.assertEqual(self.last_line_squeezed(report), "mycode.py 4 0 100%")
def test_report_including(self):
# Try reporting while including some modules
self.make_mycode()
self.run_command("coverage run mycode.py")
report = self.report_from_command("coverage report --include=mycode*")
# Name Stmts Miss Cover
# -------------------------------
# mycode.py 4 0 100%
self.assertEqual(self.line_count(report), 3)
self.assertNotIn("/coverage/", report)
self.assertNotIn("/tests/modules/covmod1.py ", report)
self.assertNotIn("/tests/zipmods.zip/covmodzip1.py ", report)
self.assertIn("mycode.py ", report)
self.assertEqual(self.last_line_squeezed(report), "mycode.py 4 0 100%")
def test_run_source_vs_report_include(self):
# https://bitbucket.org/ned/coveragepy/issues/621/include-ignored-warning-when-using
self.make_file(".coveragerc", """\
[run]
source = .
[report]
include = mod/*,tests/*
""")
# It should be OK to use that configuration.
cov = coverage.Coverage()
with self.assert_warnings(cov, []):
cov.start()
cov.stop() # pragma: nested
def test_run_omit_vs_report_omit(self):
# https://bitbucket.org/ned/coveragepy/issues/622/report-omit-overwrites-run-omit
# report:omit shouldn't clobber run:omit.
self.make_mycode()
self.make_file(".coveragerc", """\
[run]
omit = */covmodzip1.py
[report]
omit = */covmod1.py
""")
self.run_command("coverage run mycode.py")
# Read the data written, to see that the right files have been omitted from running.
covdata = CoverageData()
covdata.read_file(".coverage")
files = [os.path.basename(p) for p in covdata.measured_files()]
self.assertIn("covmod1.py", files)
self.assertNotIn("covmodzip1.py", files)
def test_report_branches(self):
self.make_file("mybranch.py", """\
def branch(x):
if x:
print("x")
return x
branch(1)
""")
out = self.run_command("coverage run --branch mybranch.py")
self.assertEqual(out, 'x\n')
report = self.report_from_command("coverage report")
# Name Stmts Miss Branch BrPart Cover
# -----------------------------------------------
# mybranch.py 5 0 2 1 85%
self.assertEqual(self.line_count(report), 3)
self.assertIn("mybranch.py ", re
|
port)
self.assertEqual(self.last_line_squeezed(report), "mybranch.py 5 0 2 1 86%")
def test_report_show_missing(self):
self.make_file("mymissing.py", """\
def missing(x, y):
if x:
print("x")
return x
if y:
print("y")
try:
print("z")
1/0
print("Never!")
except ZeroDivisionError:
pass
return x
missing(0, 1)
""")
out = self.run_command("coverage run mymissing.py")
self.assertEqual(out, 'y\nz\n')
report = self.report_from_command("coverage report --show-missing")
# Name Stmts Miss Cover Missing
# --------------------------------------------
# mymi
acil-bwh/SlicerCIP | Scripted/attic/PicasaSnap/gdata/oauth/__init__.py | Python | bsd-3-clause | 19,714 | 0.003348
import cgi
import urllib.request, urllib.parse, urllib.error
import time
import random
import urllib.parse
import hmac
import binascii
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
# Generic exception class
class OAuthError(RuntimeError):
def __init__(self, message='OAuth error occurred.'):
self.message = message
# optional WWW-Authenticate header (401 error)
def build_authenticate_header(realm=''):
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
# url escape
def escape(s):
# escape '/' too
return urllib.parse.quote(s, safe='~')
# util function: current timestamp
# seconds since epoch (UTC)
def generate_timestamp():
return int(time.time())
# util function: nonce
# pseudorandom number
def generate_nonce(length=8):
return ''.join([str(random.randint(0, 9)) for i in range(length)])
# OAuthConsumer is a data type that represents the identity of the Consumer
# via its shared secret with the Service Provider.
class OAuthConsumer(object):
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
# OAuthToken is a data type that represents an End User via either an access
# or request token.
class OAuthToken(object):
# access tokens and request tokens
key = None
secret = None
'''
key = the token
secret = the token secret
'''
def __init__(self, key, secret):
self.key = key
self.secret = secret
def to_string(self):
return urllib.parse.urlencode({'oauth_token': self.key, 'oauth_token_secret': self.secret})
# return a token from something like:
# oauth_token_secret=digg&oauth_token=digg
def from_string(s):
params = cgi.parse_qs(s, keep_blank_values=False)
key = params['oauth_token'][0]
secret = params['oauth_token_secret'][0]
return OAuthToken(key, secret)
from_string = staticmethod(from_string)
def __str__(self):
return self.to_string()
# OAuthRequest represents the request and can be serialized
class OAuthRequest(object):
'''
OAuth parameters:
- oauth_consumer_key
- oauth_token
- oauth_signature_method
- oauth_signature
- oauth_timestamp
- oauth_nonce
- oauth_version
... any additional parameters, as defined by the Service Provider.
'''
parameters = None # oauth parameters
http_method = HTTP_METHOD
http_url = None
version = VERSION
def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
self.http_method = http_method
self.http_url = http_url
self.parameters = parameters or {}
def set_parameter(self, parameter, value):
self.parameters[parameter] = value
def get_parameter(self, parameter):
try:
return self.parameters[parameter]
except:
raise OAuthError('Parameter not found: %s' % parameter)
def _get_timestamp_nonce(self):
return self.get_parameter('oauth_timestamp'), self.get_parameter('oauth_nonce')
# get any non-oauth parameters
def get_nonoauth_parameters(self):
parameters = {}
for k, v in self.parameters.items():
# ignore oauth parameters
if k.find('oauth_') < 0:
parameters[k] = v
return parameters
# serialize as a header for an HTTPAuth request
def to_header(self, realm=''):
auth_header = 'OAuth realm="%s"' % realm
# add the oauth parameters
if self.parameters:
for k, v in self.parameters.items():
if k[:6] == 'oauth_':
auth_header += ', %s="%s"' % (k, escape(str(v)))
return {'Authorization': auth_header}
# serialize as post data for a POST request
def to_postdata(self):
return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in self.parameters.items()])
# serialize as a url for a GET request
def to_url(self):
return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
# return a string that consists of all the parameters that need to be signed
def get_normalized_parameters(self):
params = self.parameters
try:
# exclude the signature if it exists
del params['oauth_signature']
except:
pass
key_values = list(params.items())
# sort lexicographically, first after key, then after value
key_values.sort()
# combine key value pairs in string and escape
return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in key_values])
# just uppercases the http method
def get_normalized_http_method(self):
return self.http_method.upper()
# parses the url and rebuilds it to be scheme://host/path
def get_normalized_http_url(self):
parts = urllib.parse.urlparse(self.http_url)
host = parts[1].lower()
if host.endswith(':80') or host.endswith(':443'):
host = host.split(':')[0]
url_string = '%s://%s%s' % (parts[0], host, parts[2]) # scheme, netloc, path
return url_string
# set the signature parameter to the result of build_signature
def sign_request(self, signature_method, consumer, token):
# set the signature method
self.set_parameter('oauth_signature_method', signature_method.get_name())
# set the signature
self.set_parameter('oauth_signature', self.build_signature(signature_method, consumer, token))
def build_signature(self, signature_method, consumer, token):
# call the build signature method within the signature method
return signature_method.build_signature(self, consumer, token)
def from_request(http_method, http_url, headers=None, parameters=None, query_string=None):
# combine multiple parameter sources
if parameters is None:
parameters = {}
# headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# check that the authorization header is OAuth
if auth_header.index('OAuth') > -1:
try:
# get the parameters from the header
header_params = OAuthRequest._split_header(auth_header)
parameters.update(header_params)
except:
raise OAuthError('Unable to parse OAuth parameters from Authorization header.')
# GET or POST query string
if query_string:
query_params = OAuthRequest._split_url_string(query_string)
parameters.update(query_params)
# URL parameters
param_str = urllib.parse.urlparse(http_url)[4] # query
url_params = OAuthRequest._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return OAuthRequest(http_method, http_url, parameters)
return None
from_request = staticmethod(from_request)
def from_consumer_and_token(oauth_consumer, token=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': oauth_consumer.key,
'oauth_timestamp': generate_timestamp(),
'oauth_nonce': generate_nonce(),
'oauth_version': OAuthRequest.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
return OAuthRequest(http_method, http_url, parameters)
from_consumer_and_token = staticmethod(from_consumer_and_token)
def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters =
herilalaina/scikit-learn | sklearn/metrics/tests/test_common.py | Python | bsd-3-clause | 43,823 | 0.000046
from __future__ import division, print_function
from functools import partial
from itertools import product
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import _num_samples
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
# TODO Curve are currently not covered by invariance test
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import roc_curve
from sklearn.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of datastructures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layout.
#
REGRESSION_METRICS = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"explained_variance_score": explained_variance_score,
"r2_score": partial(r2_score, multioutput='variance_weighted'),
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"balanced_accuracy_score": balanced_accuracy_score,
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
"confusion_matrix": confusion_matrix,
"hamming_loss": hamming_loss,
"jaccard_similarity_score": jaccard_similarity_score,
"unnormalized_jaccard_similarity_score":
partial(jaccard_similarity_score, normalize=False),
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
# These are needed to test averaging
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
"cohen_kappa_score": cohen_kappa_score,
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"label_ranking_loss": label_ranking_loss,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"brier_score_loss": brier_score_loss,
"roc_auc_score": roc_auc_score,
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(r
|
oc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"macro_roc_auc": partial(roc_auc_score, average="macro"),
"average_precision_score": average_precision_score,
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"macro_average_precision_score":
partial(average_precision_score, average="macro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Those metrics don't support binary inputs
METRIC_UNDEFINED_BINARY = [
"samples_f0.5_score",
"samples_f1_score",
"samples_f2_score",
"samples_precision_
harishanand95/cockpit | test/avocado/seleniumlib.py | Python | lgpl-2.1 | 12,177 | 0.007062
#!/usr/bin/python2
""" SETUP tasks
# workaround for RHEL7
# curl https://copr.fedoraproject.org/coprs/lmr/Autotest/repo/epel-7/lmr-Autotest-epel-7.repo > /etc/yum.repos.d/lmr-Autotest-epel-7.repo
# yum --nogpgcheck -y install python-pip
# pip install selenium
yum --nogpgcheck -y install avocado python-selenium
adduser test
echo superhardpasswordtest5554 | passwd --stdin test
usermod -a -G wheel test
# in case of you would like to use selenium server in docker:
docker run -d -p 4444:4444 --name selenium-hub selenium/hub:2.48.2
docker run -d --link selenium-hub:hub selenium/node-chrome:2.48.2
docker run -d --link selenium-hub:hub selenium/node-firefox:2.48.2
systemctl start cockpit
# RUN AS
avocado run selenium-login.py
# OR ALTERNATIVELY with docker selenium server (BROWSER=firefox or chrome)
HUB=localhost BROWSER=chrome GUEST=`hostname -i` avocado run selenium-login.py
"""
import inspect
import selenium.webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
import os
import time
from avocado import Test
from timeoutlib import Retry
user = "test"
passwd = "superhardpasswordtest5554"
# path for storing selenium screenshots
actualpath = "."
# use javascript to generate clicks in the browsers and add more javascript checks for elements
# this prevents races where the test clicks in the wrong place because the page layout changed
javascript_operations = True
visible = EC.visibility_of_element_located
clickable = EC.element_to_be_clickable
invisible = EC.invisibility_of_element_located
frame = EC.frame_to_be_available_and_switch_to_it
class SeleniumTest(Test):
"""
:avocado: disable
"""
def setUp(self):
if not (os.environ.has_key("HUB") or os.environ.has_key("BROWSER")):
@Retry(attempts = 3, timeout = 30, error = Exception('Timeout: Unable to attach firefox driver'))
def connectfirefox():
self.driver = selenium.webdriver.Firefox()
connectfirefox()
guest_machine = 'localhost'
else:
selenium_hub = os.environ["HUB"] if os.environ.has_key("HUB") else "localhost"
browser = os.environ["BROWSER"] if os.environ.has_key("BROWSER") else "firefox"
if browser == "explorer":
browser = "internet explorer"
guest_machine = os.environ["GUEST"]
@Retry(attempts = 3, timeout = 30, error = Exception('Timeout: Unable to attach remote Browser on hub'))
def connectbrowser():
self.driver = selenium.webdriver.Remote(command_executor='http://%s:4444/wd/hub' % selenium_hub, desired_capabilities={'browserName': browser})
connectbrowser()
self.driver.set_window_size(1400, 1200)
self.driver.set_page_load_timeout(90)
# self.default_try is number of repeats for finding element
self.default_try = 40
# stored search function for each element to be able to refresh element in case of detached from DOM
self.element_wait_functions = { }
# self.default_explicit_wait is time for waiting for element
# default_explicit_wait * default_try = max time for waiting for element
self.default_explicit_wait = 1
@Retry(attempts = 3, timeout = 30, error = Exception('Timeout: Unable to get page'))
def connectwebpage():
self.driver.get('http://%s:9090' % guest_machine)
connectwebpage()
# if self.error evaluates to True when a test finishes,
# an error is raised and a screenshot generated
self.error = True
def tearDown(self):
if self.error:
screenshot_file = ""
try:
# use time.clock() to ensure that snapshot files are unique and ordered
# sample name is like: screenshot-teardown-172434.png
screenshot_file = "screenshotTeardown%s.png" % str(time.clock())[2:]
self.driver.save_screenshot(os.path.join(actualpath,screenshot_file))
self.log.error("Screenshot(teardown) - Wrote: " + screenshot_file)
self.get_debug_logs()
except Exception as e:
screenshot_file = "Unable to catch screenshot: {0}".format(screenshot_file)
raise Exception('ERR: Unable to store screenshot: %s' % screenshot_file, str(e))
try:
self.driver.close()
self.driver.quit()
except Exception as e:
self.get_debug_logs()
if self.error:
raise Exception('ERR: Unable to close WEBdriver', str(e))
else:
self.log.info('ERR: Unable to close WEBdriver: {0}'.format(e))
def get_debug_logs(self, logs=['browser','driver','client','server']):
max_line_log_count = 10
for log in logs:
receivedlog = [x for x in self.driver.get_log(log)][-max_line_log_count:]
if receivedlog:
self.log.info(">>>>> " + log)
for line in receivedlog:
self.log.info(" {0}".format(line))
def everything_loaded(self, element):
"""
This function is only for internal purposes:
It uses javascript to check that the data-loaded attribute is set on the element
"""
if javascript_operations:
return self.driver.execute_script("return arguments[0].getAttribute('data-loaded')", element)
else:
return True
def click(self, element):
failure = "CLICK: too many tries"
usedfunction = self.element_wait_functions[element] if element in self.element_wait_functions else None
for foo in range(0, self.default_try):
try:
if javascript_operations:
self.driver.execute_script("arguments[0].click();", element)
else:
element.click()
failure = None
break
except Exception as e:
failure = e
pass
try:
element = usedfunction() if usedfunction else element
self.everything_loaded(element)
except:
pass
if failure:
raise Exception('ERR: Unable to CLICK on element ', str(failure))
def send_keys(self, element, text, clear = True):
if clear:
element.clear()
element.send_keys(text)
if javascript_operations:
self.driver.execute_script('var ev = document.createEvent("Event"); ev.initEvent("change", true, false); arguments[0].dispatchEvent(ev);', element)
self.driver.execute_script('var ev = document.createEvent("Event"); ev.initEvent("keydown", true, false); arguments[0].dispatchEvent(ev);', element)
def check_box(self, element, checked=True):
if element.get_attribute('checked') != checked:
element.click()
def wait(self, method, text, baseelement, overridetry, fatal, cond, jscheck):
"""
This function is low level; tests should prefer the wait_* functions.
It stores the lookup function for each element in an internal dictionary, in case the
element is detached from the DOM and has to be re-fetched (-> self.element_wait_functions)
parameters:
method - used selenium method
text - what are you searching for
baseelement - use some element as root of tree, not self.driver
overridetry - change value of repeats
fatal - boolean if search is fatal or notice
cond - use selenium conditions (aliases are defined above class)
jscheck - use javascript to wait until the element has the data-loaded attribute; it is safer, but slower
"""
if not baseelement:
baseelement = self.driver
returned = None
cond = cond if cond else visible
internaltry = overridetry if overridetry else self.default_try
usedfunction = lambda :WebDriverWait(baseelement, self.default_explicit_wait).until(cond((method, text)))
for foo in range(0, internaltry):
try:
returned = usedf
CTSRD-CHERI/u-boot | tools/patman/test_checkpatch.py | Python | gpl-2.0 | 12,954 | 0.001933
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: GPL-2.0+
#
# Tests for U-Boot-specific checkpatch.pl features
#
# Copyright (c) 2011 The Chromium OS Authors.
#
import os
import tempfile
import unittest
from patman import checkpatch
from patman import gitutil
from patman import patchstream
from patman import series
from patman import commit
class Line:
def __init__(self, fname, text):
self.fname = fname
self.text = text
class PatchMaker:
def __init__(self):
self.lines = []
def add_line(self, fname, text):
self.lines.append(Line(fname, text))
def get_patch_text(self):
base = '''From 125b77450f4c66b8fd9654319520bbe795c9ef31 Mon Sep 17 00:00:00 2001
From: Simon Glass <sjg@chromium.org>
Date: Sun, 14 Jun 2020 09:45:14 -0600
Subject: [PATCH] Test commit
This is a test commit.
Signed-off-by: Simon Glass <sjg@chromium.org>
---
'''
lines = base.splitlines()
# Create the diffstat
change = 0
insert = 0
for line in self.lines:
lines.append(' %s | 1 +' % line.fname)
change += 1
insert += 1
lines.append(' %d files changed, %d insertions(+)' % (change, insert))
lines.append('')
# Create the patch info for each file
for line in self.lines:
lines.append('diff --git a/%s b/%s' % (line.fname, line.fname))
lines.append('index 7837d459f18..5ba7840f68e 100644')
lines.append('--- a/%s' % line.fname)
lines.append('+++ b/%s' % line.fname)
lines += ('''@@ -121,6 +121,7 @@ enum uclass_id {
UCLASS_W1, /* Dallas 1-Wire bus */
UCLASS_W1_EEPROM, /* one-wire EEPROMs */
UCLASS_WDT, /* Watchdog Timer driver */
+%s
UCLASS_COUNT,
UCLASS_INVALID = -1,
''' % line.text).splitlines()
lines.append('---')
lines.append('2.17.1')
return '\n'.join(lines)
def get_patch(self):
inhandle, inname = tempfile.mkstemp()
infd = os.fdopen(inhandle, 'w')
infd.write(self.get_patch_text())
infd.close()
return inname
def run_checkpatch(self):
return checkpatch.CheckPatch(self.get_patch(), show_types=True)
class TestPatch(unittest.TestCase):
"""Test the u_boot_line() function in checkpatch.pl"""
def testBasic(self):
"""Test basic filter operation"""
data='''
From 656c9a8c31fa65859d924cd21da920d6ba537fad Mon Sep 17 00:00:00 2001
From: Simon Glass <sjg@chromium.org>
Date: Thu, 28 Apr 2011 09:58:51 -0700
Subject: [PATCH (resend) 3/7] Tegra2: Add more clock support
This adds functions to enable/disable clocks and reset to on-chip peripherals.
cmd/pci.c:152:11: warning: format ‘%llx’ expects argument of type
‘long long unsigned int’, but argument 3 has type
‘u64 {aka long unsigned int}’ [-Wformat=]
BUG=chromium-os:13875
TEST=build U-Boot for Seaboard, boot
Change-Id: I80fe1d0c0b7dd10aa58ce5bb1d9290b6664d5413
Review URL: http://codereview.chromium.org/6900006
Signed-off-by: Simon Glass <sjg@chromium.org>
---
arch/arm/cpu/armv7/tegra2/Makefile | 2 +-
arch/arm/cpu/armv7/tegra2/ap20.c | 57 ++----
arch/arm/cpu/armv7/tegra2/clock.c | 163 +++++++++++++++++
'''
expected='''Message-Id: <19991231235959.0.I80fe1d0c0b7dd10aa58ce5bb1d9290b6664d5413@changeid>
From 656c9a8c31fa65859d924cd21da920d6ba537fad Mon Sep 17 00:00:00 2001
From: Simon Glass <sjg@chromium.org>
Date: Thu, 28 Apr 2011 09:58:51 -0700
Subject: [PATCH (resend) 3/7] Tegra2: Add more clock support
This adds functions to enable/disable clocks and reset to on-chip peripherals.
cmd/pci.c:152:11: warning: format ‘%llx’ expects argument of type
‘long long unsigned int’, but argument 3 has type
‘u64 {aka long unsigned int}’ [-Wformat=]
Signed-off-by: Simon Glass <sjg@chromium.org>
---
arch/arm/cpu/armv7/tegra2/Makefile | 2 +-
arch/arm/cpu/armv7/tegra2/ap20.c | 57 ++----
arch/arm/cpu/armv7/tegra2/clock.c | 163 +++++++++++++++++
'''
out = ''
inhandle, inname = tempfile.mkstemp()
infd = os.fdopen(inhandle, 'w', encoding='utf-8')
infd.write(data)
infd.close()
exphandle, expname = tempfile.mkstemp()
expfd = os.fdopen(exphandle, 'w', encoding='utf-8')
expfd.write(expected)
expfd.close()
# Normally by the time we call FixPatch we've already collected
# metadata. Here, we haven't, but at least fake up something.
# Set the "count" to -1 which tells FixPatch to use a bogus/fixed
# time for generating the Message-Id.
com = commit.Commit('')
com.change_id = 'I80fe1d0c0b7dd10aa58ce5bb1d9290b6664d5413'
com.count = -1
patchstream.FixPatch(None, inname, series.Series(), com)
rc = os.system('diff -u %s %s' % (inname, expname))
self.assertEqual(rc, 0)
os.remove(inname)
os.remove(expname)
def GetData(self, data_type):
data='''From 4924887af52713cabea78420eff03badea8f0035 Mon Sep 17 00:00:00 2001
From: Simon Glass <sjg@chromium.org>
Date: Thu, 7 Apr 2011 10:14:41 -0700
Subject: [PATCH 1/4] Add microsecond boot time measurement
This defines the basics of a new boot time measurement feature. This allows
logging of very accurate time measurements as the boot proceeds, by using
an available microsecond counter.
%s
---
README | 11 ++++++++
MAINTAINERS | 3 ++
common/bootstage.c | 50 ++++++++++++++++++++++++++++++++++++
include/bootstage.h | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++
include/common.h | 8 ++++++
5 files changed, 141 insertions(+), 0 deletions(-)
create mode 100644 common/bootstage.c
create mode 100644 include/bootstage.h
diff --git a/README b/README
index 6f3748d..f9e4e65 100644
--- a/README
+++ b/README
@@ -2026,6 +2026,17 @@ The following options need to be configured:
example, some LED's) on your board. At the moment,
the following checkpoints are implemented:
+- Time boot progress
+ CONFIG_BOOTSTAGE
+
+ Define this option to enable microsecond boot stage timing
+ on supported platforms. For this to work your platform
+ needs to define a function timer_get_us() which returns the
+ number of microseconds since reset. This would normally
+ be done in your SOC or board timer.c file.
+
+ You can add calls to bootstage_mark() to set time markers.
+
- Standalone program support:
CONFIG_STANDALONE_LOAD_ADDR
diff --git a/MAINTAINERS b/MAINTAINERS
index b167b028ec..beb7dc634f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -474,3 +474,8 @@ S: Maintained
T: git git://git.denx.de/u-boot.git
F: *
F: */
+
+BOOTSTAGE
+M: Simon Glass <sjg@chromium.org>
+L: u-boot@lists.denx.de
+F: common/bootstage.c
diff --git a/common/bootstage.c b/common/bootstage.c
new file mode 100644
index 0000000..2234c87
--- /dev/null
+++ b/common/bootstage.c
@@ -0,0 +1,37 @@
+%s
+/*
+ * Copyright (c) 2011, Google Inc. All rights reserved.
+ *
+ */
+
+/*
+ * This module records the progress of boot and arbitrary commands, and
+ * permits accurate timestamping of each. The records can optionally be
+ * passed to kernel in the ATAGs
+ */
+
+#include <common.h>
+
+struct bootstage_record {
+ u32 time_us;
+ const char *name;
+};
+
+static struct bootstage_record record[BOOTSTAGE_COUNT];
+
+u32 bootstage_mark(enum bootstage_id id, const char *name)
+{
+ struct bootstage_record *rec = &record[id];
+
+ /* Only record the first event for each */
+%sif (!rec->name) {
+ rec->time_us = (u32)timer_get_us();
+ rec->name = name;
+ }
+ if (!rec->name &&
+ %ssomething_else) {
+ rec->time_us = (u32)timer_get_us();
+ rec->name = name;
+ }
+%sreturn rec->time_us;
+}
--
1.7.3.1
'''
signoff = 'Signed-off-by: Simon Glass <sjg@chromium.org>\n'
license = '// SPDX-License-Identifier: GPL-2.0+'
tab = ' '
indent = ' '
if data_type == 'good':
pass
elif data_type == 'no-signoff':
signoff = ''
elif data_type == 'no-license':
license = ''
elif data_type == 'spaces':
tab = ' '
|
whitehorse-io/encarnia
|
pyenv/lib/python2.7/site-packages/twisted/names/test/test_dns.py
|
Python
|
mit
| 154,060
| 0.003778
|
# test-case-name: twisted.names.test.test_dns
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for twisted.names.dns.
"""
from __future__ import division, absolute_import
from io import BytesIO
import struct
from zope.interface.verify import verifyClass
from twisted.python.failure import Failure
from twisted.python.util import FancyEqMixin, FancyStrMixin
from twisted.internet import address, task
from twisted.internet.error import CannotListenError, ConnectionDone
from twisted.trial import unittest
from twisted.names import dns
from twisted.test import proto_helpers
from twisted.test.testutils import ComparisonTestsMixin
RECORD_TYPES = [
dns.Record_NS, dns.Record_MD, dns.Record_MF, dns.Record_CNAME,
dns.Record_MB, dns.Record_MG, dns.Record_MR, dns.Record_PTR,
dns.Record_DNAME, dns.Record_A, dns.Record_SOA, dns.Record_NULL,
dns.Record_WKS, dns.Record_SRV, dns.Record_AFSDB, dns.Record_RP,
dns.Record_HINFO, dns.Record_MINFO, dns.Record_MX, dns.Record_TXT,
dns.Record_AAAA, dns.Record_A6, dns.Record_NAPTR, dns.UnknownRecord,
]
class Ord2ByteTests(unittest.TestCase):
"""
Tests for L{dns._ord2bytes}.
"""
def test_ord2byte(self):
"""
L{dns._ord2byte} accepts an integer and returns a byte string of length
one with an ordinal value equal to the given integer.
"""
self.assertEqual(b'\x10', dns._ord2bytes(0x10))
class Str2TimeTests(unittest.TestCase):
"""
    Tests for L{dns.str2time}.
"""
def test_nonString(self):
"""
When passed a non-string object, L{dns.str2name} returns it unmodified.
"""
time = object()
self.assertIs(time, dns.str2time(time))
def test_seconds(self):
"""
Passed a string giving a number of seconds, L{dns.str2time} returns the
number of seconds represented. For example, C{"10S"} represents C{10}
seconds.
"""
self.assertEqual(10, dns.str2time("10S"))
def test_minutes(self):
"""
Like C{test_seconds}, but for the C{"M"} suffix which multiplies the
time value by C{60} (the number of seconds in a minute!).
"""
self.assertEqual(2 * 60, dns.str2time("2M"))
def test_hours(self):
"""
Like C{test_seconds}, but for the C{"H"} suffix which multiplies the
time value by C{3600}, the number of seconds in an hour.
"""
self.assertEqual(3 * 3600, dns.str2time("3H"))
def test_days(self):
"""
Like L{test_seconds}, but for the C{"D"} suffix which multiplies the
time value by C{86400}, the number of seconds in a day.
"""
self.assertEqual(4 * 86400, dns.str2time("4D"))
def test_weeks(self):
"""
Like L{test_seconds}, but for the C{"W"} suffix which multiplies the
time value by C{604800}, the number of seconds in a week.
"""
self.assertEqual(5 * 604800, dns.str2time("5W"))
def test_years(self):
"""
Like L{test_seconds}, but for the C{"Y"} suffix which multiplies the
time value by C{31536000}, the number of seconds in a year.
"""
self.assertEqual(6 * 31536000, dns.str2time("6Y"))
def test_invalidPrefix(self):
"""
If a non-integer prefix is given, L{dns.str2time} raises L{ValueError}.
"""
self.assertRaises(ValueError, dns.str2time, "fooS")
class NameTests(unittest.TestCase):
"""
Tests for L{Name}, the representation of a single domain name with support
for encoding into and decoding from DNS message format.
"""
def test_nonStringName(self):
"""
When constructed with a name which is neither C{bytes} nor C{str},
L{Name} raises L{TypeError}.
"""
self.assertRaises(TypeError, dns.Name, 123)
self.assertRaises(TypeError, dns.Name, object())
self.assertRaises(TypeError, dns.Name, [])
def test_unicodeName(self):
"""
L{dns.Name} automatically encodes unicode domain name using C{idna}
encoding.
"""
name = dns.Name(u'\u00e9chec.example.org')
self.assertIsInstance(name.name, bytes)
self.assertEqual(b'xn--chec-9oa.example.org', name.name)
def test_decode(self):
"""
L{Name.decode} populates the L{Name} instance with name information read
from the file-like object passed to it.
"""
n = dns.Name()
n.decode(BytesIO(b"\x07example\x03com\x00"))
self.assertEqual(n.name, b"example.com")
def test_encode(self):
"""
L{Name.encode} encodes its name information and writes it to the
file-like object passed to it.
"""
name = dns.Name(b"foo.example.com")
stream = BytesIO()
name.encode(stream)
self.assertEqual(stream.getvalue(), b"\x03foo\x07example\x03com\x00")
def test_encodeWithCompression(self):
"""
If a compression dictionary is passed to it, L{Name.encode} uses offset
information from it to encode its name with references to existing
labels in the stream instead of including another copy of them in the
output. It also updates the compression dictionary with the location of
the name it writes to the stream.
"""
name = dns.Name(b"foo.example.com")
compression = {b"example.com": 0x17}
# Some bytes already encoded into the stream for this message
previous = b"some prefix to change .tell()"
stream = BytesIO()
stream.write(previous)
# The position at which the encoded form of this new name will appear in
# the stream.
expected = len(previous) + dns.Message.headerSize
name.encode(stream, compression)
self.assertEqual(
b"\x03foo\xc0\x17",
stream.getvalue()[len(previous):])
self.assertEqual(
{b"example.com": 0x17, b"foo.example.com": expected},
compression)
def test_unknown(self):
"""
A resource record of unknown type and class is parsed into an
L{UnknownRecord} instance with its data preserved, and an
L{UnknownRecord} instance is serialized to a string equal to the one it
was parsed from.
"""
wire = (
b'\x01\x00' # Message ID
b'\x00' # answer bit, opCode nibble, auth bit, trunc bit, recursive
# bit
b'\x00' # recursion bit, empty bit, authenticData bit,
# checkingDisabled bit, response code nibble
b'\x00\x01' # number of queries
b'\x00\x01' # number of answers
b'\x00\x00' # number of authorities
b'\x00\x01' # number of additionals
# query
b'\x03foo\x03bar\x00' # foo.bar
b'\xde\xad' # type=0xdead
b'\xbe\xef' # cls=0xbeef
# 1st answer
b'\xc0\x0c' # foo.bar - compressed
b'\xde\xad' # type=0xdead
b'\xbe\xef' # cls=0xbeef
b'\x00\x00\x01\x01' # ttl=257
b'\x00\x08somedata' # some payload data
# 1st additional
b'\x03baz\x03ban\x00' # baz.ban
b'\x00\x01' # type=A
b'\x00\x01' # cls=IN
b'\x00\x00\x01\x01' # ttl=257
b'\x00\x04' # len=4
b'\x01\x02\x03\x04' # 1.2.3.4
        )
        msg = dns.Message()
msg.fromStr(wire)
self.assertEqual(msg.queries, [
dns.Query(b'foo.bar', type=0xdead, cls=0xbeef),
])
self.assertEqual(msg.answers, [
dns.RRHeader(b'foo.bar', type=0xdead, cls=0xbeef, ttl=257,
payload=dns.UnknownRecord(b'somedata', ttl=257)),
])
self.assertEqual(msg.additional, [
dns.RRHeader(b'baz.ban', type=dns.A, cls=dns.IN, ttl=257,
p
|
turon/openthread
|
tools/harness-automation/cases/router_5_5_4.py
|
Python
|
bsd-3-clause
| 1,877
| 0
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from autothreadharness.harness_case import HarnessCase
import unittest
class Router_5_5_4(HarnessCase):
role = HarnessCase.ROLE_ROUTER
case = '5 5 4'
golden_devices_required = 5
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
|
SmingHub/Sming
|
Sming/Components/Storage/Tools/hwconfig/common.py
|
Python
|
lgpl-3.0
| 3,086
| 0.00324
|
#
# Common functions and definitions
#
import os, sys, json, platform
from collections import OrderedDict
sys.path.insert(1, os.path.expandvars('${SMING_HOME}/../Tools/Python'))
from rjsmin import jsmin
quiet = False
def status(msg):
"""Print status message to stderr."""
if not quiet:
critical(msg)
def critical(msg):
"""Print critical message to stderr."""
sys.stderr.write(msg)
sys.stderr.write('\n')
def fixpath(path):
"""Paths in Windows can get a little weird """
if len(path) > 2 and path[1] != ':' and platform.system() == 'Windows' and path[2] == '/':
return path[1] + ':' + path[2:]
return path
def parse_int(v, keywords=None):
"""Generic parser for integer fields.
int(x,0) with provision for k/m/K/M suffixes and 'keyword' value lookup.
"""
if not isinstance(v, str):
return v
if keywords is None or len(keywords) == 0:
try:
for letter, multiplier in [("k", 1024), ("m", 1024 * 1024), ("g", 1024 * 1024 * 1024)]:
if v.lower().endswith(letter):
return round(float(v[:-1]) * multiplier)
return int(v, 0)
except ValueError:
raise InputError("Invalid field value %s" % v)
try:
return keywords[v.lower()]
except KeyError:
raise InputError("Value '%s' is not valid. Known keywords: %s" % (v, ", ".join(keywords)))
def stringnum(s):
"""Return number if s contains only digits, otherwise return the string."""
return int(s) if s.isdigit() else s
def addr_format(a):
return "0x%08x" % a
def size_format(a):
if a == 0:
return '0'
for (val, suffix) in [(0x40000000, "G"), (0x100000, "M"), (0x400, "K")]:
if a % val == 0:
return "%d%s" % (a // val, suffix)
return "0x%08x" % a
def size_frac_str(a):
KB = 1024
MB = KB * 1024
GB = MB * 1024
if a >= GB:
div = GB
unit = 'G'
elif a >= MB:
div = MB
unit = 'M'
else:
div = KB
unit = 'K'
if a % div == 0:
return "%u%s" % (a // div, unit)
else:
return "%.2f%s" % (a / div, unit)
def quote(v):
return '"' + v + '"'
def contains_whitespace(s):
return ''.join(s.split()) != s
def json_loads(s):
return json.loads(jsmin(s), object_pairs_hook=OrderedDict)
def json_load(filename):
with open(filename) as f:
return json_loads(f.read())
def json_save(data, filename):
with open(filename, "w") as f:
json.dump(data, f, indent=4)
def to_json(obj):
return json.dumps(obj, indent=4)
def lookup_keyword(t, keywords):
for k, v in keywords.items():
if t == v:
return k
return "%d" % t
class InputError(RuntimeError):
def __init__(self, e):
super(InputError, self).__init__(e)
class ValidationError(InputError):
def __init__(self, obj, message):
        super(ValidationError, self).__init__("%s.%s '%s' invalid: %s" % (type(obj).__module__, type(obj).__name__, obj.name, message))
self.obj = obj
|
ASaiM/tools-iuc
|
tools/meme/fimo_wrapper.py
|
Python
|
mit
| 6,150
| 0.003902
|
#!/usr/bin/env python
import argparse
import os
import shutil
import string
import subprocess
import sys
import tempfile
BUFFSIZE = 1048576
# Translation table for reverse Complement, with ambiguity codes.
DNA_COMPLEMENT = string.maketrans("ACGTRYKMBDHVacgtrykmbdhv", "TGCAYRMKVHDBtgcayrmkvhdb")
def reverse(sequence):
# Reverse sequence string.
return sequence[::-1]
def dna_complement(sequence):
# Complement DNA sequence string.
return sequence.translate(DNA_COMPLEMENT)
def dna_reverse_complement(sequence):
# Returns the reverse complement of the sequence.
sequence = reverse(sequence)
return dna_complement(sequence)
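# Illustrative behaviour of the sequence helpers above (not part of the
# original file):
#   reverse("ACGT")                 -> "TGCA"
#   dna_complement("ACGT")          -> "TGCA"
#   dna_reverse_complement("AACGT") -> "ACGTT"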
def stop_err(msg):
sys.stderr.write(msg)
sys.exit(1)
parser = argparse.ArgumentParser()
parser.add_argument('--input_motifs', dest='input_motifs', help='MEME output formatted files for input to fimo')
parser.add_argument('--input_fasta', dest='input_fasta', help='Fasta sequence file')
parser.add_argument('--options_type', dest='options_type', help='Basic or Advanced options')
parser.add_argument('--input_psp', dest='input_psp', default=None, help='File containing position specific priors')
parser.add_argument('--input_prior_dist', dest='input_prior_dist', default=None, help='File containing binned distribution of priors')
parser.add_argument('--alpha', dest='alpha', type=float, default=1.0, help='The alpha parameter for calculating position specific priors')
parser.add_argument('--bgfile', dest='bgfile', default=None, help='Background file type, used only if not "default"')
parser.add_argument('--max_strand', action='store_true', help='If matches on both strands at a given position satisfy the output threshold, only report the match for the strand with the higher score')
parser.add_argument('--max_stored_scores', dest='max_stored_scores', type=int, help='Maximum score count to store')
parser.add_argument('--motif', dest='motifs', action='append', default=[], help='Specify motif by id')
parser.add_argument('--motif_pseudo', dest='motif_pseudo', type=float, default=0.1, help='Pseudocount to add to counts in motif matrix')
parser.add_argument('--no_qvalue', action='store_true', help='Do not compute a q-value for each p-value')
parser.add_argument('--norc', action='store_true', help='Do not score the reverse complement DNA strand')
parser.add_argument('--output_path', dest='output_path', help='Output files directory')
parser.add_argument('--parse_genomic_coord', action='store_true', help='Check each sequence header for UCSC style genomic coordinates')
parser.add_argument('--qv_thresh', action='store_true', help='Use q-values for the output threshold')
parser.add_argument('--thresh', dest='thresh', type=float, help='p-value threshold')
parser.add_argument('--gff_output', dest='gff_output', help='Gff output file')
parser.add_argument('--html_output', dest='html_output', help='HTML output file')
parser.add_argument('--interval_output', dest='interval_output', help='Interval output file')
parser.add_argument('--txt_output', dest='txt_output', help='Text output file')
parser.add_argument('--xml_output', dest='xml_output', help='XML output file')
args = parser.parse_args()
fimo_cmd_list = ['fimo']
if args.options_type == 'advanced':
fimo_cmd_list.append('--alpha %4f' % args.alpha)
if args.bgfile is not None:
fimo_cmd_list.append('--bgfile "%s"' % args.bgfile)
if args.max_strand:
fimo_cmd_list.append('--max-strand')
fimo_cmd_list.append('--max-stored-scores %d' % args.max_stored_scores)
if len(args.motifs) > 0:
for motif in args.motifs:
fimo_cmd_list.append('--motif "%s"' % motif)
fimo_cmd_list.append('--motif-pseudo %4f' % args.motif_pseudo)
if args.no_qvalue:
fimo_cmd_list.append('--no-qvalue')
if args.norc:
fimo_cmd_list.append('--norc')
if args.parse_genomic_coord:
fimo_cmd_list.append('--parse-genomic-coord')
if args.qv_thresh:
fimo_cmd_list.append('--qv-thresh')
fimo_cmd_list.append('--thresh %4f' % args.thresh)
if args.input_psp is not None:
fimo_cmd_list.append('--psp "%s"' % args.input_psp)
if args.input_prior_dist is not None:
fimo_cmd_list.append('--prior-dist "%s"' % args.input_prior_dist)
fimo_cmd_list.append('--o "%s"' % (args.output_path))
fimo_cmd_list.append('--verbosity 1')
fimo_cmd_list.append(args.input_motifs)
fimo_cmd_list.append(args.input_fasta)
fimo_cmd = ' '.join(fimo_cmd_list)
try:
tmp_stderr = tempfile.NamedTemporaryFile()
proc = subprocess.Popen(args=fimo_cmd, shell=True, stderr=tmp_stderr)
returncode = proc.wait()
tmp_stderr.seek(0)
stderr = ''
try:
while True:
stderr += tmp_stderr.read(BUFFSIZE)
if not stderr or len(stderr) % BUFFSIZE != 0:
break
except OverflowError:
pass
if returncode != 0:
stop_err(stderr)
except Exception as e:
stop_err('Error running FIMO:\n%s' % str(e))
shutil.move(os.path.join(args.output_path, 'fimo.txt'), args.txt_output)
shutil.move(os.path.join(args.output_path, 'fimo.gff'), args.gff_output)
shutil.move(os.path.join(args.output_path, 'fimo.xml'), args.xml_output)
shutil.move(os.path.join(args.output_path, 'fimo.html'), args.html_output)
out_file = open(args.interval_output, 'wb')
out_file.write("#%s\n" % "\t".join(("chr", "start", "end", "pattern name", "score", "strand", "matched sequence", "p-value", "q-value")))
for line in open(args.txt_output):
if line.startswith('#'):
continue
fields = line.rstrip("\n\r").split("\t")
start, end = int(fields[2]), int(fields[3])
sequence = fields[7]
if start > end:
# Flip start and end and set strand.
start, end = end, start
strand = "-"
# We want sequences relative to strand; FIMO always provides + stranded sequence.
sequence = dna_reverse_complement(sequence)
else:
strand = "+"
# Make 0-based start position.
start -= 1
out_file.write("%s\n" % "\t".join([fields[1], str(start), str(end), fields[0], fields[4], strand, sequence, fields[5], fields[6]]))
out_file.close()
|
wakalixes/sqldataplot
|
plugins/pluginFitSigmoidal.py
|
Python
|
gpl-2.0
| 1,665
| 0.037838
|
#--------------------------------------------------
# Revision = $Rev: 20 $
# Date = $Date: 2011-08-05 20:42:24 +0200 (Fri, 05 Aug 2011) $
# Author = $Author: stefan $
#--------------------------------------------------
from pluginInterfaces import PluginFit, Parameter,leastsqFit
import numpy as np
class PluginFitThreeBodyBeta(PluginFit):
def __init__(self):
pass
def fit(self,array,errarray,param,xmin=0,xmax=0, fitAxes=[]):
"""return the data that is needed for plotting the fitting result"""
"""0...a, 1...xc, 2...k, 3...y0"""
self.params = [Parameter(v) for v in param]
def f(x): return self.params[0]()/(1+np.exp(-(x-self.params[1]())/self.params[2]()))+self.params[3]()
self.simpleFitAllAxes(f,array,errarray,xmin,xmax, fitAxes)
return self.generateDataFromParameters(f,[np.amin(array[0,:]),np.amax(array[0,:])], np.size(fitAxes)+1, xmin, xmax, fitAxes)
    def getInitialParameters(self,data):
"""find the best initial values and return them"""
dx = np.abs(data[0,0] - data[0,-1])
mi = np.amin(data[1,:])
ma = np.amax(data[1,:])
xc = (np.amax(data[0,:])-np.amin(data[0,:]))/2+np.amin(data[0,:])
return [ma-mi,xc,dx*2,mi]
def getParameters(self):
"""return the fit parameters"""
return np.array(["a","xc","dx","y0"])
    def getFitModelStr(self):
"""return a string of the implemented fitting model, i.e. 'linear fit (y=A*x +B)'"""
return "Sigmoidal"
def getResultStr(self):
"""return a special result, i.e. 'Frequency = blabla'"""
return "nothing fitted"
|
damnfine/mezzanine
|
mezzanine/pages/managers.py
|
Python
|
bsd-2-clause
| 3,992
| 0
|
from __future__ import unicode_literals
from future.builtins import range
from mezzanine.conf import settings
from mezzanine.core.managers import DisplayableManager
from mezzanine.utils.urls import home_slug
class PageManager(DisplayableManager):
def published(self, for_user=None, include_login_required=False):
"""
Override ``DisplayableManager.published`` to exclude
        pages with ``login_required`` set to ``True`` if the
user is unauthenticated and the setting
``PAGES_PUBLISHED_INCLUDE_LOGIN_REQUIRED`` is ``False``.
The extra ``include_login_required`` arg allows callers to
override the ``PAGES_PUBLISHED_INCLUDE_LOGIN_REQUIRED``
behaviour in special cases where they want to deal with the
``login_required`` field manually, such as the case in
``PageMiddleware``.
"""
published = super(PageManager, self).published(for_user=for_user)
unauthenticated = for_user and not for_user.is_authenticated()
if (unauthenticated and not include_login_required and
not settings.PAGES_PUBLISHED_INCLUDE_LOGIN_REQUIRED):
published = published.exclude(login_required=True)
return published
def with_ascendants_for_slug(self, slug, **kwargs):
"""
Given a slug, returns a list of pages from ascendants to
descendants, that form the parent/child page relationships
for that slug. The main concern is to do this in a single
database query rather than querying the database for parents
of a given page.
Primarily used in ``PageMiddleware`` to provide the current
page, which in the case of non-page views, won't match the
slug exactly, but will likely match a page that has been
created for linking to the entry point for the app.
Also used within ``Page.get_ascendants``, which gets called
in the ``pages.views`` view, for building a list of possible
templates that can be used for the page.
If a valid chain of pages is found, we also assign the pages
to the ``page._ascendants`` attr of the main/first/deepest
page, so that when its ``get_ascendants`` method is called,
the ascendants chain can be re-used without querying the
database again. This occurs at least once, given the second
use-case described above.
"""
if slug == "/":
slugs = [home_slug()]
else:
# Create a list of slugs within this slug,
            # eg: ['about', 'about/team', 'about/team/mike']
parts = slug.split("/")
slugs = ["/".join(parts[:i]) for i in range(1, len(parts) + 1)]
# Find the deepest page that matches one of our slugs.
        # Sorting by "-slug" should ensure that the pages are in
# descendant -> ascendant order.
pages_for_user = self.published(**kwargs)
pages = list(pages_for_user.filter(slug__in=slugs).order_by("-slug"))
if not pages:
return []
# Check to see if the other pages retrieved form a valid path
# in the page tree, i.e. pages[0].parent == pages[1],
# pages[1].parent == pages[2], and so on. If they do, assign
# the ascendants to the main/first/deepest page, so that it
# can be re-used on calls to its get_ascendants method.
pages[0]._ascendants = []
for i, page in enumerate(pages):
try:
parent = pages[i + 1]
except IndexError:
# IndexError indicates that this is the last page in
# the list, so it should have no parent.
if page.parent_id:
break # Invalid parent
else:
if page.parent_id != parent.id:
break # Invalid parent
else:
# Valid parents
pages[0]._ascendants = pages[1:]
return pages
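# Hypothetical usage sketch (assumes this manager is attached to the Page model,
# which is not shown in this file):
#   pages = Page.objects.with_ascendants_for_slug("about/team/mike")
#   current = pages[0] if pages else None        # deepest matching page
#   ancestors = current.get_ascendants() if current else []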
|
QuLogic/iris
|
lib/iris/tests/unit/analysis/cartography/test_rotate_winds.py
|
Python
|
gpl-3.0
| 20,624
| 0
|
# (C) British Crown Copyright 2015 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for the function
:func:`iris.analysis.cartography.rotate_winds`.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
import numpy.ma as ma
import cartopy.crs as ccrs
from iris.analysis.cartography import rotate_winds, unrotate_pole
from iris.cube import Cube
from iris.coords import DimCoord, AuxCoord
import iris.coord_systems
def uv_cubes(x=None, y=None):
"""Return u, v cubes with a grid in a rotated pole CRS."""
cs = iris.coord_systems.RotatedGeogCS(grid_north_pole_latitude=37.5,
grid_north_pole_longitude=177.5)
if x is None:
x = np.linspace(311.9, 391.1, 6)
if y is None:
y = np.linspace(-23.6, 24.8, 5)
x2d, y2d = np.meshgrid(x, y)
u = 10 * (2 * np.cos(2 * np.deg2rad(x2d) + 3 * np.deg2rad(y2d + 30)) ** 2)
v = 20 * np.cos(6 * np.deg2rad(x2d))
lon = DimCoord(x, standard_name='grid_longitude', units='degrees',
coord_system=cs)
lat = DimCoord(y, standard_name='grid_latitude', units='degrees',
coord_system=cs)
u_cube = Cube(u, standard_name='x_wind', units='m/s')
v_cube = Cube(v, standard_name='y_wind', units='m/s')
for cube in (u_cube, v_cube):
cube.add_dim_coord(lat.copy(), 0)
cube.add_dim_coord(lon.copy(), 1)
return u_cube, v_cube
def uv_cubes_3d(ref_cube, n_realization=3):
"""
Return 3d u, v cubes with a grid in a rotated pole CRS taken from
the provided 2d cube, by adding a realization dimension
    coordinate bound to the zeroth dimension.
"""
lat = ref_cube.coord('grid_latitude')
lon = ref_cube.coord('grid_longitude')
x2d, y2d = np.meshgrid(lon.points, lat.points)
u = 10 * (2 * np.cos(2 * np.deg2rad(x2d) + 3 * np.deg2rad(y2d + 30)) ** 2)
v = 20 * np.cos(6 * np.deg2rad(x2d))
# Multiply slices by factor to give variation over 0th dim.
factor = np.arange(1, n_realization + 1).reshape(n_realization, 1, 1)
u = factor * u
v = factor * v
realization = DimCoord(np.arange(n_realization), 'realization')
u_cube = Cube(u, standard_name='x_wind', units='m/s')
v_cube = Cube(v, standard_name='y_wind', units='m/s')
for cube in (u_cube, v_cube):
cube.add_dim_coord(realization.copy(), 0)
cube.add_dim_coord(lat.copy(), 1)
cube.add_dim_coord(lon.copy(), 2)
return u_cube, v_cube
class TestPrerequisites(tests.IrisTest):
def test_different_coord_systems(self):
u, v = uv_cubes()
v.coord('grid_latitude').coord_system = iris.coord_systems.GeogCS(1)
with self.assertRaisesRegexp(
ValueError, 'Coordinates differ between u and v cubes'):
rotate_winds(u, v, iris.coord_systems.OSGB())
def test_different_xy_coord_systems(self):
u, v = uv_cubes()
u.coord('grid_latitude').coord_system = iris.coord_systems.GeogCS(1)
v.coord('grid_latitude').coord_system = iris.coord_systems.GeogCS(1)
with self.assertRaisesRegexp(
ValueError,
'Coordinate systems of x and y coordinates differ'):
rotate_winds(u, v, iris.coord_systems.OSGB())
def test_different_shape(self):
x = np.linspace(311.9, 391.1, 6)
y = np.linspace(-23.6, 24.8, 5)
u, _ = uv_cubes(x, y)
_, v = uv_cubes(x[:-1], y)
with self.assertRaisesRegexp(ValueError, 'same shape'):
rotate_winds(u, v, iris.coord_systems.OSGB())
def test_xy_dimensionality(self):
u, v = uv_cubes()
# Replace 1d lat with 2d lat.
x = u.coord('grid_longitude').points
y = u.coord('grid_latitude').points
        x2d, y2d = np.meshgrid(x, y)
lat_2d = AuxCoord(y2d, 'grid_latitude', units='degrees',
coord_system=u.coord('grid_latitude').coord_system)
for cube in (u, v):
|
cube.remove_coord('grid_latitude')
cube.add_aux_coord(lat_2d.copy(), (0, 1))
with self.assertRaisesRegexp(
ValueError,
'x and y coordinates must have the same number of dimensions'):
rotate_winds(u, v, iris.coord_systems.OSGB())
def test_dim_mapping(self):
x = np.linspace(311.9, 391.1, 3)
y = np.linspace(-23.6, 24.8, 3)
u, v = uv_cubes(x, y)
v.transpose()
with self.assertRaisesRegexp(ValueError, 'Dimension mapping'):
rotate_winds(u, v, iris.coord_systems.OSGB())
class TestAnalyticComparison(tests.IrisTest):
@staticmethod
def _unrotate_equation(rotated_lons, rotated_lats,
rotated_us, rotated_vs, pole_lon, pole_lat):
# Perform a rotated-pole 'unrotate winds' transformation on arrays of
# rotated-lat, rotated-lon, u and v.
# This can be defined as an analytic function : cf. UMDP015
# Work out the rotation angles.
lambda_angle = np.radians(pole_lon - 180.0)
phi_angle = np.radians(90.0 - pole_lat)
# Get the locations in true lats+lons.
trueLongitude, trueLatitude = unrotate_pole(rotated_lons,
rotated_lats,
pole_lon,
pole_lat)
# Calculate inter-coordinate rotation coefficients.
cos_rot = (np.cos(np.radians(rotated_lons)) *
np.cos(np.radians(trueLongitude) - lambda_angle) +
np.sin(np.radians(rotated_lons)) *
np.sin(np.radians(trueLongitude) - lambda_angle) *
np.cos(phi_angle))
sin_rot = -((np.sin(np.radians(trueLongitude) - lambda_angle) *
np.sin(phi_angle)) /
np.cos(np.radians(rotated_lats)))
# Matrix-multiply to rotate the vectors.
u_true = rotated_us * cos_rot - rotated_vs * sin_rot
v_true = rotated_vs * cos_rot + rotated_us * sin_rot
return u_true, v_true
def _check_rotated_to_true(self, u_rot, v_rot, target_cs, **kwds):
# Run test calculation (numeric).
u_true, v_true = rotate_winds(u_rot, v_rot, target_cs)
# Perform same calculation via the reference method (equations).
cs_rot = u_rot.coord('grid_longitude').coord_system
pole_lat = cs_rot.grid_north_pole_latitude
pole_lon = cs_rot.grid_north_pole_longitude
rotated_lons = u_rot.coord('grid_longitude').points
rotated_lats = u_rot.coord('grid_latitude').points
rotated_lons_2d, rotated_lats_2d = np.meshgrid(
rotated_lons, rotated_lats)
rotated_u, rotated_v = u_rot.data, v_rot.data
u_ref, v_ref = self._unrotate_equation(rotated_lons_2d,
rotated_lats_2d,
rotated_u, rotated_v,
pole_lon, pole_lat)
# Check that all the numerical results are within given tolerances.
self.assertArrayAllClose(u_true.data, u_ref, **kwds)
self.assertArrayAllClose(v_true.data, v_ref, **kwds)
def test_rotated_to_true__small(self):
# Check for a small f
|
wsy1607/Data-Analysis-of-Campus-Crime-Index
|
website/plugins/extract_toc/extract_toc.py
|
Python
|
mit
| 984
| 0.001016
|
"""
Extract Table of Content
========================
This plugin allows you to extract table of contents (ToC) from article.content
and place it in its own article.toc variable.
"""
from os import path
from bs4 import BeautifulSoup
from pelican import signals, readers, contents
def extract_toc(content):
if isinstance(content, contents.Static):
|
return
soup = BeautifulSoup(content._content,'html.parser')
filename = content.source_path
extension = path.splitext(filename)[1][1:]
toc = ''
# if it is a Markdown file
if extension in readers.MarkdownReader.file_extensions:
toc = soup.find('div', class_='toc')
# else if it is a reST file
elif extension in readers.RstReader.file_extensions:
toc = soup.find('div', class_='contents topic')
if toc:
toc.extract()
content._content = soup.decode()
content.toc = toc.decode()
def register():
signals.content_object_init.connect(extract_toc)
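# Hypothetical configuration sketch (assumes the standard Pelican plugin settings,
# which are not shown in this file): enable the plugin so register() is called.
#   PLUGIN_PATHS = ['plugins']
#   PLUGINS = ['extract_toc']
# Templates can then render {{ article.toc }} alongside {{ article.content }}.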
|
UManPychron/pychron
|
pychron/furnace/firmware/server.py
|
Python
|
apache-2.0
| 5,682
| 0.001936
|
# ===============================================================================
# Copyright 2016 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import Instance
# ============= standard library imports ========================
# ============= local library imports ==========================
from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ServerEndpoint
from twisted.internet.protocol import Factory
from pychron.headless_loggable import HeadlessLoggable
from pychron.tx.protocols.service import ServiceProtocol
class FurnaceFirmwareProtocol(ServiceProtocol):
def __init__(self, manager, addr):
self._manager = manager
self._addr = addr
ServiceProtocol.__init__(self)
misc_services = (('GetLabTemperature', manager.get_lab_temperature),
('GetLabHumidity', manager.get_lab_humidity),
('SetFrameRate', manager.set_frame_rate),
('GetVersion', manager.get_version),
('GetDIState', manager.get_di_state),
('GetHeartBeat', manager.get_heartbeat),
('GetFullSummary', manager.get_full_summary))
controller_services = (('GetTemperature', manager.get_temperature),
('GetSetpoint', manager.get_setpoint),
('SetSetpoint', manager.set_setpoint),
('GetProcessValue', manager.get_temperature),
('GetPercentOutput', manager.get_percent_output),
('GetFurnaceSummary', manager.get_furnace_summary),
('SetPID', manager.set_pid))
valve_services = (('Open', manager.open_switch),
('Close', manager.close_switch),
('GetIndicatorState', manager.get_indicator_state),
# ('GetChannelDOState', manager.get_channel_do_state),
('GetChannelState', manager.get_channel_state),
('GetIndicatorComponentStates', manager.get_indicator_component_states))
dump_services = (('LowerFunnel', manager.lower_funnel),
('RaiseFunnel', manager.raise_funnel),
('InUpPosition', manager.is_funnel_up),
('InDownPosition', manager.is_funnel_down),
('EnergizeMagnets', manager.energize_magnets),
('IsEnergized', manager.is_energized),
|
('RotaryDumperMoving', manager.rotary_dumper_moving),
('DenergizeMagnets', manager.denergize_magnets),
('MoveAbsolute', manager.move_absolute),
('MoveRelative', manager.move_relative),
                         ('GetPosition', manager.get_position),
('Slew', manager.slew),
('Stalled', manager.stalled),
('SetHome', manager.set_home),
('StopDrive', manager.stop_drive),
('Moving', manager.moving),
('StartJitter', manager.start_jitter),
('StopJitter', manager.stop_jitter))
bakeout_services = (('GetBakeoutSetpoint', manager.get_bakeout_setpoint),
('SetBakeoutControlMode', manager.set_bakeout_control_mode),
('GetBakeoutTemperature', manager.get_bakeout_temperature),
('SetBakeoutClosedLoopSetpoint', manager.set_bakeout_setpoint),
('GetBakeoutTempPower', manager.get_bakeout_temp_and_power))
gauge_services = (('GetPressure', manager.get_gauge_pressure),)
for s in (misc_services, controller_services, valve_services, dump_services,
bakeout_services, gauge_services):
self._register_services(s)
class FirmwareFactory(Factory):
def __init__(self, manager):
self._manager = manager
def buildProtocol(self, addr):
return FurnaceFirmwareProtocol(self._manager, addr)
class FirmwareServer(HeadlessLoggable):
manager = Instance('pychron.furnace.firmware.manager.FirmwareManager')
def bootstrap(self, port=None, **kw):
self.debug('bootstrap')
self._load_config(port)
self.debug('starting reactor')
reactor.run()
def _load_config(self, port):
self.debug('load config')
if port is None:
port = 8000
self.add_endpoint(port, FirmwareFactory(self.manager))
def add_endpoint(self, port, factory):
        self.debug('add endpoint port={} factory={}'.format(port, factory.__class__.__name__))
endpoint = TCP4ServerEndpoint(reactor, port)
endpoint.listen(factory)
# ============= EOF =============================================
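# Hypothetical usage sketch (manager construction is defined elsewhere and only
# assumed here, using the import path named in the Instance trait above):
#   from pychron.furnace.firmware.manager import FirmwareManager
#   server = FirmwareServer(manager=FirmwareManager())
#   server.bootstrap(port=8000)   # blocks inside reactor.run()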
|
matt-gardner/deep_qa
|
deep_qa/models/multiple_choice_qa/question_answer_similarity.py
|
Python
|
apache-2.0
| 5,186
| 0.004435
|
from typing import Dict
from overrides import overrides
from keras import backend as K
from keras.layers import Dense, Dropout, Input
from ...data.instances.multiple_choice_qa import QuestionAnswerInstance
from ...layers.wrappers import EncoderWrapper
from ...layers.attention import Attention
from ...training import TextTrainer
from ...common.params import Params
from ...training.models import DeepQaModel
class QuestionAnswerSimilarity(TextTrainer):
"""
A TextTrainer that takes a question and several answer options as input, encodes the word
sequences using a sentence encoder, optionally passes the question encoding through some dense
layers, then selects the option that is most similar to the final question encoding.
This assumes that you can get the parameters of the model to learn whatever associations
between words in the question and words in the answer are necessary to select the correct
choice. There is no notion of external memory or background knowledge here.
"""
def __init__(self, params: Params):
self.num_hidden_layers = params.pop('num_hidden_layers', 1)
self.hidden_layer_width = params.pop('hidden_layer_width', 50)
self.hidden_layer_activation = params.pop('hidden_layer_activation', 'relu')
self.max_answer_length = params.pop('max_answer_length', None)
self.num_options = params.pop('num_options', None)
super(QuestionAnswerSimilarity, self).__init__(params)
@overrides
def _build_model(self):
"""
The basic outline here is that we'll pass both questions and answers through an embedding
        layer, then through an encoder. Then we'll pass the encoded question through some dense
        layers, and compare its similarity to the encoded answers.
"""
# First we create input layers and pass the inputs through an embedding.
question_input = Input(shape=self._get_sentence_shape(), dtype='int32', name="sentence_input")
answer_input = Input(shape=(self.num_options,) + self._get_sentence_shape(self.max_answer_length),
dtype='int32',
name="answer_input")
question_embedding = self._embed_input(question_input)
answer_embedding = self._embed_input(answer_input)
# Then we encode the question and answers using some encoder.
question_encoder = self._get_encoder()
encoded_question = question_encoder(question_embedding)
# TODO(matt): make the dropout a parameter (should probably be "encoder_dropout", in
# TextTrainer).
regularized_encoded_question = Dropout(0.2)(encoded_question)
# This needs to use a new encoder because we can't compile the same LSTM with two different
# padding lengths... If you really want to use the same LSTM for both questions and
# answers, pad the answers to the same dimension as the questions, replacing
# self.max_answer_length with self.num_sentence_words everywhere.
answer_encoder = EncoderWrapper(question_encoder, name="answer_encoder")
encoded_answers = answer_encoder(answer_embedding)
# Then we pass the question through some hidden (dense) layers.
hidden_input = regularized_encoded_question
for i in range(self.num_hidden_layers):
hidden_layer = Dense(units=self.hidden_layer_width,
activation=self.hidden_layer_activation,
name='question_hidden_layer_%d' % i)
hidden_input = hidden_layer(hidden_input)
projection_layer = Dense(units=K.int_shape(encoded_answers)[-1],
activation='linear',
name='question_projection')
projected_input = projection_layer(hidden_input)
# Lastly, we compare the similarity of the question to the answer options. Note that this
# layer has no parameters, so it doesn't need to be put into self._init_layers().
softmax_output = Attention(name='answer_similarity_softmax')([projected_input, encoded_answers])
return DeepQaModel(inputs=[question_input, answer_input], outputs=softmax_output)
def _instance_type(self):
return QuestionAnswerInstance
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
padding_lengths = super(QuestionAnswerSimilarity, self).get_padding_lengths()
padding_lengths['num_options'] = self.num_options
padding_lengths['answer_length'] = self.max_answer_length
return padding_lengths
@overrides
def _set_padding_lengths(self, padding_lengths: Dict[str, int]):
super(QuestionAnswerSimilarity, self)._set_padding_lengths(padding_lengths)
if self.max_answer_length is None:
self.max_answer_length = padding_lengths['answer_length']
if self.num_options is None:
self.num_options = padding_lengths['num_options']
@overrides
def _set_padding_lengths_from_model(self):
self.num_sentence_words = self.model.get_input_shape_at(0)[1]
# TODO(matt): implement this correctly
|
aaronbassett/DisposableEmailChecker
|
build_list.py
|
Python
|
bsd-3-clause
| 141
| 0.007092
|
emails = sorted(set([line.strip() for line in open("email_domains.txt")]))
for email in emails:
print("'{email}',".format(emai
|
l=email))
|
baixuexue123/note
|
python/concurrency/gevent/test_pool.py
|
Python
|
bsd-2-clause
| 999
| 0.002275
|
# -*- coding: utf-8 -*-
"""
Greenlets are deterministic: given the same configuration and the same input, they always produce the same output.
The example below runs a series of tasks on a multiprocessing pool and compares that with running them on a gevent pool.
"""
import time
from multiprocessing.pool import Pool
def echo(i):
time.sleep(0.001)
return i
# Non Deterministic Process Pool
p = Pool(10)
run1 = [a for a in p.imap_unordered(echo, xrange(10))]
run2 = [a for a in p.imap_unordered(echo, xrange(10))]
run3 = [a for a in p.imap_unordered(echo, xrange(10))]
run4 = [a for a in p.imap_unordered(echo, xrange(10))]
print(run1 == run2 == run3 == run4)
# Deterministic Gevent Pool
from gevent.pool import Pool
p = Pool(10)
run1 = [a for a in p.imap_unordered(echo, xrange(10))]
run2 = [a for a in p.imap_unordered(echo, xrange(10))]
run3 = [a for a in p.imap_unordered(echo, xrange(10))]
run4 = [a for a in p.imap_unordered(echo, xrange(10))]
print(run1 == run2 == run3 == run4)
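# Expected outcome (as the section comments above suggest): the multiprocessing
# comparison typically prints False because imap_unordered returns results in
# completion order, while the gevent pool comparison prints True.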
|
intel-hpdd/intel-manager-for-lustre
|
chroma_core/migrations/0025_createsnapshotjob_destroysnapshotjob.py
|
Python
|
mit
| 2,819
| 0.003193
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2020-09-10 14:23
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("chroma_core", "0024_mountsnapshotjob_unmountsnapshotjob"),
]
operations = [
migrations.CreateModel(
name="CreateSnapshotJob",
fields=[
(
"job_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="chroma_core.Job",
),
),
("fqdn", models.CharField(help_text=b"MGS host to create the snapshot on", max_length=256)),
("fsname", models.CharField(help_text=b"Lustre filesystem name", max_length=8)),
("name", models.CharField(help_text=b"Snapshot to create", max_length=64)),
(
"comment",
models.CharField(help_text=b"Optional comment for the snapshot", max_length=1024, null=True),
|
),
(
"use_barrier",
models.BooleanField(
default=False,
help_text=b"Set write barrier before creating snapshot. The default value is False",
),
),
],
options={
"ordering": ["id"],
},
bases=("chroma_cor
|
e.job",),
),
migrations.CreateModel(
name="DestroySnapshotJob",
fields=[
(
"job_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="chroma_core.Job",
),
),
("fqdn", models.CharField(help_text=b"MGS host to destroy the snapshot on", max_length=256)),
("fsname", models.CharField(help_text=b"Lustre filesystem name", max_length=8)),
("name", models.CharField(help_text=b"Snapshot to destroy", max_length=64)),
("force", models.BooleanField(default=False, help_text=b"Destroy the snapshot with force")),
],
options={
"ordering": ["id"],
},
bases=("chroma_core.job",),
),
]
|