| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
| mazaclub/mazabot-core | plugins/ChannelStats/test.py | Python | bsd-3-clause | 4,460 | 0.000897 |
###
# Copyright (c) 2002-2004, Jeremiah Fincher
# Copyright (c) 2010, James Vega
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
import supybot.ircdb as ircdb
class ChannelStatsTestCase(ChannelPluginTestCase):
plugins = ('ChannelStats', 'User')
def setUp(self):
ChannelPluginTestCase.setUp(self)
self.prefix = 'foo!bar@baz'
self.nick = 'foo'
self.irc.feedMsg(ircmsgs.privmsg(self.irc.nick,
'register foo bar',
prefix=self.prefix))
_ = self.irc.takeMsg()
chanop = ircdb.makeChannelCapability(self.channel, 'op')
ircdb.users.getUser(self.nick).addCapability(chanop)
def test(self):
self.assertNotError('channelstats')
self.assertNotError('channelstats')
self.assertNotError('channelstats')
def testStats(self):
self.assertError('channelstats stats %s' % self.nick)
self.assertNotError('channelstats stats %s' % self.nick)
self.assertNotError('channelstats stats %s' % self.nick.upper())
self.assertNotError('channelstats stats')
self.assertRegexp('channelstats stats', self.nick)
def testSelfStats(self):
self.assertError('channelstats stats %s' % self.irc.nick)
self.assertNotError('channelstats stats %s' % self.irc.nick)
self.assertNotError('channelstats stats %s' % self.irc.nick)
self.assertNotError('channelstats stats %s' % self.irc.nick.upper())
self.assertRegexp('channelstats rank chars', self.irc.nick)
u = ircdb.users.getUser(self.prefix)
u.addCapability(ircdb.makeChannelCapability(self.channel, 'op'))
ircdb.users.setUser(u)
try:
conf.supybot.plugins.ChannelStats.selfStats.setValue(False)
m1 = self.getMsg('channelstats stats %s' % self.irc.nick)
m2 = self.getMsg('channelstats stats %s' % self.irc.nick)
self.assertEqual(m1.args[1], m2.args[1])
finally:
conf.supybot.plugins.ChannelStats.selfStats.setValue(True)
def testNoKeyErrorStats(self):
self.assertNotRegexp('stats sweede', 'KeyError')
def testRank(self):
self.assertError('channelstats stats %s' % self.irc.nick)
self.assertNotError('channelstats stats %s' % self.irc.nick)
self.assertNotError('channelstats stats %s' % self.irc.nick)
self.assertNotError('channelstats stats %s' % self.irc.nick.upper())
self.assertNotError('channelstats stats %s' % self.nick)
self.assertNotError('channelstats stats %s' % self.nick.upper())
self.assertNotError('channelstats stats')
self.assertNotError('channelstats rank chars / msgs')
self.assertNotError('channelstats rank kicks/kicked') # Tests inf
self.assertNotError('channelstats rank log(msgs)')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| pytorch/fairseq | fairseq/data/encoders/nltk_tokenizer.py | Python | mit | 755 | 0 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.data.encoders import register_tokenizer
from fairseq.dataclass import FairseqDataclass
@register_tokenizer("nltk", dataclass=FairseqDataclass)
class NLTKTokenizer(object):
def __init__(self, *unused):
try:
from nltk.tokenize import word_tokenize
self.word_tokenize = word_tokenize
except ImportError:
raise ImportError("Please install nltk with: pip install nltk")
def encode(self, x: str) -> str:
return " ".join(self.word_tokenize(x))
def decode(self, x: str) -> str:
return x
| Vanuan/gpx_to_road_map | biagoni2012/gpsmatcher.py | Python | apache-2.0 | 2,740 | 0.016788 |
from viterbi import Viterbi
from rtree import Rtree
from spatialfunclib import *
class GPSMatcher:
def __init__(self, hmm, emission_probability, constraint_length=10, MAX_DIST=500, priors=None, smallV=0.00000000001):
# initialize spatial index
self.previous_obs = None
if priors == None:
priors=dict([(state,1.0/len(hmm)) for state in hmm])
state_spatial_index = Rtree()
unlocated_states = []
id_to_state = {}
id = 0
for state in hmm:
geom=self.geometry_of_state(state)
if not geom:
unlocated_states.append(state)
else:
((lat1,lon1),(lat2,lon2))=geom
state_spatial_index.insert(id,
(min(lon1, lon2), min(lat1, lat2),
max(lon1, lon2), max(lat1, lat2)))
id_to_state[id]=state
id=id+1
def candidate_states(obs): #was (lat,lon) in place of obs
geom = self.geometry_of_observation(obs)
if geom == None:
return hmm.keys()
else:
(lat,lon)=geom
nearby_states = state_spatial_index.intersection((lon-MAX_DIST/METERS_PER_DEGREE_LONGITUDE,
lat-MAX_DIST/METERS_PER_DEGREE_LATITUDE,
lon+MAX_DIST/METERS_PER_DEGREE_LONGITUDE,
lat+MAX_DIST/METERS_PER_DEGREE_LATITUDE))
candidates = [id_to_state[id] for id in nearby_states]+unlocated_states
return candidates
self.viterbi = Viterbi(hmm,emission_probability,
constraint_length=constraint_length,
priors=priors,
candidate_states=candidate_states,
smallV=smallV)
def step(self,obs,V,p):
if self.previous_obs != None:
for int_obs in self.interpolated_obs(self.previous_obs, obs):
V,p = self.viterbi.step(int_obs,V,p)
V,p = self.viterbi.step(obs,V,p)
self.previous_obs = obs
return V,p
def interpolated_obs(self,prev,obs):
return []
def geometry_of_observation(self, obs):
return obs
def geometry_of_state(self, state):
""" Subclasses should override this method to return the geometry of a given state, typically an edge."""
if state == 'unknown': return None
else:
return state
| weaver-viii/h2o-3 | h2o-py/tests/testdir_algos/naivebayes/pyunit_prostateNB.py | Python | apache-2.0 | 836 | 0.008373 |
import sys
sys.path.insert(1, "../../../")
import h2o
def nb_prostate(ip, port):
print "Importing prostate.csv data..."
prostate = h2o.upload_file(h2o.locate("smalldata/logreg/prostate.csv"))
print "Converting CAPSULE, RACE, DCAPS, and DPROS to categorical"
prostate['CAPSULE'] = prostate['CAPSULE'].asfactor()
prostate['RACE'] = prostate['RACE'].asfactor()
prostate['DCAPS'] = prostate['DCAPS'].asfactor()
prostate['DPROS'] = prostate['DPROS'].asfactor()
print "Compare with Naive Bayes when x = 3:9, y = 2"
prostate_nb = h2o.naive_bayes(x=prostate[2:9], y=prostate[1], laplace = 0)
prostate_nb.show()
print "Predict on training data"
prostate_pred = prostate_nb.predict(prostate)
prostate_pred.head()
if __name__ == "__main__":
h2o.run_test(sys.argv, nb_prostate)
| UTAlan/ginniBeam.net | gin/contact/models.py | Python | gpl-2.0 | 189 | 0.015873 |
from django import forms
class ContactForm(forms.Form):
name = forms.CharField(max_length=100)
email = forms.EmailField()
message = forms.CharField(widget=forms.Textarea)
| arth-co/shoop | shoop_tests/utils/test_analog.py | Python | agpl-3.0 | 837 | 0 |
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.db import models
from shoop.utils.analog import define_log_model, BaseLogEntry
class FakeModel(models.Model):
pass
def test_analog():
FakeModelLogEntry = define_log_model(FakeModel)
assert FakeModelLogEntry.__module__ == FakeModel.__module__
assert FakeModelLogEntry._meta.get_field("target").rel.to is FakeModel
assert FakeModel.log_entries.related.model is FakeModel
assert FakeModel.log_entries.related.related_model is FakeModelLogEntry
assert issubclass(FakeModelLogEntry, BaseLogEntry)
assert isinstance(FakeModelLogEntry(), BaseLogEntry)
| benjamindeleener/scad | scripts/sct_orientation.py | Python | mit | 11,010 | 0.003724 |
#!/usr/bin/env python
#########################################################################################
#
# Get or set orientation of nifti 3d or 4d data.
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Julien Cohen-Adad
# Modified: 2014-10-18
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import sys
import os
import getopt
import commands
import sct_utils as sct
import time
# DEFAULT PARAMETERS
class Param:
## The constructor
def __init__(self):
self.debug = 0
self.fname_data = ''
self.fname_out = ''
self.orientation = ''
self.list_of_correct_orientation = 'RIP LIP RSP LSP RIA LIA RSA LSA IRP ILP SRP SLP IRA ILA SRA SLA RPI LPI RAI LAI RPS LPS RAS LAS PRI PLI ARI ALI PRS PLS ARS ALS IPR SPR IAR SAR IPL SPL IAL SAL PIR PSR AIR ASR PIL PSL AIL ASL'
self.change_header = ''
self.verbose = 0
self.remove_tmp_files = 1
# main
#=======================================================================================================================
def main():
# Parameters for debug mode
if param.debug:
print '\n*** WARNING: DEBUG MODE ON ***\n'
# get path of the testing data
status, path_sct_data = commands.getstatusoutput('echo $SCT_TESTING_DATA_DIR')
param.fname_data = path_sct_data+'/dmri/dwi_moco_mean.nii.gz'
param.orientation = ''
param.change_header = ''
param.remove_tmp_files = 0
param.verbose = 1
else:
# Check input parameters
try:
opts, args = getopt.getopt(sys.argv[1:], 'hi:o:r:s:a:v:')
except getopt.GetoptError:
usage()
if not opts:
usage()
for opt, arg in opts:
if opt == '-h':
usage()
elif opt in '-i':
param.fname_data = arg
elif opt in '-o':
param.fname_out = arg
elif opt in '-r':
param.remove_tmp_files = int(arg)
elif opt in '-s':
param.orientation = arg
elif opt in '-t':
param.threshold = arg
elif opt in '-a':
param.change_header = arg
elif opt in '-v':
param.verbose = int(arg)
# run main program
get_or_set_orientation()
# get_or_set_orientation
#=======================================================================================================================
def get_or_set_orientation():
fsloutput = 'export FSLOUTPUTTYPE=NIFTI; ' # for faster processing, all outputs are in NIFTI
# display usage if a mandatory argument is not provided
if param.fname_data == '':
sct.printv('ERROR: All mandatory arguments are not provided. See usage.', 1, 'error')
# check existence of input files
sct.printv('\ncheck existence of input files...', param.verbose)
sct.check_file_exist(param.fname_data, param.verbose)
# find what to do
if param.orientation == '' and param.change_header is '':
todo = 'get_orientation'
else:
todo = 'set_orientation'
# check if orientation is correct
if check_orientation_input():
sct.printv('\nERROR in '+os.path.basename(__file__)+': orientation is not recognized. Use one of the following orientation: '+param.list_of_correct_orientation+'\n', 1, 'error')
sys.exit(2)
# display input parameters
sct.printv('\nInput parameters:', param.verbose)
sct.printv(' data ..................'+param.fname_data, param.verbose)
# Extract path/file/extension
path_data, file_data, ext_data = sct.extract_fname(param.fname_data)
if param.fname_out == '':
# path_out, file_out, ext_out = '', file_data+'_'+param.orientation, ext_data
fname_out = path_data+file_data+'_'+param.orientation+ext_data
else:
fname_out = param.fname_out
# create temporary folder
sct.printv('\nCreate temporary folder...', param.verbose)
path_tmp = sct.slash_at_the_end('tmp.'+time.strftime("%y%m%d%H%M%S"), 1)
sct.run('mkdir '+path_tmp, param.verbose)
# Copying input data to tmp folder and convert to nii
# NB: cannot use c3d here because c3d cannot convert 4D data.
sct.printv('\nCopying input data to tmp folder and convert to nii...', param.verbose)
sct.run('cp '+param.fname_data+' '+path_tmp+'data'+ext_data, param.verbose)
# go to tmp folder
os.chdir(path_tmp)
# convert to nii format
sct.run('fslchfiletype NIFTI data', param.verbose)
# Get dimensions of data
sct.printv('\nGet dimensions of data...', param.verbose)
nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension('data.nii')
sct.printv(' ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz)+ ' x ' + str(nt), param.verbose)
# if 4d, loop across the data
if nt == 1:
if todo == 'set_orientation':
# set orientation
sct.printv('\nChange orientation...', param.verbose)
if param.change_header is '':
set_orientation('data.nii', param.orientation, 'data_orient.nii')
else:
set_orientation('data.nii', param.change_header, 'data_orient.nii', True)
elif todo == 'get_orientation':
# get orientation
sct.printv('\nGet orientation...', param.verbose)
sct.printv(get_orientation('data.nii'), 1)
else:
# split along T dimension
sct.printv('\nSplit along T dimension...', param.verbose)
sct.run(fsloutput+'fslsplit data data_T', param.verbose)
if todo == 'set_orientation':
# set orientation
sct.printv('\nChange orientation...', param.verbose)
for it in range(nt):
file_data_split = 'data_T'+str(it).zfill(4)+'.nii'
file_data_split_orient = 'data_orient_T'+str(it).zfill(4)+'.nii'
set_orientation(file_data_split, param.orientation, file_data_split_orient)
# Merge files back
sct.printv('\nMerge file back...', param.verbose)
cmd = fsloutput+'fslmerge -t data_orient'
for it in range(nt):
file_data_split_orient = 'data_orient_T'+str(it).zfill(4)+'.nii'
cmd = cmd+' '+file_data_split_orient
sct.run(cmd, param.verbose)
elif todo == 'get_orientation':
sct.printv('\nGet orientation...', param.verbose)
sct.printv(get_orientation('data_T0000.nii'), 1)
# come back to parent folder
os.chdir('..')
# Generate output files
if todo == 'set_orientation':
sct.printv('\nGenerate output files...', param.verbose)
sct.generate_output_file(path_tmp+'data_orient.nii', fname_out)
# Remove temporary files
if param.remove_tmp_files == 1:
sct.printv('\nRemove temporary files...', param.verbose)
sct.run('rm -rf '+path_tmp, param.verbose)
# to view results
if todo == 'set_orientation':
sct.printv('\nDone! To view results, type:', param.verbose)
sct.printv('fslview '+fname_out+' &', param.verbose, 'code')
print
# check_orientation_input
# ==========================================================================================
def check_orientation_input():
"""check if orientation input by user is correct"""
if param.orientation in param.list_of_correct_orientation:
return 0
else:
return -1
# get_orientation (uses FSL)
# ==========================================================================================
def get_orientation(fname):
status, output = sct.run('fslhd '+fname, 0)
# status, output = sct.run('isct_orientation3d -i '+fname+' -get', 0)
# orientation = output[26:]
orientation = output[output.find('sform_xorient')+15:output.find('sform_xorient')+16]+ \
output[output.find('sform_yorient')+15:output.find('sform_yorient')+16]+
| AndreyBalabanov/python_training_mantisBT | fixture/project.py | Python | apache-2.0 | 1,944 | 0.002572 |
from model.project import Project
class ProjectHelper:
def __init__(self, app):
self.app = app
self.project_list_cache = None
def open_page_manage_projects(self):
wd = self.app.wd
wd.find_element_by_link_text("Manage").click()
wd.find_element_by_link_text("Manage Projects").click()
def create(self, project):
wd = self.app.wd
self.open_page_manage_projects()
wd.find_element_by_xpath("//table[3]/tbody/tr[1]/td/form/input[2]").click()
self.fill_add_rpoject_form(project)
wd.find_element_by_css_selector("input.button").click()
wd.find_element_by_link_text("Proceed").click()
self.project_cache = None
def fill_add_rpoject_form(self, project):
wd = self.app.wd
self.change_field_value("name", project.name)
self.change_field_value("description", project.description)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def delete(self, project):
wd = self.app.wd
self.open_page_manage_projects()
wd.find_element_by_link_text(project.name).click()
wd.find_element_by_css_selector("form > input.button").click()
wd.find_element_by_css_selector("input.button").click()
def get_project_list(self):
wd = self.app.wd
self.open_page_manage_projects()
projects_list = wd.find_elements_by_xpath("//table[3]/tbody/tr")[2:]
return [Project(name=p.find_element_by_xpath("td[1]").text) for p in projects_list]
def count(self):
wd = self.app.wd
self.open_page_manage_projects()
return len(wd.find_elements_by_css_selector(".fa.fa-check.fa-lg"))
project_cache = None
| opensvn/test | src/study/python/backward.py | Python | gpl-2.0 | 151 | 0 |
#!/usr/bin/env python
def backword(s):
length = len(s)
i = -1
t = s
while i >= -length:
t += s[i]
i -= 1
return t
| qilicun/python | python2/PyMOTW-1.132/PyMOTW/shlex/shlex_source.py | Python | gpl-3.0 | 1,405 | 0.000712 |
#!/usr/bin/env python
#
# Copyright 2007 Doug Hellmann.
#
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Doug
# Hellmann not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# DOUG HELLMANN DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL DOUG HELLMANN BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""Including content from other files in the token stream.
"""
__version__ = "$Id$"
#end_pymotw_header
import shlex
text = """This text says to source quotes.txt before continuing."""
print 'ORIGINAL:', repr(text)
print
lexer = shlex.shlex(text)
lexer.wordchars += '.'
lexer.source = 'source'
print 'TOKENS:'
for token in lexer:
print repr(token)
| openplans/community-almanac | communityalmanac/tests/functional/test_users.py | Python | agpl-3.0 | 208 | 0.004808 |
from communityalmanac.tests import *
class TestUsersController(TestController):
def test_index(self):
response = self.app.get(url(controller='users', action='index'))
# Test response...
| Yukarumya/Yukarum-Redfoxes | testing/web-platform/tests/tools/pytest/testing/cx_freeze/tox_run.py | Python | mpl-2.0 | 464 | 0.002155 |
"""
|
Called by tox.ini: uses the generated executable to run the tests in
|
./tests/
directory.
.. note:: somehow calling "build/runtests_script" directly from tox doesn't
seem to work (at least on Windows).
"""
if __name__ == '__main__':
import os
import sys
executable = os.path.join(os.getcwd(), 'build', 'runtests_script')
if sys.platform.startswith('win'):
executable += '.exe'
sys.exit(os.system('%s tests' % executable))
| thammegowda/algos | usc-csci-ml/hw5/src/CSCI567_hw5_fall16.py | Python | apache-2.0 | 9,745 | 0.003797 |
# coding: utf-8
'''
Name : ThammeGowda Narayanaswamy
USCID: 2074669439
'''
import math
from scipy.stats import multivariate_normal
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
import scipy as sp
from scipy import spatial
from scipy import stats
from pprint import pprint
blob_file = "hw5_blob.csv"
circle_file = "hw5_circle.csv"
def load_points(f_name):
with open(f_name) as f:
res = []
for l in f:
x,y = l.split(",")
res.append([float(x), float(y)])
return np.array(res)
blobs = load_points(blob_file)
circles = load_points(circle_file)
'''
# In[4]:
plt.plot(*zip(*circles), marker='o', color='r', ls='')
plt.show()
plt.plot(*zip(*blobs), marker='o', color='b', ls='')
plt.show()
'''
# In[5]:
def k_means(k, pts, get_indices=False, silent=True, tol=1e-5):
N = len(pts)
assert k <= N
print("K=%d, N=%d" % (k, N))
# pick random k points
pos = set()
while len(pos) < k:
r = np.random.randint(N)
pos.add(r)
centroids = []
for p in pos:
centroids.append(tuple(pts[p]))
change = float('inf')
conv_tol = 1e-5
itr, max_iters = 0, 100
while change > tol and itr < max_iters:
itr += 1
# assign cluster to each point
asgn = {}
indices = {}
for ct in centroids:
asgn[ct] = []
indices[ct] = []
for idx, pt in enumerate(pts):
mindist = float('inf')
a = None
for ct in centroids:
dist = spatial.distance.cdist([ct], [pt])
if dist < mindist:
mindist = dist
a = ct
asgn[a].append(pt)
indices[a].append(idx)
# compute means of each cluster
oldcentr = centroids
centroids = []
for ct, cluster in asgn.items():
centroids.append(tuple(np.array(cluster).mean(axis=0)))
dist_matrix = spatial.distance.cdist(oldcentr, centroids)
# has distance between each pair of {new, old} centroids
# need the diagonal values
change = dist_matrix.trace()
if not silent:
print("Movement in centroids", change)
return indices if get_indices else asgn
# In[6]:
print("# K Means")
colors = ['r', 'g', 'b', 'y', 'c', 'k']
plt.figure(1, figsize=(15, 10))
plt.title("K Means")
ks = {2,3,5}
dss = {'Blobs': blobs, 'Circles': circles}
j = 1
for title, ds in dss.items():
for k in ks:
clstrs = k_means(k, ds)
plt.subplot(2, 3, j)
i = 0
for cnt, cpts in clstrs.items():
plt.plot(*zip(*cpts), marker='o', color=colors[i], ls='')
i += 1
plt.title("%s , K=%d" % (title, k))
j += 1
plt.show()
# # Kernel
'''
# ## Feature Mapping
# In[7]:
center = [0.0, 0.0]
newdim = sp.spatial.distance.cdist([center], circles).transpose()
clusters = k_means(2, newdim, get_indices=True)
i = 0
for cnt, cpts in clusters.items():
cpts = map(lambda x: circles[x], cpts)
plt.plot(*zip(*cpts), marker='o', color=colors[i], ls='')
i += 1
plt.show()
'''
# ## Kernel K Means
#
# Kernel used :
# 1 - (radius of x1) / (radius of x2)
#
# It ensures that the smaller radius goes to numerator and larger radius goes to denominator - for symmetry and bounding
print("Kernel K means")
class KernelKMeans(object):
def kernel_matrix(self, data, kernel_func):
''' Computes kernel matrix
: params:
data - data points
kernel_func - kernel function
:returns: nxn matrix
'''
n = data.shape[0]
K = np.zeros((n,n), dtype=float)
for i in range(n):
for j in range(n):
K[i,j] = kernel_func(data[i], data[j])
return K
def cluster(self, X, k, kernel_func, max_itr=100, tol=1e-3):
'''
Clusters the points
:params:
X - data points
k - number of clusters
kernel_func - kernel function that outputs smaller values for points in same cluster
:returns: Nx1 vector of assignments
'''
# N
N = X.shape[0]
# NxN matrix from kernel funnction element wise
K = self.kernel_matrix(X, kernel_func)
# equal weightage to all
cluster_weights = np.ones(N)
# Assignments : random assignments to begin with
A = np.random.randint(k, size=N)
for it in xrange(max_itr): # stuck up between 2 local minimas, abort after maxiter
# N x k matrix that stores distance between every point and cluster center
dist = self.compute_dist(K, k, A, sw=cluster_weights)
oldA, A = A, dist.argmin(axis=1)
# Check if it is conveged
n_same = np.sum(np.abs(A - oldA) == 0)
if 1 - float(n_same) / N < tol:
print "Converged at iteration:", it + 1
break
return A
def compute_dist(self, K, k, A, sw):
"""
Computes Nxk distance matrix using kernel matrix
: params:
K - NxN kernel Matrix
k - number of clusters
A - Nx1 Assignments
sw - sample weights
: returns : Nxk distance matrix
"""
dist = np.zeros((K.shape[0], k))
for cl in xrange(k):
mask = A == cl
if np.sum(mask) == 0:
raise Error("ERROR:cluster '%d' is empty. Looks like we cant make %d clusters" % (cl, k))
N_ = sw[mask].sum()
KK = K[mask][:, mask]
dist[:, cl] += np.sum(np.outer(sw[mask], sw[mask]) * KK / (N_*N_))
dist[:, cl] -= 2 * np.sum(sw[mask] * K[:, mask], axis=1) / N_
return dist
def distance(x1, x2):
'''Squared Eucledian distance between 2 points
:params:
x1 - point1
x2 - point2
'''
return np.sum((x1 - x2) ** 2)
def circular_kernel(x1, x2, center=None):
'''This kernel outputs lesser distance for the points that are from circumference
:params:
x1 - first point
x2 - second point
center - center of circle(default = origin (0,0,...))
'''
if center is None:
center = np.zeros(len(x1))
dist1 = distance(x1, center)
dist2 = distance(x2, center)
return 1.0 - min(dist1, dist2) / max(dist1, dist2)
clusters = KernelKMeans().cluster(circles, 2, circular_kernel)
for i in range(k):
cpts = circles[clusters == i]
plt.plot(*zip(*cpts), marker='o', color=colors[i], ls='')
i += 1
plt.show()
# # EM Algorithm with GMM
print("EM Algorithm")
# In[62]:
def multivar_gaussian_pdf(x, mu, covar):
return multivariate_normal.pdf(x, mean=mu, cov=covar)
class EM_GMM(object):
def __init__(self, data, k):
self.data = data
self.k = k
self.N = data.shape[0]
# theta param
self.mean, self.cov, self.weight = [], [], []
# random initialization
A = np.random.randint(k, size=data.shape[0])
for c in range(k):
cpts = data[A == c]
self.mean.append(np.mean(cpts, axis=0))
self.cov.append(np.cov(np.array(cpts).transpose()))
self.weight.append(1.0 * cpts.shape[0] / data.shape[0])
def compute_gamma(self):
gamma = np.zeros((self.N, self.k), dtype=float)
for idx, pt in enumerate(data):
pdf = []
for ct in range(k):
temp = multivar_gaussian_pdf(pt, self.mean[ct], self.cov[ct])
pdf.append(temp * self.weight[ct])
gamma[idx] = np.array(pdf) / sum(pdf)
return gamma
def update_theta(self, P):
weights = P.sum(axis=0)/P.sum()
means = []
covs = []
for i in range(self.k):
nr_mu = (P[:, i:i+1] * self.data).sum(axis=0)
dr_mu = P[:, i].sum(axis=0)
pt_mu = nr_mu / dr_mu
means.append(pt_mu)
for i in range(self.k):
nr_cov = (P[:, i:i+1] * (self.data - means[i])).transpose().dot(self.data - means[i])
dr_cov = P[:, i].sum(axis=0)
covs.append(nr_cov /
| ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyQt4/QtGui/QApplication.py | Python | gpl-2.0 | 14,490 | 0.009455 |
# encoding: utf-8
# module PyQt4.QtGui
# from /usr/lib/python3/dist-packages/PyQt4/QtGui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
class QApplication(__PyQt4_QtCore.QCoreApplication):
"""
QApplication(list-of-str)
QApplication(list-of-str, bool)
QApplication(list-of-str, QApplication.Type)
QApplication(Display, int visual=0, int colormap=0)
QApplication(Display, list-of-str, int visual=0, int cmap=0)
"""
def aboutQt(self): # real signature unknown; restored from __doc__
""" QApplication.aboutQt() """
pass
def activeModalWidget(self): # real signature unknown; restored from __doc__
""" QApplication.activeModalWidget() -> QWidget """
return QWidget
def activePopupWidget(self): # real signature unknown; restored from __doc__
""" QApplication.activePopupWidget() -> QWidget """
return QWidget
def activeWindow(self): # real signature unknown; restored from __doc__
""" QApplication.activeWindow() -> QWidget """
return QWidget
def alert(self, QWidget, int_msecs=0): # real signature unknown; restored from __doc__
""" QApplication.alert(QWidget, int msecs=0) """
pass
def allWidgets(self): # real signature unknown; restored from __doc__
""" QApplication.allWidgets() -> list-of-QWidget """
pass
def autoSipEnabled(self): # real signature unknown; restored from __doc__
""" QApplication.autoSipEnabled() -> bool """
return False
def beep(self): # real signature unknown; restored from __doc__
""" QApplication.beep() """
pass
def changeOverrideCursor(self, QCursor): # real signature unknown; restored from __doc__
""" QApplication.changeOverrideCursor(QCursor) """
pass
def clipboard(self): # real signature unknown; restored from __doc__
""" QApplication.clipboard() -> QClipboard """
return QClipboard
def closeAllWindows(self): # real signature unknown; restored from __doc__
""" QApplication.closeAllWindows() """
pass
def colorSpec(self): # real signature unknown; restored from __doc__
""" QApplication.colorSpec() -> int """
return 0
def commitData(self, QSessionManager): # real signature unknown; restored from __doc__
""" QApplication.commitData(QSessionManager) """
pass
def commitDataRequest(self, *args, **kwargs): # real signature unknown
""" QApplication.commitDataRequest[QSessionManager] [signal] """
pass
def cursorFlashTime(self): # real signature unknown; restored from __doc__
""" QApplication.cursorFlashTime() -> int """
return 0
def desktop(self): # real signature unknown; restored from __doc__
""" QApplication.desktop() -> QDesktopWidget """
return QDesktopWidget
def desktopSettingsAware(self): # real signature unknown; restored from __doc__
""" QApplication.desktopSettingsAware() -> bool """
return False
def doubleClickInterval(self): # real signature unknown; restored from __doc__
""" QApplication.doubleClickInterval() -> int """
return 0
def event(self, QEvent): # real signature unknown; restored from __doc__
""" QApplication.event(QEvent) -> bool """
return False
def exec(self): # real signature unknown; restored from __doc__
""" QApplication.exec() -> int """
return 0
def exec_(self): # real signature unknown; restored from __doc__
""" QApplication.exec_() -> int """
return 0
def focusChanged(self, *args, **kwargs): # real signature unknown
""" QApplication.focusChanged[QWidget, QWidget] [signal] """
pass
def focusWidget(self): # real signature unknown; restored from __doc__
""" QApplication.focusWidget() -> QWidget """
return QWidget
def font(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QApplication.font() -> QFont
QApplication.font(QWidget) -> QFont
QApplication.font(str) -> QFont
"""
return QFont
def fontDatabaseChanged(self, *args, **kwargs): # real signature unknown
""" QApplication.fontDatabaseChanged [signal] """
pass
def fontMetrics(self): # real signature unknown; restored from __doc__
""" QApplication.fontMetrics() -> QFontMetrics """
return QFontMetrics
def globalStrut(self): # real signature unknown; restored from __doc__
""" QApplication.globalStrut() -> QSize """
pass
def inputContext(self): # real signature unknown; restored from __doc__
""" QApplication.inputContext() -> QInputContext """
return QInputContext
def isEffectEnabled(self, Qt_UIEffect): # real signature unknown; restored from __doc__
""" QApplication.isEffectEnabled(Qt.UIEffect) -> bool """
return False
def isLeftToRight(self): # real signature unknown; restored from __doc__
""" QApplication.isLeftToRight() -> bool """
return False
def isRightToLeft(self): # real signature unknown; restored from __doc__
""" QApplication.isRightToLeft() -> bool """
return False
def isSessionRestored(self): # real signature unknown; restored from __doc__
""" QApplication.isSessionRestored() -> bool """
return False
def keyboardInputDirection(self): # real signature unknown; restored from __doc__
""" QApplication.keyboardInputDirection() -> Qt.LayoutDirection """
pass
def keyboardInputInterval(self): # real signature unknown; restored from __doc__
""" QApplication.keyboardInputInterval() -> int """
return 0
def keyboardInputLocale(self): # real signature unknown; restored from __doc__
""" QApplication.keyboardInputLocale() -> QLocale """
pass
def keyboardModifiers(self): # real signature unknown; restored from __doc__
""" QApplication.keyboardModifiers() -> Qt.KeyboardModifiers """
pass
def lastWindowClosed(self, *args, **kwargs): # real signature unknown
""" QApplication.lastWindowClosed [signal] """
pass
def layoutDirection(self): # real signature unknown; restored from __doc__
""" QApplication.layoutDirection() -> Qt.LayoutDirection """
pass
def mouseButtons(self): # real signature unknown; restored from __doc__
""" QApplication.mouseButtons() -> Qt.MouseButtons """
pass
def notify(self, QObject, QEvent): # real signature unknown; restored from __doc__
""" QApplication.notify(QObject, QEvent) -> bool """
return False
def overrideCursor(self): # real signature unknown; restored from __doc__
""" QApplication.overrideCursor() -> QCursor """
return QCursor
def palette(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QApplication.palette() -> QPalette
QApplication.palette(QWidget) -> QPalette
QApplication.palette(str) -> QPalette
"""
return QPalette
def queryKeyboardModifiers(self): # real signature unknown; restored from __doc__
""" QApplication.queryKeyboardModifiers() -> Qt.KeyboardModifiers """
pass
def quitOnLastWindowClosed(self): # real signature unknown; restored from __doc__
""" QApplication.quitOnLastWindowClosed() -> bool """
return False
def restoreOverrideCursor(self): # real signature unknown; restored from __doc__
""" QApplication.restoreOverrideCursor() """
pass
def saveState(self, QSessionManager): # real signature unknown; restored from __doc__
""" QApplication.saveState(QSessionManager) """
pass
def saveStateRequest(self, *args, **kwargs): # real signature unknown
""" QApplication.saveStateRequest[QSessionManager] [signal] """
pass
def sessionId(self): # real signature unknown; restored from __doc__
""" QApplication.sessionId()
| sirk390/coinpy | coinpy-lib/src/coinpy/lib/blockchain/bsddb/serialization/s11n_disktxpos.py | Python | lgpl-3.0 | 804 | 0.002488 |
from coinpy.lib.serialization.common.field import Field
from coinpy.lib.serialization.common.structure import Structure
from coinpy.lib.blockchain.bsddb.objects.disktxpos import DiskTxPos
class DiskTxPosSerializer():
DISKTXPOS = Structure([Field("<I", "file"),
Field("<I", "blockpos"),
Field("<I", "txpos")], "disktxpos")
def serialize(self, disktxpos_obj):
return (self.DISKTXPOS.serialize([disktxpos_obj.file,
disktxpos_obj.blockpos,
disktxpos_obj.txpos]))
def deserialize(self, data, cursor=0):
(file, nblockpos, ntxpos), cursor = self.DISKTXPOS.deserialize(data, cursor)
return (DiskTxPos(file, nblockpos, ntxpos), cursor)
| steko/totalopenstation | docs/conf.py | Python | gpl-3.0 | 9,411 | 0.006057 |
# -*- coding: utf-8 -*-
#
# Total Open Station documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 28 23:03:04 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('../totalopenstation'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Total Open Station'
copyright = '2015-2020, Stefano Costa, Damien Gaignon and Luca Bianconi'
author = 'Stefano Costa'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5'
# The full version, including alpha/beta/rc tags.
release = '0.5.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build',
'global.rst',
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
rst_prolog = """
.. include:: /global.rst
"""
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_user': 'totalopenstation',
'github_repo': 'totalopenstation',
'github_type': 'star',
'github_count': 'true',
'github_button': True,
'description': 'Download and export field survey data from your total station'
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "tops.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'TotalOpenStationdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '12pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'TotalOpenStation.tex', 'Total Open Station Documentation',
'Stefano Costa, Damien Gaignon, Luca Bianconi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('totalopenstation-cli-parser',
'totalopenstation-cli-parser',
'Total Open Station command line converter',
['Stefano Costa, Luca Bianconi'],
1),
(
| Ruide/angr-dev | simuvex/simuvex/plugins/cgc.py | Python | bsd-2-clause | 86 | 0.011628 |
print '... Importing simuvex/plugins/cgc.py ...'
from angr.state_plugins.cgc import *
| lnls-sirius/dev-packages | siriuspy/siriuspy/clientconfigdb/types/si_tunecorr_params.py | Python | gpl-3.0 | 1,290 | 0 |
"""SI tune correction configuration.
Values in _template_dict are arbitrary. They are used just to compare with
corresponding values when a new configuration is tried to be inserted in the
servconf database.
"""
from copy import deepcopy as _dcopy
def get_dict():
"""Return configuration t
|
ype dictionary."""
module_name = __name__.split('.')[-1]
_dict = {
'config_type_name': module_name,
'value': _dcopy(_template_dict)
}
return _dict
# Tune Correction Parameters for Storage Ring
#
# | DeltaTuneX | | m00 m01...m07 | | KL SI QFA |
# | | = | | * | . |
# | DeltaTuneY | | m10 m11...m17 | | . |
# | . |
# | KL SI QDP2 |
# Where (1+f)KL = KL + DeltaKL.
#
# Correction Matrix of Svd and Additional Method
# (obtained by matlab lnls_calc_tunecorr_params routine)
# m(0,0) m(0,1)...m(0,7)
# m(1,0) m(1,1)...m(1,7)
#
# Nominals KLs
# [quadrupole_order QFA QFB QFP QDA QDB1 QDB2 QDP1 QDP2]
_template_dict = {
'matrix': [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
'nominal KLs': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
}
| TheQtCompany/git-repo | subcmds/diff.py | Python | apache-2.0 | 1,328 | 0.004518 |
# -*- coding:utf-8 -*-
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from command import PagedCommand
class Diff(PagedCommand):
common = True
helpSummary = "Show changes between commit and working tree"
helpUsage = """
%prog [<project>...]
The -u option causes '%prog' to generate diff output with file paths
relative to the repository root, so the output can be applied
to the Unix 'patch' command.
"""
def _Options(self, p):
p.add_option('-u', '--absolute',
dest='absolute', action='store_true',
help='Paths are relative to the repository root')
def Execute(self, opt, args):
ret = 0
for project in self.GetProjects(args):
if not project.PrintWorkTreeDiff(opt.absolute):
ret = 1
return ret
| OCA/stock-logistics-warehouse | stock_request_picking_type/__init__.py | Python | agpl-3.0 | 133 | 0 |
# Copyright 2019 Open Source Integrators
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl.html).
from . import models
| enolfc/keystone-voms | tests/test_middleware_voms_authn.py | Python | apache-2.0 | 23,887 | 0 |
# Copyright 2012 Spanish National Research Council
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystone import config
from keystone import exception as ks_exc
from keystone.assignment import controllers
from keystone import middleware
from keystone import tests
from keystone.tests import default_fixtures
from keystone.tests.ksfixtures import database
from keystone.tests import test_auth
from keystone.tests import test_middleware
import keystone_voms.core as ks_voms
from keystone_voms import exception
CONF = config.CONF
# fake proxy from a fake cert from a fake ca
user_dn = "/C=ES/O=FAKE CA/CN=Fake User"
user_vo = "dteam"
valid_cert = """-----BEGIN CERTIFICATE-----
MIIGNjCCBZ+gAwIBAgIUI6TVyFmQEXRIq6FOHrmHtb56XDMwDQYJKoZIhvcNAQEF
BQAwMzELMAkGA1UEBhMCRVMxEDAOBgNVBAoTB0ZBS0UgQ0ExEjAQBgNVBAMTCUZh
a2UgVXNlcjAeFw0xMjA4MzAxNDI2MjBaFw0yNDAxMjcwNTMxMjBaMEgxCzAJBgNV
BAYTAkVTMRAwDgYDVQQKEwdGQUtFIENBMRIwEAYDVQQDEwlGYWtlIFVzZXIxEzAR
BgNVBAMTCjE3MDAwOTE3MTMwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALNI
YdjO2XueOPtSEp2GeshPQuRvXl4937vz4WPu9hVemuxS83kXfi2PP9FAoP5lQv4g
+RXStuOy47Cr2Qc6OYg6+YUPTWlQAIFVnLlDgsNvxhqG4YvQwIEsy6n1Q/TjnbKZ
LG2qNRMfUR+I7EhPKqyZW1PLUoKP30MNo++eJW8XAgMBAAGjggQwMIIELDCCA94G
CisGAQQBvkVkZAUEggPOMIIDyjCCA8YwggPCMIIDKwIBATA+oDwwN6Q1MDMxCzAJ
BgNVBAYTAkVTMRAwDgYDVQQKEwdGQUtFIENBMRIwEAYDVQQDEwlGYWtlIFVzZXIC
AQagSjBIpEYwRDELMAkGA1UEBhMCRVMxEDAOBgNVBAoTB0ZBS0UgQ0ExIzAhBgNV
BAMTGmhvc3QvZmFrZS52b21zLXNlcnZlci5mYWtlMA0GCSqGSIb3DQEBBQUAAgEB
MCIYDzIwMTIwODMwMTQzMTIwWhgPMjAyNDAxMjcwNTMxMjBaMEIwQAYKKwYBBAG+
RWRkBDEyMDCgCoYIZHRlYW06Ly8wIgQgL2R0ZWFtL1JvbGU9TlVMTC9DYXBhYmls
aXR5PU5VTEwwggIeMIIB7gYKKwYBBAG+RWRkCgSCAd4wggHaMIIB1jCCAdIwggE7
AgEEMA0GCSqGSIb3DQEBBAUAMB8xEDAOBgNVBAoTB0ZBS0UgQ0ExCzAJBgNVBAYT
AkVTMB4XDTEyMDgyOTE3MzY0OVoXDTQwMDExNDE3MzY0OVowRDELMAkGA1UEBhMC
RVMxEDAOBgNVBAoTB0ZBS0UgQ0ExIzAhBgNVBAMTGmhvc3QvZmFrZS52b21zLXNl
cnZlci5mYWtlMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC/9bo6pA8fcuo4
2+CDV430nKykGB4mqsKqHkFCD8kRduW4eFdWrSXitqKRlw9/8hLmbsu5abPa/P99
VekJPCbZwtIm+3M1qGlJ+TonTWbBQakvOmPnoLH+/uppssyRulGj61AlnR20ByRo
2DbrSTThbdkztGOmZmQf2gzRGGtbxQIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAH/g
EMVvDtgNaxzH5UYRubvapReeqspS5mYndaGFaztOJQ6pv1Qa7/LpkeYOxrXX+xWm
dYdXvHIYbMkc/pO0PyV/TIOb8EcgC/Gs3idZSHUxhcsk8IcpcwCrPczpu2JC+N5z
LTkbcREjevF7WFlPMlOq2IVEIVBo95uQaS3TdmJHMAkGA1UdOAQCBQAwHwYDVR0j
BBgwFoAUMXhLHLSgWZoV/Y8KaT6VOIQNVNQwDQYJKoZIhvcNAQEFBQADgYEAbngH
D69ViU3UsIbUlmr8a7pMhRSJRnXsO0xzg0rwy3g5KPqJM1zYYdNufHJkOdW+gjd5
w52n/zbwtXOwAW7xf9w+xQ1/gyj5Kb8Ob/iW3x4Qs0a3OEaWFyqTvN7J3vP91Qaz
S12lLPSLPdP6sFe0ODf3ZQOv19aN/eW8On2WIHMwDQYDVR0PAQH/BAMDAQAwDAYD
VR0TAQH/BAIwADAJBgNVHSMEAjAAMCAGCCsGAQUFBwEOAQH/BBEwDwIBATAKBggr
BgEFBQcVATANBgkqhkiG9w0BAQUFAAOBgQCPjeviQf/CbAh4z+0KtIgd7YLOiZiw
FcJwC/Z2+zm54d1SCCFMCCygKe5tu/gSLaEcRky6P1lG/0vG/7DxLiu37xQ15Mae
O32z0LuL+XkC3k8C+3aH0ht1cW+zwR4bBQax7rphByuY2Wgwf1TFlYdMU0eZ7akj
W5Rbega2GkADBQ==
-----END CERTIFICATE----- """
valid_cert_chain = """-----BEGIN CERTIFICATE-----
MIIBwTCCASoCAQYwDQYJKoZIhvcNAQEEBQAwHzEQMA4GA1UEChMHRkFLRSBDQTEL
MAkGA1UEBhMCRVMwHhcNMTIwODMwMTIxMjU0WhcNNDAwMTE1MTIxMjU0WjAzMQsw
CQYDVQQGEwJFUzEQMA4GA1UEChMHRkFLRSBDQTESMBAGA1UEAxMJRmFrZSBVc2Vy
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDE4WuwYDT+ke9olzMIZ+gTwDl1
cajAIvp6jVl40YYV2CgUdQo0CSj/bmB+y6U3GCdpp0HKNoAbCuYsyyUtqedgMy2D
x+We/3f005jQvSLtrnK3k8Nw2qwkClObKhyLw5j0iH0sx0PWbr4mIcic2AY8gWiM
OshoESxjXETMkqgQpQIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAA9KBCfPLuWJWKN/
X+MgdJfMtg9MbfrKwQbmoxIS7qCEe2OUNs4BvHEnp7lBMJkaoSjhvFDOFMKaXmfz
Kl441BisyU4Pz8fHU5dj4Z7pPD7i71f1oK/42kZZWFEkoJxOU4Vu/fHr9DXdrBVH
9sFWctb8TM20AtJmYE/n+M1G6Foj
-----END CERTIFICATE-----"""
valid_cert_no_tenant = """-----BEGIN CERTIFICATE-----
MIIGMDCCBZmgAwIBAgIUdvt3rmPnrq2Kyoi6oKdeSb7Ye4EwDQYJKoZIhvcNAQEF
BQAwMzELMAkGA1UEBhMCRVMxEDAOBgNVBAoTB0ZBS0UgQ0ExEjAQBgNVBAMTCUZh
a2UgVXNlcjAeFw0xMjA4MzAxNDI5NTVaFw0yNDAxMjcwNTM0NTVaMEgxCzAJBgNV
BAYTAkVTMRAwDgYDVQQKEwdGQUtFIENBMRIwEAYDVQQDEwlGYWtlIFVzZXIxEzAR
BgNVBAMTCjE3MDAwOTE3MTMwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALgC
BIZSxt4X4Hxuapff694eHx9pjpdpfnuU5E/zuv6qNjoZn10WzngonodRG6YGjY5r
yWZm2yplAWVXuZNMD7qOo3ToeBVhl5sK8dS/dCtrCrKcAoQCAq3CdOM/cUJyDW3m
I7hYvw0BfyuOAgqZuz2trGoObHhS3HrwuNgzAYnZAgMBAAGjggQqMIIEJjCCA9gG
CisGAQQBvkVkZAUEggPIMIIDxDCCA8AwggO8MIIDJQIBATA+oDwwN6Q1MDMxCzAJ
BgNVBAYTAkVTMRAwDgYDVQQKEwdGQUtFIENBMRIwEAYDVQQDEwlGYWtlIFVzZXIC
AQagSjBIpEYwRDELMAkGA1UEBhMCRVMxEDAOBgNVBAoTB0ZBS0UgQ0ExIzAhBgNV
BAMTGmhvc3QvZmFrZS52b21zLXNlcnZlci5mYWtlMA0GCSqGSIb3DQEBBQUAAgEB
MCIYDzIwMTIwODMwMTQzNDU1WhgPMjAyNDAxMjcwNTM0NTVaMDwwOgYKKwYBBAG+
RWRkBDEsMCqgFIYSbm9fc3VwcG9ydGVkX3ZvOi8vMBIEEC9ub19zdXBwb3J0ZWRf
dm8wggIeMIIB7gYKKwYBBAG+RWRkCgSCAd4wggHaMIIB1jCCAdIwggE7AgEEMA0G
CSqGSIb3DQEBBAUAMB8xEDAOBgNVBAoTB0ZBS0UgQ0ExCzAJBgNVBAYTAkVTMB4X
DTEyMDgyOTE3MzY0OVoXDTQwMDExNDE3MzY0OVowRDELMAkGA1UEBhMCRVMxEDAO
BgNVBAoTB0ZBS0UgQ0ExIzAhBgNVBAMTGmhvc3QvZmFrZS52b21zLXNlcnZlci5m
YWtlMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC/9bo6pA8fcuo42+CDV430
nKykGB4mqsKqHkFCD8kRduW4eFdWrSXitqKRlw9/8hLmbsu5abPa/P99VekJPCbZ
wtIm+3M1qGlJ+TonTWbBQakvOmPnoLH+/uppssyRulGj61AlnR20ByRo2DbrSTTh
bdkztGOmZmQf2gzRGGtbxQIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAH/gEMVvDtgN
axzH5UYRubvapReeqspS5mYndaGFaztOJQ6pv1Qa7/LpkeYOxrXX+xWmdYdXvHIY
bMkc/pO0PyV/TIOb8EcgC/Gs3idZSHUxhcsk8IcpcwCrPczpu2JC+N5zLTkbcREj
evF7WFlPMlOq2IVEIVBo95uQaS3TdmJHMAkGA1UdOAQCBQAwHwYDVR0jBBgwFoAU
MXhLHLSgWZoV/Y8KaT6VOIQNVNQwDQYJKoZIhvcNAQEFBQADgYEACztWoNeofMnd
das5pTFA8WJgrMXa8BslqM+hm/VPwA+4MoGMxQadDQGzuLSp9yMHcYzvj+Gimjs4
WZHAshZdd6E9S2hQoDRUpQguu5CNeKdJ7uUb+QQinTD6y3DjdxCFE10pFunYEMnY
2JSJbEqm32ybnFPdBBqqYlb3gXGEVQwwDQYDVR0PAQH/BAMDAQAwDAYDVR0TAQH/
BAIwADAJBgNVHSMEAjAAMCAGCCsGAQUFBwEOAQH/BBEwDwIBATAKBggrBgEFBQcV
ATANBgkqhkiG9w0BAQUFAAOBgQAQjXxCkLajAedCNqIYw1L5qlWT71sF2FgSoyEk
B7iMyweroDP90CzR1DIwWj5yGr138Z3jvDvFRzQpUrZa4hsPck/zmO/lTB+6iA/U
V5PvMZQ8wMyfMlSiFQNvWm7weNlFpvUpNRHQQj3FLb8L55RhtONIYFRzTIS9du3P
c8Dc+w==
-----END CERTIFICATE-----"""
def get_auth_body(tenant=None):
d = {"auth": {"voms": True}}
if tenant is not None:
d["auth"]["tenantName"] = tenant
return d
def prepare_request(body=None, cert=None, chain=None):
req = test_middleware.make_request()
if body:
req.environ[middleware.PARAMS_ENV] = body
if cert:
req.environ[ks_voms.SSL_CLIENT_CERT_ENV] = cert
if chain:
req.environ[ks_voms.SSL_CLIENT_CERT_CHAIN_ENV_PREFIX +
"0"] = chain
return req
class MiddlewareVomsAuthn(tests.TestCase):
def setUp(self):
super(MiddlewareVomsAuthn, self).setUp()
self.config([tests.dirs.etc('keystone.conf.sample'),
tests.dirs.tests_conf('keystone_voms.conf')])
self.useFixture(database.Database())
self.load_backends()
self.load_fixtures(default_fixtures)
self.tenant_name = default_fixtures.TENANTS[0]['name']
CONF.voms.voms_policy = tests.dirs.tests_conf("voms.json")
def test_middleware_proxy_unscoped(self):
"""Verify unscoped request."""
req = prepare_request(get_auth_body(),
valid_cert,
valid_cert_chain)
aux = ks_voms.VomsAuthNMiddleware(None)
aux._no_verify = True
aux._process_request(req)
user_out = req.environ['REMOTE_USER']
params = req.environ[middleware.PARAMS_ENV]
self.assertEqual(user_out, user_dn)
self.assertNotIn("tenantName", params)
def test_middleware_proxy_scoped(self):
"""Verify scoped request."""
req = prepare_request(get_auth_body(tenant=self.tenant_name),
valid_cert,
valid_cert_chain)
aux = ks_voms.VomsAuthNMiddleware(None)
aux._no_verify = True
aux._proce
| chewse/djangorestframework-signed-permissions | signedpermissions/__init__.py | Python | mit | 174 | 0 |
# -*- coding: utf-8 -*-
from .permissions import SignedPermission  # noqa
from .signing import sign_filter_permissions # noqa
from .views import SignedViewSetMixin  # noqa
| andymeneely/attack-surface-metrics | attacksurfacemeter/loaders/cflow_line_parser.py | Python | mit | 1,154 | 0 |
__author__ = 'kevin'
import re
from attacksurfacemeter.loaders.base_line_parser import BaseLineParser
class CflowLineParser(BaseLineParser):
""""""
_instance = None
@staticmethod
def get_instance(cflow_line=None):
if CflowLineParser._instance is None:
CflowLineParser._instance = CflowLineParser()
CflowLineParser._instance.load(cflow_line)
return CflowLineParser._instance
indent = " "
def __init__(self):
super(CflowLineParser, self).__init__()
self._level = 0
def load(self, cflow_line):
self.__init__()
split_line = cflow_line.split(CflowLineParser.indent)
function_info = split_line[-1].strip()
self._level = len(split_line) - 1
function_name = re.search(r"(\w+\(\))", function_info).group(0)
self._function_name = function_name[:function_name.index('(')]
match = re.search(r"(?:at\s)(\..*)(?::\d+>)", function_info)
if match:
self._function_signature = match.group(1)
def get_level(self, cflow_line=None):
self._load_if_new(cflow_line)
return self._level
| tks0123456789/kaggle-Otto | exp_XGB_CF_tc_mb_mf_ntree.py | Python | mit | 6,574 | 0.009735 |
"""
Experiment for XGBoost + CF
Aim: To find the best tc(max_depth), mb(min_child_weight), mf(colsample_bytree * 93), ntree
tc: [13, 15, 17]
mb: [5, 7, 9]
mf: [40, 45, 50, 55, 60]
ntree: [160, 180, 200, 220, 240, 260, 280, 300, 320, 340, 360]
Averaging 20 models
Summary
Best
loss ntree
mf 40 45 50 55 60 40 45 50 55 60
tc mb
13 5 0.4471 0.4471 0.4473 0.4471 0.4476 300 300 280 280 260
7 0.4477 0.4475 0.4469 0.4472 0.4481 340 320 300 300 300
9 0.4485 0.4484 0.4487 0.4488 0.4487 360 360 340 340 340
15 5 0.4471 *0.4465* 0.4471 0.4476 0.4478 260 *260* 240 240 240
7 0.4473 0.4468 0.4473 0.4474 0.4478 300 280 260 260 260
9 0.4483 0.4480 0.4483 0.4484 0.4492 340 320 300 300 280
17 5 0.4471 0.4472 0.4474 0.4476 0.4478 240 240 220 220 200
7 0.4474 0.4470 0.4468 0.4475 0.4473 280 260 260 240 240
9 0.4481 0.4480 0.4476 0.4480 0.4486 320 300 280 260 260
Time: 1 day, 7:37:21 on i7-4790k 32G MEM GTX660
"""
import numpy as np
import scipy as sp
import pandas as pd
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import log_loss
from datetime import datetime
import os
from sklearn.grid_search import ParameterGrid
import xgboost as xgb
from utility import *
path = os.getcwd() + '/'
path_log = path + 'logs/'
file_train = path + 'train.csv'
training = pd.read_csv(file_train, index_col = 0)
num_train = training.shape[0]
y = training['target'].values
yMat = pd.get_dummies(training['target']).values
X = training.iloc[:,:93].values
kf = StratifiedKFold(y, n_folds=5, shuffle = True, random_state = 345)
for train_idx, valid_idx in kf:
break
y_train_1 = yMat[train_idx].argmax(1)
y_train = yMat[train_idx]
y_valid = yMat[valid_idx]
X2, ignore = count_feature(X)
dtrain , dvalid= xgb.DMatrix(X2[train_idx], label = y_train_1), xgb.DMatrix(X2[valid_idx])
#
nIter = 20
nt = 360
nt_lst = range(160, 370, 20)
nt_len = len(nt_lst)
bf = .8 # subsample
sh = .1 # eta
# tc:max_depth, mb:min_child_weight, mf(max features):colsample_bytree * 93
param_grid = {'tc':[13, 15, 17], 'mb':[5, 7, 9], 'mf':[40, 45, 50, 55, 60]}
scores = []
t0 = datetime.now()
for params in ParameterGrid(param_grid):
tc = params['tc']
mb = params['mb']
mf = params['mf']
cs = float(mf) / X.shape[1]
print tc, mb, mf
predAll = [np.zeros(y_valid.shape) for k in range(nt_len)]
for i in range(nIter):
seed = 112233 + i
param = {'bst:max_depth':tc, 'bst:eta':sh,'objective':'multi:softprob','num_class':9,
'min_child_weight':mb, 'subsample':bf, 'colsample_bytree':cs,
'silent':1, 'nthread':8, 'seed':seed}
plst = param.items()
bst = xgb.train(plst, dtrain, nt)
for s in range(nt_len):
ntree = nt_lst[s]
pred = bst.predict(dvalid, ntree_limit = ntree).reshape(y_valid.shape)
predAll[s] += pred
scores.append({'tc':tc, 'mb':mb, 'mf':mf, 'ntree':ntree, 'nModels':i+1, 'seed':seed,
'valid':log_loss(y_valid, pred),
'valid_avg':log_loss(y_valid, predAll[s] / (i+1))})
print scores[-4], datetime.now() - t0
df = pd.DataFrame(scores)
if os.path.exists(path_log) is False:
print 'mkdir', path_log
os.mkdir(path_log)
df.to_csv(path_log + 'exp_XGB_CF_tc_mb_mf_ntree.csv')
keys = ['tc', 'mb', 'mf', 'ntree']
grouped = df.groupby(keys)
pd.set_option('display.precision', 5)
print pd.DataFrame({'loss':grouped['valid_avg'].last().unstack().min(1),
'ntree':grouped['valid_avg'].last().unstack().idxmin(1)}).unstack()
# loss ntree
# mf 40 45 50 55 60 40 45 50 55 60
# tc mb
# 13 5 0.4471 0.4471 0.4473 0.4471 0.4476 300 300 280 280 260
# 7 0.4477 0.4475 0.4469 0.4472 0.4481 340 320 300 300 300
# 9 0.4485 0.4484 0.4487 0.4488 0.4487 360 360 340 340 340
# 15 5 0.4471 0.4465 0.4471 0.4476 0.4478 260 260 240 240 240
# 7 0.4473 0.4468 0.4473 0.4474 0.4478 300 280 260 260 260
# 9 0.4483 0.4480 0.4483 0.4484 0.4492 340 320 300 300 280
# 17 5 0.4471 0.4472 0.4474 0.4476 0.4478 240 240 220 220 200
# 7 0.4474 0.4470 0.4468 0.4475 0.4473 280 260 260 240 240
# 9 0.4481 0.4480 0.4476 0.4480 0.4486 320 300 280 260 260
print pd.DataFrame({'loss':grouped['valid'].mean().unstack().min(1),
'ntree':grouped['valid'].mean().unstack().idxmin(1)}).unstack()
# loss ntree
# mf 40 45 50 55 60 40 45 50 55 60
# tc mb
# 13 5 0.4563 0.4564 0.4564 0.4561 0.4566 280 260 260 260 240
# 7 0.4565 0.4563 0.4557 0.4561 0.4569 320 300 300 300 280
# 9 0.4571 0.4569 0.4571 0.4573 0.4570 340 340 320 300 300
# 15 5 0.4567 0.4559 0.4565 0.4571 0.4571 260 240 240 220 220
# 7 0.4565 0.4558 0.4562 0.4564 0.4568 280 260 260 260 240
# 9 0.4570 0.4567 0.4570 0.4570 0.4577 300 300 280 280 260
# 17 5 0.4568 0.4569 0.4570 0.4572 0.4574 220 220 200 200 200
# 7 0.4567 0.4563 0.4559 0.4567 0.4564 260 240 240 220 220
# 9 0.4571 0.4569 0.4565 0.4567 0.4573 280 280 260 260 240
#
criterion = df.apply(lambda x: x['tc']==15 and x['mb']==5 and x['mf']==45, axis = 1)
grouped = df[criterion].groupby('ntree')
g = grouped[['valid']].mean()
g['valid_avg'] = grouped['valid_avg'].last()
print g
# valid valid_avg
# ntree
# 160 0.461023 0.452912
# 180 0.458513 0.450111
# 200 0.456939 0.448232
# 220 0.456147 0.447141
# 240 0.455870 0.446598
# 260 0.456097 0.446525
# 280 0.456657 0.446827
# 300 0.457434 0.447327
# 320 0.458462 0.448101
# 340 0.459635 0.449036
# 360 0.460977 0.450160
ax = g.plot()
ax.set_title('XGB+CF max_depth=15\n min_child_weight=5, colsample_bytree=45/93.')
ax.set_ylabel('Logloss')
fig = ax.get_figure()
fig.savefig(path_log + 'exp_XGB_CF_tc_mb_mf_ntree.png')
|
mjenrungrot/competitive_programming
|
Facebook Hackercup/2020/Round 1/A1.py
|
Python
|
mit
| 3,016
| 0.001658
|
import math
from collections import deque
def run():
N, K, W = list(map(int, input().split()))
Ls = list(map(int, input().split()))
A_L, B_L, C_L, D_L = list(map(int, input().split()))
Hs = list(map(int, input().split()))
A_H, B_H, C_H, D_H = list(map(int, input().split()))
for i in range(K+1, N+1):
Li = ((A_L * Ls[-2] + B_L * Ls[-1] + C_L) % D_L) + 1
Ls.append(Li)
Hi = ((A_H * Hs[-2] + B_H * Hs[-1] + C_H) % D_H) + 1
Hs.append(Hi)
MODULO = 1000000007
height_queue = deque()
curr_perim = 2 * W + 2 * Hs[0]
ending_x = Ls[0] + W
height_queue_len = 1
height_queue.append((Hs[0], ending_x))
ans = curr_perim
# print("\ti = 0 curr = {}".format(curr_perim))
for i in range(1, N):
starting_x = Ls[i]
while height_queue_len > 0 and height_queue[0][1] < starting_x:
height_queue.popleft()
height_queue_len -= 1
# print(height_queue)
if starting_x > ending_x:
ending_x = Ls[i] + W
curr_perim += 2 * W + 2 * Hs[i]
height_queue.append((Hs[i], ending_x))
height_queue_len += 1
else:
# print("\tCase 2 [Adding = {} {} {}]".format(Ls[i], W, Hs[i]))
new_ending_x = Ls[i] + W
excess_x = (ending_x - starting_x)
curr_perim -= 2 * excess_x
# print("-2*{}".format(excess_x))
curr_perim += 2 * W
# print("+2*{}".format(W))
"""
2 5
___________
| ___|_________
| | |
4| | | 3
|_______|____________|
4 7
"""
max_height = -1
while height_queue_len > 0 and Hs[i] >= height_queue[0][0]:
                max_height = max(max_height, height_queue[0][0])
height_queue.popleft()
height_queue_len -= 1
height_queue.append((Hs[i], new_ending_x))
height_queue_len += 1
if max_height > -1:
curr_perim -= max_height
curr_perim += abs(Hs[i] - max_height) + Hs[i]
# print("adding
|
{} {}".format(abs(Hs[i] - max_height), Hs[i]))
""" ________________
| |
2 5| |
___________| |
| ___|_________ | 6
| | | | |
4| | | | 3 |
|_______|__|_________|____|
3 4 5 7 8
"""
ending_x = new_ending_x
# print("\ti = {} {}".format(i, curr_perim))
ans = (ans * curr_perim) % MODULO
return ans
if __name__ == '__main__':
T = int(input())
for i in range(1, T+1):
print("Case #{}: {}".format(i, run()))
|
lawrencejones/neuro
|
iz/ModularFocalNetwork.py
|
Python
|
gpl-3.0
| 1,792
| 0.00279
|
"""
Examples
========
ModularFocalNetwork(8, [1600, 800], 4).plot() => 8 modules, 4 connections to each neuron
"""
import numpy as np
from Plotters import plot_connectivity_matrix
def range_from_base(base, size):
return xrange(base, base + size)
class ModularFocalNetwork(object):
def __init__(self, C, dim, focal_width):
"""
Generates connectivity matrix for a modular network with...
C -- # communities/modules
dim -- dimensions of matrix, [nodes_in_target_layer, nodes_in_input_layer]
        focal_width -- how many connections per node in the target layer
Each community will have an even number of nodes, where each node has focal_width
connections from randomly chosen nodes in the input layer.
        CIJ[i,j] represents the connection from node j in input layer to node i in this layer.
"""
self.C = C
self.dim = dim
self.module_dim = [layer_size / C for layer_size in dim]
self.focal_width = focal_width
self.CIJ = np.zeros(dim)
for i in range(C):
self.init_module(i)
def init_module(self, module_index):
"""
Initialises the target module with connections from the input layer.
"""
target_dim, input_dim = self.module_dim
input_nodes = range_from_base(module_index * input_dim, input_dim)
target_nodes = range_from_base(module_index * target_dim, target_dim)
for i in target_nodes:
nodes_to_connect = np.random.choice(input_nodes, self.focal_width, replace=False)
self.CIJ[i, nodes_to_connect] = 1
def plot(self):
"""
Uses pyplot to draw a plot of the connectivity matrix
"""
plot_connectivity_matrix(self.CIJ, self.dim).show()
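# Illustrative usage sketch, not part of the original file; it echoes the docstring example
# but inspects the matrix instead of plotting (Python 2 + NumPy, as assumed by the file above).
net = ModularFocalNetwork(8, [1600, 800], 4)
print(net.CIJ.shape)       # (1600, 800)
print(int(net.CIJ.sum()))  # 6400 -- focal_width connections for each of the 1600 target nodes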
|
wbrefvem/festina
|
festina/urls.py
|
Python
|
mit
| 345
| 0.011594
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'festina.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
    url(r'^resume/$', include('resume.urls')),
url(r'^admin/', include(admin.site.urls)),
)
|
kadrlica/ugali
|
ugali/analysis/farm.py
|
Python
|
mit
| 8,918
| 0.013007
|
#!/usr/bin/env python
"""
Dispatch the likelihood scan to a cluster.
"""
import os,sys
from os.path import join, exists
import shutil
import subprocess
import time
import glob
import numpy as np
import healpy as hp
import ugali.utils.config
import ugali.utils.skymap
import ugali.utils.batch
from ugali.utils.projector import gal2cel,cel2gal
from ugali.utils.healpix import subpixel,superpixel,query_disc
from ugali.utils.healpix import pix2ang,ang2vec,read_partial_map
from ugali.utils.logger import logger
from ugali.utils.shell import mkdir
class Farm:
""" Class for organizing and submitting likelihood scan jobs. """
def __init__(self, configfile, verbose=False):
self.configfile = configfile
self.config = ugali.utils.config.Config(configfile)
self._setup()
self.verbose = verbose
def _setup(self):
self.nside_catalog = self.config['coords']['nside_catalog']
self.nside_likelihood = self.config['coords']['nside_likelihood']
self.nside_pixel = self.config['coords']['nside_pixel']
self.filenames = self.config.getFilenames()
self.skip = "Outfile already exists. Skipping..."
# Might consider storing only the good filenames
# self.filenames = self.filenames.compress(~self.filenames.mask['pix'])
self.catalog_pixels = self.filenames['pix'].compressed()
def command(self, outfile, configfile, pix):
"""
Generate the command for running the likelihood scan.
"""
params = dict(script=self.config['scan']['script'],
config=configfile, outfile=outfile,
nside=self.nside_likelihood, pix=pix,
verbose='-v' if self.verbose else '')
cmd = '%(script)s %(config)s %(outfile)s --hpx %(nside)i %(pix)i %(verbose)s'%params
return cmd
def submit_all(self, coords=None, queue=None, debug=False):
"""
Submit likelihood analyses on a set of coordinates. If
coords is `None`, submit all coordinates in the footprint.
Inputs:
coords : Array of target locations in Galactic coordinates.
queue : Overwrite submit queue.
debug : Don't run.
"""
if coords is None:
pixels = np.arange(hp.nside2npix(self.nside_likelihood))
else:
lon,lat,radius = coords['lon'],coords['lat'],coords['radius']
#ADW: coords are always parsed in GAL, so convert to CEL if necessary
if self.config['coords']['coordsys'].lower() == 'cel':
lon,lat = gal2cel(lon,lat)
vec = ang2vec(lon,lat)
pixels = np.zeros(0, dtype=int)
for v,r in zip(vec,radius):
pix = query_disc(self.nside_likelihood,v,r,inclusive=True,fact=32)
pixels = np.hstack([pixels, pix])
#pixels = np.unique(pixels)
inside = ugali.utils.skymap.inFootprint(self.config,pixels)
if inside.sum() != len(pixels):
logger.warning("Ignoring pixels outside survey footprint:\n"+str(pixels[~inside]))
if inside.sum() == 0:
logger.warning("No pixels inside footprint.")
return
# Only write the configfile once
outdir = mkdir(self.config['output']['likedir'])
# Actually copy config instead of re-writing
shutil.copy(self.config.filename,outdir)
configfile = join(outdir,os.path.basename(self.config.filename))
pixels = pixels[inside]
self.submit(pixels,queue=queue,debug=debug,configfile=configfile)
def submit(self, pixels, queue=None, debug=False, configfile=None):
"""
Submit the likelihood job for the given pixel(s).
"""
# For backwards compatibility
batch = self.config['scan'].get('batch',self.config['batch'])
queue = batch.get('default','medium') if queue is None else queue
# Need to develop some way to take command line arguments...
self.batch = ugali.utils.batch.batchFactory(queue,**batch.get(queue,{}))
self.batch.max_jobs = self.config['scan'].get('max_jobs',200)
if np.isscalar(pixels): pixels = np.array([pixels])
outdir = mkdir(self.config['output']['likedir'])
logdir = mkdir(join(outdir,'log'))
subdir = mkdir(join(outdir,'sub'))
# Save the current configuation settings; avoid writing
# file multiple times if configfile passed as argument.
if configfile is None:
shutil.copy(self.config.filename,outdir)
configfile = join(outdir,os.path.basename(self.config.filename))
lon,lat = pix2ang(self.nside_likelihood,pixels)
commands = []
chunk = self.config['scan'].get('chunk',25)
istart = 0
logger.info('=== Submit Likelihood ===')
for ii,pix in enumerate(pixels):
msg = ' (%i/%i) pixel=%i nside=%i; (lon, lat) = (%.2f, %.2f)'
msg = msg%(ii+1,len(pixels),pix, self.nside_likelihood,lon[ii],lat[ii])
logger.info(msg)
# Create outfile name
outfile = self.config.likefile%(pix,self.config['coords']['coordsys'].lower())
outbase = os.path.basename(outfile)
jobname = batch.get('jobname','ugali')
# Submission command
sub = not os.path.exists(outfile)
cmd = self.command(outfile,configfile,pix)
commands.append([ii,cmd,lon[ii],lat[ii],sub])
if chunk == 0:
# No chunking
command = cmd
submit = sub
logfile = join(logdir,os.path.splitext(outbase)[0]+'.log')
elif (len(commands)%chunk==0) or (ii+1 == len(pixels)):
# End of chunk, create submission script
commands = np.array(commands,dtype=object)
istart, iend = commands[0][0], commands[-1][0]
subfile = join(subdir,'submit_%08i_%08i.sh'%(istart,iend))
logfile = join(logdir,'submit_%08i_%08i.log'%(istart,iend))
command = "sh %s"%
|
subfile
submit = np.any(commands[:,-1])
if submit: self.write_script(subfile,commands)
else:
# Not end of chunk
continue
commands=[]
# Actual job submission
if not submit:
logger.info(self.skip)
continue
else:
job = self.batch.submit(command,jobname,logfile)
                logger.info("  "+job)
time.sleep(0.5)
def write_script(self, filename, commands):
""" Write a batch submission script.
Parameters
----------
filename : filename of batch script
commands : list of commands to execute
Returns
-------
None
"""
info = 'echo "{0:=^60}";\n'
hline = info.format("")
newline = 'echo;\n'
shebang = "#!/usr/bin/env bash"
# Limit the memory based on SLAC 4 GB per node (defined in KB)
# Careful, shell arithmetic is weird.
memory_limit = """
if [ -n "$LSB_CG_MEMLIMIT" ] & [ -n "$LSB_HOSTS" ]; then
mlimit=$(( $(wc -w <<< $LSB_HOSTS) * $LSB_CG_MEMLIMIT/1024 * 9/10 ))
ulimit -v ${mlimit}; ulimit -H -v ${mlimit};
fi
"""
memory_usage=r"""free -m | awk 'NR==2{printf "Memory Usage: %.2f/%.2fGB (%.2f%%)\n",$3/1024,$2/1024,$3*100/$2}';"""
memory_usage=r"""ps -U $USER --no-headers -o rss | awk '{sum+=$1} END {print "Memory Usage: " int(sum/1024**2) "GB"}'"""
istart, iend = commands[0][0], commands[-1][0]
script = open(filename,'w')
script.write(shebang)
#script.write(memory_limit)
script.write(hline)
script.write(info.format('Submit Jobs %i to %i'%(istart,iend)))
script.write(hline)
script.write(newline)
script.write('status=0;\n')
for i,cmd,lon,lat,sub in commands:
script.write(info.format('Job %i: (%.2f, %.2f)'%(i,lon,lat)))
if sub:
sc
|
js0701/chromium-crosswalk
|
third_party/WebKit/Source/bindings/scripts/v8_methods.py
|
Python
|
bsd-3-clause
| 24,023
| 0.002622
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Generate template values for methods.
Extends IdlArgument with property |default_cpp_value|.
Extends IdlTypeBase and IdlUnionType with property |union_arguments|.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
from idl_definitions import IdlArgument, IdlOperation
from idl_types import IdlTypeBase, IdlUnionType, inherits_interface
from v8_globals import includes
import v8_types
import v8_utilities
from v8_utilities import (has_extended_attribute_value, is_unforgeable,
is_legacy_interface_type_checking)
# Methods with any of these require custom method registration code in the
# interface's configure*Template() function.
CUSTOM_REGISTRATION_EXTENDED_ATTRIBUTES = frozenset([
'DoNotCheckSecurity',
'DoNotCheckSignature',
])
def use_local_result(method):
extended_attributes = method.extended_attributes
idl_type = method.idl_type
return (has_extended_attribute_value(method, 'CallWith', 'ScriptState') or
'ImplementedInPrivateScript' in extended_attributes or
'RaisesException' in extended_attributes or
idl_type.is_union_type or
idl_type.is_explicit_nullable)
def method_context(interface, method, is_visible=True):
arguments = method.arguments
extended_attributes = method.extended_attributes
idl_type = method.idl_type
is_static = method.is_static
name = method.name
if is_visible:
idl_type.add_includes_for_type(extended_attributes)
this_cpp_value = cpp_value(interface, method, len(arguments))
is_implemented_in_private_script = 'ImplementedInPrivateScript' in extended_attributes
if is_implemented_in_private_script:
includes.add('bindings/core/v8/PrivateScriptRunner.h')
includes.add('core/frame/LocalFrame.h')
includes.add('platform/ScriptForbiddenScope.h')
# [OnlyExposedToPrivateScript]
is_only_exposed_to_private_script = 'OnlyExposedToPrivateScript' in extended_attributes
is_call_with_script_arguments = has_extended_attribute_value(method, 'CallWith', 'ScriptArguments')
if is_call_with_script_arguments:
includes.update(['bindings/core/v8/ScriptCallStackFactory.h',
'core/inspector/ScriptArguments.h'])
is_call_with_script_state = has_extended_attribute_value(method, 'CallWith', 'ScriptState')
is_call_with_this_value = has_extended_attribute_value(method, 'CallWith', 'ThisValue')
if is_call_with_script_state or is_call_with_this_value:
includes.add('bindings/core/v8/ScriptState.h')
# [CheckSecurity]
is_do_not_check_security = 'DoNotCheckSecurity' in extended_attributes
is_check_security_for_receiver = (
has_extended_attribute_value(interface, 'CheckSecurity', 'Receiver') and
not is_do_not_check_security)
is_check_security_for_return_value = (
has_extended_attribute_value(method, 'CheckSecurity', 'ReturnValue'))
if is_check_security_for_receiver or is_check_security_for_return_value:
includes.add('bindings/core/v8/BindingSecurity.h')
is_custom_element_callbacks = 'CustomElementCallbacks' in extended_attributes
if is_custom_element_callbacks:
includes.add('core/dom/custom/CustomElementProcessingStack.h')
is_raises_exception = 'RaisesException' in extended_attributes
    is_custom_call_prologue = has_extended_attribute_value(method, 'Custom', 'CallPrologue')
is_custom_call_epilogue = has_extended_attribute_value(method, 'Custom', 'CallEpilogue')
is_post_message = 'PostMessage' in extended_attributes
if is_post_message:
includes.add('bindings/core/v8/SerializedScriptValueFactory.h')
includes.add('core/dom/DOMArrayBuffer.h')
includes.add('core/dom/MessagePort.h')
includes.add('core/frame/ImageBitmap.h')
if 'LenientThis' in extended_attributes:
raise Exception('[LenientThis] is not supported for operations.')
if 'APIExperimentEnabled' in extended_attributes:
includes.add('core/experiments/ExperimentalFeatures.h')
includes.add('core/inspector/ConsoleMessage.h')
argument_contexts = [
argument_context(interface, method, argument, index, is_visible=is_visible)
for index, argument in enumerate(arguments)]
return {
'activity_logging_world_list': v8_utilities.activity_logging_world_list(method), # [ActivityLogging]
'api_experiment_enabled': v8_utilities.api_experiment_enabled_function(method), # [APIExperimentEnabled]
'api_experiment_enabled_per_interface': v8_utilities.api_experiment_enabled_function(interface), # [APIExperimentEnabled]
'arguments': argument_contexts,
'argument_declarations_for_private_script':
argument_declarations_for_private_script(interface, method),
'cpp_type': (v8_types.cpp_template_type('Nullable', idl_type.cpp_type)
if idl_type.is_explicit_nullable else idl_type.cpp_type),
'cpp_value': this_cpp_value,
'cpp_type_initializer': idl_type.cpp_type_initializer,
'custom_registration_extended_attributes':
CUSTOM_REGISTRATION_EXTENDED_ATTRIBUTES.intersection(
extended_attributes.iterkeys()),
'deprecate_as': v8_utilities.deprecate_as(method), # [DeprecateAs]
'exposed_test': v8_utilities.exposed(method, interface), # [Exposed]
# TODO(yukishiino): Retire has_custom_registration flag. Should be
# replaced with V8DOMConfiguration::PropertyLocationConfiguration.
'has_custom_registration':
v8_utilities.has_extended_attribute(
method, CUSTOM_REGISTRATION_EXTENDED_ATTRIBUTES),
'has_exception_state':
is_raises_exception or
is_check_security_for_receiver or
any(argument for argument in arguments
if (argument.idl_type.name == 'SerializedScriptValue' or
argument_conversion_needs_exception_state(method, argument))),
'has_optional_argument_without_default_value':
any(True for argument_context in argument_contexts
if argument_context['is_optional_without_default_value']),
'idl_type': idl_type.base_type,
'is_api_experiment_enabled': v8_utilities.api_experiment_enabled_function(method) or v8_utilities.api_experiment_enabled_function(interface), # [APIExperimentEnabled]
'is_call_with_execution_context': has_extended_attribute_value(method, 'CallWith', 'ExecutionContext'),
'is_call_with_script_arguments': is_call_with_script_arguments,
'is_call_wi
|
rogerhoward/funcaas
|
server.py
|
Python
|
mit
| 673
| 0.002972
|
#!/usr/bin/env python
"""Server run file.
Run by './server.py'
Access properties as 'config.property'
"""
import pkgutil, sys
from flask import Flask, Blueprint, render_template, request
import config
app = Flask(__name__)
modules = pkgutil.iter_modules(path=[config.modules_directory_name])
for loader, mod_name, ispkg in modules:
if mod_name not in sys.modules:
loaded_mod = __import__(config.modules_directory_name + '.' + mod_name, fromlist=[mod_name])
for obj in vars(loaded_mod).values():
if isinstance(obj, Blueprint):
app.register_blueprint(obj)
app.run(debug=config.debug, host=config.host, port=config.port)
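# Illustrative sketch, not part of the original file: any module placed in the directory named
# by config.modules_directory_name is auto-registered above if it exposes a Blueprint.
# The module contents below (names, route, handler) are assumptions.
#
#   from flask import Blueprint
#   hello = Blueprint('hello', __name__)
#
#   @hello.route('/hello')
#   def say_hello():
#       return 'hello'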
|
shahbazn/neutron
|
neutron/db/l3_dvr_db.py
|
Python
|
apache-2.0
| 31,701
| 0.000315
|
# Copyright (c) 2014 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
from neutron.api.v2 import attributes
from neutron.callbacks import events
from neutron.callbacks import exceptions
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as l3_const
from neutron.common import exceptions as n_exc
from neutron.common import utils as n_utils
from neutron.db import l3_attrs_db
from neutron.db import l3_db
from neutron.db import l3_dvrscheduler_db as l3_dvrsched_db
from neutron.db import models_v2
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron.i18n import _LI
from neutron import manager
from neutron.plugins.common import constants
from neutron.plugins.common import utils as p_utils
LOG = logging.getLogger(__name__)
router_distributed_opts = [
cfg.BoolOpt('router_distributed',
default=False,
help=_("System-wide flag to determine the type of router "
"that tenants can create. Only admin can override.")),
]
cfg.CONF.register_opts(router_distributed_opts)
class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
l3_attrs_db.ExtraAttributesMixin):
"""Mixin class to enable DVR support."""
router_device_owners = (
l3_db.L3_NAT_db_mixin.router_device_owners +
(l3_const.DEVICE_OWNER_DVR_INTERFACE,
l3_const.DEVICE_OWNER_ROUTER_SNAT,
l3_const.DEVICE_OWNER_AGENT_GW))
extra_attributes = (
l3_attrs_db.ExtraAttributesMixin.extra_attributes + [{
'name': "dis
|
tributed",
'default': cfg.CONF.router_distributed
}])
def _create_router_db(self, context, router, tenant_id):
"""Create a router db object with dvr
|
additions."""
router['distributed'] = is_distributed_router(router)
with context.session.begin(subtransactions=True):
router_db = super(
L3_NAT_with_dvr_db_mixin, self)._create_router_db(
context, router, tenant_id)
self._process_extra_attr_router_create(context, router_db, router)
return router_db
def _validate_router_migration(self, context, router_db, router_res):
"""Allow centralized -> distributed state transition only."""
if (router_db.extra_attributes.distributed and
router_res.get('distributed') is False):
LOG.info(_LI("Centralizing distributed router %s "
"is not supported"), router_db['id'])
raise n_exc.NotSupported(msg=_("Migration from distributed router "
"to centralized"))
elif (not router_db.extra_attributes.distributed and
router_res.get('distributed')):
            # router should be disabled in order to be upgraded
if router_db.admin_state_up:
msg = _('Cannot upgrade active router to distributed. Please '
'set router admin_state_up to False prior to upgrade.')
raise n_exc.BadRequest(resource='router', msg=msg)
# Notify advanced services of the imminent state transition
# for the router.
try:
kwargs = {'context': context, 'router': router_db}
registry.notify(
resources.ROUTER, events.BEFORE_UPDATE, self, **kwargs)
except exceptions.CallbackFailure as e:
with excutils.save_and_reraise_exception():
# NOTE(armax): preserve old check's behavior
if len(e.errors) == 1:
raise e.errors[0].error
raise l3.RouterInUse(router_id=router_db['id'],
reason=e)
def _update_distributed_attr(
self, context, router_id, router_db, data, gw_info):
"""Update the model to support the dvr case of a router."""
if data.get('distributed'):
old_owner = l3_const.DEVICE_OWNER_ROUTER_INTF
new_owner = l3_const.DEVICE_OWNER_DVR_INTERFACE
for rp in router_db.attached_ports.filter_by(port_type=old_owner):
rp.port_type = new_owner
rp.port.device_owner = new_owner
def _update_router_db(self, context, router_id, data, gw_info):
with context.session.begin(subtransactions=True):
router_db = super(
L3_NAT_with_dvr_db_mixin, self)._update_router_db(
context, router_id, data, gw_info)
migrating_to_distributed = (
not router_db.extra_attributes.distributed and
data.get('distributed') is True)
self._validate_router_migration(context, router_db, data)
router_db.extra_attributes.update(data)
self._update_distributed_attr(
context, router_id, router_db, data, gw_info)
if migrating_to_distributed:
if router_db['gw_port_id']:
# If the Legacy router is getting migrated to a DVR
# router, make sure to create corresponding
# snat interface ports that are to be consumed by
# the Service Node.
if not self._create_snat_intf_ports_if_not_exists(
context.elevated(), router_db):
LOG.debug("SNAT interface ports not created: %s",
router_db['id'])
cur_agents = self.list_l3_agents_hosting_router(
context, router_db['id'])['agents']
for agent in cur_agents:
self._unbind_router(context, router_db['id'],
agent['id'])
return router_db
def _delete_current_gw_port(self, context, router_id, router, new_network):
"""
        Overridden here to handle deletion of dvr internal ports.
If there is a valid router update with gateway port to be deleted,
then go ahead and delete the csnat ports and the floatingip
agent gateway port associated with the dvr router.
"""
gw_ext_net_id = (
router.gw_port['network_id'] if router.gw_port else None)
super(L3_NAT_with_dvr_db_mixin,
self)._delete_current_gw_port(context, router_id,
router, new_network)
if (is_distributed_router(router) and
gw_ext_net_id != new_network):
self.delete_csnat_router_interface_ports(
context.elevated(), router)
# NOTE(Swami): Delete the Floatingip agent gateway port
# on all hosts when it is the last gateway port in the
# given external network.
filters = {'network_id': [gw_ext_net_id],
'device_owner': [l3_const.DEVICE_OWNER_ROUTER_GW]}
ext_net_gw_ports = self._core_plugin.get_ports(
context.elevated(), filters)
if not ext_net_gw_ports:
self.delete_floatingip_agent_gateway_port(
context.elevated(), None, gw_ext_net_id)
def _create_gw_port(self, context, router_id, router, new_network,
ext_ips):
super(L3_NAT_with_dvr_db_mixin,
self)._create_gw_port(context, router_id, router, new_netwo
|
instana/python-sensor
|
instana/instrumentation/aws/triggers.py
|
Python
|
mit
| 10,212
| 0.001469
|
# (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
"""
Module to handle the work related to the many AWS Lambda Triggers.
"""
import gzip
import json
import base64
from io import BytesIO
import opentracing as ot
from ...log import logger
STR_LAMBDA_TRIGGER = 'lambda.trigger'
def get_context(tracer, event):
# TODO: Search for more types of trigger context
is_proxy_event = is_api_gateway_proxy_trigger(event) or \
is_api_gateway_v2_proxy_trigger(event) or \
is_application_load_balancer_trigger(event)
if is_proxy_event:
return tracer.extract(ot.Format.HTTP_HEADERS, event.get('headers', {}), disable_w3c_trace_context=True)
return tracer.extract(ot.Format.HTTP_HEADERS, event, disable_w3c_trace_context=True)
def is_api_gateway_proxy_trigger(event):
for key in ["resource", "path", "httpMethod"]:
if key not in event:
return False
return True
def is_api_gateway_v2_proxy_trigger(event):
for key in ["version", "requestContext"]:
if key not in event:
return False
if event["version"] != "2.0":
return False
for key in ["apiId", "stage", "http"]:
if key not in event["requestContext"]:
return False
return True
def is_application_load_balancer_trigger(event):
if 'requestContext' in event and 'elb' in event['requestContext']:
return True
return False
def is_cloudwatch_trigger(event):
if "source" in event and 'detail-type' in event:
if event["source"] == 'aws.events' and event['detail-type'] == 'Scheduled Event':
return True
return False
def is_cloudwatch_logs_trigger(event):
if hasattr(event, 'get') and event.get("awslogs", False) is not False:
return True
else:
return False
def is_s3_trigger(event):
if "Records" in event:
if len(event["Records"]) > 0 and event["Records"][0]["eventSource"] == 'aws:s3':
return True
return False
def is_sqs_trigger(event):
if "Records" in event:
if len(event["Records"]) > 0 and event["Records"][0]["eventSource"] == 'aws:sqs':
return True
return False
def read_http_query_params(event):
"""
Used to parse the Lambda QueryString formats.
@param event: lambda event dict
@return: String in the form of "a=b&c=d"
"""
params = []
try:
if event is None or type(event) is not dict:
return ""
mvqsp = event.get('multiValueQueryStringParameters', None)
qsp = event.get('queryStringParameters', None)
if mvqsp is not None and type(mvqsp) is dict:
for key in mvqsp:
params.append("%s=%s" % (key, mvqsp[key]))
return "&".join(params)
elif qsp is not None and type(qsp) is dict:
for key in qsp:
params.append("%s=%s" % (key, qsp[key]))
return "&".join(params)
else:
return ""
except Exception:
logger.debug("read_http_query_params: ", exc_info=True)
return ""
def capture_extra_headers(event, span, extra_headers):
"""
Capture the headers specified in `extra_headers` from `event` and log them
as a tag in the span.
@param event: the lambda event
@param span: the lambda entry span
@param extra_headers: a list of http headers to capture
@return: None
"""
try:
event_headers = event.get("headers", None)
if event_headers is not None:
for custom_header in extra_headers:
for key in event_headers:
if key.lower() == custom_header.lower():
span.set_tag("http.header.%s" % custom_header, event_headers[key])
except Exception:
logger.debug("capture_extra_headers: ", exc_info=True)
def enrich_lambda_span(agent, span, event, context):
"""
Extract the required information about this Lambda run (and the trigger) and store the data
on `span`.
@param agent: the AWSLambdaAgent in use
@param span: the Lambda entry span
@param event: the lambda handler event
@param context: the lambda handler context
@return: None
"""
try:
span.set_tag('lambda.arn', agent.collector.get_fq_arn())
span.set_tag('lambda.name', context.function_name)
span.set_tag('lambda.version', context.function_version)
if event is None or type(event) is not dict:
logger.debug("enrich_lambda_span: bad event %s", type(event))
return
if is_api_gateway_proxy_trigger(event):
logger.debug("Detected as API Gateway Proxy Trigger")
span.set_tag(STR_LAMBDA_TRIGGER, 'aws:api.gateway')
span.set_tag('http.method', event["httpMethod"])
span.set_tag('http.url', event["path"])
span.set_tag('http.path_tpl', event["resource"])
span.set_tag('http.params', read_http_query_params(event))
if agent.options.extra_http_headers is not None:
capture_extra_headers(event, span, agent.options.extra_http_headers)
elif is_api_gateway_v2_proxy_trigger(event):
logger.debug("Detected as API Gateway v2.0 Proxy Trigger")
reqCtx = event["requestContext"]
# trim optional HTTP method prefix
route_path = event["routeKey"].split(" ", 2)[-1]
span.set_tag(STR_LAMBDA_TRIGGER, 'aws:api.gateway')
span.set_tag('http.method', reqCtx["http"]["method"])
span.set_tag('http.url', reqCtx["http"]["path"])
span.set_tag('http.path_tpl', route_path)
span.set_tag('http.params', read_http_query_params(event))
if agent.options.extra_http_headers is not None:
capture_extra_headers(event, span, agent.options.extra_http_headers)
elif is_application_load_balancer_trigger(event):
logger.debug("Detected as Application Load Balancer Trigger")
span.set_tag(STR_LAMBDA_TRIGGER, 'aws:application.load.balancer')
span.set_tag('http.method', event["httpMethod"])
span.set_tag('http.url', event["path"])
span.set_tag('http.params', read_http_query_params(event))
if agent.options.extra_http_headers is not None:
capture_extra_headers(event, span, agent.options.extra_http_headers)
elif is_cloudwatch_trigger(event):
logger.debug("Detected as Cloudwatch Trigger")
span.set_tag(STR_LAMBDA_TRIGGER, 'aws:cloudwatch.events')
span.set_tag('data.lambda.cw.events.id', event['id'])
resources = event['resources']
resource_count = len(event['resources'])
if resource_count > 3:
resources = event['resources'][:3]
span.set_tag('lambda.cw.events.more', True)
else:
span.set_tag('lambda.cw.events.more', False)
report = []
for item in resources:
if len(item) > 200:
                    item = item[:200]
report.append(item)
span.set_tag('lambda.cw.events.resources', report)
elif is_cloudwatch_logs_trigger(event):
logger.debug("Detected as Cloudwatch Logs Trigger")
span.set_tag(STR_LAMBDA_TRIGGER, 'aws:cloudwatch.logs')
try:
if 'awslogs' in event and 'data' in event['awslogs']:
data = event['awslogs']['data']
decoded_data = base64.b64decode(data)
decompressed_data = gzip.GzipFile(fileobj=BytesIO(decoded_data)).read()
log_data = json.loads(decompressed_data.decode('utf-8'))
span.set_tag('lambda.cw.logs.group', log_data.get('logGroup', None))
span.set_tag('lambda.cw.logs.stream', log_data.get('logStream', None))
if len(log_data['logEvents']) > 3:
span.set_tag('lambda.cw.logs.more', True)
events = log_data['logEvents'][:3]
else:
event
|
nil0x42/phpsploit
|
plugins/credentials/cloudcredgrab/plugin_args.py
|
Python
|
gpl-3.0
| 596
| 0.003356
|
import argparse
import ui.output
def help_format_cloudcredgrab(prog):
kwargs = dict()
kwargs['width'] = ui.output.columns()
kwargs['max_help_position'] = 34
format = argparse.HelpFormatter(prog, **kwargs)
return (format)
def parse(args):
parser = argparse.ArgumentParser(prog="cloudcredgrab", add_help=False, usage=argparse.SUPPRESS)
parser.formatter_class = help_format_cloudcredgrab
parser.add_argument('-u', '--username',
metavar="<USER>", default=None
|
)
    parser.add_argument('platform')
options = vars(parser.parse_args(args))
|
fdroidtravis/repomaker
|
repomaker/wsgi.py
|
Python
|
agpl-3.0
| 396
| 0
|
"""
WSGI config for repomaker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "repomaker.settings")
application = get_wsgi_application()
|
debasishbai/django_blog
|
blog/database_config.py
|
Python
|
mit
| 454
| 0.002203
|
import psycopg2
import urlparse
import os
def server_db():
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(os.environ["DATABASE_URL"])
    conn = psycopg2.connect(database=url.path[1:], user=url.username, password=url.password, host=url.hostname, port=url.port)
cur = conn.cursor()
return cur
def local_db():
conn = psycopg2.connect(host="", user="", password="", dbname="")
cur = conn.cursor()
return cur
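# Illustrative usage sketch, not part of the original file; the DATABASE_URL value is an assumption.
# os.environ["DATABASE_URL"] = "postgres://user:secret@db.example.com:5432/blog"
# cur = server_db()
# cur.execute("SELECT 1")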
|
sprax/python
|
bin_pack.py
|
Python
|
lgpl-3.0
| 10,413
| 0.001825
|
#!/usr/bin/env python3
'''
@file: bin_pack.py
@auth: Sprax Lines
@date: 2018-02-07 00:19:39 Wed 07 Feb
Can the space requirements specified by bits be packed into the specified bins?
'''
from __future__ import print_function
from itertools import islice
# import pdb
# from pdb import set_trace
from datetime import datetime
from num import fibonaccis
from num import prime_gen
def excess_space(bins, bits):
''' total excess space '''
return sum(bins) - sum(bits)
def can_pack_track_rec(bins, num_usable, bits, num_unpacked, usable_space, needed_space):
'''
* Sorted recursion. Early return if largest item cannot fit in largest
remaining bin.
* @param bins
* @param num_usable
* @param bits
* @param num_unpacked
* @return True if can pack, else False
'''
if num_unpacked < 1:
return True
if num_usable < 1:
return False
j = num_unpacked - 1
k = num_usable - 1
# return False if the largest remaining bin cannot fit the largest
# num_unpacked item.
if bins[k] < bits[j]:
return False
# Use reverse order, assuming the inputs were sorted in ascending order.
for k in reversed(range(num_usable)):
diff_k_j = bins[k] - bits[j]
# expected to be True at beginning of loop
if diff_k_j >= 0:
swapping = False
# If the space left in this bin would be less than the
if diff_k_j < bits[0]:
# smallest item, then this bin would become unusable.
usable_space -= diff_k_j
# If the remaining usable space would not suffice,
if usable_space < needed_space:
# return False immediately, without decrementing, etc.
return False
# Need to swap the diminished bins[k] off the active list.
swapping = True
usable_space -= bits[j]
needed_space -= bits[j]
bins[k] = diff_k_j
if swapping:
num_usable -= 1
bins[k] = bins[num_usable]
bins[num_usable] = diff_k_j
else:
# Otherwise, sort the list by re-inserting diminished bin[k]
# value where it now belongs.
for rdx in reversed(range(k)):
if diff_k_j < bins[rdx]:
bins[rdx + 1] = bins[rdx]
else:
bins[rdx + 1] = diff_k_j
break
else:
# set_trace()
bins[0] = diff_k_j
# Exhaustive recursion: check all remaining solutions that start
# with item[j] packed in bin[rdx]
if can_pack_track_rec(bins, num_usable, bits, j, usable_space, needed_space):
return True
# failed, so swap back and increment.
if swapping:
bins[num_usable] = bins[k]
bins[k] = diff_k_j
usable_space += diff_k_j
num_usable += 1
usable_space += bits[j]
needed_space += bits[j]
bins[k] += bits[j]
return False
def can_pack_track(bins, bits):
'''returns True IFF bits can be packed into bins'''
usable_space = sum(bins)
needed_space = sum(bits)
excess = usable_space - needed_space
if excess < 0:
return False # return early: insufficient total space
sbins = sorted(bins) # make a sorted copy
sbits = sorted(bits)
if sbins[-1] < sbits[-1]:
return False # return early: max bin < max bit
if can_pack_track_rec(sbins, len(sbins), sbits, len(sbits), usable_space, needed_space):
# Change the original array. (Pass by value means bins = sbins would
# not.)
for idx, sbin in enumerate(sbins):
bins[idx] = sbin
return True
print("sbins after failure:", sbins)
return False
def can_pack(bins, bits):
''' uses the best method here '''
return can_pack_track(bins, bits)
def can_pack_naive(bins, bits):
''' uses naive method '''
packed = [False] * len(bits)
return can_pack_naive_rec(bins, bits, packed)
def can_pack_naive_rec(bins, bits, packed):
'''
Naive exhaustive recursion, no early failure (as when sum(bins) <
    sum(bits)), no sorting.
Implementation: Naive exhaustive recursion with supplementary array.
Complexity: Time O(N!), additional space O(N).
* Tries to fit bits into bins in the original order given.
* @param bins
* @param bits
* @param packed
* @return
'''
if all(packed):
return True
for i in range(len(bits)):
        if not packed[i]:
# Exhaustive: check all remaining solutions that start with item[i]
# packed in some bin[j]
packed[i] = True
for j in range(len(bins)):
if bins[j] >= bits[i]:
# deduct item amount from bin and try to pack the rest
bins[j] -= bits[i]
if can_pack_naive_rec(bins, bits, packed):
return True # success: return
bins[j] += bits[i] # failure: restore item amount to bin
packed[i] = False
return False
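# Illustrative usage sketch, not part of the original file; both calls mirror test cases below.
# can_pack([8, 16, 8, 32], [18, 4, 8, 4, 6, 6, 8, 8])  -> True
# can_pack([1, 1, 4], [2, 3])                          -> False (2 and 3 cannot both be placed)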
###############################################################################
def show_wrong(result, expected):
''' show result if unexpected '''
if result == expected:
return 0
print("Wrong result: %s, expected: %s\n" % (result, expected))
return 1
def test_can_pack(packer, bins, bits, verbose, name, number, expected):
''' the basic test function '''
result = False
excess = excess_space(bins, bits)
if verbose > 0:
print(" Test can_pack: %s: %d" % (name, number))
print("bins to fill:", bins)
print("bits to pack:", bits)
sum_bins = sum(bins)
sum_bits = sum(bits)
diff = sum_bins - sum_bits
assert diff == excess
print("bin space - bits space: %d - %d = %d" % (sum_bins, sum_bits, diff))
if excess < 0:
print("Insufficient total bin space.")
else:
# Test the interface function:
beg_time = datetime.now()
result = packer(bins, bits)
run_time = datetime.now() - beg_time
if verbose > 0:
print("Pack bits in bins?", result)
print("Bin space after:", bins)
print("Run time millis: %7.2f" % (run_time.total_seconds() * 1000))
if result:
assert sum(bins) == excess
return show_wrong(result, expected)
def pass_fail(num_wrong):
''' pass or fail string '''
return "PASS" if num_wrong == 0 else "FAIL"
def test_packer(packer, packer_name, level):
''' tests a can_pack method '''
test_name = "test_packer(" + packer_name + ")"
num_wrong = 0
test_num = 0
if level < 1:
test_num += 1
bins = [1, 1, 4]
bits = [2, 3]
num_wrong += test_can_pack(packer, bins, bits, 1, test_name, test_num, False)
test_num += 1
bins = [2, 2, 37]
bits = [4, 37]
num_wrong += test_can_pack(packer, bins, bits, 1, test_name, test_num, False)
test_num += 1
bins = [8, 16, 8, 32]
bits = [18, 4, 8, 4, 6, 6, 8, 8]
num_wrong += test_can_pack(packer, bins, bits, 1, test_name, test_num, True)
test_num += 1
limits = [1, 3]
needs = [4]
num_wrong += test_can_pack(packer, limits, needs, 1, test_name, test_num, False)
test_num += 1
duffels = [2, 5, 2, 2, 6]
bags = [3, 3, 5]
num_wrong += test_can_pack(packer, duffels, bags, 1, test_name, test_num, True)
test_num += 1
sashes = [1, 2, 3, 4, 5, 6, 8, 9]
badges = [1, 4, 6, 6, 8, 8]
num_wrong += test_can_pack(packer, sashes, badges, 1, test_name, test_num, False)
if level > 0:
test_num += 1
crates = list(fibonaccis.fib_generate(11, 1))
boxes = list(islice(prime_gen.sieve(), 12))
boxes.append(2
|
googleapis/gapic-generator-python
|
tests/integration/goldens/logging/samples/generated_samples/logging_v2_generated_config_service_v2_delete_sink_sync.py
|
Python
|
apache-2.0
| 1,381
| 0.000724
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteSink
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-logging
# [START logging_v2_generated_ConfigServiceV2_DeleteSink_sync]
from google.cloud import logging_v2
def sample_delete_sink():
# Create a client
client = logging_v2.ConfigServiceV2Client()
# Initialize request argument(s)
request = logging_v2.DeleteSinkRequest(
sink_name="sink_name_value",
)
# Make the request
client.delete_sink(request=request)
# [END logging_v2_generated_ConfigServiceV2_DeleteSink_sync]
|
GeneralizedLearningUtilities/SuperGLU
|
python_module/SuperGLU/Services/TextProcessing/Tests/Inflect/test_classical_herd.py
|
Python
|
mit
| 835
| 0.008383
|
from nose.tools import eq_
import inflect
def test_ancient_1():
p = inflect.engine()
# DEFAULT...
eq_(p.plural_noun('wildebeest'), 'wildebeests', msg="classical 'herd' not active")
# "person" PLURALS ACTIVATED...
p.classical(herd=True)
eq_(p.plural_noun('wildebeest'), 'wildebeest', msg="classical 'herd' active")
# OTHER CLASSICALS NOT ACTIVATED...
    eq_(p.plural_noun('formula'), 'formulas', msg="classical 'ancient' active")
eq_(p.plural_noun('error', 0), 'errors', msg="classical 'zero' not active")
eq_(p.plural_noun('Sally'), 'Sallys', msg="classical 'names' active")
eq_(p.plural_noun('brother'), 'brothers', msg="classical 'all' not active")
eq_(p.plural_noun('person'), 'people', msg="classical 'persons' not active")
|
PanDAWMS/autopyfactory
|
autopyfactory/authmanager.py
|
Python
|
apache-2.0
| 8,906
| 0.011004
|
#!/usr/bin/env python
"""
A credential management component for AutoPyFactory
"""
import logging
import math
import os
import pwd, grp
import sys
import threading
import time
import socket
# Added to support running module as script from arbitrary location.
from os.path import dirname, realpath, sep, pardir
fullpathlist = realpath(__file__).split(sep)
prepath = sep.join(fullpathlist[:-2])
sys.path.insert(0, prepath)
import pluginmanager
import autopyfactory
###from autopyfactory.plugins.auth.X509 import X509
###from autopyfactory.plugins.auth.SSH import SSH
from autopyfactory.apfexceptions import InvalidAuthFailure
from autopyfactory.configloader import Config, ConfigsDiff
class AuthManager(object):
"""
Manager to maintain multiple credential Handlers, one for each target account.
For some handlers, if they need to perform periodic checks, they will be run
as threads. Others, which only hold information, will just be objects.
"""
def __init__(self, factory=None):
self.log = logging.getLogger('autopyfactory')
self.log.info("Creating new authmanager...")
self.aconfig = Config()
self.handlers = []
self.factory = factory
if factory:
self.sleep = int(self.factory.fcl.get('Factory', 'authmanager.sleep'))
else:
self.sleep = 5
def reconfig(self, newconfig):
hdiff = ConfigsDiff(self.aconfig, newconfig)
self.aconfig = newconfig
self._addhandlers(hdiff.added())
def _addhandlers(self, newsections):
for sect in newsections:
try:
pclass = self.aconfig.get(sect, 'plugin')
except Exception as e:
self.log.warn("No plugin attribute for section %s" % sect)
if pclass == 'X509':
self.log.debug("Creating X509 handler for %s" % sect )
authpluginname = self.aconfig.get(sect, 'plugin')
x509h = pluginmanager.getplugin(['autopyfactory', 'plugins', 'authmanager', 'auth'], authpluginname, self, self.aconfig, sect)
self.handlers.append(x509h)
elif pclass == 'SSH':
self.log.debug("Creating SSH handler for %s" % sect )
authpluginname = self.aconfig.get(sect, 'plugin')
sshh = pluginmanager.getplugin(['autopyfactory', 'plugins', 'authmanager', 'auth'], authpluginname, self, self.aconfig, sect)
self.handlers.append(sshh)
else:
self.log.warn("Unrecognized auth plugin %s" % pclass )
def activate(self):
"""
        start all Handlers, if needed
"""
for ah in self.handlers:
if isinstance(ah, threading.Thread) :
self.log.debug("Handler [%s] is a thread. Starting..." % ah.name)
ah.start()
else:
self.log.debug("Handler [%s] is not a thread. No action." % ah.name)
def listNames(self):
"""
Returns list of valid names of Handlers in this Manager.
"""
names = []
for h in self.handlers:
names.append(h.name)
return names
#
# API for X509Handler
#
def getProxyPath(self, profilelist):
"""
Check all the handlers for matching profile name(s).
profiles argument is a list
"""
pp = None
for profile in profilelist:
self.log.debug("Getting proxy path for profile %s" % profile)
ph = None
for h in self.handlers:
self.log.debug("Finding handler. Checking %s" % h.name)
if h.name == profile:
ph = h
break
if ph:
self.log.debug("Found handler %s. Getting proxypath..." % ph.name)
pp = ph.getProxyPath()
self.log.debug("Proxypath is %s" % pp)
if pp:
break
if not pp:
subject = "Proxy problem on %s" % self.factory.factoryid
messagestring = "Unable to get valid proxy from configured profiles: %s" % profilelist
self.factory.sendAdminEmail(subject, messagestring)
raise InvalidAuthFailure("Problem getting proxy for profile %s" % profilelist)
return pp
#
# API for SSHKeyHandler
#
def getSSHKeyPair(self, profile):
"""
Returns tuple (public, private, pass) key/phrase string from profile.
"""
pass
def getSSHKeyPairPaths(self, profile):
"""
Returns tuple (public, private, pass) key/passfile paths to files from profile.
"""
h = self._getHandler(profile)
pub = h.getSSHPubKeyFilePath()
priv = h.getSSHPrivKeyFilePath()
pasf = h.getSSHPassFilePath()
self.log.info('Got file paths for pub, priv, pass for SSH profile %s' % profile)
return (pub,priv,pasf)
def _getHandler(self, profile):
"""
"""
handler = None
for h in self.handlers:
self.log.debug("Finding handler. Checking %s" % h.name)
if h.name == profile:
self.log.debug("Found handler for %s" % h.name)
handler = h
if handler is None:
raise InvalidAuthFailure('No handler for %s ' % profile)
return handler
if __name__ == '__main__':
import getopt
import sys
import os
from ConfigParser import ConfigParser, SafeConfigParser
debug = 0
info = 0
aconfig_file = None
default_configfile = os.path.expanduser("~/etc/auth.conf")
usage = """Usage: authmanager.py [OPTIONS]
OPTIONS:
-h --help Print this message
-d --debug Debug messages
-v --verbose Verbose information
-c --config Config file [~/etc/auth.conf]"""
# Handle command line options
argv = sys.argv[1:]
try:
opts, args = getopt.getopt(argv,
"c:hdvt",
["config=",
"help",
"debug",
"verbose",
])
except getopt.GetoptError as error:
print( str(error))
print( usage )
sys.exit(1)
for opt, arg in opts:
if opt in ("-h", "--help"):
print(usage)
sys.exit()
elif opt in ("-c", "--config"):
aconfig_file = arg
elif opt in ("-d", "--debug"):
debug = 1
elif opt in ("-v", "--verbose"):
info = 1
# Check python version
major, minor, release, st, num = sys.version_info
# Set up logging, handle differences between Python versions...
# In Python 2.3, logging.basicConfig takes no args
#
FORMAT23="[ %(levelname)s ] %(asctime)s %(filename)s (Line %(lineno)d): %(message)s"
FORMAT24=FORMAT23
FORMAT25="[%(levelname)s] %(asctime)s %(module)s.%(funcName)s(): %(message)s"
FORMAT26=FORMAT25
if major == 2:
if minor ==3:
formatstr = FORMAT23
elif minor == 4:
formatstr = FORMAT24
elif minor == 5:
formatstr = FORMAT25
elif minor == 6:
formatstr = FORMAT26
elif minor == 7:
formatstr = FORMAT26
log = logging.getLogger('autopyfactory')
hdlr = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(FORMAT23)
hdlr.setFormatter(formatter)
log.addHandler(hdlr)
if debug:
log.setLevel(logging.DEBUG) # Override with command line switches
if info:
log.setLevel(logging.INFO) # Override with command line switches
log.debug("Loggin
|
keenondrums/sovrin-node
|
sovrin_common/test/types/test_pool_upg_schema.py
|
Python
|
apache-2.0
| 1,055
| 0.000948
|
import pytest
from sovrin_common.types import ClientPoolUpgradeOperation
from collections import OrderedDict
from plenum.common.messages.fields import ConstantField, ChooseField, VersionField, MapField, Sha256HexField, \
NonNegativeNumberField, LimitedLengthStringField, BooleanField
EXPECTED_ORDERED_FIELDS = OrderedDict([
("type", ConstantField),
    ('action', ChooseField),
("version", VersionField),
('schedule', MapField),
('sha256', Sha256HexField),
('timeout', NonNegativeNumberField),
('justification', LimitedLengthStringField),
("name", LimitedLengthStringField),
("force", BooleanField),
("
|
reinstall", BooleanField),
])
def test_has_expected_fields():
actual_field_names = OrderedDict(ClientPoolUpgradeOperation.schema).keys()
assert actual_field_names == EXPECTED_ORDERED_FIELDS.keys()
def test_has_expected_validators():
schema = dict(ClientPoolUpgradeOperation.schema)
for field, validator in EXPECTED_ORDERED_FIELDS.items():
assert isinstance(schema[field], validator)
|
SOMA-PainKiller/ECAReview
|
ECARUSS/wsgi.py
|
Python
|
mit
| 391
| 0.002558
|
"""
WSGI config for ECAPlanet project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ECARUSS.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
beav/pulp
|
server/pulp/plugins/util/nectar_config.py
|
Python
|
gpl-2.0
| 2,490
| 0.001205
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
"""
Contains functions related to working with the Nectar downloading library.
"""
from functools import partial
from nectar.config import DownloaderConfig
from pulp.common.plugins import importer_constants as constants
def importer_config_to_nectar_config(importer_config):
"""
Translates the Pulp standard importer configuration into a DownloaderConfig instance.
:param importer_config: use the PluginCallConfiguration.flatten method to retrieve a
single dict view on the configuration
:type importer_config: dict
:rtype: nectar.config.DownloaderConfig
"""
# Mapping of importer config key to downloader config key
translations = (
(constants.KEY_SSL_CA_CERT, 'ssl_ca_cert'),
(constants.KEY_SSL_VALIDATION, 'ssl_validation'),
(constants.KEY_SSL_CLIENT_CERT, 'ssl_client_cert'),
(constants.KEY_SSL_CLIENT_KEY, 'ssl_client_key'),
(constants.KEY_PROXY_HOST, 'proxy_url'),
(constants.KEY_PROXY_PORT, 'proxy_port'),
(constants.KEY_PROXY_USER, 'proxy_username'),
(constants.KEY_PROXY_PASS, 'proxy_password'),
(constants.KEY_MAX_DOWNLOADS, 'max_concurrent'),
(constants.KEY_MAX_SPEED, 'max_speed'),
)
download_config_kwargs = {}
adder = partial(_safe_add_arg, importer_config, download_config_kwargs)
map(adder, translations)
download_config = DownloaderConfig(**download_config_kwargs)
return download_config
def _safe_add_arg(importer_config, dl_config, keys_tuple):
"""
Utility to only set values in the downloader config if they are present in the importer's
config.
:type importer_config: dict
:type dl_config: dict
:param keys_tuple: tuple of importer key to download config key
:type keys_tuple: (str, str)
"""
if keys_tuple[0] in importer_config:
dl_config[keys_tuple[1]] = importer_config[keys_tuple[0]]
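# Illustrative usage sketch, not part of the original file; attribute access on the resulting
# DownloaderConfig is an assumption about the nectar library.
# importer_config = {constants.KEY_MAX_DOWNLOADS: 5, constants.KEY_SSL_VALIDATION: True}
# nectar_config = importer_config_to_nectar_config(importer_config)
# nectar_config.max_concurrent  # -> 5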
|
flavio-casacurta/File-FixedS
|
calc_length.py
|
Python
|
mit
| 3,848
| 0.002339
|
# -*- coding: utf-8 -*-
"""
Created on 27/04/2015
@author: C&C - HardSoft
"""
from util.HOFs import *
from util.CobolPatterns import *
from util.homogenize import Homogenize
def calc_length(copy):
if isinstance(copy, list):
book = copy
else:
if isinstance(copy, str):
book = copy.splitlines()
else:
book = []
lines = Homogenize(book)
havecopy = filter(isCopy, lines)
if havecopy:
bkm = ''.join(havecopy[0].split('COPY')[1].replace('.', '').split())
msg = 'COPY {} deve ser expandido.'.format(bkm)
return {'retorno': False, 'msg': msg, 'lrecl': 0}
lrecl = 0
redefines = False
occurs = 0
dicoccurs = {}
level_redefines = 0
for line in lines:
match = CobolPatterns.row_pattern.match(line.strip())
if not match:
continue
match = match.groupdict()
if not match['level']:
continue
if 'REDEFINES' in line and not match['redefines']:
match['redefines'] = CobolPatterns.row_pattern_redefines.search(line).groupdict().get('redefines')
if 'OCCURS' in line and not match['occurs']:
match['occurs'] = CobolPatterns.row_pattern_occurs.search(line).groupdict().get('occurs')
level = int(match['level'])
if redefines:
if level > level_redefines:
continue
redefines = False
level_redefines = 0
if match['redefines']:
level_redefines = level
redefines = True
continue
if occurs:
if level > dicoccurs[occurs]['level']:
if match['occurs']:
occurs += 1
attrib = {}
attrib['occ'] = int(match['occurs'])
attrib['level'] = level
attrib['length'] = 0
dicoccurs[occurs] = attrib
if match['pic']:
dicoccurs[occurs]['length'] += FieldLength(match['pic'], match['usage'])
continue
while True:
if occurs == 1:
lrecl += dicoccurs[occurs]['length'] * dicoccurs[occurs]['occ']
else:
dicoccurs[occurs-1]['length'] += dicoccurs[occurs]['length'] * dicoccurs[occurs]['occ']
del dicoccurs[occurs]
occurs -= 1
if not occurs:
break
if level > dicoccurs[occurs]['level']:
break
if match['occurs']:
occurs += 1
attrib = {}
attrib['occ'] = int(match['occurs'])
attrib['level'] = level
attrib['length'] = 0
dicoccurs[occurs] = attrib
if match['pic']:
if occurs:
dicoccurs[occurs]['length'] += FieldLength(match['pic'], match['usage'])
else:
lrecl += FieldLength(match['pic'], match['usage'])
return {'retorno': True, 'msg': None, 'lrecl': lrecl}
def FieldLength(pic_str, usage):
if pic_str[0] == 'S':
pic_str = pic_str[1:]
while True:
match = CobolPatterns.pic_pattern_repeats.search(pic_str)
if not match:
break
match = match.groupdict()
expanded_str = match['constant'] * int(match['repeat'])
pic_str = CobolPatterns.pic_pattern_repeats.sub(expanded_str, pic_str, 1)
len_field = len(pic_str.replace('V', ''))
    if not usage:
usage = 'DISPLAY'
if 'COMP-3' in usage or 'COMPUTATIONAL-3' in usage:
len_field = len_field / 2 + 1
elif 'COMP' in usage or 'COMPUTATIONAL' in usage or 'BINARY' in usage:
len_field = len_field / 2
elif 'SIGN' in usage:
len_field += 1
    return len_field
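# Rough usage sketch (hypothetical copybook file name; depends on the repo's
# util.CobolPatterns and util.HOFs modules, which are not shown here):
#
#   copybook = open('client.cpy').read()
#   result = calc_length(copybook)
#   if result['retorno']:
#       print('record length:', result['lrecl'])
#   else:
#       print(result['msg'])   # e.g. when an unexpanded COPY statement is found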
|
stephane-martin/salt-debian-packaging
|
salt-2016.3.2/salt/modules/x509.py
|
Python
|
apache-2.0
| 46,018
| 0.003607
|
# -*- coding: utf-8 -*-
'''
Manage X509 certificates
.. versionadded:: 2015.8.0
'''
# Import python libs
from __future__ import absolute_import
import os
import logging
import hashlib
import glob
import random
import ctypes
import tempfile
import yaml
import re
import datetime
import ast
# Import salt libs
import salt.utils
import salt.exceptions
import salt.ext.six as six
from salt.utils.odict import OrderedDict
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
from salt.state import STATE_INTERNAL_KEYWORDS as _STATE_INTERNAL_KEYWORDS
# Import 3rd Party Libs
try:
import M2Crypto
HAS_M2 = True
except ImportError:
HAS_M2 = False
__virtualname__ = 'x509'
log = logging.getLogger(__name__)
EXT_NAME_MAPPINGS = OrderedDict([
('basicConstraints', 'X509v3 Basic Constraints'),
('keyUsage', 'X509v3 Key Usage'),
('extendedKeyUsage', 'X509v3 Extended Key Usage'),
('subjectKeyIdentifier', 'X509v3 Subject Key Identifier'),
('authorityKeyIdentifier', 'X509v3 Authority Key Identifier'),
('issuserAltName', 'X509v3 Issuer Alternative Name'),
('authorityInfoAccess', 'X509v3 Authority Info Access'),
('subjectAltName', 'X509v3 Subject Alternative Name'),
('crlDistributionPoints', 'X509v3 CRL Distribution Points'),
('issuingDistributionPoint', 'X509v3 Issuing Distribution Point'),
('certificatePolicies', 'X509v3 Certificate Policies'),
('policyConstraints', 'X509v3 Policy Constraints'),
('inhibitAnyPolicy', 'X509v3 Inhibit Any Policy'),
('nameConstraints', 'X509v3 Name Constraints'),
('noCheck', 'X509v3 OCSP No Check'),
('nsComment', 'Netscape Comment'),
('nsCertType', 'Netscape Certificate Type'),
])
CERT_DEFAULTS = {'days_valid': 365, 'version': 3, 'serial_bits': 64, 'algorithm': 'sha256'}
def __virtual__():
'''
only load this module if m2crypto is available
'''
if HAS_M2:
return __virtualname__
else:
return (False, 'Could not load x509 module, m2crypto unavailable')
class _Ctx(ctypes.Structure):
'''
This is part of an ugly hack to fix an ancient bug in M2Crypto
https://bugzilla.osafoundation.org/show_bug.cgi?id=7530#c13
'''
# pylint: disable=too-few-public-methods
_fields_ = [('flags', ctypes.c_int),
('issuer_cert', ctypes.c_void_p),
('subject_cert', ctypes.c_void_p),
('subject_req', ctypes.c_void_p),
('crl', ctypes.c_void_p),
('db_meth', ctypes.c_void_p),
('db', ctypes.c_void_p),
]
def _fix_ctx(m2_ctx, issuer=None):
'''
This is part of an ugly hack to fix an ancient bug in M2Crypto
https://bugzilla.osafoundation.org/show_bug.cgi?id=7530#c13
'''
ctx = _Ctx.from_address(int(m2_ctx)) # pylint: disable=no-member
ctx.flags = 0
ctx.subject_cert = None
ctx.subject_req = None
ctx.crl = None
if issuer is None:
ctx.issuer_cert = None
else:
ctx.issuer_cert = int(issuer.x509)
def _new_extension(name, value, critical=0, issuer=None, _pyfree=1):
'''
Create new X509_Extension, This is required because M2Crypto doesn't support
getting the publickeyidentifier from the issuer to create the authoritykeyidentifier
extension.
'''
if name == 'subjectKeyIdentifier' and \
value.strip('0123456789abcdefABCDEF:') is not '':
raise salt.exceptions.SaltInvocationError('value must be precomputed hash')
lhash = M2Crypto.m2.x509v3_lhash() # pylint: disable=no-member
ctx = M2Crypto.m2.x509v3_set_conf_lhash(lhash) # pylint: disable=no-member
#ctx not zeroed
_fix_ctx(ctx, issuer)
x509_ext_ptr = M2Crypto.m2.x509v3_ext_conf(lhash, ctx, name, value) # pylint: disable=no-member
#ctx,lhash freed
if x509_ext_ptr is None:
raise Exception
x509_ext = M2Crypto.X509.X509_Extension(x509_ext_ptr, _pyfree)
x509_ext.set_critical(critical)
return x509_ext
# The next four functions are more hacks because M2Crypto doesn't support getting
# Extensions from CSRs. https://github.com/martinpaljak/M2Crypto/issues/63
def _parse_openssl_req(csr_filename):
'''
Parses openssl command line output, this is a workaround for M2Crypto's
inability to get them from CSR objects.
'''
cmd = ('openssl req -text -noout -in {0}'.format(csr_filename))
output = __salt__['cmd.run_stdout'](cmd)
output = re.sub(r': rsaEncryption', ':', output)
output = re.sub(r'[0-9a-f]{2}:', '', output)
return yaml.safe_load(output)
def _get_csr_extensions(csr):
'''
Returns a list of dicts containing the name, value and critical value of
any extension contained in a csr object.
'''
ret = OrderedDict()
csrtempfile = tempfile.NamedTemporaryFile()
csrtempfile.write(csr.as_pem())
csrtempfile.flush()
csryaml = _parse_openssl_req(csrtempfile.name)
csrtempfile.close()
if csryaml and 'Requested Extensions' in csryaml['Certificate Request']['Data']:
csrexts = csryaml['Certificate Request']['Data']['Requested Extensions']
for short_name, long_name in six.iteritems(EXT_NAME_MAPPINGS):
if long_name in csrexts:
ret[short_name] = csrexts[long_name]
return ret
# None of python libraries read CRLs. Again have to hack it with the openssl CLI
def _parse_openssl_crl(crl_filename):
'''
    Parses openssl command line output, this is a workaround for M2Crypto's
inability to get them from CSR objects.
'''
cmd = ('openssl crl -text -noout -in {0}'.format(crl_filename))
output = __salt__['cmd.run_stdout'](cmd)
crl = {}
for line in output.split('\n'):
line = line.strip()
if line.startswith('Version '):
crl['Version'] = line.replace('Version ', '')
if line.startswith('Signature Algorithm: '):
crl['Signature Algorithm'] = line.replace('Signature Algorithm: ', '')
if line.startswith('Issuer: '):
line = line.replace('Issuer: ', '')
subject = {}
for sub_entry in line.split('/'):
if '=' in sub_entry:
sub_entry = sub_entry.split('=')
subject[sub_entry[0]] = sub_entry[1]
crl['Issuer'] = subject
if line.startswith('Last Update: '):
crl['Last Update'] = line.replace('Last Update: ', '')
last_update = datetime.datetime.strptime(
crl['Last Update'], "%b %d %H:%M:%S %Y %Z")
crl['Last Update'] = last_update.strftime("%Y-%m-%d %H:%M:%S")
if line.startswith('Next Update: '):
crl['Next Update'] = line.replace('Next Update: ', '')
next_update = datetime.datetime.strptime(
crl['Next Update'], "%b %d %H:%M:%S %Y %Z")
crl['Next Update'] = next_update.strftime("%Y-%m-%d %H:%M:%S")
if line.startswith('Revoked Certificates:'):
break
if 'No Revoked Certificates.' in output:
crl['Revoked Certificates'] = []
return crl
output = output.split('Revoked Certificates:')[1]
output = output.split('Signature Algorithm:')[0]
rev = []
for revoked in output.split('Serial Number: '):
if not revoked.strip():
continue
rev_sn = revoked.split('\n')[0].strip()
revoked = rev_sn + ':\n' + '\n'.join(revoked.split('\n')[1:])
rev_yaml = yaml.safe_load(revoked)
for rev_item, rev_values in six.iteritems(rev_yaml): # pylint: disable=unused-variable
if 'Revocation Date' in rev_values:
rev_date = datetime.datetime.strptime(
rev_values['Revoc
|
ticklemepierce/osf.io
|
website/addons/osfstorage/utils.py
|
Python
|
apache-2.0
| 3,726
| 0.001074
|
# -*- coding: utf-8 -*-
import os
import httplib
import logging
import functools
from modularodm.exceptions import ValidationValueError
from framework.exceptions import HTTPError
from framework.analytics import update_counter
from website.addons.osfstorage import settings
logger = logging.getLogger(__name__)
LOCATION_KEYS = ['service', settings.WATERBUTLER_RESOURCE, 'object']
def update_analytics(node, file_id, version_idx):
"""
:param Node node: Root node to update
:param str file_id: The _id field of a filenode
:param int version_idx: Zero-based version index
"""
    update_counter(u'download:{0}:{1}'.format(node._id, file_id))
update_counter(u'download:{0}:{1}:{2}'.format(node._id, file_id, version_idx))
def serialize_revision(node, record, version, index, anon=False):
"""Serialize revision for use in revisions table.
:param Node node: Root node
:param FileRecord record: Root file record
:param FileVersion version: The version to serialize
:param int index: One-based index of version
"""
if anon:
user = None
else:
user = {
'name': version.creator.fullname,
'url': version.creator.url,
}
return {
'user': user,
'index': index + 1,
'date': version.date_created.isoformat(),
'downloads': record.get_download_count(version=index),
'md5': version.metadata.get('md5'),
'sha256': version.metadata.get('sha256'),
}
SIGNED_REQUEST_ERROR = HTTPError(
httplib.SERVICE_UNAVAILABLE,
data={
'message_short': 'Upload service unavailable',
'message_long': (
'Upload service is not available; please retry '
'your upload in a moment'
),
},
)
def get_filename(version_idx, file_version, file_record):
"""Build name for downloaded file, appending version date if not latest.
:param int version_idx: One-based version index
:param FileVersion file_version: Version to name
:param FileRecord file_record: Root file object
"""
if version_idx == len(file_record.versions):
return file_record.name
name, ext = os.path.splitext(file_record.name)
return u'{name}-{date}{ext}'.format(
name=name,
date=file_version.date_created.isoformat(),
ext=ext,
)
def validate_location(value):
for key in LOCATION_KEYS:
if key not in value:
raise ValidationValueError
def must_be(_type):
"""A small decorator factory for OsfStorageFileNode. Acts as a poor mans
polymorphic inheritance, ensures that the given instance is of "kind" folder or file
"""
def _must_be(func):
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
if not self.kind == _type:
raise ValueError('This instance is not a {}'.format(_type))
return func(self, *args, **kwargs)
return wrapped
return _must_be
def copy_files(src, target_settings, parent=None, name=None):
"""Copy the files from src to the target nodesettings
:param OsfStorageFileNode src: The source to copy children from
:param OsfStorageNodeSettings target_settings: The node settings of the project to copy files to
:param OsfStorageFileNode parent: The parent of to attach the clone of src to, if applicable
"""
cloned = src.clone()
cloned.parent = parent
cloned.name = name or cloned.name
cloned.node_settings = target_settings
if src.is_file:
cloned.versions = src.versions
cloned.save()
if src.is_folder:
for child in src.children:
copy_files(child, target_settings, parent=cloned)
return cloned
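# Minimal sketch of how the must_be decorator behaves (hypothetical class, for
# illustration only; the real consumer is OsfStorageFileNode):
#
#   class FakeNode(object):
#       def __init__(self, kind):
#           self.kind = kind
#       @must_be('folder')
#       def list_children(self):
#           return []
#
#   FakeNode('folder').list_children()  # -> []
#   FakeNode('file').list_children()    # raises ValueError('This instance is not a folder')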
|
cancan101/StarCluster
|
starcluster/plugins/users.py
|
Python
|
lgpl-3.0
| 8,400
| 0.000476
|
# Copyright 2009-2014 Justin Riley
#
# This file is part of StarCluster.
#
# StarCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# StarCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with StarCluster. If not, see <http://www.gnu.org/licenses/>.
import os
import posixpath
from starcluster import utils
from starcluster import static
from starcluster import exception
from starcluster import clustersetup
from starcluster.logger import log
class CreateUsers(clustersetup.DefaultClusterSetup):
"""
Plugin for creating one or more cluster users
"""
DOWNLOAD_KEYS_DIR = os.path.join(static.STARCLUSTER_CFG_DIR, 'user_keys')
BATCH_USER_FILE = "/root/.users/users.txt"
def __init__(self, num_users=None, usernames=None, download_keys=None,
download_keys_dir=None):
if usernames:
usernames = [user.strip() for user in usernames.split(',')]
if num_users:
try:
num_users = int(num_users)
except ValueError:
raise exception.BaseException("num_users must be an integer")
elif usernames:
num_users = len(usernames)
else:
raise exception.BaseException(
"you must provide num_users or usernames or both")
if usernames and num_users and len(usernames) != num_users:
raise exception.BaseException(
"only %d usernames provided - %d required" %
(len(usernames), num_users))
self._num_users = num_users
if not usernames:
usernames = ['user%.3d' % i for i in range(1, num_users + 1)]
log.info("CreateUsers: %s" % usernames)
self._usernames = usernames
self._download_keys = str(download_keys).lower() == "true"
self._download_keys_dir = download_keys_dir or self.DOWNLOAD_KEYS_DIR
super(CreateUsers, self).__init__()
def run(self, nodes, master, user, user_shell, volumes):
self._nodes = nodes
self._master = master
self._user = user
self._user_shell = user_shell
self._volumes = volumes
log.info("Creating %d cluster users" % self._num_users)
newusers = self._get_newusers_batch_file(master, self._usernames,
user_shell)
for node in nodes:
self.pool.simple_job(node.ssh.execute,
("echo -n '%s' | xargs -L 1 -I '{}' sh -c 'echo {} | newusers'" % newusers),
jobid=node.alias)
self.pool.wait(numtasks=len(nodes))
for node in nodes:
add_group_str = "grep -q dl-fte /etc/group || groupadd -g 10123 dl-fte"
self.pool.simple_job(node.ssh.execute,
(add_group_str),
jobid=node.alias)
add_user_str = "; ".join(
["usermod -a -G docker,dl-fte %s" % u for u in self._usernames])
self.pool.simple_job(node.ssh.execute,
(add_user_str),
jobid=node.alias)
self.pool.wait(numtasks=len(nodes))
log.info("Configuring passwordless ssh for %d cluster users" %
self._num_users)
pbar = self.pool.progress_bar.reset()
pbar.maxval = self._num_users
for i, user in enumerate(self._usernames):
master.generate_key_for_user(user, auth_new_key=True,
auth_conn_key=True)
master.add_to_known_hosts(user, nodes)
pbar.update(i + 1)
pbar.finish()
self._setup_scratch(nodes, self._usernames)
if self._download_keys:
self._download_user_keys(master, self._usernames)
def _download_user_keys(self, master, usernames):
pardir = posixpath.dirname(self.BATCH_USER_FILE)
bfile = posixpath.basename(self.BATCH_USER_FILE)
if not master.ssh.isdir(pardir):
master.ssh.makedirs(pardir)
log.info("Tarring all SSH keys for cluster users...")
for user in usernames:
master.ssh.execute(
"cp /home/%(user)s/.ssh/id_rsa %(keydest)s" %
dict(user=user, keydest=posixpath.join(pardir, user + '.rsa')))
cluster_tag = master.cluster_groups[0].name.replace(
static.SECURITY_GROUP_PREFIX, '')
tarfile = "%s-%s.tar.gz" % (cluster_tag, master.region.name)
master.ssh.execute("tar -C %s -czf ~/%s . --exclude=%s" %
(pardir, tarfile, bfile))
if not os.path.exists(self._download_keys_dir):
os.makedirs(self._download_keys_dir)
log.info("Copying cluster users SSH keys to: %s" %
os.path.join(self._download_keys_dir, tarfile))
master.ssh.get(tarfile, self._download_keys_dir)
master.ssh.unlink(tarfile)
def _get_newusers_batch_file(self, master, usernames, shell,
batch_file=None):
batch_file = batch_file or self.BATCH_USER_FILE
# False here to avoid the incorrect optimization
# (when new users are added)
        if False and master.ssh.isfile(batch_file):
bfile = master.ssh.remote_file(batch_file, 'r')
bfilecontents = bfile.read()
bfile.close()
return bfilecontents
bfilecontents = ''
tmpl = "%(username)s:%(password)s:%(uid)d:%(gid)d:"
tmpl += "Cluster user account %(username)s:"
tmpl += "/home/%(username)s:%(shell)s\n"
shpath = master.ssh.which(shell)[0]
ctx = dict(shell=shpath)
base_uid, base_gid = self._get_max_unused_user_id()
for user in usernames:
home_folder = '/home/%s' % user
if master.ssh.path_exists(home_folder):
s = master.ssh.stat(home_folder)
uid = s.st_uid
gid = s.st_gid
else:
uid = base_uid
gid = base_gid
base_uid += 1
base_gid += 1
passwd = utils.generate_passwd(8)
ctx.update(username=user, uid=uid, gid=gid, password=passwd)
bfilecontents += tmpl % ctx
pardir = posixpath.dirname(batch_file)
if not master.ssh.isdir(pardir):
master.ssh.makedirs(pardir)
bfile = master.ssh.remote_file(batch_file, 'w')
bfile.write(bfilecontents)
bfile.close()
return bfilecontents
def on_add_node(self, node, nodes, master, user, user_shell, volumes):
self._nodes = nodes
self._master = master
self._user = user
self._user_shell = user_shell
self._volumes = volumes
log.info("Creating %d users on %s" % (self._num_users, node.alias))
newusers = self._get_newusers_batch_file(master, self._usernames,
user_shell)
node.ssh.execute("echo -n '%s' | xargs -L 1 -I '{}' sh -c 'echo {} | newusers'" % newusers)
log.info("Adding %s to known_hosts for %d users" %
(node.alias, self._num_users))
pbar = self.pool.progress_bar.reset()
pbar.maxval = self._num_users
for i, user in enumerate(self._usernames):
master.add_to_known_hosts(user, [node])
pbar.update(i + 1)
pbar.finish()
add_group_str = "grep -q dl-fte /etc/group || groupadd -g 10123 dl-fte"
node.ssh.execute(add_group_str)
add_user_str = "; ".join(
["usermod -a -G docker,dl-fte %s" % u for u in self._usernames])
node.ssh.execute(add_user_str)
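# Hedged configuration sketch (not taken from this repo's docs): StarCluster plugins are
# normally wired up in ~/.starcluster/config, where option names map onto the __init__
# kwargs above. Verify the exact section/option syntax against your StarCluster version.
#
#   [plugin createusers]
#   setup_class = starcluster.plugins.users.CreateUsers
#   num_users = 10
#   download_keys = True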
|
mvaled/sentry
|
src/sentry/tasks/commits.py
|
Python
|
bsd-3-clause
| 7,974
| 0.001756
|
from __future__ import absolute_import
import logging
import six
from django.core.urlresolvers import reverse
from sentry.exceptions import InvalidIdentity, PluginError
from sentry.integrations.exceptions import IntegrationError
from sentry.models import Deploy, LatestRelease, Release, ReleaseHeadCommit, Repository, User
from sentry.plugins import bindings
from sentry.tasks.base import instrumented_task, retry
from sentry.utils.email import MessageBuilder
from sentry.utils.http import absolute_uri
logger = logging.getLogger(__name__)
def generate_invalid_identity_email(identity, commit_failure=False):
new_context = {
"identity": identity,
"auth_url": absolute_uri(reverse("socialauth_associate", args=[identity.provider])),
"commit_failure": commit_failure,
}
return MessageBuilder(
subject="Unable to Fetch Commits" if commit_failure else "Action Required",
context=new_context,
template="sentry/emails/identity-invalid.txt",
html_template="sentry/emails/identity-invalid.html",
)
def generate_fetch_commits_error_email(release, error_message):
new_context = {"release": release, "error_message": error_message}
return MessageBuilder(
subject="Unable to Fetch Commits",
context=new_context,
template="sentry/emails/unable-to-fetch-commits.txt",
html_template="sentry/emails/unable-to-fetch-commits.html",
)
# we're future proofing this function a bit so it could be used with other code
def handle_invalid_identity(identity, commit_failure=False):
# email the user
msg = generate_invalid_identity_email(identity, commit_failure)
msg.send_async(to=[identity.user.email])
# now remove the identity, as its invalid
identity.delete()
@instrumented_task(
name="sentry.tasks.commits.fetch_commits",
queue="commits",
    default_retry_delay=60 * 5,
max_retries=5,
)
@retry(exclude=(Release.DoesNotExist, User.DoesNotExist))
def fetch_commits(release_id, user_id, refs, prev_release_id=None, **kwargs):
# TODO(dcramer): this function could use some cleanup/refactoring as its a bit unwieldly
commit_list = []
release = Release.objects.get(id=release_id)
user = User.objects.get(id=user_id)
prev_release = None
if prev_release_id is not None:
try:
prev_release = Release.objects.get(id=prev_release_id)
except Release.DoesNotExist:
pass
for ref in refs:
try:
repo = Repository.objects.get(
organization_id=release.organization_id, name=ref["repository"]
)
except Repository.DoesNotExist:
logger.info(
"repository.missing",
extra={
"organization_id": release.organization_id,
"user_id": user_id,
"repository": ref["repository"],
},
)
continue
binding_key = (
"integration-repository.provider"
if is_integration_provider(repo.provider)
else "repository.provider"
)
try:
provider_cls = bindings.get(binding_key).get(repo.provider)
except KeyError:
continue
# if previous commit isn't provided, try to get from
# previous release otherwise, try to get
# recent commits from provider api
start_sha = None
if ref.get("previousCommit"):
start_sha = ref["previousCommit"]
elif prev_release:
try:
start_sha = ReleaseHeadCommit.objects.filter(
organization_id=release.organization_id,
release=prev_release,
repository_id=repo.id,
).values_list("commit__key", flat=True)[0]
except IndexError:
pass
end_sha = ref["commit"]
provider = provider_cls(id=repo.provider)
try:
if is_integration_provider(provider.id):
repo_commits = provider.compare_commits(repo, start_sha, end_sha)
else:
repo_commits = provider.compare_commits(repo, start_sha, end_sha, actor=user)
except NotImplementedError:
pass
except Exception as exc:
logger.info(
"fetch_commits.error",
extra={
"organization_id": repo.organization_id,
"user_id": user_id,
"repository": repo.name,
"provider": provider.id,
"error": six.text_type(exc),
"end_sha": end_sha,
"start_sha": start_sha,
},
)
if isinstance(exc, InvalidIdentity) and getattr(exc, "identity", None):
handle_invalid_identity(identity=exc.identity, commit_failure=True)
elif isinstance(exc, (PluginError, InvalidIdentity, IntegrationError)):
msg = generate_fetch_commits_error_email(release, exc.message)
msg.send_async(to=[user.email])
else:
msg = generate_fetch_commits_error_email(
release, "An internal system error occurred."
)
msg.send_async(to=[user.email])
else:
logger.info(
"fetch_commits.complete",
extra={
"organization_id": repo.organization_id,
"user_id": user_id,
"repository": repo.name,
"end_sha": end_sha,
"start_sha": start_sha,
"num_commits": len(repo_commits or []),
},
)
commit_list.extend(repo_commits)
if commit_list:
release.set_commits(commit_list)
deploys = Deploy.objects.filter(
organization_id=release.organization_id, release=release, notified=False
).values_list("id", "environment_id", "date_finished")
# XXX(dcramer): i dont know why this would have multiple environments, but for
# our sanity lets assume it can
pending_notifications = []
last_deploy_per_environment = {}
for deploy_id, environment_id, date_finished in deploys:
last_deploy_per_environment[environment_id] = (deploy_id, date_finished)
pending_notifications.append(deploy_id)
repo_queryset = ReleaseHeadCommit.objects.filter(
organization_id=release.organization_id, release=release
).values_list("repository_id", "commit")
# we need to mark LatestRelease, but only if there's not a deploy which has completed
# *after* this deploy (given we might process commits out of order)
for repository_id, commit_id in repo_queryset:
for environment_id, (deploy_id, date_finished) in six.iteritems(
last_deploy_per_environment
):
if not Deploy.objects.filter(
id__in=LatestRelease.objects.filter(
repository_id=repository_id, environment_id=environment_id
).values("deploy_id"),
date_finished__gt=date_finished,
).exists():
LatestRelease.objects.create_or_update(
repository_id=repository_id,
environment_id=environment_id,
values={
"release_id": release.id,
"deploy_id": deploy_id,
"commit_id": commit_id,
},
)
for deploy_id in pending_notifications:
Deploy.notify_if_ready(deploy_id, fetch_complete=True)
def is_integration_provider(provider):
return provider and provider.startswith("integrations:")
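# For reference, derived directly from the check above:
#   is_integration_provider('integrations:github')  -> True
#   is_integration_provider('github')               -> False
#   is_integration_provider(None)                   -> None (falsy)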
|
medallia/aurora
|
src/main/python/apache/aurora/executor/common/sandbox.py
|
Python
|
apache-2.0
| 12,730
| 0.012412
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import getpass
import grp
import os
import pwd
import shutil
import subprocess
from abc import abstractmethod, abstractproperty
from twitter.common import log
from twitter.common.dirutil import safe_mkdir, safe_rmtree, touch
from twitter.common.lang import Interface
from gen.apache.aurora.api.constants import TASK_FILESYSTEM_MOUNT_POINT
class SandboxInterface(Interface):
class Error(Exception): pass
class CreationError(Error): pass
class DeletionError(Error): pass
@abstractproperty
def root(self):
"""Return the root path of the sandbox within the host filesystem."""
@abstractproperty
def container_root(self):
"""Return the root path of the sandbox as it's visible to the running task."""
@abstractproperty
def chrooted(self):
"""Returns whether or not the sandbox is a chroot."""
@abstractproperty
def is_filesystem_image(self):
"""Returns whether or not the task is using a filesystem image."""
@abstractmethod
def exists(self):
"""Returns true if the sandbox appears to exist."""
@abstractmethod
def create(self, *args, **kw):
"""Create the sandbox."""
@abstractmethod
def destroy(self, *args, **kw):
"""Destroy the sandbox."""
class SandboxProvider(Interface):
def _get_sandbox_user(self, assigned_task):
return assigned_task.task.job.role
@abstractmethod
def from_assigned_task(self, assigned_task):
"""Return the appropriate Sandbox implementation from an AssignedTask."""
class DefaultSandboxProvider(SandboxProvider):
MESOS_DIRECTORY_ENV_VARIABLE = 'MESOS_DIRECTORY'
def from_assigned_task(self, assigned_task, **kwargs):
mesos_dir = os.environ[self.MESOS_DIRECTORY_ENV_VARIABLE]
container = assigned_task.task.container
if container.docker:
return DockerDirectorySandbox(mesos_dir, **kwargs)
elif container.mesos and container.mesos.image:
return FileSystemImageSandbox(
mesos_dir,
user=self._get_sandbox_user(assigned_task),
**kwargs)
else:
return DirectorySandbox(mesos_dir, user=self._get_sandbox_user(assigned_task), **kwargs)
class DirectorySandbox(SandboxInterface):
""" Basic sandbox implementation using a directory on the filesystem """
SANDBOX_NAME = 'sandbox'
def __init__(self, mesos_dir, user=getpass.getuser(), **kwargs):
self._mesos_dir = mesos_dir
self._user = user
@property
def root(self):
return os.path.join(self._mesos_dir, self.SANDBOX_NAME)
@property
def container_root(self):
return self.root
@property
def chrooted(self):
return False
@property
def is_filesystem_image(self):
return False
def exists(self):
return os.path.exists(self.root)
def get_user_and_group(self):
try:
pwent = pwd.getpwnam(self._user)
grent = grp.getgrgid(pwent.pw_gid)
return (pwent, grent)
except KeyError:
raise self.CreationError(
'Could not create sandbox because user does not exist: %s' % self._user)
def create(self):
log.debug('DirectorySandbox: mkdir %s' % self.root)
try:
safe_mkdir(self.root)
except (IOError, OSError) as e:
raise self.CreationError('Failed to create the sandbox: %s' % e)
if self._user:
pwent, grent = self.get_user_and_group()
try:
# Mesos provides a sandbox directory with permission 0750 owned by the user of the executor.
# In case of Thermos this is `root`, as Thermos takes the responsibility to drop
# privileges to the designated non-privileged user/role. To ensure non-provileged processes
# can still read their sandbox, Thermos must also update the permissions of the scratch
# directory created by Mesos.
# This is necessary since Mesos 1.6.0 (https://issues.apache.org/jira/browse/MESOS-8332).
log.debug('DirectorySandbox: chown %s:%s %s' % (self._user, grent.gr_name, self._mesos_dir))
os.chown(self._mesos_dir, pwent.pw_uid, pwent.pw_gid)
log.debug('DirectorySandbox: chown %s:%s %s' % (self._user, grent.gr_name, self.root))
os.chown(self.root, pwent.pw_uid, pwent.pw_gid)
log.debug('DirectorySandbox: chmod 700 %s' % self.root)
os.chmod(self.root, 0700)
except (IOError, OSError) as e:
raise self.CreationError('Failed to chown/chmod the sandbox: %s' % e)
def destroy(self):
try:
safe_rmtree(self.root)
except (IOError, OSError) as e:
raise self.DeletionError('Failed to destroy sandbox: %s' % e)
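# Illustrative sketch of the plain DirectorySandbox lifecycle (hypothetical mesos_dir
# path; in production DefaultSandboxProvider derives it from $MESOS_DIRECTORY):
#
#   sb = DirectorySandbox('/var/lib/mesos/slaves/S1/frameworks/F1/executors/E1/runs/latest')
#   sb.create()        # makes <mesos_dir>/sandbox, chowns it to the sandbox user, chmods 700
#   print(sb.root)     # .../runs/latest/sandbox
#   sb.destroy()       # removes the sandbox tree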
class DockerDirectorySandbox(DirectorySandbox):
""" A sandbox implementation that configures the sandbox correctly for docker containers. """
def __init__(self, mesos_dir, **kwargs):
# remove the user value from kwargs if it was set.
kwargs.pop('user', None)
super(DockerDirectorySandbox, self).__init__(mesos_dir, user=None, **kwargs)
def _create_symlinks(self):
# This sets up the container to have a similar directory structure to the host.
# It takes self._mesos_dir (e.g. "[exec-root]/runs/RUN1/") and:
# - Sets mesos_host_sandbox_root = "[exec-root]/runs/" (one level up from mesos_host_sandbox)
    # - Creates mesos_host_sandbox_root (recursively)
# - Symlinks self._mesos_dir -> $MESOS_SANDBOX (typically /mnt/mesos/sandbox)
# $MESOS_SANDBOX is provided in the environment by the Mesos containerizer.
mesos_host_sandbox_root = os.path.dirname(self._mesos_dir)
try:
safe_mkdir(mesos_host_sandbox_root)
os.symlink(os.environ['MESOS_SANDBOX'], self._mesos_dir)
except (IOError, OSError) as e:
raise self.CreationError('Failed to create the sandbox root: %s' % e)
def create(self):
self._create_symlinks()
super(DockerDirectorySandbox, self).create()
class FileSystemImageSandbox(DirectorySandbox):
"""
A sandbox implementation that configures the sandbox correctly for tasks provisioned from a
filesystem image.
"""
# returncode from a `useradd` or `groupadd` call indicating that the uid/gid already exists.
_USER_OR_GROUP_ID_EXISTS = 4
# returncode from a `useradd` or `groupadd` call indicating that the user/group name
# already exists.
_USER_OR_GROUP_NAME_EXISTS = 9
def __init__(self, mesos_dir, **kwargs):
self._task_fs_root = os.path.join(mesos_dir, TASK_FILESYSTEM_MOUNT_POINT)
self._no_create_user = kwargs.pop('no_create_user', False)
self._mounted_volume_paths = kwargs.pop('mounted_volume_paths', None)
self._sandbox_mount_point = kwargs.pop('sandbox_mount_point', None)
if self._sandbox_mount_point is None:
raise self.Error(
'Failed to initialize FileSystemImageSandbox: no value specified for sandbox_mount_point')
super(FileSystemImageSandbox, self).__init__(mesos_dir, **kwargs)
def _verify_group_match_in_taskfs(self, group_id, group_name):
try:
result = subprocess.check_output(
['chroot', self._task_fs_root, 'getent', 'group', group_name])
except subprocess.CalledProcessError as e:
raise self.CreationError(
'Error when getting group id for name %s in task image: %s' % (
group_name, e))
splitted = result.split(':')
if (len(splitted) < 3 or splitted[0] != '%s' % group_name or
splitted[2] != '%s' % group_id):
raise self.CreationError(
'Group id result %s from image does not match name %s and id %s' % (
result, group_name, group_id))
def _verify_user_match_in_taskfs(self, user_id, user_name, group_id, group_name):
try:
result = subprocess.check_output(
['chroot', self._task_fs_root, 'id', '%s' % user_nam
|
doctormo/django-autotest
|
testapp/models.py
|
Python
|
agpl-3.0
| 217
| 0.018433
|
"""
Test models.
""
|
"
from django.db.models import Model, CharField, BooleanField, DateTimeField
#raise IOError("A")
class Thing(Model):
name = CharField(max_length=32)
#
value = BooleanField(default=False)
|
germfue/vps-tools
|
vps/console.py
|
Python
|
bsd-3-clause
| 3,431
| 0.000292
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Germán Fuentes Capella <development@fuentescapella.com>
# BSD 3-Clause License
#
# Copyright (c) 2017, Germán Fuentes Capella
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import ruamel.yaml
from clint.textui import puts, columns
from clint.textui.cols import console_width
def get_headers(dl):
headers = set()
for d in dl:
for key in d.keys():
headers.add(key)
headers = list(headers)
headers.sort()
return headers
def column_size(headers, dl):
csize = {}
for header in headers:
# initialize to the length of the key (header)
length = len(header)
for d in dl:
item_length = len(str(d.get(header, '')))
if item_length > length:
length = item_length
csize[header] = length
return csize
def _trim(value, length):
value = str(value)
if len(value) > length:
value = value[0:length]
value = value[0:-3] + '...'
return value
def display_yaml(a_dict):
puts(ruamel.yaml.dump(a_dict, Dumper=ruamel.yaml.RoundTripDumper))
def display(dl):
"""
    Displays a list of dicts (dl) that contain the same keys
"""
headers = get_headers(dl)
csize = column_size(headers, dl)
cons_width = console_width({})
values = csize.values()
content_width = sum(values)
if content_width > cons_width:
        # if content is bigger than console, switch to yaml format
output = {}
for d in dl:
key = d.get('label') or d.get('SUBID') or d.get('SCRIPTID')
output[key] = d
puts(ruamel.yaml.dump(output, Dumper=ruamel.yaml.RoundTripDumper))
else:
# otherwise, print a table
row = [[header, csize.get(header, '')] for header in headers]
puts(columns(*row))
for d in dl:
row = [[_trim(d.get(h, ''), csize[h]), csize[h]] for h in headers]
puts(columns(*row))
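# Small worked example of the pure helpers above (values checked by hand):
#   rows = [{'label': 'a', 'SUBID': 1}, {'label': 'bbbb'}]
#   get_headers(rows)                      -> ['SUBID', 'label']
#   column_size(get_headers(rows), rows)   -> {'SUBID': 5, 'label': 5}
#   _trim('abcdefgh', 5)                   -> 'ab...'
# display(rows) prints a two-column table, or falls back to YAML output when the
# combined column widths exceed the console width.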
|
alexgerstein/dartmouth-roommates
|
migrations/versions/36e92a9c018b_.py
|
Python
|
gpl-2.0
| 748
| 0.009358
|
"""empty message
Revision ID: 36e92a9c018b
Revises: 4758ea467345
Create Date: 2015-05-06 02:42:39.064090
"""
# revision identifiers, used by Alembic.
revision = '36e92a9c018b'
down_revision = '4758ea467345'
from alembic import op
import sqlalchemy as sa
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.add_column(sa.Column('last_emailed', sa.DateTime(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('user', schema=None) as batch_op:
        batch_op.drop_column('last_emailed')
### end Alembic commands ###
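# Typical invocation for this revision via the standard Alembic CLI (for context only):
#   alembic upgrade 36e92a9c018b      # adds the user.last_emailed column
#   alembic downgrade 4758ea467345    # drops it again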
|
TarnumG95/PictureMatchCheater
|
merge/UI.py
|
Python
|
mit
| 1,196
| 0.016863
|
# -*- coding: utf-8 -*-
# -- cmd -- pip install PyUserInput
import time
import win32gui
import win32con
import PIL.ImageGrab
from pymouse import PyMouse
PIECE_X = 44
PIECE_Y = 40
NUM_X = 14
NUM_Y = 10
def getOrigin():
cwllk = '宠物连连看'.decode('utf8')
hwnd = win32gui.FindWindow("#32770", cwllk)
print hwnd
#win32gui.ShowWindow(hwnd, win32con.SW_SHOWMINIMIZED)
win32gui.ShowWindow(hwnd, win32con.SW_SHOWNORMAL)
win32gui.SetForegroundWindow(hwnd)
rect = win32gui.GetWindowRect(hwnd)
time.sleep(0.5)
#print rect
#newRect = (rect[0] + 58, rect[1] + 104, rect[0] + 674, rect[1] + 504)
return rect
def getPic(RECT):
"""
RECT = (x1,y1,x2,y2)
"""
pic = PIL.ImageGrab.grab(getOrigin())
return pic
def pause():
m = PyMouse()
m.click(RECT[0] -58 + 307, RECT[1] - 104 + 62)
time.sleep(0.5)
def click(pos):
'''
pos: (x, y) # (0, 0) for top left piece
'''
m = PyMouse()
m.click(pos[0],pos[1])
# only for test
if __name__ == '__main__':
    RECT = getOrigin()  # pause() reads the module-level RECT, so define it before use
    pic = getPic(RECT)
pic.save("screenshot" + ".png")
click((0,0))
click((0,9))
click((13,9))
click((13,0))
pause()
|
DavidAndreev/indico
|
indico/modules/events/surveys/controllers/display.py
|
Python
|
gpl-3.0
| 5,780
| 0.003114
|
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from flask import redirect, flash, session, request
from sqlalchemy.orm import joinedload
from werkzeug.exceptions import Forbidden
from indico.core.db import db
from indico.modules.auth.util import redirect_to_login
from indico.modules.events.models.events import EventType
from indico.modules.events.surveys.models.items import SurveySection
from indico.modules.events.surveys.models.submissions import SurveyAnswer, SurveySubmission
from indico.modules.events.surveys.models.surveys import Survey, SurveyState
from indico.modules.events.surveys.util import make_survey_form, was_survey_submitted, save_submitted_survey_to_session
from indico.modules.events.surveys.views import (WPDisplaySurveyConference, WPDisplaySurveyMeeting,
WPDisplaySurveyLecture)
from indico.util.i18n import _
from indico.web.flask.util import url_for
from MaKaC.webinterface.rh.conferenceDisplay import RHConferenceBaseDisplay
def _can_redirect_to_single_survey(surveys):
return len(surveys) == 1 and surveys[0].is_active and not was_survey_submitted(surveys[0])
class RHSurveyBaseDisplay(RHConferenceBaseDisplay):
def _checkParams(self, params):
RHConferenceBaseDisplay._checkParams(self, params)
self.event = self._conf
@property
def view_class(self):
mapping = {EventType.conference: WPDisplaySurveyConference,
EventType.meeting: WPDisplaySurveyMeeting,
EventType.lecture: WPDisplaySurveyLecture}
return mapping[self.event_new.type_]
class RHSurveyList(RHSurveyBaseDisplay):
def _process(self):
surveys = Survey.find_all(Survey.is_visible, Survey.event_id == int(self.event.id),
_eager=(Survey.questions, Survey.submissions))
if _can_redirect_to_single_survey(surveys):
return redirect(url_for('.display_survey_form', surveys[0]))
return self.view_class.render_template('display/survey_list.html', self.event, surveys=surveys,
event=self.event, states=SurveyState,
was_survey_submitted=was_survey_submitted)
class RHSubmitSurvey(RHSurveyBaseDisplay):
CSRF_ENABLED = True
normalize_url_spec = {
'locators': {
lambda self: self.survey
}
}
def _checkProtection(self):
RHSurveyBaseDisplay._checkProtection(self)
if self.survey.require_user and not session.user:
raise Forbidden(response=redirect_to_login(reason=_('You are trying to answer a survey '
'that requires you to be logged in')))
def _checkParams(self, params):
RHSurveyBaseDisplay._checkParams(self, params)
self.survey = (Survey
.find(Survey.id == request.view_args['survey_id'], Survey.is_visible)
.options(joinedload(Survey.submissions))
.options(joinedload(Survey.sections).joinedload(SurveySection.children))
.one())
if not self.survey.is_active:
flash(_('This survey is not active'), 'error')
return redirect(url_for('.display_survey_list', self.event))
elif was_survey_submitted(self.survey):
flash(_('You have already answered this survey'), 'error')
return redirect(url_for('.display_survey_list', self.event))
def _process(self):
form = make_survey_form(self.survey)()
if form.validate_on_submit():
submission = self._save_answers(form)
save_submitted_survey_to_session(submission)
self.survey.send_submission_notification(submission)
flash(_('Your answers has been saved'), 'success')
return redirect(url_for('.display_survey_list', self.event))
surveys = Survey.find_all(Survey.is_visible, Survey.event_id == int(self.event.id))
if not _can_redirect_to_single_survey(surveys):
back_button_endpoint = '.display_survey_list'
elif self.event.getType() != 'conference':
back_button_endpoint = 'event.conferenceDisplay'
else:
back_button_endpoint = None
return self.view_class.render_template('display/survey_questionnaire.html', self.event, form=form,
event=self.event, survey=self.survey,
back_button_endpoint=back_button_endpoint)
def _save_answers(self, form):
survey = self.survey
submission = SurveySubmission(survey=survey)
if not survey.anonymous:
submission.user = session.user
for question in survey.questions:
answer = SurveyAnswer(question=question, data=getattr(form, 'question_{}'.format(question.id)).data)
submission.answers.append(answer)
db.session.flush()
return submission
|
nsdf/nsdf
|
benchmark/benchmark_writer.py
|
Python
|
gpl-3.0
| 13,434
| 0.00402
|
# benchmark_writer.py ---
#
# Filename: benchmark_writer.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Wed Sep 3 10:22:50 2014 (+0530)
# Version:
# Last-Updated:
# By:
# Update #: 0
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
"""This script benchmarks the nsdf writer using randomly generated
data.
Note that we violate the unique source id requirement here.
"""
import sys
import argparse
from collections import defaultdict
import numpy as np
from numpy import testing as nptest
import h5py as h5
from datetime import datetime
import unittest
import os
import socket
sys.path.append('..')
import nsdf
DATADIR = '/data/subha/nsdf_samples/benchmark'
HOSTNAME = socket.gethostname()
PID = os.getpid()
TIMESTAMP = datetime.now().strftime('%Y%m%d_%H%M%S')
np.random.seed(1) # For reproducibility
def get_poisson_times(npoints, rate):
"""Return `npoints` time points from a Poisson event with rate
`rate`"""
scale = 1.0/rate
return np.cumsum(np.random.exponential(scale=scale, size=npoints))
def create_uniform_data(name, num_sources, num_cols):
"""Create data for m=`num_sources`, each n=`num_cols` long."""
data = nsdf.UniformData(name, field='Vm', unit='V', dt=1e-5, tunit='s', dtype=np.float32)
for ii in range(num_sources):
data.put_data('src_{}'.format(ii), np.random.rand(num_cols))
return data
def create_nonuniform_data(name, num_sources, mincol, maxcol):
"""Create nonuniform data for m=`num_sources`, the number of sampling
points n for each source is randomly chosen between `mincol` and
`maxcol`
"""
data = nsdf.NonuniformData(name, unit='V', tunit='s', dtype=np.float32, ttype=np.float32)
if mincol < maxcol:
ncols = np.random.randint(low=mincol, high=maxcol, size=num_sources)
else:
        ncols = np.ones(num_sources, dtype=int) * maxcol
for ii in range(num_sources):
value = np.random.rand(ncols[ii])
time = get_poisson_times(ncols[ii], 10)
data.put_data('src_{}'.format(ii), (value, time))
return data
def create_event_data(name, num_sources, mincol, maxcol):
"""Create event data for m=`num_sources`, the number of sampling
points for each source is randomly chosen between `mincol` and
`maxcol`
"""
data = nsdf.EventData(name, unit='s', dtype=np.float32)
ncols = np.random.randint(low=mincol, high=maxcol, size=num_sources)
for ii in range(num_sources):
data.put_data('src_{}'.format(ii), get_poisson_times(ncols[ii], 10))
return data
def create_uniform_vars(num_vars, num_sources, num_cols, prefix='var'):
"""Note that they all share the same sources."""
ret = []
for ii in range(num_vars):
ret.append(create_uniform_data('{}_{}'.format(prefix, ii),
num_sources,
num_cols))
return ret
def create_nonuniform_vars(num_vars, num_sources, mincol, maxcol, prefix='var'):
"""Note that they all share the same sources."""
ret = []
for ii in range(num_vars):
ret.append(create_nonuniform_data('{}_{}'.format(prefix, ii),
num_sources,
mincol, maxcol))
return ret
def create_event_vars(num_vars, num_sources, mincol, maxcol, prefix='var'):
"""Note that they all share the same sources."""
ret = []
for ii in range(num_vars):
ret.append(create_event_data('{}_{}'.format(prefix, ii),
num_sources, mincol, maxcol))
return ret
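# Quick sketch of what the factories above produce (shapes only; values are random):
#   vm = create_uniform_data('vm', num_sources=3, num_cols=100)
#   # -> nsdf.UniformData named 'vm' with sources 'src_0'..'src_2', 100 float32 samples each
#   spikes = create_event_vars(2, 5, mincol=10, maxcol=20, prefix='spike')
#   # -> two nsdf.EventData objects ('spike_0', 'spike_1'), each holding 5 Poisson spike trains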
def create_datasets(args):
uvar_list = []
nvar_list = []
evar_list = []
if args.sampling:
if args.sampling.startswith('u'):
uvar_list = create_uniform_vars(args.variables,
args.sources,
(args.maxcol + args.mincol) / 2,
prefix='uniform')
elif args.sampling.startswith('n'):
nvar_list = create_nonuniform_vars(args.variables,
args.sources,
args.mincol,
args.maxcol,
prefix='nonuniform')
elif args.sampling.startswith('e'):
evar_list = create_event_vars(args.variables,
args.sources,
args.mincol,
args.maxcol,
prefix='event')
else:
uvar_list = create_uniform_vars(args.variables,
args.sources,
(args.maxcol + args.mincol) / 2,
prefix='uniform')
nvar_list = create_nonuniform_vars(args.variables,
args.sources,
args.mincol,
args.maxcol,
prefix='nonuniform')
evar_list = create_event_vars(args.variables,
args.sources,
args.mincol,
args.maxcol,
prefix='event')
return {'uniform': uvar_list,
'nonuniform': nvar_list,
'event': evar_list}
def write_incremental(writer, source_ds, data, step, maxcol, dialect):
for ii in range(0, maxcol + step - 1, step):
if isinstance(data, nsdf.UniformData):
tmp = nsdf.UniformData(data.name, unit=data.unit,
dt=data.dt, tunit=data.tunit, dtype=np.float32)
for src, value in data.get_source_data_dict().items():
tmp.put_data(src, value[ii: ii + step])
writer.add_uniform_data(source_ds, tmp)
elif isinstance(data, nsdf.NonuniformData):
tmp = nsdf.NonuniformData(data.name, unit=data.unit,
tunit=data.tunit, dtype=np.float32, ttype=np.float32)
for src, (value, time) in data.get_source_data_dict().items():
value_chunk = value[ii: ii+step]
time_chunk = time[ii: ii+step]
tmp.put_data(src, (value_chunk, time_chunk))
if dialect == nsdf.dialect.ONED:
writer.add_nonuniform_1d(source_ds, tmp)
elif dialect == nsdf.dialect.VLEN:
writer.add_nonuniform_vlen(source_ds, tmp)
else:
writer.add_nonuniform_nan(source_ds, tmp)
elif isinstance(data, nsdf.EventData):
tmp = nsdf.EventData(data.name, unit=data.unit, dtype=np.float32)
for src, value in data.get_source_data_dict().items():
value_chunk = value[ii: ii+step]
tmp.put_data(src, value_chunk)
if dialect == nsdf.dialect.ONED:
writer.add_event_1d(source_ds, tmp)
elif dialect == nsdf.dialect.VLEN:
writer.add_event_vlen(source_ds, tmp)
else:
writer.add_event_nan(source_ds, tmp)
|
zingale/pyro2
|
compressible/BC.py
|
Python
|
bsd-3-clause
| 8,919
| 0.001009
|
"""
compressible-specific boundary conditions. Here, in particular, we
implement an HSE BC in the vertical direction.
Note: the pyro BC routines operate on a single variable at a time, so
some work will necessarily be repeated.
Also note: we may come in here with the aux_data (source terms), so
we'll do a special case for them
"""
import compressible.eos as eos
from util import msg
import math
import numpy as np
def user(bc_name, bc_edge, variable, ccdata):
"""
A hydrostatic boundary. This integrates the equation of HSE into
the ghost cells to get the pressure and density under the assumption
that the specific internal energy is constant.
Upon exit, the ghost cells for the input variable will be set
Parameters
----------
bc_name : {'hse'}
The descriptive name for the boundary condition -- this allows
for pyro to have multiple types of user-supplied boundary
conditions. For this module, it needs to be 'hse'.
bc_edge : {'ylb', 'yrb'}
The boundary to update: ylb = lower y boundary; yrb = upper y
boundary.
variable : {'density', 'x-momentum', 'y-momentum', 'energy'}
The variable whose ghost cells we are filling
ccdata : CellCenterData2d object
The data object
"""
myg = ccdata.grid
if bc_name == "hse":
if bc_edge == "ylb":
# lower y boundary
# we will take the density to be constant, the velocity to
# be outflow, and the pressure to be in HSE
if variable in ["density", "x-momentum", "y-momentum", "ymom_src", "E_src", "fuel", "ash"]:
v = ccdata.get_var(variable)
j = myg.jlo-1
while j >= 0:
v[:, j] = v[:, myg.jlo]
j -= 1
elif variable == "energy":
dens = ccdata.get_var("density")
xmom = ccdata.get_var("x-momentum")
ymom = ccdata.get_var("y-momentum")
ener = ccdata.get_var("energy")
grav = ccdata.get_aux("grav")
gamma = ccdata.get_aux("gamma")
dens_base = dens[:, myg.jlo]
ke_base = 0.5*(xmom[:, myg.jlo]**2 + ymom[:, myg.jlo]**2) / \
dens[:, myg.jlo]
eint_base = (ener[:, myg.jlo] - ke_base)/dens[:, myg.jlo]
pres_base = eos.pres(gamma, dens_base, eint_base)
# we are assuming that the density is constant in this
# formulation of HSE, so the pressure comes simply from
# differencing the HSE equation
j = myg.jlo-1
while j >= 0:
pres_below = pres_base - grav*dens_base*myg.dy
rhoe = eos.rhoe(gamma, pres_below)
ener[:, j] = rhoe + ke_base
pres_base = pres_below.copy()
j -= 1
else:
raise NotImplementedError("variable not defined")
elif bc_edge == "yrb":
# upper y boundary
# we will take the density to be constant, the velocity to
# be outflow, and the pressure to be in HSE
if variable in ["density", "x-momentum", "y-momentum", "ymom_src", "E_src", "fuel", "ash"]:
v = ccdata.get_var(variable)
for j in range(myg.jhi+1, myg.jhi+myg.ng+1):
v[:, j] = v[:, myg.jhi]
elif variable == "energy":
dens = ccdata.get_var("density")
xmom = ccdata.get_var("x-momentum")
ymom = ccdata.get_var("y-momentum")
ener = ccdata.get_var("energy")
grav = ccdata.get_aux("grav")
gamma = ccdata.get_aux("gamma")
dens_base = dens[:, myg.jhi]
ke_base = 0.5*(xmom[:, myg.jhi]**2 + ymom[:, myg.jhi]**2) / \
dens[:, myg.jhi]
eint_base = (ener[:, myg.jhi] - ke_base)/dens[:, myg.jhi]
pres_base = eos.pres(gamma, dens_base, eint_base)
# we are assuming that the density is constant in this
# formulation of HSE, so the pressure comes simply from
# differencing the HSE equation
for j in range(myg.jhi+1, myg.jhi+myg.ng+1):
pres_above = pres_base + grav*dens_base*myg.dy
rhoe = eos.rhoe(gamma, pres_above)
ener[:, j] = rhoe + ke_base
pres_base = pres_above.copy()
else:
raise NotImplementedError("variable not defined")
else:
msg.fail("error: hse BC not supported for xlb or xrb")
elif bc_name == "ramp":
# Boundary conditions for double Mach reflection problem
gamma = ccdata.get_aux("gamma")
if bc_edge == "xlb":
            # lower x boundary
# inflow condition with post shock setup
v = ccdata.get_var(variable)
i = myg.ilo - 1
if variable in ["density", "x-momentum", "y-momentum", "energy"]:
val = inflow_post_bc(variable, gamma)
while i >= 0:
                    v[i, :] = val
i = i - 1
else:
v[:, :] = 0.0 # no source term
elif bc_edge == "ylb":
# lower y boundary
# for x > 1./6., reflective boundary
# for x < 1./6., inflow with post shock setup
if variable in ["density", "x-momentum", "y-momentum", "energy"]:
v = ccdata.get_var(variable)
j = myg.jlo - 1
jj = 0
while j >= 0:
xcen_l = myg.x < 1.0/6.0
xcen_r = myg.x >= 1.0/6.0
v[xcen_l, j] = inflow_post_bc(variable, gamma)
if variable == "y-momentum":
v[xcen_r, j] = -1.0*v[xcen_r, myg.jlo+jj]
else:
v[xcen_r, j] = v[xcen_r, myg.jlo+jj]
j = j - 1
jj = jj + 1
else:
v = ccdata.get_var(variable)
v[:, :] = 0.0 # no source term
elif bc_edge == "yrb":
# upper y boundary
# time-dependent boundary, the shockfront moves with a 10 mach velocity forming an angle
# to the x-axis of 30 degrees clockwise.
# x coordinate of the grid is used to judge whether the cell belongs to pure post shock area,
# the pure pre shock area or the mixed area.
if variable in ["density", "x-momentum", "y-momentum", "energy"]:
v = ccdata.get_var(variable)
for j in range(myg.jhi+1, myg.jhi+myg.ng+1):
shockfront_up = 1.0/6.0 + (myg.y[j] + 0.5*myg.dy*math.sqrt(3))/math.tan(math.pi/3.0) \
+ (10.0/math.sin(math.pi/3.0))*ccdata.t
shockfront_down = 1.0/6.0 + (myg.y[j] - 0.5*myg.dy*math.sqrt(3))/math.tan(math.pi/3.0) \
+ (10.0/math.sin(math.pi/3.0))*ccdata.t
shockfront = np.array([shockfront_down, shockfront_up])
for i in range(myg.ihi+myg.ng+1):
v[i, j] = 0.0
cx_down = myg.x[i] - 0.5*myg.dx*math.sqrt(3)
cx_up = myg.x[i] + 0.5*myg.dx*math.sqrt(3)
cx = np.array([cx_down, cx_up])
for sf in shockfront:
for x in cx:
if x < sf:
v[i, j] = v[i, j] + 0.25*inflow_post_bc(variable, gamma)
else:
v[i, j] = v[i, j] + 0.25*inflow_pre_bc(variable, gamma)
else:
v = ccdata.get_var(variable)
v[:, :] = 0.0 # no source term
else:
msg.fail("error: bc type %s not supported" % (bc_name))
def inflow_post_bc(var, g):
# inflow boundary con
|
rodrigoasmacedo/l10n-brazil
|
__unported__/l10n_br_account_product/wizard/l10n_br_account_nfe_export_invoice.py
|
Python
|
agpl-3.0
| 8,955
| 0.00302
|
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2009 Renato Lima - Akretion #
# Copyright (C) 2011 Vinicius Dittgen - PROGE, Leonardo Santagada - PROGE #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU Affero General Public License for more details. #
# #
#You should have received a copy of the GNU Affero General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
import time
import base64
from openerp.osv import orm, fields
from openerp.tools.translate import _
class L10n_brAccountNfeExportInvoice(orm.TransientModel):
""" Export fiscal eletronic file from invoice"""
_name = 'l10n_br_account_product.nfe_export_invoice'
_description = 'Export eletronic invoice for Emissor de NFe SEFAZ SP'
_columns = {
'name': fields.char('Nome', size=255),
'file': fields.binary('Arquivo', readonly=True),
'file_type': fields.selection(
[('xml', 'XML'), ('txt', ' TXT')], 'Tipo do Arquivo'),
'state': fields.selection(
[('init', 'init'), ('done', 'done')], 'state', readonly=True),
'nfe_environment': fields.selection(
[('1', u'Produção'), ('2', u'Homologação')], 'Ambiente'),
'sign_xml': fields.boolean('Assinar XML'),
'nfe_export_result': fields.one2many(
'l10n_br_account_product.nfe_export_invoice_result', 'wizard_id',
'NFe Export Result'),
'export_folder': fields.boolean(u'Salvar na Pasta de Exportação'),
}
def _default_file_type(self, cr, uid, context):
file_type = False
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company = self.pool.get('res.company').browse(
cr, uid, user.company_id.id, context=context)
file_type = company.file_type
return file_type
def _default_nfe_environment(self, cr, uid, context):
nfe_environment = False
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company = self.pool.get('res.company').browse(
cr, uid, user.company_id.id, context=context)
nfe_environment = company.nfe_environment
return nfe_environment
def _default_sign_xml(self, cr, uid, context):
sign_xml = False
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company = self.pool.get('res.company').browse(
cr, uid, user.company_id.id, context=context)
sign_xml = company.sign_xml
return sign_xml
def _default_export_folder(self, cr, uid, context):
export_folder = False
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company = self.pool.get('res.company').browse(
cr, uid, user.company_id.id, context=context)
export_folder = company.export_folder
return export_folder
_defaults = {
'state': 'init',
'file_type': _default_file_type,
'nfe_environment': _default_nfe_environment,
'sign_xml': _default_sign_xml,
'export_folder': _default_export_folder,
}
def _get_invoice_ids(self, cr, uid, data, context=None):
if not context:
context = {}
return context.get('active_ids', [])
def nfe_export(self, cr, uid, ids, context=None):
data = self.read(cr, uid, ids, [], context=context)[0]
inv_obj = self.pool.get('account.invoice')
active_ids = self._get_invoice_ids(cr, uid, data, context)
export_inv_ids = []
export_inv_numbers = []
company_ids = []
err_msg = ''
if not active_ids:
err_msg = u'Não existe nenhum documento fiscal para ser exportado!'
for inv in inv_obj.browse(cr, uid, active_ids, context=context):
if inv.state not in ('sefaz_export'):
err_msg += u"O Documento Fiscal %s não esta definida para ser \
exportação para a SEFAZ.\n" % inv.internal_number
elif not inv.issuer == '0':
err_msg += u"O Documento Fiscal %s é do tipo externa e não \
pode ser exportada para a receita.\n" % inv.internal_number
else:
inv_obj.write(cr, uid, [inv.id], {'nfe_export_date': False,
'nfe_access_key': False,
'nfe_status': False,
'nfe_date': False})
message = "O Documento Fiscal %s foi \
exportado." % inv.internal_number
inv_obj.log(cr, uid, inv.id, message)
export_inv_ids.append(inv.id)
company_ids.append(inv.company_id.id)
export_inv_numbers.append(inv.internal_number)
if len(set(company_ids)) > 1:
err_msg += u'Não é permitido exportar Documentos \
Fiscais de mais de uma empresa, por favor selecione Documentos \
Fiscais da mesma empresa.'
if export_inv_ids:
if len(export_inv_numbers) > 1:
name = 'nfes%s-%s.%s' % (
time.strftime('%d-%m-%Y'),
self.pool.get('ir.sequence').get(cr, uid, 'nfe.export'),
data['file_type'])
else:
name = 'nfe%s.%s' % (export_inv_numbers[0], data['file_type'])
mod_serializer = __import__(
'l10n_br_account_product.sped.nfe.serializer.' +
data['file_type'], globals(), locals(), data['file_type'])
func = getattr(mod_serializer, 'nfe_export')
company_pool = self.pool.get('res.company')
company = company_pool.browse(cr, uid, inv.company_id.id)
str_nfe_version = inv.nfe_version
nfes = func(
cr, uid, export_inv_ids, data['nfe_environment'],
str_nfe_version, context)
for nfe in nfes:
#if nfe['message']:
#status = 'error'
#else:
#status = 'success'
#self.pool.get(self._name + '_result').create(
#cr, uid, {'document': nfe['key'],
#'message': nfe['message'],
#'status': status,
#'wizard_id': data['id']})
nfe_file = nfe['nfe'].encode('utf8')
self.write(
cr, uid, ids, {'file': base64.b64encode(nfe_file),
'state': 'done', 'name': name}, context=context)
if err_msg:
raise orm.except_orm(_('Error!'), _("'%s'") % _(err_msg, ))
        mod_obj = self.pool.get('ir.model.data')
model_data_ids = mod_obj.search(
            cr, uid, [('model', '=', 'ir.ui.view'),
('name', '=', 'l10n_br_account_product_nfe_export_invoice_form')],
context=context)
resource_id = mod_obj.read(
cr, uid, model_data_ids,
fields=['res_id'], context=context)[0]['res_id']
return {
'type': 'ir.acti
|
timotk/tweakstream
|
tweakstream/cli.py
|
Python
|
mit
| 3,757
| 0.000532
|
from datetime import datetime
import click
import crayons
import tweakers
from tabulate import tabulate
from . import __version__, config, utils
def format_date(dt):
if dt.date() == datetime.today().date():
return dt.strftime("%H:%M")
elif dt.year == datetime.today().year:
return dt.strftime("%d-%m")
else:
return dt.strftime("%d-%m-%Y")
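# Behaviour sketch (illustrative values, not part of the original file): a timestamp from
# today renders as "HH:MM", one from earlier this calendar year as "DD-MM", and anything
# older as "DD-MM-YYYY", e.g.:
#
#   >>> format_date(datetime(2000, 1, 1))
#   '01-01-2000'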
def confirm_overwrite_existing_login():
if utils.cookies_exist():
confirmed = click.confirm(
"You are already logged in. Would you like to login to a different account?"
)
if confirmed:
config.stored_cookies_path.unlink()
click.echo("Existing login deleted.")
else:
raise SystemExit
def print_comment(comment):
"""Pretty print a comment"""
print(
        crayons.yellow((comment.date.strftime("%H:%M"))),
        crayons.green(comment.user.name),
        crayons.blue(comment.url),
)
print(comment.text, "\n")
def choose_topic(topics):
"""Return chosen topic from a printed list of topics
Args:
topics (list): List of Topic objects
Returns:
topic (Topic): Chosen topic
"""
table = []
for i, t in enumerate(topics):
row = [i + 1, t.title, format_date(t.last_reply)]
table.append(row)
print("\n", tabulate(table, headers=["#", "Titel", "Laatste reactie"]))
choice = click.prompt(f"\nChoose a topic to stream (1-{len(topics)})", type=int)
return topics[choice - 1]
@click.group()
@click.version_option(version=__version__)
@click.option("--last", default=3, help="Number of previous comments to show.")
@click.pass_context
def cli(ctx, last):
ctx.ensure_object(dict)
ctx.obj["last"] = last
try:
utils.load_persistent_cookies()
except FileNotFoundError:
pass
@cli.command(name="stream", help="Stream from a specific url.")
@click.argument("url")
@click.pass_context
def stream(ctx, url):
topic = tweakers.gathering.Topic(url=url)
for comment in topic.comment_stream(last=ctx.obj["last"]):
print_comment(comment)
@cli.command(name="list", help="Choose from a list of active topics.")
@click.option("-n", default=20, help="Number of topics to show.")
@click.pass_context
def list_active(ctx, n):
topics = tweakers.gathering.active_topics()[:n]
topic = choose_topic(topics)
for comment in topic.comment_stream(last=ctx.obj["last"]):
print_comment(comment)
@cli.command(name="search", help="Search for a specific topic.")
@click.argument("query", nargs=-1)
@click.option("-n", default=10, help="Number of results to show.")
@click.pass_context
def search(ctx, query, n):
query = " ".join(query)
topics = tweakers.gathering.search(query)
if len(topics) == 0:
click.echo("No topics found!")
raise SystemExit
topic = choose_topic(topics)
for comment in topic.comment_stream(last=ctx.obj["last"]):
print_comment(comment)
@cli.command(name="login", help="Login to tweakers.net.")
def login():
confirm_overwrite_existing_login()
username = click.prompt("Username")
password = click.prompt("Password", hide_input=True)
tweakers.utils.login(username=username, password=password)
utils.store_persistent_cookies()
click.echo("Login successful!")
@cli.command(name="bookmarks", help="Choose from a list of bookmarks.")
@click.pass_context
def bookmarks(ctx):
topics = tweakers.gathering.bookmarks()
if len(topics) == 0:
click.echo("No topics found!")
raise SystemExit
topic = choose_topic(topics)
for comment in topic.comment_stream(last=ctx.obj["last"]):
print_comment(comment)
if __name__ == "__main__":
cli(obj={})
|
Mirantis/mos-horizon
|
openstack_dashboard/test/integration_tests/pages/project/network/routerspage.py
|
Python
|
apache-2.0
| 5,775
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from selenium.common import exceptions
from selenium.webdriver.common import by
from openstack_dashboard.test.integration_tests.pages import basepage
from openstack_dashboard.test.integration_tests.pages.project.network.\
routerinterfacespage import RouterInterfacesPage
from openstack_dashboard.test.integration_tests.pages.project.network\
.routeroverviewpage import RouterOverviewPage
from openstack_dashboard.test.integration_tests.regions import forms
from openstack_dashboard.test.integration_tests.regions import tables
class RoutersTable(tables.TableRegion):
name = "routers"
CREATE_ROUTER_FORM_FIELDS = ("name", "admin_state_up",
"external_network")
SET_GATEWAY_FORM_FIELDS = ("network_id", "router_name",
"router_id")
@tables.bind_table_action('create')
def create_router(self, create_button):
create_button.click()
return forms.FormRegion(self.driver, self.conf,
field_mappings=self.CREATE_ROUTER_FORM_FIELDS)
@tables.bind_table_action('delete')
def delete_router(self, delete_button):
delete_button.click()
return forms.BaseFormRegion(self.driver, self.conf)
@tables.bind_row_action('cleargateway')
def clear_gateway(self, clear_gateway_button, row):
clear_gateway_button.click()
return forms.BaseFormRegion(self.driver, self.conf)
@tables.bind_row_action('setgateway')
def set_gateway(self, set_gateway_button, row):
set_gateway_button.click()
return forms.FormRegion(self.driver, self.conf,
field_mappings=self.SET_GATEWAY_FORM_FIELDS)
class RoutersPage(basepage.BaseNavigationPage):
DEFAULT_ADMIN_STATE_UP = 'True'
DEFAULT_EXTERNAL_NETWORK = 'admin_floating_net'
ROUTERS_TABLE_NAME_COLUMN = 'name'
ROUTERS_TABLE_STATUS_COLUMN = 'status'
ROUTERS_TABLE_NETWORK_COLUMN = 'ext_net'
_interfaces_tab_locator = (by.By.CSS_SELECTOR,
'a[href*="tab=router_details__interfaces"]')
def __init__(self, driver, conf):
super(RoutersPage, self).__init__(driver, conf)
self._page_title = "Routers"
def _get_row_with_router_name(self, name):
return self.routers_table.get_row(
self.ROUTERS_TABLE_NAME_COLUMN, name)
@property
def routers_table(self):
return RoutersTable(self.driver, self.conf)
def create_router(self, name, admin_state_up=DEFAULT_ADMIN_STATE_UP,
external_network=DEFAULT_EXTERNAL_NETWORK):
create_router_form = self.routers_table.create_router()
create_router_form.name.text = name
if admin_state_up:
create_router_form.admin_state_up.value = admin_state_up
if external_network:
create_router_form.external_network.text = external_network
create_router_form.submit()
def set_gateway(self, router_id,
network_name=DEFAULT_EXTERNAL_NETWORK):
row = self._get_row_with_router_name(router_id)
set_gateway_form = self.routers_table.set_gateway(row)
set_gateway_form.network_id.text = network_name
set_gateway_form.submit()
def clear_gateway(self, name):
row = self._get_row_with_router_name(name)
confirm_clear_gateway_form = self.routers_table.clear_gateway(row)
confirm_clear_gateway_form.submit()
def delete_router(self, name):
row = self._get_row_with_router_name(name)
row.mark()
confirm_delete_routers_form = self.routers_table.delete_router()
confirm_delete_routers_form.submit()
def is_router_present(self, name):
return bool(self._get_row_with_router_name(name))
def is_router_active(self, name):
def cell_getter():
row = self._get_row_with_router_name(name)
return row and row.cells[self.ROUTERS_TABLE_STATUS_COLUMN]
return self.routers_table.wait_cell_status(cell_getter, 'Active')
def is_gateway_cleared(self, name):
row = self._get_row_with_router_name(name)
def cell_getter():
return row.cells[self.ROUTERS_TABLE_NETWORK_COLUMN]
try:
self._wait_till_text_present_in_element(cell_getter, '-')
except exceptions.TimeoutException:
            return False
return True
def is_gateway_set(self, name, network_name=DEFAULT_EXTERNAL_NETWORK):
row = self._get_row_with_router_name(name)
def cell_getter():
return row.cells[self.ROUTERS_TABLE_NETWORK_COLUMN]
try:
            self._wait_till_text_present_in_element(cell_getter, network_name)
except exceptions.TimeoutException:
return False
return True
def go_to_interfaces_page(self, name):
self._get_element(by.By.LINK_TEXT, name).click()
self._get_element(*self._interfaces_tab_locator).click()
return RouterInterfacesPage(self.driver, self.conf, name)
def go_to_overview_page(self, name):
self._get_element(by.By.LINK_TEXT, name).click()
return RouterOverviewPage(self.driver, self.conf, name)
|
pybuilder/pybuilder
|
src/main/python/pybuilder/_vendor/pkg_resources/_vendor/importlib_resources/_itertools.py
|
Python
|
apache-2.0
| 884
| 0
|
from itertools import filterfalse
from typing import (
Callable,
Iterable,
Iterator,
Optional,
Set,
TypeVar,
Union,
)
# Type and type variable definitions
_T = TypeVar('_T')
_U = TypeVar('_U')
def unique_everseen(
iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None
) -> Iterator[_T]:
    "List unique elements, preserving order. Remember all elements ever seen."
    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen: Set[Union[_T, _U]] = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
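# Usage sketch (not part of the vendored module), mirroring the examples in the
# comments above:
#
#   >>> list(unique_everseen('AAAABBBCCDAABBB'))
#   ['A', 'B', 'C', 'D']
#   >>> list(unique_everseen('ABBCcAD', str.lower))
#   ['A', 'B', 'C', 'D']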
|
open-mmlab/mmdetection
|
configs/retinanet/retinanet_r101_fpn_2x_coco.py
|
Python
|
apache-2.0
| 197
| 0
|
_base_ = './retinanet_r50_fpn_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
|
zetasyanthis/myarchive
|
src/myarchive/libs/livejournal/backup.py
|
Python
|
mit
| 7,193
| 0.001529
|
#!/usr/bin/env python3
__revision__ = "$Rev$"
try:
import configparser
except ImportError:
import ConfigParser as configparser
import pickle
import datetime
import time
import os.path
import sys
from optparse import OptionParser
import lj
"""
journal backup dictionary structure:
{ 'last_entry': timestamp of the last journal entry sync'd,
'last_comment': id of the last comment sync'd,
'login': the dictionary returned by the last login (useful information such as friend groups),
'comment_posters': { [posterid]: [postername] }
'entries': { [entryid]: {
eventtime: timestamp,
security: 'private' or 'usemask',
allowmask: bitmask of usergroups allowed to see post,
subject: subject,
event: event text (url-encoded),
poster: user who posted the entry (if different from logged-in user),
props: dictionary of properties,
[other undocumented keys returned in a pseudo-arbitrary fashion by LJ],
} }
comments: { [commentid]: {
'posterid': poster id (map to username with comment_posters),
'jitemid': entry id,
'parentid': id of parent comment (0 if top-level),
'body': text of comment,
'date': date comment posted,
'subject': subject of comment,
        [other undocumented keys returned in a pseudo-arbitrary fashion by LJ],
} }
}
"""
DEFAULT_JOURNAL = {
'last_entry': None,
'last_comment': '0',
'last_comment_meta': None,
'entries': {},
'comments': {},
'comment_posters': {},
}
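# Illustrative shape (a sketch with hypothetical ids, matching the docstring above):
# after a sync, journal['entries'] maps an itemid to the raw entry dict returned by LJ,
# journal['comments'] maps a comment id to {'posterid': ..., 'jitemid': ..., 'parentid': ...,
# 'body': ..., 'date': ..., 'subject': ...}, and journal['comment_posters'] translates a
# posterid back to a username.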
def datetime_from_string(s):
"""This assumes input in the form '2007-11-19 12:24:01' because that's all I care about"""
return datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S")
def days_ago(s):
return (datetime.datetime.today() - datetime_from_string(s)).days
def one_second_before(s):
return str(datetime_from_string(s[:19]) - datetime.timedelta(seconds=1))
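# Worked example (a sketch of the helpers above; days_ago() compares against today):
#
#   >>> one_second_before('2007-11-19 12:24:01')
#   '2007-11-19 12:24:00'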
def backup(user, password, journal):
server = lj.LJServer('lj.py+backup; kemayo@gmail.com', 'Python-lj.py/0.0.1')
try:
login = server.login(user, password, getpickws=True, getpickwurls=True)
except lj.LJException as e:
sys.exit(e)
# Load already-cached entries
journal['login'] = login
# Sync entries from the server
print("Downloading journal entries")
nj = update_journal_entries(server, journal)
# Sync comments from the server
print("Downloading comments")
nc = update_journal_comments(server, journal)
print(("Updated %d entries and %d comments" % (nj, nc)))
def backup_to_file(user, password, f):
journal = load_journal(f)
backup(user, password, journal)
save_journal(f, journal)
def load_journal(f):
# f should be a string referring to a file
if os.path.exists(f):
try:
j = pickle.load(open(f, 'rb'))
return j
except EOFError:
return DEFAULT_JOURNAL.copy()
return DEFAULT_JOURNAL.copy()
def save_journal(f, journal):
pickle.dump(journal, open(f, 'wb'))
def update_journal_entries(server, journal):
syncitems = built_syncitems_list(server, journal)
howmany = len(syncitems)
print(howmany, "entries to download")
while len(syncitems) > 0:
print("getting entries starting at", syncitems[0][1])
sync = server.getevents_syncitems(one_second_before(syncitems[0][1]))
for entry in sync['events']:
if hasattr(entry, 'data'):
entry = entry.data
journal['entries'][entry['itemid']] = entry
del(syncitems[0])
return howmany
def built_syncitems_list(server, journal):
all = []
count = 0
total = None
while count != total:
sync = server.syncitems(journal.get('last_entry'))
count = sync['count']
total = sync['total']
journalitems = [(int(e['item'][2:]), e['time']) for e in sync['syncitems'] if e['item'].startswith('L-')]
if journalitems:
all.extend(journalitems)
journal['last_entry'] = all[-1][1]
return all
def update_journal_comments(server, journal):
session = server.sessiongenerate()
initial_meta = get_meta_since(journal['last_comment'], server, session)
journal['comment_posters'].update(initial_meta['usermaps'])
if initial_meta['maxid'] > journal['last_comment']:
bodies = get_bodies_since(journal['last_comment'], initial_meta['maxid'], server, session)
journal['comments'].update(bodies)
if len(journal['comments']) == 0 or days_ago(journal['last_comment_meta']) > 30:
# update metadata every 30 days
all_meta = get_meta_since('0', server, session)
journal['comment_posters'].update(all_meta['usermaps'])
if len(journal['comments']) > 0:
for id, data in list(all_meta['comments'].items()):
journal['comments'][id]['posterid'] = data[0]
journal['comments'][id]['state'] = data[1]
journal['last_comment_meta'] = str(datetime.datetime.today())
howmany = int(initial_meta['maxid']) - int(journal['last_comment'])
journal['last_comment'] = initial_meta['maxid']
server.sessionexpire(session)
return howmany
def get_meta_since(highest, server, session):
    all = {'comments': {}, 'usermaps': {}}
maxid = str(int(highest) + 1)
while highest < maxid:
meta = server.fetch_comment_meta(highest, session)
maxid = meta['maxid']
for id, data in list(meta['comments'].items()):
if int(id) > int(highest):
highest = id
all['comments'][id] = data
all['usermaps'].update(meta['usermaps'])
all['maxid'] = maxid
return all
def get_bodies_since(highest, maxid, server, session):
all = {}
while highest != maxid:
meta = server.fetch_comment_bodies(highest, session)
for id, data in list(meta.items()):
if int(id) > int(highest):
highest = id
all[id] = data
if maxid in meta:
break
print("Downloaded %d comments so far" % len(all))
return all
def __dispatch():
parser = OptionParser(version="%%prog %s" % __revision__, usage="usage: %prog -u Username -p Password -f backup.pkl")
parser.add_option('-u', dest='user', help="Username")
parser.add_option('-p', dest='password', help="Password")
parser.add_option('-f', dest='file', help="Backup filename")
parser.add_option('-c', dest='config', help="Config file")
options, args = parser.parse_args(sys.argv[1:])
if options.config:
cp = configparser.ConfigParser()
cp.read(options.config)
username = cp.get("login", "username")
password = cp.get("login", "password")
filename = cp.get("login", "file")
backup_to_file(username, password, filename)
elif options.user and options.password and options.file:
backup_to_file(options.user, options.password, options.file)
else:
parser.error("If a config file is not being used, -u, -p, and -f must all be present.")
if __name__ == "__main__":
__dispatch()
|
charityscience/csh-sms
|
cshsms/urls.py
|
Python
|
gpl-3.0
| 925
| 0.005405
|
"""cshsms URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
    2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import RedirectView
urlpatterns = [
url(r'^management/', include('management.urls')),
url(r'^admin/', admin.site.urls),
url(r'^$', RedirectView.as_view(url='/management'))
]
|
ActiveState/code
|
recipes/Python/181780_Using_wxPythTwisted/recipe-181780.py
|
Python
|
mit
| 465
| 0.021505
|
from wxPython.wx import *
from twisted.internet import reactor
class MyApp(wxApp):
def OnInit(self):
# Twisted Reactor Code
reactor.startRunning()
        EVT_TIMER(self,999999,self.OnTimer)
self.timer=wxTimer(self,999999)
self.timer.Start(250,False)
# End Twisted Code
# Do whatever you need to do here
return True
def OnTimer(self,event):
reactor.runUntilCurrent()
        reactor.doIteration(0)
|
rnortman/boomscooter
|
boomscooter/follower.py
|
Python
|
apache-2.0
| 2,694
| 0.00631
|
# Copyright 2016 Randall Nortman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from collections import deque
from concurrent.futures import FIRST_COMPLETED
import functools
import itertools
import struct
import io
import threading
import os
import socket
import sys
import logging
LOG = logging.getLogger(__name__)
from master import MsgHeader
from master import MSG_FLAGS_COMMITTED
class Follower:
def __init__(self, reader, writer, loop):
self.read_task = asyncio.async(self.reader(reader, writer, loop))
return
@asyncio.coroutine
def reader(self, reader, writer, loop):
#print('reader')
try:
for i in itertools.count():
#print('read header', i, MsgHeader.size)
                header = yield from reader.readexactly(MsgHeader.size)
msg_len, flags, seqno = MsgHeader.unpack(header)
if flags & MSG_FLAGS_COMMITTED:
assert msg_len == MsgHeader.size
else:
payload = yield from reader.readexactly(msg_len - MsgHeader.size)
#loop.call_later(0.5, self.send_ack, writer, seqno)
self.send_ack(writer, seqno)
except:
LOG.exception('Exception occured in Follower task')
writer.close()
raise
def send_ack(self, writer, seqno):
#print('follower ack', seqno)
writer.write(MsgHeader.pack(MsgHeader.size, 0, seqno))
return
@classmethod
@asyncio.coroutine
def connect_and_run(cls, loop, host='127.0.0.1', port=8889):
reader, writer = yield from asyncio.open_connection(host, port,
loop=loop)
follower = cls(reader, writer, loop)
return (yield from follower.read_task)
def main():
#logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
#loop.set_debug(True)
task = asyncio.async(Follower.connect_and_run(loop))
print('main task', task)
loop.run_until_complete(task)
print('All done!', task.result())
loop.close()
return
if __name__ == '__main__':
main()
|
weiwang/popcorn
|
analysis/BagOfWords.py
|
Python
|
mit
| 4,190
| 0.011456
|
#!/usr/bin/env python
# Author: Angela Chapman
# Date: 8/6/2014
#
# This file contains code to accompany the Kaggle tutorial
# "Deep learning goes to the movies". The code in this file
# is for Part 1 of the tutorial on Natural Language Processing.
#
# *************************************** #
import os
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
from KaggleWord2VecUtility import KaggleWord2VecUtility
import pandas as pd
import numpy as np
if __name__ == '__main__':
train = pd.read_csv(os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')), 'data', 'labeledTrainData.tsv'), header=0, \
delimiter="\t", quoting=3)
test = pd.read_csv(os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')), 'data', 'testData.tsv'), header=0, delimiter="\t", \
quoting=3 )
print 'The first review is:'
print train["review"][0]
    raw_input("Press Enter to continue...")
    #print 'Download text data sets. If you already have NLTK datasets downloaded, just close the Python download window...'
#nltk.download() # Download text data sets, including stop words
# Initialize an empty list to hold the clean reviews
clean_train_reviews = []
# Loop over each review; create an index i that goes from 0 to the length
# of the movie review list
print "Cleaning and parsing the training set movie reviews...\n"
for i in xrange( 0, len(train["review"])):
clean_train_reviews.append(" ".join(KaggleWord2VecUtility.review_to_wordlist(train["review"][i], True)))
# ****** Create a bag of words from the training set
#
print "Creating the bag of words...\n"
# Initialize the "CountVectorizer" object, which is scikit-learn's
# bag of words tool.
vectorizer = CountVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = None, \
max_features = 5000)
# fit_transform() does two functions: First, it fits the model
# and learns the vocabulary; second, it transforms our training data
# into feature vectors. The input to fit_transform should be a list of
# strings.
train_data_features = vectorizer.fit_transform(clean_train_reviews)
# Numpy arrays are easy to work with, so convert the result to an
# array
train_data_features = train_data_features.toarray()
# ******* Train a random forest using the bag of words
#
print "Training the random forest (this may take a while)..."
# Initialize a Random Forest classifier with 100 trees
forest = RandomForestClassifier(n_estimators = 100)
# Fit the forest to the training set, using the bag of words as
# features and the sentiment labels as the response variable
#
# This may take a few minutes to run
forest = forest.fit( train_data_features, train["sentiment"] )
# Create an empty list and append the clean reviews one by one
clean_test_reviews = []
print "Cleaning and parsing the test set movie reviews...\n"
for i in xrange(0,len(test["review"])):
clean_test_reviews.append(" ".join(KaggleWord2VecUtility.review_to_wordlist(test["review"][i], True)))
# Get a bag of words for the test set, and convert to a numpy array
test_data_features = vectorizer.transform(clean_test_reviews)
test_data_features = test_data_features.toarray()
# Use the random forest to make sentiment label predictions
print "Predicting test labels...\n"
result = forest.predict_proba(test_data_features)[:,1]
# Copy the results to a pandas dataframe with an "id" column and
# a "sentiment" column
output = pd.DataFrame( data={"id":test["id"], "sentiment":result} )
# Use pandas to write the comma-separated output file
output.to_csv(os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')), 'data', 'Bag_of_Words_test_results.csv'), index=False, quoting=3)
print "Wrote results to Bag_of_Words_test_results.csv"
|
rosarior/mayan
|
apps/sources/views.py
|
Python
|
gpl-3.0
| 31,364
| 0.004591
|
from __future__ import absolute_import
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext
from django.utils.translation import ugettext_lazy as _
import sendfile
from acls.models import AccessEntry
from common.utils import encapsulate
from documents.conf.settings import THUMBNAIL_SIZE
from documents.exceptions import NewDocumentVersionNotAllowed
from documents.forms import DocumentTypeSelectForm
from documents.models import DocumentType, Document
from documents.permissions import (PERMISSION_DOCUMENT_CREATE,
PERMISSION_DOCUMENT_NEW_VERSION)
from metadata.api import decode_metadata_from_url, metadata_repr_as_list
from metadata.forms import MetadataSelectionForm, MetadataFormSet
from permissions.models import Permission
from .forms import (SourceTransformationForm, SourceTransformationForm_create,
WebFormSetupForm, StagingFolderSetupForm, StagingDocumentForm, WebFormForm,
WatchFolderSetupForm)
from .literals import (SOURCE_CHOICE_WEB_FORM, SOURCE_CHOICE_STAGING,
SOURCE_CHOICE_WATCH, SOURCE_UNCOMPRESS_CHOICE_Y, SOURCE_UNCOMPRESS_CHOICE_ASK)
from .models import (WebForm, StagingFolder, SourceTransformation,
WatchFolder)
from .permissions import (PERMISSION_SOURCES_SETUP_VIEW,
PERMISSION_SOURCES_SETUP_EDIT, PERMISSION_SOURCES_SETUP_DELETE,
PERMISSION_SOURCES_SETUP_CREATE)
from .staging import create_staging_file_class
from .wizards import DocumentCreateWizard
def document_create(request):
Permission.objects.check_permissions(request.user, [PERMISSION_DOCUMENT_CREATE])
wizard = DocumentCreateWizard(form_list=[DocumentTypeSelectForm, MetadataSelectionForm, MetadataFormSet])
return wizard(request)
def document_create_siblings(request, document_id):
Permission.objects.check_permissions(request.user, [PERMISSION_DOCUMENT_CREATE])
document = get_object_or_404(Document, pk=document_id)
query_dict = {}
for pk, metadata in enumerate(document.documentmetadata_set.all()):
query_dict['metadata%s_id' % pk] = metadata.metadata_type_id
query_dict['metadata%s_value' % pk] = metadata.value
if document.document_type_id:
query_dict['document_type_id'] = document.document_type_id
url = reverse('upload_interactive')
return HttpResponseRedirect('%s?%s' % (url, urlencode(query_dict)))
def return_function(obj):
return lambda context: context['source'].source_type == obj.source_type and context['source'].pk == obj.pk
def get_tab_link_for_source(source, document=None):
if document:
view = u'upload_version'
args = [document.pk, u'"%s"' % source.source_type, source.pk]
else:
view = u'upload_interactive'
args = [u'"%s"' % source.source_type, source.pk]
return {
'text': source.title,
'view': view,
'args': args,
'famfam': source.icon,
'keep_query': True,
'conditional_highlight': return_function(source),
}
def get_active_tab_links(document=None):
tab_links = []
web_forms = WebForm.objects.filter(enabled=True)
for web_form in web_forms:
tab_links.append(get_tab_link_for_source(web_form, document))
    staging_folders = StagingFolder.objects.filter(enabled=True)
for staging_folder in staging_folders:
tab_links.append(get_tab_link_for_source(staging_folder, document))
return {
'tab_links': tab_links,
SOURCE_CHOICE_WEB_FORM: web_forms,
SOURCE_CHOICE_STAGING: staging_folders
}
def upload_interactive(request, source_type=None, source_id=None, document_pk=None):
subtemplates_list = []
if document_pk:
document = get_object_or_404(Document, pk=document_pk)
try:
Permission.objects.check_permissions(request.user, [PERMISSION_DOCUMENT_NEW_VERSION])
except PermissionDenied:
AccessEntry.objects.check_access(PERMISSION_DOCUMENT_NEW_VERSION, request.user, document)
results = get_active_tab_links(document)
else:
Permission.objects.check_permissions(request.user, [PERMISSION_DOCUMENT_CREATE])
document = None
results = get_active_tab_links()
context = {}
if results[SOURCE_CHOICE_WEB_FORM].count() == 0 and results[SOURCE_CHOICE_STAGING].count() == 0:
source_setup_link = mark_safe('<a href="%s">%s</a>' % (reverse('setup_web_form_list'), ugettext(u'here')))
subtemplates_list.append(
{
'name': 'generic_subtemplate.html',
'context': {
'title': _(u'Upload sources'),
'paragraphs': [
_(u'No interactive document sources have been defined or none have been enabled.'),
_(u'Click %(setup_link)s to add or enable some document sources.') % {
'setup_link': source_setup_link
}
],
}
})
document_type_id = request.GET.get('document_type_id', None)
if document_type_id:
document_type = get_object_or_404(DocumentType, pk=document_type_id)
else:
document_type = None
if source_type is None and source_id is None:
if results[SOURCE_CHOICE_WEB_FORM].count():
source_type = results[SOURCE_CHOICE_WEB_FORM][0].source_type
source_id = results[SOURCE_CHOICE_WEB_FORM][0].pk
elif results[SOURCE_CHOICE_STAGING].count():
source_type = results[SOURCE_CHOICE_STAGING][0].source_type
source_id = results[SOURCE_CHOICE_STAGING][0].pk
if source_type and source_id:
if source_type == SOURCE_CHOICE_WEB_FORM:
web_form = get_object_or_404(WebForm, pk=source_id)
context['source'] = web_form
if request.method == 'POST':
form = WebFormForm(request.POST, request.FILES,
document_type=document_type,
show_expand=(web_form.uncompress == SOURCE_UNCOMPRESS_CHOICE_ASK) and not document,
source=web_form,
instance=document
)
if form.is_valid():
try:
if document:
expand = False
else:
if web_form.uncompress == SOURCE_UNCOMPRESS_CHOICE_ASK:
expand = form.cleaned_data.get('expand')
else:
if web_form.uncompress == SOURCE_UNCOMPRESS_CHOICE_Y:
expand = True
else:
expand = False
new_filename = get_form_filename(form)
result = web_form.upload_file(request.FILES['file'],
new_filename, use_file_name=form.cleaned_data.get('use_file_name', False),
document_type=document_type,
expand=expand,
metadata_dict_list=decode_metadata_from_url(request.GET),
user=request.user,
document=document,
new_version_data=form.cleaned_data.get('new_version_data')
)
if document:
messages.success(request, _(u'New document version uploaded successfully.'))
return HttpResponseRedirect(reverse('document_view_simple', args=[document.pk]))
else:
if result['is_compressed'] == None:
messages.success(request, _(u'File uploaded successfully.'))
|
lmmsoft/LeetCode
|
LeetCode-Algorithm/1147. Longest Chunked Palindrome Decomposition/1147.py
|
Python
|
gpl-2.0
| 896
| 0.002232
|
from collections import defaultdict
class Solution:
def longestDecomposition(self, text: str) -> int:
num = 0
L = len(text)
l, r = 0, L - 1
mp1 = defaultdict(int)
mp2 = defaultdict(int)
while l < r:
mp1[text[l]] += 1
mp2[text[r]] += 1
if mp1 == mp2:
num += 2
mp1 = defaultdict(int)
mp2 = defaultdict(int)
l += 1
                r -= 1
if not mp1 and not mp2 and l > r:
pass
else:
num += 1
return num
if __name__ == '__main__':
assert Solution().longestDecomposition("ghiabcdefhelloadamhelloabcdefghi") == 7
assert Solution().longestDecomposition("merchant") == 1
assert Solution().longestDecomposition("antaprezatepzapreanta") == 11
assert Solution().longestDecomposition("aaa") == 3
|
ntt-sic/python-cinderclient
|
cinderclient/openstack/common/apiclient/base.py
|
Python
|
apache-2.0
| 15,937
| 0.000063
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Grid Dynamics
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
# E1102: %s is not callable
# pylint: disable=E1102
import abc
import urllib
from cinderclient.openstack.common.apiclient import exceptions
from cinderclient.openstack.common import strutils
def getid(obj):
"""Return id if argument is a Resource.
Abstracts the common pattern of allowing both an object or an object's ID
(UUID) as a parameter when dealing with relationships.
"""
try:
if obj.uuid:
return obj.uuid
except AttributeError:
pass
try:
return obj.id
except AttributeError:
return obj
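# Minimal illustration (hypothetical objects, not from the original file): getid() accepts
# either a Resource-like object or a bare identifier.
#
#   >>> class FakeResource(object):
#   ...     uuid = None
#   ...     id = 'abc123'
#   >>> getid(FakeResource())   # .uuid is falsy, so .id wins
#   'abc123'
#   >>> getid('abc123')         # plain values fall through unchanged
#   'abc123'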
# TODO(aababilov): call run_hooks() in HookableMixin's child classes
class HookableMixin(object):
"""Mixin so classes can register and run hooks."""
_hooks_map = {}
@classmethod
def add_hook(cls, hook_type, hook_func):
"""Add a new hook of specified type.
:param cls: class that registers hooks
:param hook_type: hook type, e.g., '__pre_parse_args__'
:param hook_func: hook function
"""
if hook_type not in cls._hooks_map:
cls._hooks_map[hook_type] = []
cls._hooks_map[hook_type].append(hook_func)
@classmethod
def run_hooks(cls, hook_type, *args, **kwargs):
"""Run all hooks of specified type.
:param cls: class that registers hooks
:param hook_type: hook type, e.g., '__pre_parse_args__'
:param **args: args to be passed to every hook function
:param **kwargs: kwargs to be passed to every hook function
"""
hook_funcs = cls._hooks_map.get(hook_type) or []
for hook_func in hook_funcs:
hook_func(*args, **kwargs)
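# Usage sketch (hypothetical subclass, not part of this module): hooks are keyed by
# hook_type in the shared _hooks_map and run in registration order.
#
#   >>> class MyClient(HookableMixin):
#   ...     pass
#   >>> MyClient.add_hook('__pre_parse_args__', lambda *args, **kwargs: None)
#   >>> MyClient.run_hooks('__pre_parse_args__')   # calls every registered hook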
class BaseManager(HookableMixin):
"""Basic manager type providing common operations.
Managers interact with a particular type of API (servers, flavors, images,
etc.) and provide CRUD operations for them.
"""
resource_class = None
def __init__(self, client):
"""Initializes BaseManager with `client`.
:param client: instance of BaseClient descendant for HTTP requests
"""
super(BaseManager, self).__init__()
self.client = client
def _list(self, url, response_key, obj_class=None, json=None):
"""List the collection.
:param url: a partial URL, e.g., '/servers'
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'
:param obj_class: class for constructing the returned objects
(self.resource_class will be used by default)
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
"""
if json:
body = self.client.post(url, json=json).json()
else:
body = self.client.get(url).json()
if obj_class is None:
obj_class = self.resource_class
data = body[response_key]
# NOTE(ja): keystone returns values as list as {'values': [ ... ]}
# unlike other services which just return the list...
try:
data = data['values']
except (KeyError, TypeError):
pass
return [obj_class(self, res, loaded=True) for res in data if res]
def _get(self, url, response_key):
"""Get an object from collection.
:param url: a partial URL, e.g., '/servers'
:param response_key: the key to be looked up in response dictionary,
e.g., 'server'
"""
body = self.client.get(url).json()
return self.resource_class(self, body[response_key], loaded=True)
def _head(self, url):
"""Retrieve request headers for an object.
:param url: a partial URL, e.g., '/servers'
"""
resp = self.client.head(url)
return resp.status_code == 204
def _post(self, url, json, response_key, return_raw=False):
"""Create an object.
:param url: a partial URL, e.g., '/servers'
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'
:param return_raw: flag to force returning raw JSON instead of
Python object of self.resource_class
"""
body = self.client.post(url, json=json).json()
if return_raw:
return body[response_key]
        return self.resource_class(self, body[response_key])
    def _put(self, url, json=None, response_key=None):
"""Update an object with PUT method.
:param url: a partial URL, e.g., '/servers'
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'
"""
resp = self.client.put(url, json=json)
# PUT requests may not return a body
if resp.content:
body = resp.json()
if response_key is not None:
return self.resource_class(self, body[response_key])
else:
return self.resource_class(self, body)
def _patch(self, url, json=None, response_key=None):
"""Update an object with PATCH method.
:param url: a partial URL, e.g., '/servers'
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'
"""
body = self.client.patch(url, json=json).json()
if response_key is not None:
return self.resource_class(self, body[response_key])
else:
return self.resource_class(self, body)
def _delete(self, url):
"""Delete an object.
:param url: a partial URL, e.g., '/servers/my-server'
"""
return self.client.delete(url)
class ManagerWithFind(BaseManager):
"""Manager with additional `find()`/`findall()` methods."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def list(self):
pass
def find(self, **kwargs):
"""Find a single item with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
matches = self.findall(**kwargs)
num_matches = len(matches)
if num_matches == 0:
msg = "No %s matching %s." % (self.resource_class.__name__, kwargs)
raise exceptions.NotFound(msg)
elif num_matches > 1:
raise exceptions.NoUniqueMatch()
else:
return matches[0]
def findall(self, **kwargs):
"""Find all items with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
found = []
searches = kwargs.items()
for obj in self.list():
try:
if all(getattr(obj, attr) == value
for (attr, value) in searches):
found.append(obj)
except AttributeError:
                continue
|
pliz/gunfolds
|
tools/comparison.py
|
Python
|
gpl-3.0
| 2,830
| 0.021555
|
import ecj
import scipy
import numpy
import operator
import networkx as nx
#from progressbar import ProgressBar, Percentage
numpy.random.RandomState()
import bfutils as bfu
import numpy as np
import gmpy as gmp
def num2CG(num,n):
    """num2CG - converts a number whose binary representation encodes edge
    presence/absence into a compressed graph representation
"""
n2 = n*n
G = {'%i'%(i+1):{} for i in xrange(n)}
if num == 0: return G
bl = gmp.bit_length(num)
idx = [n2-i-1 for i in xrange(bl) if num & (1<<i)]
idx = np.unravel_index(idx,(n,n))
x = idx[0]+1
y = idx[1]+1
for i in xrange(len(x)):
G['%i' % x[i]]['%i' % y[i]] = set([(0,1)])
return G
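# Quick sanity check (illustrative, not part of the original module): for n = 2 the n*n
# bits are laid out row-major over the adjacency matrix, so num = 1 (binary 0001) sets
# only the lowest-order cell, i.e. a single directed edge 2 -> 2:
#
#   num2CG(1, 2) == {'1': {}, '2': {'2': set([(0, 1)])}}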
def hasSelfLoops(G):
for u in G:
if G[u].has_key(u):
return True
return False
def randSCC(n):
G = num2CG(scipy.random.randint(2**(n**2)),n)
while (len(ecj.scc(G)) > 1) or gcd4scc(G)>1:
G = num2CG(scipy.random.randint(2**(n**2)),n)
return G
def SM_fixed(Gstar,G, iter=5):
compat = []
for j in range(0,iter):
if Gstar == ecj.undersample(G,j):
compat.append(j)
return compat
def SM_converging(Gstar,G):
"""Gstar is the undersampled reference graph, while G is the starti
|
ng
graph. The code searches over all undersampled version of G to
find all matches with Gstar
"""
compat = []
GG = G
Gprev = G
if G == Gstar: return [0]
j = 1
G = ecj.undersample(GG,j)
    while not (G == Gprev):
if Gstar == G: compat.append(j)
j += 1
Gprev = G
G = ecj.undersample(GG,j)
return compat
def searchMatch(Gstar,G, iter=5):
if gcd4scc(G) >1: return SM_fixed(Gstar, G, iter=iter)
return SM_converging(Gstar, G)
def hasSink(G):
return not reduce(operator.and_, [bool(G[n]) for n in G], True)
def hasRoot(G): return hasSink(ecj.tr(G))
def isSclique(G):
n = len(G)
for v in G:
if sum([(0,1) in G[v][w] for w in G[v]]) < n: return False
if sum([(2,0) in G[v][w] for w in G[v]]) < n-1: return False
return True
def graph2nx(G):
g = nx.DiGraph()
for v in G:
g.add_edges_from([(v,x) for x in G[v] if (0,1) in G[v][x]])
return g
def nx2graph(G):
g = {str(n+1):{} for n in G}
for n in G:
g['%i' % (n+1)] = {'%i' % (x+1):set([(0,1)]) for x in G[n]}
return g
def gcd4scc(SCC):
g = graph2nx(SCC)
return ecj.listgcd(map(lambda x: len(x)-1, nx.simple_cycles(g)))
def compatibleAtU(uGstar):
compat = []
n = len(uGstar)
numG = 2**(n**2)
#pbar = Percentage()
for i in range(1,numG):
G = num2CG(i,n)
#pbar.update(i+1)
if len(ecj.scc(G)) > 1: continue
l = searchMatch(uGstar,G, iter = 5)
if l: compat.append((l,G))
#pbar.finish()
return compat
|
aleksandr-bakanov/astropy
|
astropy/io/fits/hdu/compressed.py
|
Python
|
bsd-3-clause
| 87,677
| 0.00008
|
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import ctypes
import gc
import itertools
import math
import re
import time
import warnings
from contextlib import suppress
import numpy as np
from .base import DELAYED, ExtensionHDU, BITPIX2DTYPE, DTYPE2BITPIX
from .image import ImageHDU
from .table import BinTableHDU
from astropy.io.fits import conf
from astropy.io.fits.card import Card
from astropy.io.fits.column import Column, ColDefs, TDEF_RE
from astropy.io.fits.column import KEYWORD_NAMES as TABLE_KEYWORD_NAMES
from astropy.io.fits.fitsrec import FITS_rec
from astropy.io.fits.header import Header
from astropy.io.fits.util import (_is_pseudo_unsigned, _unsigned_zero, _is_int,
_get_array_mmap)
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
try:
from astropy.io.fits import compression
COMPRESSION_SUPPORTED = COMPRESSION_ENABLED = True
except ImportError:
COMPRESSION_SUPPORTED = COMPRESSION_ENABLED = False
# Quantization dithering method constants; these are right out of fitsio.h
NO_DITHER = -1
SUBTRACTIVE_DITHER_1 = 1
SUBTRACTIVE_DITHER_2 = 2
QUANTIZE_METHOD_NAMES = {
NO_DITHER: 'NO_DITHER',
SUBTRACTIVE_DITHER_1: 'SUBTRACTIVE_DITHER_1',
SUBTRACTIVE_DITHER_2: 'SUBTRACTIVE_DITHER_2'
}
DITHER_SEED_CLOCK = 0
DITHER_SEED_CHECKSUM = -1
COMPRESSION_TYPES = ('RICE_1', 'GZIP_1', 'GZIP_2', 'PLIO_1', 'HCOMPRESS_1')
# Default compression parameter values
DEFAULT_COMPRESSION_TYPE = 'RICE_1'
DEFAULT_QUANTIZE_LEVEL = 16.
DEFAULT_QUANTIZE_METHOD = NO_DITHER
DEFAULT_DITHER_SEED = DITHER_SEED_CLOCK
DEFAULT_HCOMP_SCALE = 0
DEFAULT_HCOMP_SMOOTH = 0
DEFAULT_BLOCK_SIZE = 32
DEFAULT_BYTE_PIX = 4
CMTYPE_ALIASES = {}
# CFITSIO version-specific features
if COMPRESSION_SUPPORTED:
try:
CFITSIO_SUPPORTS_GZIPDATA = compression.CFITSIO_VERSION >= 3.28
CFITSIO_SUPPORTS_Q_FORMAT = compression.CFITSIO_VERSION >= 3.35
if compression.CFITSIO_VERSION >= 3.35:
CMTYPE_ALIASES['RICE_ONE'] = 'RICE_1'
except AttributeError:
# This generally shouldn't happen unless running pip in an
# environment where an old build of pyfits exists
CFITSIO_SUPPORTS_GZIPDATA = True
CFITSIO_SUPPORTS_Q_FORMAT = True
COMPRESSION_KEYWORDS = {'ZIMAGE', 'ZCMPTYPE', 'ZBITPIX', 'ZNAXIS', 'ZMASKCMP',
'ZSIMPLE', 'ZTENSION', 'ZEXTEND'}
class CompImageHeader(Header):
"""
Header object for compressed image HDUs designed to keep the compression
header and the underlying image header properly synchronized.
This essentially wraps the image header, so that all values are read from
and written to the image header. However, updates to the image header will
also update the table header where appropriate.
Note that if no image header is passed in, the code will instantiate a
regular `~astropy.io.fits.Header`.
"""
# TODO: The difficulty of implementing this screams a need to rewrite this
# module
_keyword_remaps = {
'SIMPLE': 'ZSIMPLE', 'XTENSION': 'ZTENSION', 'BITPIX': 'ZBITPIX',
'NAXIS': 'ZNAXIS', 'EXTEND': 'ZEXTEND', 'BLOCKED': 'ZBLOCKED',
'PCOUNT': 'ZPCOUNT', 'GCOUNT': 'ZGCOUNT', 'CHECKSUM': 'ZHECKSUM',
'DATASUM': 'ZDATASUM'
}
_zdef_re = re.compile(r'(?P<label>^[Zz][a-zA-Z]*)(?P<num>[1-9][0-9 ]*$)?')
_compression_keywords = set(_keyword_remaps.values()).union(
['ZIMAGE', 'ZCMPTYPE', 'ZMASKCMP', 'ZQUANTIZ', 'ZDITHER0'])
_indexed_compression_keywords = {'ZNAXIS', 'ZTILE', 'ZNAME', 'ZVAL'}
    # TODO: Once in place it should be possible to manage some of this through
# the schema system, but it's not quite ready for that yet. Also it still
# makes more sense to change CompImageHDU to subclass ImageHDU :/
def __new__(cls, table_header, image_header=None):
# 2019-09-14 (MHvK): No point wrapping anything if no image_header is
# given. This happens if __getitem__ and copy are called - our super
# class will aim to initialize a new, possibly partially filled
# header, but we cannot usefully deal with that.
# TODO: the above suggests strongly we should *not* subclass from
# Header. See also comment above about the need for reorganization.
if image_header is None:
return Header(table_header)
else:
return super().__new__(cls)
def __init__(self, table_header, image_header):
self._cards = image_header._cards
self._keyword_indices = image_header._keyword_indices
self._rvkc_indices = image_header._rvkc_indices
self._modified = image_header._modified
self._table_header = table_header
# We need to override and Header methods that can modify the header, and
# ensure that they sync with the underlying _table_header
def __setitem__(self, key, value):
# This isn't pretty, but if the `key` is either an int or a tuple we
# need to figure out what keyword name that maps to before doing
# anything else; these checks will be repeated later in the
# super().__setitem__ call but I don't see another way around it
# without some major refactoring
if self._set_slice(key, value, self):
return
if isinstance(key, int):
keyword, index = self._keyword_from_index(key)
elif isinstance(key, tuple):
keyword, index = key
else:
# We don't want to specify and index otherwise, because that will
# break the behavior for new keywords and for commentary keywords
keyword, index = key, None
if self._is_reserved_keyword(keyword):
return
super().__setitem__(key, value)
if index is not None:
remapped_keyword = self._remap_keyword(keyword)
self._table_header[remapped_keyword, index] = value
# Else this will pass through to ._update
def __delitem__(self, key):
if isinstance(key, slice) or self._haswildcard(key):
# If given a slice pass that on to the superclass and bail out
# early; we only want to make updates to _table_header when given
# a key specifying a single keyword
return super().__delitem__(key)
if isinstance(key, int):
keyword, index = self._keyword_from_index(key)
elif isinstance(key, tuple):
keyword, index = key
else:
keyword, index = key, None
if key not in self:
raise KeyError(f"Keyword {key!r} not found.")
super().__delitem__(key)
remapped_keyword = self._remap_keyword(keyword)
if remapped_keyword in self._table_header:
if index is not None:
del self._table_header[(remapped_keyword, index)]
else:
del self._table_header[remapped_keyword]
def append(self, card=None, useblanks=True, bottom=False, end=False):
# This logic unfortunately needs to be duplicated from the base class
# in order to determine the keyword
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif card is None:
card = Card()
elif not isinstance(card, Card):
raise ValueError(
'The value appended to a Header must be either a keyword or '
'(keyword, value, [comment]) tuple; got: {!r}'.format(card))
if self._is_reserved_keyword(card.keyword):
return
super().append(card=card, useblanks=useblanks, bottom=bottom, end=end)
remapped_keyword = self._remap_keyword(card.keyword)
# card.keyword strips the HIERARCH if present so this must be added
# back to avoid a warning.
if str(card).startswith("HIERARCH ") and not remapped_keyword.startswith("HIERARCH "):
remapped_keyword = "HIERARCH " + remapped_keyword
card = Card(remapped_keyword, card.value, card.comment)
# Here we disable the use o
|
statsmodels/statsmodels
|
statsmodels/datasets/nile/data.py
|
Python
|
bsd-3-clause
| 1,398
| 0.005722
|
"""Nile River Flows."""
import pandas as pd
from statsmodels.datasets import utils as du
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = """Nile River flows at Ashwan 1871-1970"""
SOURCE = """
This data is first analyzed in:
Cobb, G. W. 1978. "The Problem of the Nile: Conditional Solution to a
Changepoint Problem." *Biometrika*. 65.2, 243-51.
"""
DESCRSHORT = """This dataset contains measurements on the annual flow of
the Nile as measured at Ashwan for 100 years from 1871-1970."""
DESCRLONG = DESCRSHORT + " There is an apparent changepoint near 1898."
#suggested notes
NOTE = """::
Number of observations: 100
Number of variables: 2
Variable name definitions:
year - the year of the observations
        volume - the discharge at Aswan in 10^8, m^3
"""
def load():
"""
Load the Nile data and return a Dataset class instance.
Returns
-------
Dataset
See DATASET_PROPOSAL.txt for more information.
"""
return load_pandas()
def load_pandas():
data = _get_data()
# TODO: time series
endog = pd.Series(data['volume'], index=data['year'].astype(int))
dataset = du.Dataset(data=data, names=list(data.columns), endog=endog, endog_name='volume')
return dataset
def _get_data():
return du.load_csv(__file__, 'nile.csv').astype(float)
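# Usage sketch (assuming this module is importable as statsmodels.datasets.nile):
#
#   >>> from statsmodels.datasets import nile
#   >>> dataset = nile.load_pandas()
#   >>> dataset.endog.head()   # annual flow volume indexed by year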
|
openstack/neutron-lib
|
neutron_lib/exceptions/vlantransparent.py
|
Python
|
apache-2.0
| 902
| 0
|
# Copyright (c) 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib._i18n import _
from neutron_lib import exceptions
class VlanTransparencyDriverError(exceptions.NeutronException):
    """Vlan Transparency not supported by all mechanism drivers."""
message = _("Backend does not support VLAN Transparency.")
|
pearsonlab/thunder
|
test/test_seriesloader.py
|
Python
|
apache-2.0
| 10,333
| 0.002419
|
import glob
import json
import os
import struct
import unittest
from nose.tools import assert_almost_equal, assert_equals, assert_true, assert_raises
from numpy import allclose, arange, array, array_equal
from numpy import dtype as dtypeFunc
from thunder.rdds.fileio.seriesloader import SeriesLoader
from thunder.utils.common import smallestFloatType
from test_utils import PySparkTestCase, PySparkTestCaseWithOutputDir
_have_image = False
try:
from PIL import Image
_have_image = True
except ImportError:
# PIL not available; skip tests that require it
Image = None
class SeriesBinaryTestData(object):
"""
Data object for SeriesLoader binary test.
"""
__slots__ = ('keys', 'vals', 'keyDtype', 'valDtype')
def __init__(self, keys, vals, keyDtype, valDtype):
"""
Constructor, intended to be called from fromArrays class factory method.
Expects m x n and m x p data for keys and vals.
Parameters
----------
keys: two dimensional array or sequence
vals: two dimensional array or sequence
keydtype: object castable to numpy dtype
data type of keys
valdtype: object castable to numpy dtype
data type of values
Returns
-------
self: new instance of SeriesBinaryTestData
"""
self.keys = keys
self.vals = vals
self.keyDtype = keyDtype
self.valDtype = valDtype
@property
def keyStructFormat(self):
return self.keyDtype.char * self.nkeys
@property
def valStructFormat(self):
return self.valDtype.char * self.nvals
@property
def data(self):
return zip(self.keys, self.vals)
@property
def nkeys(self):
return len(self.keys[0])
@property
def nvals(self):
return len(self.vals[0])
def writeToFile(self, f):
"""
Writes own key, value data to passed file handle in binary format
Parameters
----------
f: file handle, open for writing
f will remain open after this call
"""
for keys, vals in self.data:
f.write(struct.pack(self.keyStructFormat, *keys))
f.write(struct.pack(self.valStructFormat, *vals))
@staticmethod
def _validateLengths(dat):
l = len(dat[0])
for d in dat:
assert len(d) == l, "Data of unequal lengths, %d and %d" % (l, len(d))
@staticmethod
def _normalizeDType(dtypeInstance, data):
if dtypeInstance is None:
return data.dtype
return dtypeFunc(dtypeInstance)
@classmethod
def fromArrays(cls, keys, vals, keyDtype=None, valDtype=None):
"""
Factory method for SeriesBinaryTestData. Validates input before calling class __init__ method.
Expects m x n and m x p data for keys and vals.
Parameters
----------
keys: two dimensional array or sequence
vals: two dimensional array or sequence
keydtype: object castable to numpy dtype
data type of keys
valdtype: object castable to numpy dtype
data type of values
Returns
-------
self: new instance of SeriesBinaryTestData
"""
keyDtype = cls._normalizeDType(keyDtype, keys)
valDtype = cls._normalizeDType(valDtype, vals)
assert len(keys) == len(vals), "Unequal numbers of keys and values, %d and %d" % (len(keys), len(vals))
cls._validateLengths(keys)
cls._validateLengths(vals)
return cls(keys, vals, keyDtype, valDtype)
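# Illustrative sketch only (keys, values and the output path are made up): the
# factory above builds small key/value records, and writeToFile struct-packs them
# into the kind of binary file the SeriesLoader tests below read back.
def _example_write_series_binary(path):
    item = SeriesBinaryTestData.fromArrays([[0, 0], [1, 0]], [[10, 11], [20, 21]],
                                           'int16', 'int16')
    with open(path, 'wb') as f:
        item.writeToFile(f)
    return item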
class TestSeriesLoader(PySparkTestCase):
@staticmethod
def _findTestResourcesDir(resourcesDirName="resources"):
testDirPath = os.path.dirname(os.path.realpath(__file__))
testResourcesDirPath = os.path.join(testDirPath, resourcesDirName)
if not os.path.isdir(testResourcesDirPath):
raise IOError("Test resources directory "+testResourcesDirPath+" not found")
return testResourcesDirPath
@staticmethod
def _findSourceTreeDir(dirName="utils/data"):
testDirPath = os.path.dirname(os.path.realpath(__file__))
testResourcesDirPath = os.path.join(testDirPath, "..", "thunder", dirName)
if not os.path.isdir(testResourcesDirPath):
raise IOError("Directory "+testResourcesDirPath+" not found")
return testResourcesDirPath
def test_fromArrays(self):
ary = arange(8, dtype=dtypeFunc('int16')).reshape((2, 4))
series = SeriesLoader(self.sc).fromArraysAsImages(ary)
seriesVals = series.collect()
seriesAry = series.pack()
# check ordering of keys
assert_equals((0, 0), seriesVals[0][0]) # first key
assert_equals((1, 0), seriesVals[1][0]) # second key
assert_equals((2, 0), seriesVals[2][0])
assert_equals((3, 0), seriesVals[3][0])
assert_equals((0, 1), seriesVals[4][0])
assert_equals((1, 1), seriesVals[5][0])
assert_equals((2, 1), seriesVals[6][0])
assert_equals((3, 1), seriesVals[7][0])
# check dimensions tuple is reversed from numpy shape
assert_equals(ary.shape[::-1], series.dims.count)
# check that values are in original order
collectedVals = array([kv[1] for kv in seriesVals], dtype=dtypeFunc('int16')).ravel()
assert_true(array_equal(ary.ravel(), collectedVals))
# check that packing returns transpose of original array
assert_true(array_equal(ary.T, seriesAry))
def test_fromMultipleArrays(self):
ary = arange(8, dtype=dtypeFunc('int16')).reshape((2, 4))
ary2 = arange(8, 16, dtype=dtypeFunc('int16')).reshape((2, 4))
series = SeriesLoader(self.sc).fromArraysAsImages([ary, ary2])
seriesVals = series.collect()
seriesAry = series.pack()
# check ordering of keys
assert_equals((0, 0), seriesVals[0][0]) # first key
assert_equals((1, 0), seriesVals[1][0]) # second key
assert_equals((3, 0), seriesVals[3][0])
assert_equals((0, 1), seriesVals[4][0])
assert_equals((3, 1), seriesVals[7][0])
# check dimensions tuple is reversed from numpy shape
assert_equals(ary.shape[::-1], series.dims.count)
# check that values are in original order, with subsequent point concatenated in values
collectedVals = array([kv[1] for kv in seriesVals], dtype=dtypeFunc('int16'))
assert_true(array_equal(ary.ravel(), collectedVals[:, 0]))
assert_true(array_equal(ary2.ravel(), collectedVals[:, 1]))
# check that packing returns concatenation of input arrays, with time as first dimension
assert_true(array_equal(ary.T, seriesAry[0]))
assert_true(array_equal(ary2.T, seriesAry[1]))
class TestSeriesBinaryLoader(PySparkTestCaseWithOutputDir):
def _run_tst_fromBinary(self, useConfJson=False):
# run this as a single big test so as to avoid repeated setUp and
|
tearDown of the spark context
# data will be a sequence of test data
# all keys and all values in a test data item must be of the same length
# keys get converted to ints regardless of raw input format
DATA = [
SeriesBinaryTestData.fromArrays([[1, 2, 3]], [[11, 12, 13
|
]], 'int16', 'int16'),
SeriesBinaryTestData.fromArrays([[1, 2, 3], [5, 6, 7]], [[11], [12]], 'int16', 'int16'),
SeriesBinaryTestData.fromArrays([[1, 2, 3]], [[11, 12, 13]], 'int16', 'int32'),
SeriesBinaryTestData.fromArrays([[1, 2, 3]], [[11, 12, 13]], 'int32', 'int16'),
SeriesBinaryTestData.fromArrays([[1, 2, 3]], [[11.0, 12.0, 13.0]], 'int16', 'float32'),
SeriesBinaryTestData.fromArrays([[1, 2, 3]], [[11.0, 12.0, 13.0]], 'float32', 'float32'),
SeriesBinaryTestData.fromArrays([[2, 3, 4]], [[11.0, 12.0, 13.0]], 'float32', 'float32'),
]
for itemidx, item in enumerate(DATA):
outSubdir = os.path.join(self.outputdir, 'input%d' % itemidx)
os.mkdir(outSubdir)
fname = os.path.join(outSubdir, 'inputfile%d.bin' % itemidx)
|
SUSE/azure-sdk-for-python
|
azure-mgmt-web/azure/mgmt/web/models/experiments.py
|
Python
|
mit
| 931
| 0
|
# coding=utf-8
# -----------------
|
---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# -----------------
|
---------------------------------------------------------
from msrest.serialization import Model
class Experiments(Model):
"""Routing rules in production experiments.
:param ramp_up_rules: List of ramp-up rules.
:type ramp_up_rules: list of :class:`RampUpRule
<azure.mgmt.web.models.RampUpRule>`
"""
_attribute_map = {
'ramp_up_rules': {'key': 'rampUpRules', 'type': '[RampUpRule]'},
}
def __init__(self, ramp_up_rules=None):
self.ramp_up_rules = ramp_up_rules
|
Shugabuga/LapisMirror
|
plugins/gifscom.py
|
Python
|
mit
| 3,379
| 0.000296
|
# The MIT License (MIT)
# Copyright (c) 2015 kupiakos
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import re
import html
from urllib.parse import urlsplit
import traceback
import requests
import mimeparse
import praw
class GifscomPlugin:
"""
Mirrors gifs.com images.
Created by /u/EliteMasterEric
"""
def __init__(self, useragent: str, **options):
"""Initialize the puush importer.
:param useragent: The useragent to use for querying gifs.com.
:param options: Other options in the configuration. Ignored.
"""
self.log = logging.getLogger('lapis.gifscom')
self.headers = {'User-Agent': useragent}
self.regex = re.compile(r'gifs\.com$')
def import_submission(self, submission: praw.objects.Submission) -> dict:
"""Import a submission from gifs.com.
Because this downloads the page and tries to scrape the HTML,
we are at significant risk of the image ID on the DOM changing.
Therefore, this plugin is liable to break.
This function will define the following values in its return data:
- author: simply "an anonymous user on gifs.com"
- source: The url of the submission
- importer_display/header
- import_urls
:param submission: A reddit submission to parse.
"""
try:
url = html.unescape(submission.url)
if not self.regex.match(urlsplit(url).netloc):
return None
data = {'author': 'a gifscom user',
'sourc
|
e': url,
'importer_display':
{'header': 'Mirrored gifscom image:\n\n'}}
r = requests.head(url, headers=self.headers)
mime_text = r.headers.get('Content-Type')
mime = mimeparse.parse_mime_type(mime_text)
if mime[0] == 'image':
image_url = url
else:
self.log.warning('gifs.com URL posted that is no
|
t an image: %s', submission.url)
return None
data['import_urls'] = [image_url]
return data
except Exception:
self.log.error('Could not import gifs.com URL %s (%s)',
submission.url, traceback.format_exc())
return None
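# Illustrative sketch (not part of the plugin interface): import_submission only
# reads .url from the object it is given, so a bare stand-in for a praw submission
# is enough to exercise it; the useragent default below is hypothetical.
def _example_mirror(url, useragent='lapis-example/0.1'):
    class _FakeSubmission(object):
        pass
    submission = _FakeSubmission()
    submission.url = url
    return GifscomPlugin(useragent).import_submission(submission)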
__plugin__ = GifscomPlugin
# END OF LINE.
|
idea4bsd/idea4bsd
|
python/testData/copyPaste/LineToBegin.dst.py
|
Python
|
apache-2.0
| 23
| 0.086957
|
p<caret>rint
|
1
print
|
3
|
coinapi/coinapi-sdk
|
oeml-sdk/python/openapi_client/api/__init__.py
|
Python
|
mit
| 219
| 0.004566
|
# do not import all apis into this module b
|
ecause that uses a lot of memory and stack frames
# if you need the ability to import all apis from one package, import them with
# from openapi_client.apis i
|
mport BalancesApi
|
pupapaik/contrail-kubernetes
|
scripts/opencontrail-kubelet/opencontrail_kubelet/vrouter_api.py
|
Python
|
apache-2.0
| 1,675
| 0
|
#
# Copyright (c) 2015 Juniper Networks, Inc.
#
import json
import logging
import requests
VROUTER_AGENT_PORT = 9091
class ContrailVRouterApi(object):
def __init__(self):
pass
def add_p
|
ort(self, instanceId, nicId, sysIfName, macAddress, **kwargs):
data = {
"id": nicId,
"instance-id": instanceId,
"system-name": sysIfName,
"mac-address": macAddress,
"vn-id": "00000000-0000-0000-0000-000000000001",
"vm-project-id": "00000000-0000-0000-0000-000000000001",
"ip-address": "0.0.0.0",
"
|
ip6-address": "0::0",
"rx-vlan-id": 0,
"tx-vlan-id": 0,
"type": 0
}
if 'display_name' in kwargs:
data['display-name'] = kwargs['display_name']
if 'port_type' in kwargs:
if kwargs['port_type'] == "NovaVMPort":
data['type'] = 0
if kwargs['port_type'] == "NameSpacePort":
data['type'] = 1
json_data = json.dumps(data)
url = "http://localhost:%d/port" % (VROUTER_AGENT_PORT)
headers = {'content-type': 'application/json'}
r = requests.post(url, data=json_data, headers=headers)
if r.status_code != requests.codes.ok:
logging.error("%s: %s", url, r.text)
def delete_port(self, nicId):
url = "http://localhost:%d/port/%s" % (VROUTER_AGENT_PORT, nicId)
headers = {'content-type': 'application/json'}
r = requests.delete(url, data=None, headers=headers)
if r.status_code != requests.codes.ok:
logging.error("%s: %s", url, r.headers['status'])
|
zodiac/incubator-airflow
|
tests/api/common/mark_tasks.py
|
Python
|
apache-2.0
| 9,129
| 0.000548
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from airflow import models
from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns
from airflow.settings import Session
from airflow.utils.dates import days_ago
from airflow.utils.state import State
DEV_NULL = "/dev/null"
class TestMarkTasks(unittest.TestCase):
def setUp(self):
self.dagbag = models.DagBag(include_examples=True)
self.dag1 = self.dagbag.dags['test_example_bash_operator']
self.dag2 = self.dagbag.dags['example_subdag_operator']
self.execution_dates = [days_ago(2), days_ago(1)]
drs = _create_dagruns(self.dag1, self.execution_dates,
state=State.RUNNING,
run_id_template="scheduled__{}")
for dr in drs:
dr.dag = self.dag1
dr.verify_integrity()
drs = _create_dagruns(self.dag2,
[self.dag2.default_args['start_date']],
state=State.RUNNING,
run_id_template="scheduled__{}")
for dr in drs:
dr.dag = self.dag2
dr.verify_integrity()
self.session = Session()
def snapshot_state(self, dag, execution_dates):
TI = models.TaskInstance
tis = self.session.query(TI).filter(
TI.dag_id==dag.dag_id,
TI.execution_date.in_(execution_dates)
).all()
self.session.expunge_all()
return tis
def verify_state(self, dag, task_ids, execution_dates, state, old_tis):
TI = models.TaskInstance
tis = self.session.query(TI).filter(
TI.dag_id==dag.dag_id,
TI.execution_date.in_(execution_dates)
).all()
self.assertTrue(len(tis) > 0)
for ti in tis:
if ti.task_id in task_ids and ti.execution_date in execution_dates:
self.assertEqual(ti.state, state)
else:
for old_ti in old_tis:
if (old_ti.task_id == ti.task_id
and old_ti.execution_date == ti.execution_date):
self.assertEqual(ti.state, old_ti.state)
def test_mark_tasks_now(self):
# set one task to success but do not commit
snapshot = self.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_1")
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=False)
self.assertEqual(len(altered), 1)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
None, snapshot)
# set one and only one task to success
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 1)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
State.SUCCESS, snapshot)
# set no tasks
|
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 0)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
State.SUCCESS, snapshot)
# set task to other than success
altered = set_state(task=task, execution_date=se
|
lf.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.FAILED, commit=True)
self.assertEqual(len(altered), 1)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
State.FAILED, snapshot)
# don't alter other tasks
snapshot = self.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_0")
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 1)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
State.SUCCESS, snapshot)
def test_mark_downstream(self):
# test downstream
snapshot = self.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_1")
relatives = task.get_flat_relatives(upstream=False)
task_ids = [t.task_id for t in relatives]
task_ids.append(task.task_id)
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=True, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 3)
self.verify_state(self.dag1, task_ids, [self.execution_dates[0]],
State.SUCCESS, snapshot)
def test_mark_upstream(self):
# test upstream
snapshot = self.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("run_after_loop")
relatives = task.get_flat_relatives(upstream=True)
task_ids = [t.task_id for t in relatives]
task_ids.append(task.task_id)
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=True, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 4)
self.verify_state(self.dag1, task_ids, [self.execution_dates[0]],
State.SUCCESS, snapshot)
def test_mark_tasks_future(self):
# set one task to success towards end of scheduled dag runs
snapshot = self.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_1")
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=True,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 2)
self.verify_state(self.dag1, [task.task_id], self.execution_dates,
State.SUCCESS, snapshot)
def test_mark_tasks_past(self):
# set one task to success towards end of scheduled dag runs
snapshot = self.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_1")
altered = set_state(task=task, execution_date=self.execution_dates[1],
upstream=False, downstream=False, future=False,
past=True, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 2)
self.verify_state(self.dag1, [task.task_id], self.execution_dates,
State.SUCCESS, snapshot)
def test_mark_tasks_subdag(self):
# set one task to success towards end of scheduled dag runs
task = self.dag2.get_task("section-1")
relatives = task.get_flat_relatives(upstream=False)
task_ids = [t.task_id for t in relatives]
|
RossBrunton/BMAT
|
bookmarks/migrations/0002_bookmark_valid_url.py
|
Python
|
mit
| 399
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_l
|
iterals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('bookmarks', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='bookmark',
name='valid_url',
field=models.
|
BooleanField(default=False),
),
]
|
UUDigitalHumanitieslab/timealign
|
stats/migrations/0009_auto_20180620_1232.py
|
Python
|
mit
| 583
| 0.001715
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-06-20 12:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('stats', '0008_scenario_mds_allow_partial'),
]
operations = [
migrations.AlterField(
|
model_name='scenario',
na
|
me='owner',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='scenarios', to=settings.AUTH_USER_MODEL),
),
]
|
platformio/platformio
|
platformio/util.py
|
Python
|
apache-2.0
| 14,440
| 0.001108
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import os
import platform
import re
import socket
import sys
import time
from contextlib import contextmanager
from functools import wraps
from glob import glob
import click
import requests
from platformio import __apiurl__, __version__, exception
from platformio.commands import PlatformioCLI
from platformio.compat import PY2, WINDOWS
from platformio.fs import cd # pylint: disable=unused-import
from platformio.fs import load_json # pylint: disable=unused-import
from platformio.fs import rmtree as rmtree_ # pylint: disable=unused-import
from platformio.proc import exec_command # pylint: disable=unused-import
from platformio.proc import is_ci # pylint: disable=unused-import
# KEEP unused imports for backward compatibility with PIO Core 3.0 API
class memoized(object):
def __init__(self, expire=0):
expire = str(expire)
if expire.isdigit():
expire = "%ss" % int((int(expire) / 1000))
tdmap = {"s": 1, "m": 60, "h": 3600, "d": 86400}
assert expire.endswith(tuple(tdmap))
self.expire = int(tdmap[expire[-1]] * int(expire[:-1]))
self.cache = {}
def __call__(self, func):
@wraps(func)
def wrapper(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in self.cache or (
self.expire > 0 and self.cache[key][0] < time.time() - self.expire
):
self.cache[key] = (time.time(), func(*args, **kwargs))
return self.cache[key][1]
wrapper.reset = self._reset
return wrapper
def _reset(self):
|
self.cache.clear()
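# Illustrative only: the expire argument accepts "<n>s/m/h/d" strings (or a plain
# number of milliseconds); the decorated function below is a made-up example.
@memoized(expire="5s")
def _example_cached_lookup(value):
    return value * 2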
class throttle(object):
def __init__(self, threshhold):
self.threshhold = threshhold # milliseconds
self.last = 0
def __call__(self, func):
@wraps(func)
def wrapper(*args, **kwargs):
diff = int(round((time.time() - self.last) * 1000))
if diff < self.threshhold:
time.
|
sleep((self.threshhold - diff) * 0.001)
self.last = time.time()
return func(*args, **kwargs)
return wrapper
def singleton(cls):
""" From PEP-318 http://www.python.org/dev/peps/pep-0318/#examples """
_instances = {}
def get_instance(*args, **kwargs):
if cls not in _instances:
_instances[cls] = cls(*args, **kwargs)
return _instances[cls]
return get_instance
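# Illustrative only: applying the decorator above to a throwaway class; repeated
# calls such as _SingletonExample() is _SingletonExample() then evaluate to True.
@singleton
class _SingletonExample(object):
    pass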
@contextmanager
def capture_std_streams(stdout, stderr=None):
_stdout = sys.stdout
_stderr = sys.stderr
sys.stdout = stdout
sys.stderr = stderr or stdout
yield
sys.stdout = _stdout
sys.stderr = _stderr
def get_systype():
type_ = platform.system().lower()
arch = platform.machine().lower()
if type_ == "windows":
arch = "amd64" if platform.architecture()[0] == "64bit" else "x86"
return "%s_%s" % (type_, arch) if arch else type_
def pioversion_to_intstr():
vermatch = re.match(r"^([\d\.]+)", __version__)
assert vermatch
return [int(i) for i in vermatch.group(1).split(".")[:3]]
def change_filemtime(path, mtime):
os.utime(path, (mtime, mtime))
def get_serial_ports(filter_hwid=False):
try:
# pylint: disable=import-outside-toplevel
from serial.tools.list_ports import comports
except ImportError:
raise exception.GetSerialPortsError(os.name)
result = []
for p, d, h in comports():
if not p:
continue
if WINDOWS and PY2:
try:
# pylint: disable=undefined-variable
d = unicode(d, errors="ignore")
except TypeError:
pass
if not filter_hwid or "VID:PID" in h:
result.append({"port": p, "description": d, "hwid": h})
if filter_hwid:
return result
# fix for PySerial
if not result and "darwin" in get_systype():
for p in glob("/dev/tty.*"):
result.append({"port": p, "description": "n/a", "hwid": "n/a"})
return result
# Backward compatibility for PIO Core <3.5
get_serialports = get_serial_ports
def get_logical_devices():
items = []
if WINDOWS:
try:
result = exec_command(
["wmic", "logicaldisk", "get", "name,VolumeName"]
).get("out", "")
devicenamere = re.compile(r"^([A-Z]{1}\:)\s*(\S+)?")
for line in result.split("\n"):
match = devicenamere.match(line.strip())
if not match:
continue
items.append({"path": match.group(1) + "\\", "name": match.group(2)})
return items
except WindowsError: # pylint: disable=undefined-variable
pass
# try "fsutil"
result = exec_command(["fsutil", "fsinfo", "drives"]).get("out", "")
for device in re.findall(r"[A-Z]:\\", result):
items.append({"path": device, "name": None})
return items
result = exec_command(["df"]).get("out")
devicenamere = re.compile(r"^/.+\d+\%\s+([a-z\d\-_/]+)$", flags=re.I)
for line in result.split("\n"):
match = devicenamere.match(line.strip())
if not match:
continue
items.append({"path": match.group(1), "name": os.path.basename(match.group(1))})
return items
def get_mdns_services():
# pylint: disable=import-outside-toplevel
try:
import zeroconf
except ImportError:
from site import addsitedir
from platformio.managers.core import get_core_package_dir
contrib_pysite_dir = get_core_package_dir("contrib-pysite")
addsitedir(contrib_pysite_dir)
sys.path.insert(0, contrib_pysite_dir)
import zeroconf # pylint: disable=import-outside-toplevel
class mDNSListener(object):
def __init__(self):
self._zc = zeroconf.Zeroconf(interfaces=zeroconf.InterfaceChoice.All)
self._found_types = []
self._found_services = []
def __enter__(self):
zeroconf.ServiceBrowser(self._zc, "_services._dns-sd._udp.local.", self)
return self
def __exit__(self, etype, value, traceback):
self._zc.close()
def remove_service(self, zc, type_, name):
pass
def add_service(self, zc, type_, name):
try:
assert zeroconf.service_type_name(name)
assert str(name)
except (AssertionError, UnicodeError, zeroconf.BadTypeInNameException):
return
if name not in self._found_types:
self._found_types.append(name)
zeroconf.ServiceBrowser(self._zc, name, self)
if type_ in self._found_types:
s = zc.get_service_info(type_, name)
if s:
self._found_services.append(s)
def get_services(self):
return self._found_services
items = []
with mDNSListener() as mdns:
time.sleep(3)
for service in mdns.get_services():
properties = None
if service.properties:
try:
properties = {
k.decode("utf8"): v.decode("utf8")
if isinstance(v, bytes)
else v
for k, v in service.properties.items()
}
json.dumps(properties)
except UnicodeDecodeError:
properties = None
items.append(
{
|
rborn/TiSocial.Framework
|
build.py
|
Python
|
mit
| 6,767
| 0.041377
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('
|
=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
print "Couldn
|
't find documentation file at: %s" % docdir
return None
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
documentation.append({file:html});
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','dk.napp.social.js')
if not os.path.exists(js_file): return
from compiler import Compiler
try:
import json
except:
import simplejson as json
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
root_asset, module_assets = compiler.compile_module()
root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
from tools import splice_code
assets_router = os.path.join(cwd,'Classes','DkNappDrawerModuleAssets.m')
splice_code(assets_router, 'asset', root_asset_content)
splice_code(assets_router, 'resolve_asset', module_asset_content)
# Generate the exports after crawling all of the available JS source
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
def die(msg):
print msg
sys.exit(1)
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
c = open(os.path.join(cwd,'LICENSE')).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
path = os.path.join(cwd,'manifest')
if not os.path.exists(path): die("missing %s" % path)
f = open(path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
key,value = line.split(':')
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=[]):
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e) == 2 and e[1] == '.pyc': continue
if len(e) == 2 and e[1] == '.js': continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, basepath, 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
from tools import ensure_dev_path
ensure_dev_path()
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
for dn in ('assets','example','platform'):
if os.path.exists(dn):
zip_dir(zf,dn,'%s/%s' % (modulepath,dn),['README'])
zf.write('LICENSE','%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
if __name__ == '__main__':
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
sdk = find_sdk(config)
sys.path.insert(0,os.path.join(sdk,'iphone'))
sys.path.append(os.path.join(sdk, "common"))
compile_js(manifest,config)
build_module(manifest,config)
package_module(manifest,mf,config)
sys.exit(0)
|
igrowing/Orchids
|
manage.py
|
Python
|
mit
| 805
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "orchids.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
|
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed a
|
nd "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
daveinnyc/various
|
python-practice/class_demos.py
|
Python
|
mit
| 3,833
| 0.00574
|
# Notes on classes
class Sample():
def __init__(self, name, number):
self.name = name
self.number = number
def print_values(self):
print(f"name: {self.name}")
print(f"number: {self.number}")
class SampleWithProperties():
def __init__(self, name, number):
self.name = name
self.number = number
@property
def name(self):
# double underscore is to tell future devs to avoid variable
return self.__name
@property
def double_name(self):
# Can return calculated or other values besides fields
return 2 * self.__name
@property
def number(self):
return self.__number
@name.setter
def name(self, value):
# Often has some sort of validation or transformation code
self.__name = value
@number.setter
def number(self, value):
# Often has some sort of validation or transformation code
self.__number = value % 2
class SuperClass():
def __init__(self, name):
self.name = name
def speak(self):
print(f"Hey, ho {self.name}")
class SubClass(SuperClass):
def __init__(self, name, location):
super().__init__(name)
self.location = location
def shout_out(self):
print(f"{self.location} is where it's at")
def speak(self):
# Need to explicitly over ride parent methods
# calling it here, eg, super().speak()
# just calls it. If super.method() is not
# called, then only this code would run
print(f"{self.locat
|
ion}, l
|
et's go! ")
if __name__ == "__main__":
'''
# Demo Sample()
instance = Sample("fred", 3)
instance.print_values()
print(f"Access name field directly: {instance.name}")
instance.number += 100
print(f"Access number field directly: {instance.number}")
'''
'''
# Demo SampleWithProperties()
instance_with_props = SampleWithProperties("fred", 3)
# Directly accessing values
# Next line fails
# print(f"Access name field, direct: {instance_with_props.__name}")
# Python rewrites value names with intial __ to protect namespace
# not really a private value, but less likely to be accessed
print(f"Access name field, direct: {instance_with_props._SampleWithProperties__name}")
# Using getter to access values, looks like direct access but isn't
# name field
print(f"Access name field, getter: {instance_with_props.name}")
print(f"Access name field, getter: {instance_with_props.double_name}")
instance_with_props.name = "Barney"
print(f"Access name field, after setter: {instance_with_props.name}")
# number field
print(f"Access number field, before setter: {instance_with_props.number}")
instance_with_props.number = 4
print(f"Access number field, after setter: {instance_with_props.number}")
instance_with_props.number = 3
print(f"Access number field, after setter: {instance_with_props.number}")
'''
# Demo inheritance
# Show super class functions
instance_super = SuperClass("Johnny")
print(f"Name, super: {instance_super.name}")
print("")
# Show sub inherits name, methods
instance_sub = SubClass("Joey", "Lower East Side")
print(f"Name, super: {instance_sub.name}")
print(f"Method from super: ", end="")
super(SubClass, instance_sub).speak()  # instances have no .super(); bind the parent explicitly
print("")
# Show sub can override parent
print(f"Overide from super: ", end="")
instance_sub.speak()
# Note: super(SubClass, instance) or SuperClass.speak(instance) both reach the parent method from an instance
|
ron1818/Singaboat_RobotX2016
|
robotx_nav/nodes/move_base_force_cancel.py
|
Python
|
gpl-3.0
| 1,191
| 0.008396
|
#! /usr/bin/env python
import rospy
import actionlib
from move_base_msgs.msg import MoveBaseActionGoal
from actionlib_msgs.msg import GoalID
class ForceCancel(object):
def __init__(self, nodename="force_cancel", is_newnode=True, repetition=10):
self.repetition = rospy.get_param("~repetition", repetition)
if is_newnode:
rospy.init_node(name=nodename, anonymous=False)
rospy.on_shutdown(self.shutdown)
pub = rospy.Publisher("move_base/cancel", GoalID, queue_size=1)
sub = rospy.Subscriber("move_base/goal", MoveBaseActionGoal, self.callback, queue_size=1)
rospy.wait_for_message(
|
"move_base/goal", MoveBaseActionGoal, 60)
r = rospy.Rate(1)
counter = 0
while not rospy.is_shutdown() and (counter < self.repetition):
msg = GoalID()
msg.id = self.id
pub.publish(msg)
|
r.sleep()
counter += 1
def callback(self, msg):
self.id = msg.goal_id.id
def shutdown(self):
rospy.loginfo("cancel job finished")
rospy.sleep(1)
pass
if __name__ == "__main__":
fc = ForceCancel('force_cancel', False, 5)
|
enoordeh/Pangloss
|
Calibrate.py
|
Python
|
gpl-2.0
| 10,551
| 0.013079
|
#!/usr/bin/env python
# ======================================================================
import pangloss
import sys,getopt,cPickle,numpy
import scipy.stats as stats
# ======================================================================
def Calibrate(argv):
"""
NAME
Calibrate.py
PURPOSE
Transform the results of the lightcone reconstruction process,
Pr(kappah|D), into our target PDF, Pr(kappa|D).
COMMENTS
All PDF input is provided as a list of samples. There are two
modes of operation:
1) The Pr(kappah|C) for an ensemble of calibration lightcones are
compressed into a single number (currently the
median), and then combined with the true kappa values to make
Pr(kappa,kappah|C). This is written out as a 2D sample list.
2) The Pr(kappah|D) for a single observed lightcone is compressed
into a single number (currently the median). This is then used
to take a slice from Pr(kappa,kappah|C) to make Pr(kappa|D,C).
Both 1 and 2 can be carried out in series if desired (Mode=3).
FLAGS
-h Print this message [0]
INPUTS
configfile Plain text file containing Pangloss configuration
OPTIONAL INPUTS
--mode Operating mode 1,2 or 3. See COMMENTS above.
OUTPUTS
stdout Useful information
samples From 1) Pr(kappa,kappah|C) or 2) Pr(kappa|D,C)
EXAMPLE
Calibrate.py example.config
BUGS
AUTHORS
This file is part of the Pangloss project, distributed under the
GPL v2, by Tom Collett (IoA) and Phil Marshall (Oxford).
Please cite: Collett et al 2013, http://arxiv.org/abs/1303.6564
HISTORY
2013-03-21 started Collett & Marshall (Oxford)
"""
# --------------------------------------------------------------------
try:
opts, args = getopt.getopt(argv,"hm:",["help","mode"])
except getopt.GetoptError, err:
print str(err) # will print something like "option -a not recognized"
print Calibrate.__doc__ # will print the big comment above.
return
Mode=3
for o,a in opts:
if o in ("-h", "--help"):
print Calibrate.__doc__
return
elif o in ("-m", "--mode"):
Mode = int(a)
assert Mode < 4 and Mode >0, "unhandled Mode"
else:
assert False, "unhandled option"
# Check for setup file in array args:
if len(args) == 1:
configfile = args[0]
print pangloss.doubledashedline
print pangloss.hello
print pangloss.doubledashedline
print "Calibrate: transforming Pr(kappah|D) to Pr(kappa|D)"
print "Calibrate: taking instructions from",configfile
else:
print Calibrate.__doc__
return
# --------------------------------------------------------------------
# Read in configuration, and extract the ones we need:
experiment = pangloss.Configuration(configfile)
EXP_NAME = experiment.parameters['ExperimentName']
Nc = experiment.parameters['NCalibrationLightcones']
comparator=experiment.parameters['Comparator']
comparatorType=experiment.parameters['ComparatorType']
comparatorWidth=experiment.parameters['ComparatorWidth']
# Figure out which mode is required:
ModeName = experiment.parameters['CalibrateMode']
if ModeName=='Joint': Mode = 1
if ModeName=='Slice': Mode = 2
if ModeName=='JointAndSlice': Mode = 3
CALIB_DIR = experiment.parameters['CalibrationFolder'][0]
jointdistfile= CALIB_DIR+'/'+comparator+'_'+comparatorType+'.pickle'
jointdistasPDFfile= CALIB_DIR+'/'+comparator+'_'+comparatorType+'_asPDF.pickle'
# Final result is PDF for kappa:
x = experiment.parameters['ObservedCatalog'][0]
resultfile = x.split('.')[0]+"_"+EXP_NAME+"_PofKappa.pickle"
# --------------------------------------------------------------------
# Mode 1: generate a joint distribution, eg Pr(kappah,kappa)
# from the calibration dataset:
if Mode==1 or Mode==3:
print pangloss.dashedline
# First find the calibration pdfs for kappa_h:
calpickles = []
for i in range(Nc):
calpickles.append(experiment.getLightconePickleName('simulated',pointing=i))
calresultpickles=[]
if comparator=="Kappah" and comparatorType=="median":
for i in range(Nc):
x = calpickles[i]
pfile = x.split('.')[0].split("_lightcone")[0]+"_"+EXP_NAME+"_KappaHilbert_Kappah_median.pickle"
calresultpickles.append(pfile)
elif comparator=="Kappah" and comparatorType!="median":
for i in range(Nc):
x = calpickles[i]
pfile = x.split('.')[0].split("_lightcone")[0]+"_"+EXP_NAME+"_KappaHilbert_Kappah_"+comparatorType+".pickle"
calresultpickles.append(pfile)
else:
print "Calibrate: Unrecognised comparator "+Comparator
print "Calibrate: If you want to use a comparator other than kappa_h, "
print "Calibrate: you'll need to c
|
ode it up!"
print "Calibrate: (This should be easy, but you can ask tcollett@ast.cam.uk for help)."
exit()
# Now calculate comparators:
callist=numpy.empty((Nc,2))
jd=pangloss.PDF(["kappa_
|
ext",comparator+'_'+comparatorType])
for i in range(Nc):
C = calresultpickles[i]
pdf = pangloss.readPickle(C)
if comparator=="Kappah":
if comparatorType=="median":
# Recall that we created a special file for this
# choice of comparator and comparator type, in
# Reconstruct. You could also use the
# comparatortype=="mean" code, swapping mean for median.
callist[i,0]=pdf[0]
callist[i,1]=pdf[1][0]
elif comparatorType=="mean":
callist[i,0] = pdf.truth[0]
callist[i,1] = numpy.mean(pdf.samples)
else:
print "Calibrate: Unrecognised comparatorType "+comparatorType
print "Calibrate: If you want to use a comparatorType other than median "
print "Calibrate: or mean, you'll need to code it up!"
print "Calibrate: (This should be easy, but you can ask tcollett@ast.cam.uk for help)."
exit()
jd.append(callist[i])
pangloss.writePickle(callist,jointdistfile)
# Also store the joint dist as a pangloss pdf:
pangloss.writePickle(jd,jointdistasPDFfile)
# Plot:
plotfile = jointdistasPDFfile.split('.')[0]+'.png'
jd.plot("Kappah_median","kappa_ext",weight=None,output=plotfile,title="The joint distribution of $\kappa_{\mathrm{ext}}$ and calibrator \n\n (more correlated means a better calibrator!)")
print "Calibrate: calibration joint PDF saved in:"
print "Calibrate: "+jointdistfile
print "Calibrate: and "+jointdistasPDFfile
print "Calibrate: you can view this PDF in "+plotfile
# --------------------------------------------------------------------
# Mode 2: calibrate a real line of sight's Pr(kappah|D) using the
# joint distribution Pr(kappa,<kappah>|D)
if Mode==2 or Mode==3:
print pangloss.dashedline
callibguide = pangloss.readPickle(jointdistfile)
obspickle = experiment.getLightconePickleName('real')
pfile = obspickle.split('.')[0].split("_lightcone")[0]+'_'+EXP_NAME+"_PofKappah.pickle"
pdf=pangloss.readPickle(pfile)
if comparator=="Kappah":
if comparatorType=="median":# note we created a special file for this choice of comparator and comparator type. You could also use the comparatortype=="mean" code swapping mean for median.
RealComparator=numpy.median(pdf.samples)
elif comparatorType=="mean":
|
asajeffrey/servo
|
tests/wpt/web-platform-tests/content-security-policy/embedded-enforcement/support/echo-required-csp.py
|
Python
|
mpl-2.0
| 1,679
| 0.006552
|
import json
from wptserve.utils import isomorphic_decode
def main(request, response):
message = {}
header = request.headers.get(b"Test-Header-Injection");
message[u'test_header_injection'] = isomorphic_decode(header) if header else None
header = request.headers.get(b"Sec-Required-CSP");
message[u'required_csp'] = isomorphic_decode(header) if header else None
header = request.headers.get(b"Sec-Required-CSP");
message[u'required_csp'] = isomorphic_decode(header) if header else None
second_level_iframe_code = u""
if b"include_second_level_iframe" in request.GET:
if b"second_level_iframe_csp" in request.GET and request.GET[b"second_level_iframe_csp"] != b"":
second_level_iframe_code = u'''<script>
var i2 = document.createElement('iframe');
i2.src = 'echo-required-csp.py';
i2.csp = "{0}";
document.body.appendChild(i2);
</script>'''.fo
|
rmat(isomorphic_decode(request.GET[b"second_level_iframe_csp"]))
else:
second_level_iframe_code = u'''<script>
var i2 = document.createElement('iframe');
i2.src = 'echo-required-csp.py';
document.body.appendChild(i2);
</script>'''
return [(b"Content-Type", b"text/html"), (b"Allow-CSP-From", b"*")], u'''
<!DOCTYPE html>
<ht
|
ml>
<head>
<!--{2}-->
<script>
window.addEventListener('message', function(e) {{
window.parent.postMessage(e.data, '*');
}});
window.parent.postMessage({0}, '*');
</script>
</head>
<body>
{1}
</body>
</html>
'''.format(json.dumps(message), second_level_iframe_code, str(request.headers))
|
jacobajit/ion
|
intranet/apps/auth/backends.py
|
Python
|
gpl-2.0
| 6,021
| 0.002159
|
# -*- coding: utf-8 -*-
import logging
import os
import re
import uuid
from django.conf import settings
from django.contrib.auth.hashers import check_password
import pexpect
from ..users.models import User
logger = logging.getLogger(__name__)
class KerberosAuthenticationBackend(object):
"""Authenticate using Kerberos.
This is the default authentication backend.
"""
@staticmethod
def kinit_timeout_handle(username, realm):
"""Check if the user exists before we throw an error.
If the user does not exist in LDAP, only throw a warning.
"""
try:
User.get_user(username=username)
except User.DoesNotExist:
logger.warning("kinit timed out for {}@{} (invalid user)".format(username, realm))
return
logger.critical("kinit timed out for {}@{}".format(username, realm))
@staticmethod
def get_kerberos_ticket(username, password):
"""Attempts to create a Kerberos ticket for a user.
Args:
username
The username.
password
The password.
Returns:
Boolean indicating success or failure of ticket creation
"""
cache = "/tmp/ion-%s" % uuid.uuid4()
logger.debug("Setting KRB5CCNAME to 'FILE:{}'".format(cache))
os.environ["KRB5CCNAME"] = "FILE:" + cache
try:
realm = settings.CSL_REALM
kinit = pexpect.spawnu("/usr/bin/kinit {}@{}".format(username, realm), timeout=settings.KINIT_TIMEOUT)
kinit.expect(":")
kinit.sendline(password)
kinit.expect(pexpect.EOF)
kinit.close()
exitstatus = kinit.exitstatus
except pexpect.TIMEOUT:
KerberosAuthenticationBackend.kinit_timeout_handle(username, realm)
exitstatus = 1
if exitstatus != 0:
realm = settings.AD_REALM
try:
kinit = pexpect.spawnu("/usr/bin/kinit {}@{}".format(username, realm), timeout=settings.KINIT_TIMEOUT)
kinit.expect(":", tim
|
eout=5)
kinit.sendline(password)
kinit.expect(pexpect.EOF)
kinit.close()
exitstatus = kinit.exitstatus
except pexpect.TIMEOUT:
KerberosAuthenticationBackend.kinit_timeout_handle(username, realm)
exitstatus = 1
if exitstatus == 0:
logger.de
|
bug("Kerberos authorized {}@{}".format(username, realm))
return True
else:
logger.debug("Kerberos failed to authorize {}".format(username))
if "KRB5CCNAME" in os.environ:
del os.environ["KRB5CCNAME"]
return False
def authenticate(self, username=None, password=None):
"""Authenticate a username-password pair.
Creates a new user if one is not already in the database.
Args:
username
The username of the `User` to authenticate.
password
The password of the `User` to authenticate.
Returns:
`User`
NOTE: None is returned when the user account does not exist. However,
if the account exists but does not exist in LDAP, which is the case for
former and future students who do not have Intranet access, a dummy user
is returned that has the flag is_active=False. (The is_active property in
the User class returns False when the username starts with "INVALID_USER".)
"""
# remove all non-alphanumerics
username = re.sub(r'\W', '', username)
krb_ticket = self.get_kerberos_ticket(username, password)
if not krb_ticket:
return None
else:
logger.debug("Authentication successful")
try:
user = User.get_user(username=username)
except User.DoesNotExist:
# Shouldn't happen
logger.error("User {} successfully authenticated but not found " "in LDAP.".format(username))
user, status = User.objects.get_or_create(username="INVALID_USER", id=99999)
return user
def get_user(self, user_id):
"""Returns a user, given his or her user id. Required for a custom authentication backend.
Args:
user_id
The user id of the user to fetch.
Returns:
User or None
"""
try:
return User.get_user(id=user_id)
except User.DoesNotExist:
return None
class MasterPasswordAuthenticationBackend(object):
"""Authenticate as any user against a master password whose hash is in secret.py.
Forces a simple LDAP bind.
"""
def authenticate(self, username=None, password=None):
"""Authenticate a username-password pair.
Creates a new user if one is not already in the database.
Args:
username
The username of the `User` to authenticate.
password
The master password.
Returns:
`User`
"""
if check_password(password, settings.MASTER_PASSWORD):
try:
user = User.get_user(username=username)
except User.DoesNotExist:
logger.debug("Master password correct, user does not exist")
return None
logger.debug("Authentication with master password successful")
return user
logger.debug("Master password authentication failed")
return None
def get_user(self, user_id):
"""Returns a user, given his or her user id. Required for a custom authentication backend.
Args:
user_id
The user id of the user to fetch.
Returns:
User or None
"""
try:
return User.get_user(id=user_id)
except User.DoesNotExist:
return None
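# Illustrative settings sketch (dotted path inferred from this file's location,
# intranet/apps/auth/backends.py; adjust to the project's real configuration):
#
#     AUTHENTICATION_BACKENDS = (
#         "intranet.apps.auth.backends.MasterPasswordAuthenticationBackend",
#         "intranet.apps.auth.backends.KerberosAuthenticationBackend",
#     )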
|
A-Manning/FStar
|
src/tools/updateDiscriminators.py
|
Python
|
apache-2.0
| 419
| 0.019093
|
import sys
import subprocess
result=subprocess.check_output("grep -
|
or 'is_[A-Z]\w*' .", shell=True)
lines=[ l for l in str(result).splitlines() if l.find('.fst') != -1]
for l in lines:
content = l.split(':')
constr=content[1].strip()[0:-1]
print("sed -i -e 's/%s[.]/%s?./g' %s" % (constr, constr, content[0]))
subprocess.call("sed -i -e 's/%s[.]/%s?./g' %s" %
|
(constr, constr, content[0]), shell=True)
|
Marocco2/EpicRace
|
BOX/box.py
|
Python
|
lgpl-3.0
| 10,406
| 0.002787
|
#
#
# BBBBBBBBBBBBBBBBB OOOOOOOOO XXXXXXX XXXXXXX
# B::::::::::::::::B OO:::::::::OO X:::::X X:::::X
# B::::::BBBBBB:::::B OO:::::::::::::OO X:::::X X:::::X
# BB:::::B B:::::BO:::::::OOO:::::::OX::::::X X::::::X
# B::::B B:::::BO::::::O O::::::OXXX:::::X X:::::XXX
# B::::B B:::::BO:::
|
::O O:::::O X:::::X X:::::X
# B::::BBBBBB:::::B O:::::O O:::::O X:::::
|
X:::::X
# B:::::::::::::BB O:::::O O:::::O X:::::::::X
# B::::BBBBBB:::::B O:::::O O:::::O X:::::::::X
# B::::B B:::::BO:::::O O:::::O X:::::X:::::X
# B::::B B:::::BO:::::O O:::::O X:::::X X:::::X
# B::::B B:::::BO::::::O O::::::OXXX:::::X X:::::XXX
# BB:::::BBBBBB::::::BO:::::::OOO:::::::OX::::::X X::::::X
# B:::::::::::::::::B OO:::::::::::::OO X:::::X X:::::X
# B::::::::::::::::B OO:::::::::OO X:::::X X:::::X
# BBBBBBBBBBBBBBBBB OOOOOOOOO XXXXXXX XXXXXXX
#
#
# Assetto Corsa framework created by Marco 'Marocco2' Mollace
#
# version 0.2
#
# Usage of this library is under LGPLv3. Be careful :)
#
#
import ac
import traceback
import os
import sys
import platform
try:
import ctypes
except:
ac.log('BOX: error loading ctypes: ' + traceback.format_exc())
raise
# TODO: read from config file for filters | IMPORTS
from os.path import dirname, realpath
# import configparser
import functools
import threading
import zipfile
import time
def async(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
t = threading.Thread(target=func, args=args, kwargs=kwargs)
t.daemon = True
t.start()
return t
return wrapper
if platform.architecture()[0] == "64bit":
dllfolder = "stdlib64"
dllfolder = os.path.join(os.path.dirname(__file__), dllfolder)
fmodex = "fmodex64.dll"
else:
dllfolder = "stdlib"
dllfolder = os.path.join(os.path.dirname(__file__), dllfolder)
fmodex = "fmodex.dll"
sys.path.insert(0, dllfolder)
os.environ['PATH'] = os.environ['PATH'] + ";."
ctypes.windll[os.path.join(dllfolder, fmodex)]
box_lib_folder = os.path.join(os.path.dirname(__file__), 'box_lib')
sys.path.insert(0, box_lib_folder)
try:
import pyfmodex
except Exception as e:
ac.log('BOX: error loading pyfmodex: ' + traceback.format_exc())
raise
try:
import requests
except Exception as e:
ac.log('BOX: error loading requests: ' + traceback.format_exc())
raise
# A useful push notification via Telegram if I need send some news
def notification(telegram_bot_oauth):
try:
telegram_api_url = "https://api.telegram.org/bot" + telegram_bot_oauth + "/getUpdates"
r = requests.get(telegram_api_url)
message = r.json()
if message["ok"]:
var_notify = message["result"][-1]["message"]["text"]
ac.log('BOX: Notification from Telegram: ' + var_notify)
return var_notify
else:
var_notify = "No Telegram connection"
ac.log('BOX: ' + var_notify)
except:
ac.log('BOX: No Internet connection')
var_notify = ""
return var_notify
# It downloads a zip file and extract it in a folder
def get_zipfile(download_link, dir_path='', absolute_path=False):
try:
local_filename = download_link.split('/')[-1]
# NOTE the stream=True parameter
r = requests.get(download_link, stream=True)
log_getZipFile = "Download of " + local_filename + " completed"
where_is_zip = os.path.join(os.path.dirname(__file__), local_filename)
ac.log("BOX: " + log_getZipFile)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
# f.flush() commented by recommendation from J.F.Sebastian
ac.log("BOX: " + where_is_zip)
try:
with zipfile.ZipFile(local_filename, "r") as z:
if dir_path == "" and not absolute_path:
z.extractall(os.path.dirname(__file__)) # Extracting files
elif absolute_path:
z.extractall(dir_path) # Extracting files
else:
z.extractall(os.path.join(os.path.dirname(__file__), dir_path)) # Extracting files
# os.remove(local_filename)
log_getZipFile = "Files extracted"
return log_getZipFile
except:
log_getZipFile = "Error extracting files"
return log_getZipFile
except:
log_getZipFile = "Error downloading zip file"
ac.log('BOX: error downloading zip file: ' + traceback.format_exc())
return log_getZipFile
# A new function to automatize app updates for AC
# WORK IN PROGRESS
# TODO: make reorder files logic
def newupdate(version, check_link, download_link, dir_path=''):
try:
r = requests.get(check_link)
if r.json() != version: # Check if server version and client version is the same
update_status = get_zipfile(download_link, dir_path)
return update_status
else:
update_status = "No new update"
ac.log('BOX: ' + update_status)
return update_status
except:
update_status = "Error checking new update"
ac.log('BOX: error checking new update: ' + traceback.format_exc())
return update_status
# Uses GitHub to check updates
# WORK IN PROGRESS
# TODO: make reorder files logic
def github_newupdate(git_repo, branch='master', sha='', dir_path=''):
try:
check_link = "https://api.github.com/repos/" + git_repo + "/commits/" + branch
headers = {'Accept': 'application/vnd.github.VERSION.sha'}
r = requests.get(check_link, headers=headers)
if sha == "":
try:
with open("apps\\python\\" + git_repo.split('/')[-1] + "\sha.txt", 'r') as g:
sha = g.read()
g.close()
except:
update_status = "No SHA available"
ac.log('BOX: ' + update_status)
return update_status
if r.text != sha: # Check if server version and client version is the same
download_link = "https://github.com/" + git_repo + "/archive/" + branch + ".zip"
update_status = get_zipfile(download_link, dir_path)
with open("apps\\python\\" + git_repo.split('/')[-1] + "\sha.txt", 'w') as j:
j.write(r.text)
j.close()
return update_status
else:
update_status = "No new update"
ac.log('BOX: ' + update_status)
return update_status
except:
update_status = "Error checking new update"
ac.log('BOX: error checking new update: ' + traceback.format_exc())
return update_status
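# Illustrative usage sketch for the update helpers above; the repo name, version
# string and URLs are placeholders, not real endpoints.
def _example_check_updates(app_dir):
    status = github_newupdate("SomeUser/SomeApp", branch="master", dir_path=app_dir)
    if status.startswith("Error"):
        status = newupdate("0.2", "https://example.com/version.json",
                           "https://example.com/app.zip", dir_path=app_dir)
    return status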
from threading import Thread, Event
class SoundPlayer(object):
def __init__(self, player):
self._play_event = Event()
self.player = player
self.playbackpos = [0.0, 0.0, 0.0]
self.playbackvol = 1.0
self.EQ = []
self.initEq()
self.sound_mode = pyfmodex.constants.FMOD_CREATECOMPRESSEDSAMPLE
self.speaker_mix = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
for i in self.EQ:
self.player.add_dsp(i)
self.channel = self.player.get_channel(0)
self.queue = []
self.thread = Thread(target=self._worker)
self.thread.daemon = True
self.thread.start()
def initEq(self):
freq = [16.0, 31.5, 63.0, 125.0, 250.0, 500.0, 1000.0, 2000.0, 4000.0, 8000.0, 16000.0]
for i in freq:
dsp = self.player.create_dsp_by_type(pyfmodex.constants.FMOD_DSP_TYPE_PARAMEQ)
dsp.set_param(pyfmodex.constants.FMOD_DSP_PARAMEQ_GAIN, 1.0)
dsp.set_param(pyfmodex.constants.FMOD_DSP_PARAMEQ_BANDWIDTH, 1.0)
dsp.set_param(pyfmodex.constants.FMOD_DSP_PARAMEQ_CENTER, i)
self.EQ.append(dsp)
def set_volume(self, v
|
MasAval/django_pipedrive
|
pipedrive/migrations/0005_auto_20170510_1253.py
|
Python
|
bsd-3-clause
| 4,508
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('pipedrive', '0004_auto_20170502_1701'),
]
operations = [
migrations.AlterField(
model_name='activity',
name='subject',
field=models.CharField(max_length=500, null=True, blank=True),
),
migrations.AlterField(
model_name='activity',
name='type',
field=models.CharField(max_length=500, null=True, blank=True),
),
migrations.AlterField(
model_name='deal',
name='lost_reason',
field=models.CharField(max_length=500, null=True, blank=True),
),
migrations.AlterField(
model_name='deal',
name='title',
field=models.CharField(max_length=500, null=True, blank=True),
),
migrations.AlterField(
model_name='dealfield',
name='field_type',
field=models.CharField(max_length=500),
),
migrations.AlterField(
model_name='dealfield',
name='key',
field=models.CharField(max_length=500),
),
migrations.AlterField(
model_name='dealfield',
name='name',
field=models.CharField(max_length=500),
),
migrations.AlterField(
            model_name='organization',
|
name='address',
field=models.CharField(max_length=500, null=True, blank=True),
),
migrations.AlterField(
model_name='organization',
name='name',
field=models.CharField(max_length=500, null=True, blank=True),
),
migrations.AlterField(
model_name='organizationfield',
name='field_type',
            field=models.CharField(max_length=500),
|
),
migrations.AlterField(
model_name='organizationfield',
name='key',
field=models.CharField(max_length=500),
),
migrations.AlterField(
model_name='organizationfield',
name='name',
field=models.CharField(max_length=500),
),
migrations.AlterField(
model_name='person',
name='email',
field=models.CharField(max_length=500, null=True, blank=True),
),
migrations.AlterField(
model_name='person',
name='name',
field=models.CharField(max_length=500, null=True, blank=True),
),
migrations.AlterField(
model_name='person',
name='phone',
field=models.CharField(max_length=500, null=True, blank=True),
),
migrations.AlterField(
model_name='personfield',
name='field_type',
field=models.CharField(max_length=500),
),
migrations.AlterField(
model_name='personfield',
name='key',
field=models.CharField(max_length=500),
),
migrations.AlterField(
model_name='personfield',
name='name',
field=models.CharField(max_length=500),
),
migrations.AlterField(
model_name='pipeline',
name='name',
field=models.CharField(max_length=500, null=True, blank=True),
),
migrations.AlterField(
model_name='pipeline',
name='url_title',
field=models.CharField(max_length=500, null=True, blank=True),
),
migrations.AlterField(
model_name='stage',
name='name',
field=models.CharField(max_length=500),
),
migrations.AlterField(
model_name='stage',
name='pipeline_name',
field=models.CharField(max_length=500, null=True, blank=True),
),
migrations.AlterField(
model_name='user',
name='email',
field=models.CharField(max_length=500, null=True, blank=True),
),
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(max_length=500, null=True, blank=True),
),
migrations.AlterField(
model_name='user',
name='phone',
field=models.CharField(max_length=500, null=True, blank=True),
),
]
|
smmribeiro/intellij-community
|
python/helpers/pydev/setup_cython.py
|
Python
|
apache-2.0
| 5,308
| 0.004145
|
'''
A simpler setup version just to compile the speedup module.
It should be used as:
python setup_cython build_ext --inplace
Note: the .c file and other generated files are regenerated from
the .pyx file by running "python build_tools/build.py"
'''
import os
import sys
from setuptools import setup
os.chdir(os.path.dirname(os.path.abspath(__file__)))
IS_PY36_OR_GREATER = sys.version_info > (3, 6)
IS_PY39_OR_GREATER = sys.version_info > (3, 9)
def process_args():
extension_folder = None
target_pydevd_name = None
target_frame_eval = None
force_cython = False
for i, arg in enumerate(sys.argv[:]):
if arg == '--build-lib':
extension_folder = sys.argv[i + 1]
            # It shouldn't be removed from sys.argv (along with --build-temp) because they're passed further to setup()
if arg.startswith('--target-pyd-name='):
sys.argv.remove(arg)
target_pydevd_name = arg[len('--target-pyd-name='):]
if arg.startswith('--target-pyd-frame-eval='):
sys.argv.remove(arg)
target_frame_eval = arg[len('--target-pyd-frame-eval='):]
if arg == '--force-cython':
sys.argv.remove(arg)
force_cython = True
return extension_folder, target_pydevd_name, target_frame_eval, force_cython
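# Hedged examples of command lines that process_args() is written to handle
# (the target names are illustrative, not the only valid ones):
#   python setup_cython.py build_ext --inplace
#   python setup_cython.py build_ext --inplace --target-pyd-name=pydevd_cython_win_amd64 --force-cython
#   python setup_cython.py build_ext --build-lib <folder> --target-pyd-frame-eval=pydevd_frame_evaluator_win_amd64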
def build_extension(dir_name, extension_name, target_pydevd_name, force_cython, extended=False, has_pxd=False):
pyx_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.pyx" % (extension_name,))
if target_pydevd_name != extension_name:
# It MUST be there in this case!
# (otherwise we'll have unresolved externals because the .c file had another name initially).
import shutil
# We must force cython in this case (but only in this case -- for the regular setup in the user machine, we
# should always compile the .c file).
force_cython = True
new_pyx_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.pyx" % (target_pydevd_name,))
new_c_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.c" % (target_pydevd_name,))
shutil.copy(pyx_file, new_pyx_file)
pyx_file = new_pyx_file
if has_pxd:
pxd_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.pxd" % (extension_name,))
            new_pxd_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.pxd" % (target_pydevd_name,))
|
shutil.copy(pxd_file, new_pxd_file)
assert os.path.exists(pyx_file)
try:
if force_cython:
from Cython.Build import cythonize # @UnusedImport
ext_modules = cythonize([
|
"%s/%s.pyx" % (dir_name, target_pydevd_name,),
], force=True)
else:
# Always compile the .c (and not the .pyx) file (which we should keep up-to-date by running build_tools/build.py).
from distutils.extension import Extension
ext_modules = [Extension("%s%s.%s" % (dir_name, "_ext" if extended else "", target_pydevd_name,),
[os.path.join(dir_name, "%s.c" % target_pydevd_name), ],
# uncomment to generate pdbs for visual studio.
# extra_compile_args=["-Zi", "/Od"],
# extra_link_args=["-debug"],
)]
setup(
name='Cythonize',
ext_modules=ext_modules
)
finally:
if target_pydevd_name != extension_name:
try:
os.remove(new_pyx_file)
except:
import traceback
traceback.print_exc()
try:
os.remove(new_c_file)
except:
import traceback
traceback.print_exc()
if has_pxd:
try:
os.remove(new_pxd_file)
except:
import traceback
traceback.print_exc()
extension_folder, target_pydevd_name, target_frame_eval, force_cython = process_args()
extension_name = "pydevd_cython"
if target_pydevd_name is None:
target_pydevd_name = extension_name
build_extension("_pydevd_bundle", extension_name, target_pydevd_name, force_cython, extension_folder, True)
if IS_PY36_OR_GREATER:
extension_name = "pydevd_frame_evaluator"
frame_eval_dir_name = "_pydevd_frame_eval"
target_frame_eval_common = "%s_%s" % (extension_name, "common")
build_extension(frame_eval_dir_name, target_frame_eval_common, target_frame_eval_common, force_cython, extension_folder,
True)
if IS_PY39_OR_GREATER:
extension_name += "_py39_and_above"
if target_frame_eval is None:
target_frame_eval = extension_name
build_extension(frame_eval_dir_name, extension_name, target_frame_eval, force_cython, extension_folder, True)
if extension_folder:
os.chdir(extension_folder)
for folder in [file for file in os.listdir(extension_folder) if
file != 'build' and os.path.isdir(os.path.join(extension_folder, file))]:
file = os.path.join(folder, "__init__.py")
if not os.path.exists(file):
open(file, 'a').close()
|
tomsilver/nupic
|
nupic/regions/CLAClassifierRegion.py
|
Python
|
gpl-3.0
| 9,996
| 0.006803
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file implements the CLA Classifier region. See the comments in the class
definition of CLAClassifierRegion for a description.
"""
from PyRegion import PyRegion
from nupic.algorithms.cla_classifier_factory import CLAClassifierFactory
###############################################################################
class CLAClassifierRegion(PyRegion):
"""
CLAClassifierRegion implements a CLA specific classifier that accepts a binary
input from the level below (the "activationPattern") and information from the
sensor and encoders (the "classification") describing the input to the system
at that time step.
  When learning, for every bit in the activation pattern, it records a history of the
classification each time that bit was active. The history is bounded by a
maximum allowed age so that old entries are thrown away.
For inference, it takes an ensemble approach. For every active bit in the
activationPattern, it looks up the most likely classification(s) from the
  history stored for that bit and then votes across these to get the resulting
  classification(s).
|
  The caller can choose to tell the region that the classifications for
  iteration N+K should be aligned with the activationPattern for iteration N.
|
  This results in the classifier producing predictions for K steps in advance.
Any number of different K's can be specified, allowing the classifier to learn
and infer multi-step predictions for a number of steps in advance.
"""
###############################################################################
@classmethod
def getSpec(cls):
ns = dict(
description=CLAClassifierRegion.__doc__,
singleNodeOnly=True,
# The inputs and outputs are not used in this region because they are
# either sparse vectors or dictionaries and hence don't fit the "vector
# of real" input/output pattern.
# There is a custom compute() function provided that accepts the
# inputs and outputs.
inputs=dict(
categoryIn=dict(
description='Category of the input sample',
dataType='Real32',
count=1,
required=True,
regionLevel=True,
isDefaultInput=False,
requireSplitterMap=False),
bottomUpIn=dict(
description='Belief values over children\'s groups',
dataType='Real32',
count=0,
required=True,
regionLevel=False,
isDefaultInput=True,
requireSplitterMap=False),
),
outputs=dict(),
parameters=dict(
learningMode=dict(
description='Boolean (0/1) indicating whether or not a region '
'is in learning mode.',
dataType='UInt32',
count=1,
constraints='bool',
defaultValue=1,
accessMode='ReadWrite'),
inferenceMode=dict(
description='Boolean (0/1) indicating whether or not a region '
'is in inference mode.',
dataType='UInt32',
count=1,
constraints='bool',
defaultValue=0,
accessMode='ReadWrite'),
steps=dict(
description='Comma separated list of the desired steps of '
'prediction that the classifier should learn',
dataType="Byte",
count=0,
constraints='',
defaultValue='1',
accessMode='Create'),
alpha=dict(
description='The alpha used to compute running averages of the '
'bucket duty cycles for each activation pattern bit. A lower '
'alpha results in longer term memory',
dataType="Real32",
count=1,
constraints='',
defaultValue=0.001,
accessMode='Create'),
implementation=dict(
description='The classifier implementation to use.',
accessMode='ReadWrite',
dataType='Byte',
count=0,
constraints='enum: py, cpp'),
clVerbosity=dict(
description='An integer that controls the verbosity level, '
'0 means no verbose output, increasing integers '
'provide more verbosity.',
dataType='UInt32',
count=1,
constraints='',
defaultValue=0 ,
accessMode='ReadWrite'),
),
commands=dict()
)
return ns
###############################################################################
def __init__(self,
steps='1',
alpha=0.001,
clVerbosity=0,
implementation=None,
):
# Convert the steps designation to a list
self.steps = steps
self.stepsList = eval("[%s]" % (steps))
self.alpha = alpha
self.verbosity = clVerbosity
# Initialize internal structures
self._claClassifier = CLAClassifierFactory.create(
steps=self.stepsList,
alpha=self.alpha,
verbosity=self.verbosity,
implementation=implementation,
)
self.learningMode = True
self.inferenceMode = False
self._initEphemerals()
###############################################################################
def _initEphemerals(self):
pass
###############################################################################
def initialize(self, dims, splitterMaps):
pass
###############################################################################
def clear(self):
self._claClassifier.clear()
###############################################################################
def getParameter(self, name, index=-1):
"""
Get the value of the parameter.
@param name -- the name of the parameter to retrieve, as defined
by the Node Spec.
"""
# If any spec parameter name is the same as an attribute, this call
# will get it automatically, e.g. self.learningMode
return PyRegion.getParameter(self, name, index)
###############################################################################
def setParameter(self, name, index, value):
"""
Set the value of the parameter.
@param name -- the name of the parameter to update, as defined
by the Node Spec.
@param value -- the value to which the parameter is to be set.
"""
if name == "learningMode":
self.learningMode = bool(int(value))
elif name == "inferenceMode":
self.inferenceMode = bool(int(value))
else:
return PyRegion.setParameter(self, name, index, value)
###############################################################################
def reset(self):
pass
###############################################################################
def compute(self, inputs, outputs):
"""
Process one input sample.
This method is called by the runtime engine.
We don't use this method in this region because the inputs and outputs don't
fit the standard "vector of reals" used by the
|
shubhamdhama/zulip
|
analytics/lib/counts.py
|
Python
|
apache-2.0
| 29,311
| 0.003685
|
import logging
import time
from collections import OrderedDict, defaultdict
from datetime import datetime, timedelta
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
from django.conf import settings
from django.db import connection
from django.db.models import F
from psycopg2.sql import SQL, Composable, Identifier, Literal
from analytics.models import (
BaseCount,
FillState,
InstallationCount,
RealmCount,
StreamCount,
UserCount,
installation_epoch,
last_successful_fill,
)
from zerver.lib.logging_util import log_to_file
from zerver.lib.timestamp import ceiling_to_day, ceiling_to_hour, floor_to_hour, verify_UTC
from zerver.models import (
Message,
Realm,
RealmAuditLog,
Stream,
UserActivityInterval,
UserProfile,
models,
)
## Logging setup ##
logger = logging.getLogger('zulip.management')
log_to_file(logger, settings.ANALYTICS_LOG_PATH)
# You can't subtract timedelta.max from a datetime, so use this instead
TIMEDELTA_MAX = timedelta(days=365*1000)
## Class definitions ##
class CountStat:
HOUR = 'hour'
DAY = 'day'
FREQUENCIES = frozenset([HOUR, DAY])
def __init__(self, property: str, data_collector: 'DataCollector', frequency: str,
interval: Optional[timedelta]=None) -> None:
self.property = property
self.data_collector = data_collector
# might have to do something different for bitfields
if frequency not in self.FREQUENCIES:
raise AssertionError(f"Unknown frequency: {frequency}")
self.frequency = frequency
if interval is not None:
self.interval = interval
elif frequency == CountStat.HOUR:
self.interval = timedelta(hours=1)
else: # frequency == CountStat.DAY
self.interval = timedelta(days=1)
def __str__(self) -> str:
return f"<CountStat: {self.property}>"
class LoggingCountStat(CountStat):
def __init__(self, property: str, output_table: Type[BaseCount], frequency: str) -> None:
CountStat.__init__(self, property, DataCollector(output_table, None), frequency)
class DependentCountStat(CountStat):
def __init__(self, property: str, data_collector: 'DataCollector', frequency: str,
interval: Optional[timedelta] = None, dependencies: Sequence[str] = []) -> None:
CountStat.__init__(self, property, data_collector, frequency, interval=interval)
self.dependencies = dependencies
class DataCollector:
def __init__(self, output_table: Type[BaseCount],
pull_function: Optional[Callable[[str, datetime, datetime, Optional[Realm]], int]]) -> None:
self.output_table = output_table
self.pull_function = pull_function
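# Hedged illustration (the property name is made up): a LoggingCountStat carries
# no pull_function because its values are written directly by logging code, while
# a regular CountStat pairs a property with a DataCollector whose pull_function
# fills rows covering [end_time - interval, end_time).
_example_logging_stat = LoggingCountStat('example_stat::day', RealmCount, CountStat.DAY)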
## CountStat-level operations ##
def process_count_stat(stat: CountStat, fill_to_time: datetime,
realm: Optional[Realm]=None) -> None:
# TODO: The realm argument is not yet supported, in that we don't
# have a solution for how to update FillState if it is passed. It
# exists solely as partial plumbing for when we do fully implement
# doing single-realm analytics runs for use cases like data import.
#
# Also, note that for the realm argument to be properly supported,
# the CountStat object passed in needs to have come from
# E.g. get_count_stats(realm), i.e. have the realm_id already
    # entered into the SQL query defined by the CountStat object.
if stat.frequency == CountStat.HOUR:
time_increment = timedelta(hours=1)
elif stat.frequency == CountStat.DAY:
time_increment = timedelta(days=1)
else:
raise AssertionError(f"Unknown frequency: {stat.frequency}")
verify_UTC(fill_to_time)
if floor_to_hour(fill_to_time) != fill_to_time:
raise ValueError(f"fill_to_time must be on an hour boundary: {fill_to_time}")
fill_state = FillState.objects.filter(property=stat.property).first()
if fill_state is None:
currently_filled = installation_epoch()
fill_state = FillState.objects.create(property=stat.property,
end_time=currently_filled,
state=FillState.DONE)
logger.info("INITIALIZED %s %s", stat.property, currently_filled)
elif fill_state.state == FillState.STARTED:
logger.info("UNDO START %s %s", stat.property, fill_state.end_time)
do_delete_counts_at_hour(stat, fill_state.end_time)
currently_filled = fill_state.end_time - time_increment
do_update_fill_state(fill_state, currently_filled, FillState.DONE)
logger.info("UNDO DONE %s", stat.property)
elif fill_state.state == FillState.DONE:
currently_filled = fill_state.end_time
else:
raise AssertionError(f"Unknown value for FillState.state: {fill_state.state}.")
if isinstance(stat, DependentCountStat):
for dependency in stat.dependencies:
dependency_fill_time = last_successful_fill(dependency)
if dependency_fill_time is None:
logger.warning("DependentCountStat %s run before dependency %s.",
stat.property, dependency)
return
fill_to_time = min(fill_to_time, dependency_fill_time)
currently_filled = currently_filled + time_increment
while currently_filled <= fill_to_time:
logger.info("START %s %s", stat.property, currently_filled)
start = time.time()
do_update_fill_state(fill_state, currently_filled, FillState.STARTED)
do_fill_count_stat_at_hour(stat, currently_filled, realm)
do_update_fill_state(fill_state, currently_filled, FillState.DONE)
end = time.time()
currently_filled = currently_filled + time_increment
logger.info("DONE %s (%dms)", stat.property, (end-start)*1000)
def do_update_fill_state(fill_state: FillState, end_time: datetime, state: int) -> None:
fill_state.end_time = end_time
fill_state.state = state
fill_state.save()
# We assume end_time is valid (e.g. is on a day or hour boundary as appropriate)
# and is timezone aware. It is the caller's responsibility to enforce this!
def do_fill_count_stat_at_hour(stat: CountStat, end_time: datetime, realm: Optional[Realm]=None) -> None:
start_time = end_time - stat.interval
if not isinstance(stat, LoggingCountStat):
timer = time.time()
assert(stat.data_collector.pull_function is not None)
rows_added = stat.data_collector.pull_function(stat.property, start_time, end_time, realm)
logger.info("%s run pull_function (%dms/%sr)",
stat.property, (time.time()-timer)*1000, rows_added)
do_aggregate_to_summary_table(stat, end_time, realm)
def do_delete_counts_at_hour(stat: CountStat, end_time: datetime) -> None:
if isinstance(stat, LoggingCountStat):
InstallationCount.objects.filter(property=stat.property, end_time=end_time).delete()
if stat.data_collector.output_table in [UserCount, StreamCount]:
RealmCount.objects.filter(property=stat.property, end_time=end_time).delete()
else:
UserCount.objects.filter(property=stat.property, end_time=end_time).delete()
StreamCount.objects.filter(property=stat.property, end_time=end_time).delete()
RealmCount.objects.filter(property=stat.property, end_time=end_time).delete()
InstallationCount.objects.filter(property=stat.property, end_time=end_time).delete()
def do_aggregate_to_summary_table(stat: CountStat, end_time: datetime,
realm: Optional[Realm]=None) -> None:
cursor = connection.cursor()
# Aggregate into RealmCount
output_table = stat.data_collector.output_table
if realm is not None:
realm_clause = SQL("AND zerv
|
er_realm.id = {}").format(Literal(realm.id))
else:
realm_clause = SQL("")
if output_table in (UserCount, StreamCount):
realmcount_query = SQL("""
|
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT
|
cygnushan/measurement
|
SC_spectrum/Ui_SC_main.py
|
Python
|
mit
| 31,084
| 0.001263
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'G:\WorkDir\gas-sensing_resistors\SC_spectrum\SC_main.ui'
#
# Created: Wed Jan 20 20:49:15 2016
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
from Rt_mplCanvas import Rt_CanvasWidget
from SC_mplCanvas import SC_CanvasWidget
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_SC_APP(object):
def setupUi(self, SC_APP):
SC_APP.setObjectName(_fromUtf8("SC_APP"))
SC_APP.resize(800, 600)
SC_APP.setMinimumSize(QtCore.QSize(800, 600))
SC_APP.setMaximumSize(QtCore.QSize(800, 600))
font = QtGui.QFont()
font.setPointSize(12)
SC_APP.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/lmd.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
SC_APP.setWindowIcon(icon)
self.verticalLayout_13 = QtGui.QVBoxLayout(SC_APP)
self.verticalLayout_13.setObjectName(_fromUtf8("verticalLayout_13"))
self.verticalLayout_12 = QtGui.QVBoxLayout()
self.verticalLayout_12.setObjectName(_fromUtf8("verticalLayout_12"))
self.horizontalLayout_15 = QtGui.QHBoxLayout()
        self.horizontalLayout_15.setObjectName(_fromUtf8("horizontalLayout_15"))
|
self.verticalLayout_10 = QtGui.QVBoxLayout()
self.verticalLayout_10.setObjectName(_fromUtf8("verticalLayout_10"))
self.SC_MPLS = QtGui.QStackedWidget(SC_APP)
self.SC_MPLS.setMinimumSize(QtCore.QSize(480, 320))
|
self.SC_MPLS.setMaximumSize(QtCore.QSize(480, 320))
font = QtGui.QFont()
font.setPointSize(12)
self.SC_MPLS.setFont(font)
self.SC_MPLS.setObjectName(_fromUtf8("SC_MPLS"))
self.Rt_MPL = Rt_CanvasWidget()
self.Rt_MPL.setObjectName(_fromUtf8("Rt_MPL"))
self.SC_MPLS.addWidget(self.Rt_MPL)
self.SC_MPL = SC_CanvasWidget()
self.SC_MPL.setObjectName(_fromUtf8("SC_MPL"))
self.SC_MPLS.addWidget(self.SC_MPL)
self.verticalLayout_10.addWidget(self.SC_MPLS)
self.log_state = QtGui.QCheckBox(SC_APP)
self.log_state.setObjectName(_fromUtf8("log_state"))
self.verticalLayout_10.addWidget(self.log_state)
self.groupBox_5 = QtGui.QGroupBox(SC_APP)
self.groupBox_5.setObjectName(_fromUtf8("groupBox_5"))
self.verticalLayout_8 = QtGui.QVBoxLayout(self.groupBox_5)
self.verticalLayout_8.setObjectName(_fromUtf8("verticalLayout_8"))
self.verticalLayout_7 = QtGui.QVBoxLayout()
self.verticalLayout_7.setObjectName(_fromUtf8("verticalLayout_7"))
self.horizontalLayout_19 = QtGui.QHBoxLayout()
self.horizontalLayout_19.setObjectName(_fromUtf8("horizontalLayout_19"))
self.horizontalLayout_12 = QtGui.QHBoxLayout()
self.horizontalLayout_12.setObjectName(_fromUtf8("horizontalLayout_12"))
self.label_18 = QtGui.QLabel(self.groupBox_5)
self.label_18.setMinimumSize(QtCore.QSize(64, 32))
self.label_18.setMaximumSize(QtCore.QSize(64, 32))
self.label_18.setObjectName(_fromUtf8("label_18"))
self.horizontalLayout_12.addWidget(self.label_18)
self.run_time = QtGui.QLineEdit(self.groupBox_5)
self.run_time.setMinimumSize(QtCore.QSize(113, 22))
self.run_time.setMaximumSize(QtCore.QSize(113, 22))
self.run_time.setReadOnly(True)
self.run_time.setObjectName(_fromUtf8("run_time"))
self.horizontalLayout_12.addWidget(self.run_time)
self.label_5 = QtGui.QLabel(self.groupBox_5)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.horizontalLayout_12.addWidget(self.label_5)
self.horizontalLayout_19.addLayout(self.horizontalLayout_12)
self.horizontalLayout_18 = QtGui.QHBoxLayout()
self.horizontalLayout_18.setObjectName(_fromUtf8("horizontalLayout_18"))
self.label_19 = QtGui.QLabel(self.groupBox_5)
self.label_19.setMinimumSize(QtCore.QSize(56, 32))
self.label_19.setMaximumSize(QtCore.QSize(56, 32))
self.label_19.setObjectName(_fromUtf8("label_19"))
self.horizontalLayout_18.addWidget(self.label_19)
self.flow1 = QtGui.QLineEdit(self.groupBox_5)
self.flow1.setMinimumSize(QtCore.QSize(113, 22))
self.flow1.setMaximumSize(QtCore.QSize(113, 22))
# self.flow1.setReadOnly(True)
self.flow1.setObjectName(_fromUtf8("flow1"))
self.horizontalLayout_18.addWidget(self.flow1)
self.label_7 = QtGui.QLabel(self.groupBox_5)
self.label_7.setMinimumSize(QtCore.QSize(48, 32))
self.label_7.setMaximumSize(QtCore.QSize(48, 32))
self.label_7.setObjectName(_fromUtf8("label_7"))
self.horizontalLayout_18.addWidget(self.label_7)
self.f1_open = QtGui.QCheckBox(self.groupBox_5)
self.f1_open.setText(_fromUtf8(""))
self.f1_open.setObjectName(_fromUtf8("f1_open"))
self.horizontalLayout_18.addWidget(self.f1_open)
self.horizontalLayout_19.addLayout(self.horizontalLayout_18)
self.verticalLayout_7.addLayout(self.horizontalLayout_19)
self.horizontalLayout_20 = QtGui.QHBoxLayout()
self.horizontalLayout_20.setObjectName(_fromUtf8("horizontalLayout_20"))
self.horizontalLayout_13 = QtGui.QHBoxLayout()
self.horizontalLayout_13.setObjectName(_fromUtf8("horizontalLayout_13"))
self.label_20 = QtGui.QLabel(self.groupBox_5)
self.label_20.setMinimumSize(QtCore.QSize(64, 32))
self.label_20.setMaximumSize(QtCore.QSize(64, 32))
self.label_20.setObjectName(_fromUtf8("label_20"))
self.horizontalLayout_13.addWidget(self.label_20)
self.now_R = QtGui.QLineEdit(self.groupBox_5)
self.now_R.setMinimumSize(QtCore.QSize(113, 22))
self.now_R.setMaximumSize(QtCore.QSize(113, 22))
self.now_R.setReadOnly(True)
self.now_R.setObjectName(_fromUtf8("now_R"))
self.horizontalLayout_13.addWidget(self.now_R)
self.label_6 = QtGui.QLabel(self.groupBox_5)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.horizontalLayout_13.addWidget(self.label_6)
self.horizontalLayout_20.addLayout(self.horizontalLayout_13)
self.horizontalLayout_17 = QtGui.QHBoxLayout()
self.horizontalLayout_17.setObjectName(_fromUtf8("horizontalLayout_17"))
self.label_26 = QtGui.QLabel(self.groupBox_5)
self.label_26.setMinimumSize(QtCore.QSize(56, 32))
self.label_26.setMaximumSize(QtCore.QSize(56, 32))
self.label_26.setObjectName(_fromUtf8("label_26"))
self.horizontalLayout_17.addWidget(self.label_26)
self.flow2 = QtGui.QLineEdit(self.groupBox_5)
self.flow2.setMinimumSize(QtCore.QSize(113, 22))
self.flow2.setMaximumSize(QtCore.QSize(113, 22))
# self.flow2.setReadOnly(True)
self.flow2.setObjectName(_fromUtf8("flow2"))
self.horizontalLayout_17.addWidget(self.flow2)
self.label_8 = QtGui.QLabel(self.groupBox_5)
self.label_8.setMinimumSize(QtCore.QSize(48, 32))
self.label_8.setMaximumSize(QtCore.QSize(48, 32))
self.label_8.setObjectName(_fromUtf8("label_8"))
self.horizontalLayout_17.addWidget(self.label_8)
self.f2_open = QtGui.QCheckBox(self.groupBox_5)
self.f2_open.setText(_fromUtf8(""))
self.f2_open.setObjectName(_fromUtf8("f2_open"))
self.horizontalLayout_17.addWidget(self.f2_open)
self.horizontalLayout_20.addLayout(self.horizontalLayout_17)
self.verticalLayout_7.addLayout(self.horizontalLayout_20)
self.horizontalLayout_21 = QtGui.QHBoxLayout()
self.horizontalLayout_21.setObjectName(_fromUtf8("horizontalLayout_21"))
|
jstewmon/proselint
|
proselint/checks/wallace/redundancy.py
|
Python
|
bsd-3-clause
| 659
| 0
|
# -*- coding: utf-8 -*-
"""Redundancy.
---
layout: post
source: David Foster Wallace
|
source_url: http://bit.ly/1c85lgR
title: Redundancy
|
date: 2014-06-10 12:31:19
categories: writing
---
Points out use of redundant phrases.
"""
from proselint.tools import memoize, preferred_forms_check
@memoize
def check(text):
"""Suggest the preferred forms."""
err = "wallace.redundancy"
msg = "Redundancy. Use '{}' instead of '{}'."
redundancies = [
["rectangular", ["rectangular in shape"]],
["audible", ["audible to the ear"]],
]
return preferred_forms_check(text, redundancies, err, msg)
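# Hedged usage sketch: running the module directly should flag the redundant
# phrase; the exact error-tuple format comes from proselint.tools.
if __name__ == '__main__':
    print(check("The room was rectangular in shape."))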
|
ToontownUprising/src
|
toontown/hood/Place.py
|
Python
|
mit
| 37,237
| 0.002793
|
from pandac.PandaModules import *
from toontown.toonbase.ToonBaseGlobal import *
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import StateData
from direct.showbase.PythonUtil import PriorityCallbacks
from toontown.safezone import PublicWalk
from toontown.launcher import DownloadForceAcknowledge
import TrialerForceAcknowledge
import ZoneUtil
from toontown.friends import FriendsListManager
from toontown.toonbase import ToontownGlobals
from toontown.toon.Toon import teleportDebug
from toontown.estate import HouseGlobals
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPLocalizer
from otp.avatar import Emote
from otp.avatar.Avatar import teleportNotify
from direct.task import Task
import QuietZoneState
from toontown.distributed import ToontownDistrictStats
class Place(StateData.StateData, FriendsListManager.FriendsListManager):
notify = DirectNotifyGlobal.directNotify.newCategory('Place')
def __init__(self, loader, doneEvent):
StateData.StateData.__init__(self, doneEvent)
FriendsListManager.FriendsListManager.__init__(self)
self.loader = loader
self.dfaDoneEvent = 'dfaDoneEvent'
self.trialerFADoneEvent = 'trialerFADoneEvent'
self.zoneId = None
self.trialerFA = None
self._tiToken = None
self._leftQuietZoneLocalCallbacks = PriorityCallbacks()
self._leftQuietZoneSubframeCall = None
self._setZoneCompleteLocalCallbacks = PriorityCallbacks()
self._setZoneCompleteSubframeCall = None
return
def load(self):
StateData.StateData.load(self)
FriendsListManager.FriendsListManager.load(self)
self.walkDoneEvent = 'walkDone'
self.walkStateData = PublicWalk.PublicWalk(self.fsm, self.walkDoneEvent)
self.walkStateData.load()
self._tempFSM = self.fsm
def unload(self):
StateData.StateData.unload(self)
FriendsListManager.FriendsListManager.unload(self)
self.notify.info('Unloading Place (%s). Fsm in %s' % (self.zoneId, self._tempFSM.getCurrentState().getName()))
if self._leftQuietZoneSubframeCall:
self._leftQuietZoneSubframeCall.cleanup()
self._leftQuietZoneSubframeCall = None
if self._setZoneCompleteSubframeCall:
self._setZoneCompleteSubframeCall.cleanup()
self._setZoneCompleteSubframeCall = None
self._leftQuietZoneLocalCallbacks = None
self._setZoneCompleteLocalCallbacks = None
del self._tempFSM
taskMgr.remove('goHomeFailed')
del self.walkDoneEvent
self.walkStateData.unload()
del self.walkStateData
del self.loader
if self.trialerFA:
self.trialerFA.exit()
del self.trialerFA
return
def _getQZState(self):
if hasattr(base, 'cr') and hasattr(base.cr, 'playGame'):
if hasattr(base.cr.playGame, 'quietZoneStateData') and base.cr.playGame.quietZoneStateData:
return base.cr.playGame.quietZoneStateData
return None
def addLeftQuietZoneCallback(self, callback, priority = None):
qzsd = self._getQZState()
if qzsd:
return qzsd.addLeftQuietZoneCallback(callback, priority)
else:
token = self._leftQuietZoneLocalCallbacks.add(callback, priority=priority)
if not self._leftQuietZoneSubframeCall:
self._leftQuietZoneSubframeCall = SubframeCall(self._doLeftQuietZoneCallbacks, taskMgr.getCurrentTask().getPriority() - 1)
return token
def removeLeftQuietZoneCallback(self, token):
if token is not None:
if token in self._leftQuietZoneLocalCallbacks:
self._leftQuietZoneLocalCallbacks.remove(token)
qzsd = self._getQZState()
if qzsd:
qzsd.removeLeftQuietZoneCallback(token)
return
def _doLeftQuietZoneCallbacks(self):
self._leftQuietZoneLocalCallbacks()
self._leftQuietZoneLocalCallbacks.clear()
self._leftQuietZoneSubframeCall = None
return
def addSetZoneCompleteCallback(self, callback, priority = None):
qzsd = self._getQZState()
if qzsd:
return qzsd.addSetZoneCompleteCallback(callback, priority)
else:
token = self._setZoneCompleteLocalCallbacks.add(callback, priority=priority)
if not self._setZoneCompleteSubframeCall:
self._setZoneCompleteSubframeCall = SubframeCall(self._doSetZoneCompleteLocalCallbacks, taskMgr.getCurrentTask().getPriority() - 1)
return token
def removeSetZoneCompleteCallback(self, token):
if token is not None:
if any(token==x[1] for x in self._setZoneCompleteLocalCallbacks._callbacks):
self._setZoneCompleteLocalCallbacks.remove(token)
qzsd = self._getQZState()
if qzsd:
qzsd.removeSetZoneCompleteCallback(token)
return
def _doSetZoneCompleteLocalCallbacks(self):
self._setZoneCompleteSubframeCall = None
localCallbacks = self._setZoneCompleteLocalCallbacks
self._setZoneCompleteLocalCallbacks()
localCallbacks.clear()
return
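    # Hedged usage sketch (hypothetical caller): register a callback for when the
    # zone change completes and keep the token so it can be cancelled later:
    #   token = place.addSetZoneCompleteCallback(self._onZoneReady, priority=10)
    #   ...
    #   place.removeSetZoneCompleteCallback(token)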
def setState(self, state):
if hasattr(self, 'fsm'):
curState = self.fsm.getName()
if state == 'pet' or curState == 'pet':
                self.preserveFriendsList()
|
self.fsm.request(state)
def getState(self):
if hasattr(self, 'fsm'):
curState = self.fsm.getCurrentState().getName()
return curState
def getZoneId(self):
return self.zoneId
def getTaskZoneId(self):
return self.getZoneId()
def isPeriodTimerEffective(self):
return 1
    def handleTeleportQuery(self, fromAvatar, toAvatar):
|
if base.config.GetBool('want-tptrack', False):
if toAvatar == localAvatar:
toAvatar.doTeleportResponse(fromAvatar, toAvatar, toAvatar.doId, 1, toAvatar.defaultShard, base.cr.playGame.getPlaceId(), self.getZoneId(), fromAvatar.doId)
else:
                self.notify.warning('handleTeleportQuery toAvatar.doId %s != localAvatar.doId %s' % (toAvatar.doId, localAvatar.doId))
else:
fromAvatar.d_teleportResponse(toAvatar.doId, 1, toAvatar.defaultShard, base.cr.playGame.getPlaceId(), self.getZoneId())
def enablePeriodTimer(self):
if self.isPeriodTimerEffective():
if base.cr.periodTimerExpired:
taskMgr.doMethodLater(5, self.redoPeriodTimer, 'redoPeriodTimer')
self.accept('periodTimerExpired', self.periodTimerExpired)
def disablePeriodTimer(self):
taskMgr.remove('redoPeriodTimer')
self.ignore('periodTimerExpired')
def redoPeriodTimer(self, task):
messenger.send('periodTimerExpired')
return Task.done
def periodTimerExpired(self):
self.fsm.request('final')
if base.localAvatar.book.isEntered:
base.localAvatar.book.exit()
base.localAvatar.b_setAnimState('CloseBook', 1, callback=self.__handlePeriodTimerBookClose)
else:
base.localAvatar.b_setAnimState('TeleportOut', 1, self.__handlePeriodTimerExitTeleport)
def exitPeriodTimerExpired(self):
pass
def __handlePeriodTimerBookClose(self):
base.localAvatar.b_setAnimState('TeleportOut', 1, self.__handlePeriodTimerExitTeleport)
def __handlePeriodTimerExitTeleport(self):
base.cr.loginFSM.request('periodTimeout')
def detectedPhoneCollision(self):
self.fsm.request('phone')
def detectedFishingCollision(self):
self.fsm.request('fishing')
def enterStart(self):
pass
def exitStart(self):
pass
def enterFinal(self):
pass
def exitFinal(self):
pass
def enterWalk(self, teleportIn = 0):
self.enterFLM()
self.walkStateData.enter()
if teleportIn == 0:
self.walkStateData.fsm.request('walking')
self.acceptOnce(self.walkDoneEvent, self.handleWalkDone)
if base.cr.productName
|
GuardianRG/angr
|
angr/simos.py
|
Python
|
bsd-2-clause
| 19,350
| 0.004393
|
"""
Manage OS-level configuration
"""
import logging
l = logging.getLogger("angr.simos")
from archinfo import ArchARM, ArchMIPS32, ArchX86, ArchAMD64
from simuvex import SimState, SimIRSB, SimStateSystem, SimActionData
from simuvex import s_options as o
from simuvex.s_procedure import SimProcedure, SimProcedureContinuation
from simuvex.s_type import SimTypePointer, SimTypeFunction, SimTypeTop
from cle.metaelf import MetaELF
from cle.backedcgc import BackedCGC
class SimOS(object):
"""A class describing OS/arch-level configuration"""
def __init__(self, project):
self.arch = project.arch
self.proj = project
self.continue_addr = None
self.configure_project()
def configure_project(self):
"""Configure the project to set up global settings (like SimProcedures)"""
self.continue_addr = self.proj._extern_obj.get_pseudo_addr('angr##simproc_continue')
self.proj.hook(self.continue_addr, SimProcedureContinuation)
def state_blank(self, addr=None, initial_prefix=None, **kwargs):
if kwargs.get('mode', None) is None:
kwargs['mode'] = self.proj._default_analysis_mode
if kwargs.get('memory_backer', None) is None:
kwargs['memory_backer'] = self.proj.loader.memory
if kwargs.get('arch', None) is None:
kwargs['arch'] = self.proj.arch
state = SimState(**kwargs)
state.regs.sp = self.arch.initial_sp
if initial_prefix is not None:
for reg in state.arch.default_symbolic_registers:
state.registers.store(reg, state.se.Unconstrained(initial_prefix + "_" + reg,
state.arch.bits,
explicit_name=True))
for reg, val, is_addr, mem_region in state.arch.default_register_values:
if o.ABSTRACT_MEMORY in state.options and is_addr:
address = state.se.ValueSet(region=mem_region, bits=state.arch.bits, val=val)
state.registers.store(reg, address)
else:
state.registers.store(reg, val)
if addr is None: addr = self.proj.entry
state.regs.ip = addr
state.scratch.ins_addr = addr
state.scratch.bbl_addr = addr
state.scratch.stmt_idx = 0
state.scratch.jumpkind = 'Ijk_Boring'
state.procedure_data.hook_addr = self.continue_addr
return state
def state_entry(self, **kwargs):
return self.state_blank(**kwargs)
def state_full_init(self, **kwargs):
return self.state_entry(**kwargs)
def prepare_call_state(self, calling_state, initial_state=None,
preserve_registers=(), preserve_memory=()):
'''
This function prepares a state that is executing a call instruction.
If given an initial_state, it copies over all of the critical registers to it from the
calling_state. Otherwise, it prepares the calling_state for action.
        This is mostly used to create minimalistic states for CFG generation. Some ABIs, such as MIPS PIE and
        x86 PIE, require certain information to be maintained in certain registers. For example, for
        PIE MIPS, this function transfers t9, gp, and ra to the new state.
'''
if isinstance(self.arch, ArchMIPS32):
if initial_state is not None:
initial_state = self.state_blank()
mips_caller_saves = ('s0', 's1', 's2', 's3', 's4', 's5', 's6', 's7', 'gp', 'sp', 'bp', 'ra')
preserve_registers = preserve_registers + mips_caller_saves + ('t9',)
if initial_state is None:
new_state = calling_state.copy()
else:
new_state = initial_state.copy()
for reg in set(preserve_registers):
new_state.registers.store(reg, calling_state.registers.load(reg))
for addr, val in set(preserve_memory):
new_state.memory.store(addr, calling_state.memory.load(addr, val))
return new_state
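# Hedged usage sketch ("simos" stands for an instantiated SimOS/SimLinux and the
# states are hypothetical): derive a callee state from a call-site state while
# preserving ABI-critical registers.
#   callee_state = simos.prepare_call_state(call_site_state,
#                                           preserve_registers=('t9', 'gp', 'ra'))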
class SimLinux(SimOS):
"""OS-specific configuration for *nix-y OSes"""
def __init__(self, *args, **kwargs):
super(SimLinux, self).__init__(*args, **kwargs)
self._loader_addr = None
self._loader_lock_addr = None
self._loader_unlock_addr = None
self._vsyscall_addr = None
def configure_project(self):
super(SimLinux, self).configure_project()
self._loader_addr = self.proj._extern_obj.get_pseudo_addr('angr##loader')
self._loader_lock_addr = self.proj._extern_obj.get_pseudo_addr('angr##loader_lock')
self._loader_unlock_addr = self.proj._extern_obj.get_pseudo_addr('angr##loader_unlock')
self._vsyscall_addr = self.proj._extern_obj.get_pseudo_addr('angr##vsyscall')
        self.proj.hook(self._loader_addr, LinuxLoader, kwargs={'project': self.proj})
|
self.proj.hook(self._loader_lock_addr, _dl_rtld_lock_recursive)
self.proj.hook(self._loader_unlock_addr, _dl_rtld_unlock_recursive)
self.proj.hook(self._vsyscall_addr, _vsyscall)
ld_obj = self.proj.loader.linux_loader_object
if ld_obj is not None:
tlsfunc = ld_obj.get_symbol('__tls_get_addr')
if tlsfunc is not None:
                self.proj.hook(tlsfunc.rebased_addr, _tls_get_addr, kwargs={'ld': self.proj.loader})
|
_rtld_global = ld_obj.get_symbol('_rtld_global')
if _rtld_global is not None:
if isinstance(self.proj.arch, ArchAMD64):
self.proj.loader.memory.write_addr_at(_rtld_global.rebased_addr + 0xF08, self._loader_lock_addr)
self.proj.loader.memory.write_addr_at(_rtld_global.rebased_addr + 0xF10, self._loader_unlock_addr)
_rtld_global_ro = ld_obj.get_symbol('_rtld_global_ro')
if _rtld_global_ro is not None:
pass
tls_obj = self.proj.loader.tls_object
if tls_obj is not None:
if isinstance(self.proj.arch, ArchAMD64):
self.proj.loader.memory.write_addr_at(tls_obj.thread_pointer + 0x28, 0x5f43414e4152595f)
self.proj.loader.memory.write_addr_at(tls_obj.thread_pointer + 0x30, 0x5054524755415244)
elif isinstance(self.proj.arch, ArchX86):
self.proj.loader.memory.write_addr_at(tls_obj.thread_pointer + 0x10, self._vsyscall_addr)
elif isinstance(self.proj.arch, ArchARM):
self.proj.hook(0xffff0fe0, _kernel_user_helper_get_tls, kwargs={'ld': self.proj.loader})
# Only set up ifunc resolution if we are using the ELF backend on AMD64
if isinstance(self.proj.loader.main_bin, MetaELF):
if isinstance(self.proj.arch, ArchAMD64):
for binary in self.proj.loader.all_objects:
if not isinstance(binary, MetaELF):
continue
for reloc in binary.relocs:
if reloc.symbol is None or reloc.resolvedby is None:
continue
if reloc.resolvedby.type != 'STT_GNU_IFUNC':
continue
gotaddr = reloc.addr + binary.rebase_addr
gotvalue = self.proj.loader.memory.read_addr_at(gotaddr)
if self.proj.is_hooked(gotvalue):
continue
# Replace it with a ifunc-resolve simprocedure!
kwargs = {
'proj': self.proj,
'funcaddr': gotvalue,
'gotaddr': gotaddr,
'funcname': reloc.symbol.name
}
randaddr = self.proj._extern_obj.get_pseudo_addr('ifunc_' + reloc.symbol.name)
self.proj.hook(randaddr, IFuncResolver, kwargs=kwargs)
self.proj.loader.memory.write_addr_at(gotaddr, randaddr)
def state_blank(self, fs=None, **kwargs):
state = super(SimLinux, self).state_blank(
|
clld/tsezacp
|
tests/test_selenium.py
|
Python
|
apache-2.0
| 183
| 0
|
from __future__ import unicode_literals
import time
import pytest
@pytest.mark.selenium
def test_ui(selenium):
selenium.browser.get(selenium.url('/download'))
time.sleep(3)
|