repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
movmov/cc | vendor/Twisted-10.0.0/doc/words/examples/jabber_client.py | Python | apache-2.0 | 881 | 0.012486 | # Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.
# Originally written by Darryl Vandorp
# http://randomthoughts.vandorp.ca/
from twisted.words.protocols.jabber import client, jid
from twisted.words.xish import domish
from twisted.internet import reactor
def authd(xmlstream):
print "authenticated"
presence = domish.Element(('jabber:client','presence'))
xmlstream.send(prese | nce)
xmlstream.addObserver('/message', debug)
xmlstream.addObserver('/presence', debug)
xmlstream.addObserver('/iq', debug)
def debug(elem):
print elem.toXml().encode('utf-8')
print "="*20
myJid = jid.JID('username@server.jabber/twisted_words')
factory = client.basicClientFactory(myJid, 'password')
factory.addBootstrap('//eve | nt/stream/authd',authd)
reactor.connectTCP('server.jabber',5222,factory)
reactor.run()
|
bastibl/gnuradio | gr-zeromq/python/zeromq/qa_zeromq_pubsub.py | Python | gpl-3.0 | 2,008 | 0.003486 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks, zeromq
import time
class qa_zeromq_pubsub (gr_unittest.TestCase):
def setUp (self):
self.send_tb = gr.top_block()
self.recv_tb = gr.top_block()
def tearDown (self):
self.send_tb = None
self.re | cv_tb = None
def test_001 (self):
vlen = 10
src_data = list(range(vlen))*100
src = blocks.vector_source_f(src_data, False, vlen)
zeromq_pub_sink = zeromq.pub_sink(gr.sizeof_float, vlen, "tcp://127.0.0.1:0", 0)
| address = zeromq_pub_sink.last_endpoint()
zeromq_sub_source = zeromq.sub_source(gr.sizeof_float, vlen, address, 1000)
sink = blocks.vector_sink_f(vlen)
self.send_tb.connect(src, zeromq_pub_sink)
self.recv_tb.connect(zeromq_sub_source, sink)
self.recv_tb.start()
time.sleep(0.5)
self.send_tb.start()
time.sleep(0.5)
self.recv_tb.stop()
self.send_tb.stop()
self.recv_tb.wait()
self.send_tb.wait()
self.assertFloatTuplesAlmostEqual(sink.data(), src_data)
if __name__ == '__main__':
gr_unittest.run(qa_zeromq_pubsub)
|
smarr/GraalCompiler | mx.graal/mx_graal.py | Python | gpl-2.0 | 21,852 | 0.003936 | #
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
import os, platform
from os.path import join, exists, basename
from argparse import ArgumentParser
import sanitycheck
import itertools
import json
import re
import mx
import mx_jvmci
from mx_jvmci import JvmciJDKDeployedDist, run_vm, VM, Task, get_vm, isJVMCIEnabled, get_jvmci_jdk, get_jvmci_jdk_dir, buildvms
from mx_unittest import unittest
import mx_gate
_suite = mx.suite('graal')
class GraalJDKDeployedDist(JvmciJDKDeployedDist):
def __init__(self):
JvmciJDKDeployedDist.__init__(self, 'GRAAL_HOTSPOT', compilers=['graal-economy', 'graal'])
def deploy(self, jdkDir):
JvmciJDKDeployedDist.deploy(self, jdkDir)
self._updateGraalPropertiesFile(join(jdkDir, 'jre', 'lib'))
def _updateGraalPropertiesFile(self, jreLibDir):
"""
Updates (or creates) 'jreLibDir'/jvmci/graal.properties to set/modify the
graal.version property.
"""
version = _suite.release_version()
graalProperties = join(jreLibDir, 'jvmci', 'graal.properties')
if not exists(graalProperties):
with open(graalProperties, 'w') as fp:
print >> fp, 'graal.version=' + version
else:
content = []
with open(graalProperties) as fp:
for line in fp:
if line.startswith('graal.version='):
content.append('graal.version=' + version)
else:
content.append(line.rstrip(os.linesep))
with open(graalProperties, 'w') as fp:
fp.write(os.linesep.join(content))
mx_jvmci.jdkDeployedDists += [
JvmciJDKDeployedDist('GRAAL_NODEINFO'),
JvmciJDKDeployedDist('GRAAL_API'),
JvmciJDKDeployedDist('GRAAL_COMPILER'),
JvmciJDKDeployedDist('GRAAL'),
GraalJDKDeployedDist(),
JvmciJDKDeployedDist('GRAAL_TRUFFLE'),
JvmciJDKDeployedDist('GRAAL_TRUFFLE_HOTSPOT'),
]
mx_gate.add_jacoco_includes(['com.oracle.graal.*'])
mx_gate.add_jacoco_excluded_annotations(['@Snippet', '@ClassSubstitution'])
def _run_benchmark(args, availableBenchmarks, runBenchmark):
vmOpts, benchmarksAndOptions = mx.extract_VM_args(args, useDoubleDash=availableBenchmarks is None)
if availableBenchmarks is None:
harnessArgs = benchmarksAndOptions
return runBenchmark(None, harnessArgs, vmOpts)
if len(benchmarksAndOptions) == 0:
mx.abort('at least one benchmark name or "all" must be specified')
benchmarks = list(itertools.takewhile(lambda x: not x.startswith('-'), benchmarksAndOptions))
harnessArgs = benchmarksAndOptions[len(benchmarks):]
if 'all' in benchmarks:
benchmarks = availableBenchmarks
else:
for bm in benchmarks:
if bm not in availableBenchmarks:
mx.abort('unknown benchmark: ' + bm + '\nselect one of: ' + str(availableBenchmarks))
failed = []
for bm in benchmarks:
if not runBenchmark(bm, harnessArgs, vmOpts):
failed.append(bm)
if len(failed) != 0:
mx.abort('Benchmark failures: ' + str(failed))
def dacapo(args):
"""run one or more DaCapo benchmarks"""
def launcher(bm, harnessArgs, extraVmOpts):
return sanitycheck.getDacapo(bm, harnessArgs).test(get_vm(), extraVmOpts=extraVmOpts)
_run_benchmark(args, sanitycheck.dacapoSanityWarmup.keys(), launcher)
def scaladacapo(args):
"""run one or more Scala DaCapo benchmarks"""
def launcher(bm, harnessArgs, extraVmOpts):
return sanitycheck.getScalaDacapo(bm, harnessArgs).test(get_vm(), extraVmOpts=extraVmOpts)
_run_benchmark(args, sanitycheck.dacapoScalaSanityWarmup.keys(), launcher)
# This is different than the 'jmh' commmand in that it
# looks for internal JMH benchmarks (i.e. those that
# depend on the JMH library).
def microbench(args):
"""run JMH microbenchmark projects"""
vmArgs, jmhArgs = mx.extract_VM_args(args, useDoubleDash=True)
if isJVMCIEnabled(get_vm()) and '-XX:-UseJVMCIClassLoader' not in vmArgs:
vmArgs = ['-XX:-UseJVMCIClassLoader'] + vmArgs
# look for -f in JMH arguments
containsF = Fals | e
forking = True
for i in range(len(jmhArgs)):
arg = jmhArgs[i]
if arg.startswith('-f'):
containsF = True
if arg == '-f' and (i+1) < len(jmhArg | s):
arg += jmhArgs[i+1]
try:
if int(arg[2:]) == 0:
forking = False
except ValueError:
pass
# default to -f1 if not specified otherwise
if not containsF:
jmhArgs += ['-f1']
# find all projects with a direct JMH dependency
jmhProjects = []
for p in mx.projects_opt_limit_to_suites():
if 'JMH' in [x.name for x in p.deps]:
jmhProjects.append(p.name)
cp = mx.classpath(jmhProjects)
# execute JMH runner
args = ['-cp', cp]
if not forking:
args += vmArgs
args += ['org.openjdk.jmh.Main']
if forking:
jdk = get_jvmci_jdk()
jvm = get_vm()
def quoteSpace(s):
if " " in s:
return '"' + s + '"'
return s
forkedVmArgs = map(quoteSpace, jdk.parseVmArgs(vmArgs))
args += ['--jvmArgsPrepend', ' '.join(['-' + jvm] + forkedVmArgs)]
run_vm(args + jmhArgs)
def ctw(args):
"""run CompileTheWorld"""
defaultCtwopts = '-Inline'
parser = ArgumentParser(prog='mx ctw')
parser.add_argument('--ctwopts', action='store', help='space separated JVMCI options used for CTW compilations (default: --ctwopts="' + defaultCtwopts + '")', default=defaultCtwopts, metavar='<options>')
parser.add_argument('--jar', action='store', help='jar of classes to compiled instead of rt.jar', metavar='<path>')
args, vmargs = parser.parse_known_args(args)
if args.ctwopts:
# Replace spaces with '#' since -G: options cannot contain spaces
# when they are collated in the "jvmci.options" system property
vmargs.append('-G:CompileTheWorldConfig=' + re.sub(r'\s+', '#', args.ctwopts))
if args.jar:
jar = os.path.abspath(args.jar)
else:
jar = join(get_jvmci_jdk_dir(deployDists=False), 'jre', 'lib', 'rt.jar')
vmargs.append('-G:CompileTheWorldExcludeMethodFilter=sun.awt.X11.*.*')
# suppress menubar and dock when running on Mac; exclude x11 classes as they may cause vm crashes (on Solaris)
vmargs = ['-Djava.awt.headless=true'] + vmargs
vm_ = get_vm()
if isJVMCIEnabled(vm_):
if vm_ == 'jvmci':
vmargs += ['-XX:+BootstrapJVMCI']
vmargs += ['-G:CompileTheWorldClasspath=' + jar, '-XX:-UseJVMCIClassLoader', 'com.oracle.graal.hotspot.CompileTheWorld']
else:
vmargs += ['-XX:+CompileTheWorld', '-Xbootclasspath/p:' + jar]
run_vm(vmargs)
class UnitTestRun:
def __init__(self, name, args):
self.name = name
self.args = args
def run(s |
f-santos/elasticsearch-dsl-py | test_elasticsearch_dsl/test_search.py | Python | apache-2.0 | 15,005 | 0.003932 | from copy import deepcopy
from elasticsearch_dsl import search, query, Q, DocType, utils
def test_execute_uses_cache():
s = search.Search()
r = object()
s._response = r
assert r is s.execute()
def test_cache_can_be_ignored(mock_client):
s = search.Search(using='mock')
r = object()
s._response = r
s.execute(ignore_cache=True)
mock_client.search.assert_called_once_with(
doc_type=[],
index=None,
body={'query': {'match_all': {}}},
)
def test_iter_iterates_over_hits():
s = search.Search()
s._response = [1, 2, 3]
assert [1, 2, 3] == list(s)
def test_count_uses_cache():
s = search.Search()
s._response = utils.AttrDict({'hits': {'total': 42}})
assert 42 == s.count()
def test_cache_isnt_cloned():
s = search.Search()
s._response = object()
assert not hasattr(s._clone(), '_response')
def test_search_starts_with_empty_query():
s = search.Search()
assert s.query._proxied == query.MatchAll()
def test_search_query_combines_query():
s = search.Search()
s2 = s.query('match', f=42)
assert s2.query._proxied == query.Match(f=42)
assert s.query._proxied == query.MatchAll()
s3 = s2.query('match', f=43)
assert s2.query._proxied == query.Match(f=42)
assert s3.query._proxied == query.Bool(must=[query.Match(f=42), query.Match(f=43)])
def test_query_can_be_assigned_to():
s = search.Search()
q = Q('match', title='python')
s.query = q
assert s.query._proxied is q
def test_query_can_be_wrapped():
s = search.Search().query('match', title='python')
s.query = Q('function_score', query=s.query, field_value_factor={'field': 'rating'})
assert {
'query': {
'function_score': {
'functions': [{'field_value_factor': {'field': 'rating'}}],
'query': {'match': {'title': 'python'}}
}
}
}== s.to_dict()
def test_using():
o = object()
o2 = object()
s = search.Search(using=o)
assert s._using is o
s2 = s.using(o2)
assert s._using is o
assert s2._using is o2
def test_methods_are_proxied_to_the_query():
s = search.Search()
assert s.query.to_dict() == {'match_all': {}}
def test_query_always_returns_search():
s = search.Search()
assert isinstance(s.query('match', f=42), search.Search)
def test_source_copied_on_clone():
s = search.Search().source(False)
assert s._clone()._source == s._source
assert s._clone()._source is False
s2 = search.Search().source([])
assert s2._clone()._source == s2._source
assert s2._source == []
s3 = search.Search().source(["some", "fields"])
assert s3._clone()._source == s3._source
assert s3._clone()._source == ["some", "fields"]
def test_aggs_get_copied_on_change():
s = search.Search()
s.aggs.bucket('per_tag', 'terms', field='f').metric('max_score', 'max', field='score')
s2 = s.query('match_all')
s2.aggs.bucket('per_month', 'date_histogram', field='date', interval='month')
s3 = s2.query('match_all')
s3.aggs['per_month'].metric('max_score', 'max', field='score')
s4 = s3._clone()
s4.aggs.metric('max_score', 'max', field='score')
d = {
'query': {'match_all': {}},
'aggs': {
'per_tag': {
'terms': {'field': 'f'},
'aggs': {'max_score': {'max': {'field': 'score'}}}
}
}
}
assert d == s.to_dict()
d['aggs']['per_month'] = {"date_histogram": {'field': 'date', 'interval': 'month'}}
assert d == s2.to_dict()
d['aggs']['per_month']['aggs'] = {"max_score": {"max": {"field": 'score'}}}
assert d == s3.to_dict()
d['aggs']['max_score'] = {"max": {"field": 'score'}}
assert d == s4.to_dict()
def test_search_index():
s = search.Search(index='i')
assert s._index == ['i']
s = s.index('i2')
assert s._index == ['i', 'i2']
s = s.index()
assert s._index is None
s = search.Search(index=('i', 'i2'))
assert s._index == ['i', 'i2']
s = search.Search(index=['i', 'i2'])
assert s._index == ['i', 'i2']
s = search.Search()
s = s.index('i', 'i2')
assert s._index == ['i', 'i2']
s2 = s.index('i3')
assert s._index == ['i', 'i2']
assert s2._index == ['i', 'i2', 'i3']
s = search.Search()
s = s.index(['i', 'i2'], 'i3')
assert s._index == ['i', 'i2', 'i3']
s2 = s.index('i4')
assert s._index == ['i', 'i2', 'i3']
assert s2._index == ['i', 'i2', 'i3', 'i4']
s2 = s.index(['i4'])
assert s2._index == ['i', 'i2', 'i3', 'i4']
s2 = s.index(('i4', 'i5'))
assert s2._index == ['i', 'i2', 'i3', 'i4', 'i5']
def test_search_doc_type():
s = search.Search(doc_type='i')
assert s._doc_type == ['i']
s = s.doc_type('i2')
assert s._doc_type == ['i', 'i2']
s = s.doc_type()
assert s._doc_type == []
s = search.Search(doc_type=('i', 'i2'))
assert s._doc_type == ['i', 'i2']
s = search.Search(doc_type=['i', 'i2'])
assert s._doc_type == ['i', 'i2']
s = search.Search()
s = s.doc_type('i', 'i2')
assert s._doc_type == ['i', 'i2']
s2 = s.doc_type('i3')
assert s._doc_type == ['i', 'i2']
assert s2._doc_type == ['i', 'i2', 'i3']
def test_doc_type_can_be_document_class():
class MyDocType(DocType):
pass
s = search.Search(doc_type=MyDocType)
assert s._doc_type == ['my_doc_type']
assert s._doc_type_map == {'my_doc_type': MyDocType}
s = search.Search().doc_type(MyDocType)
assert s._doc_type == ['my_doc_type']
assert s._doc_type_map == {'my_doc_type': MyDocType}
def test_sort():
s = search.Search()
s = s.sort('fielda', '-fieldb')
assert ['fielda', {'fieldb': {'order': 'desc'}}] == s._sort
assert {'query': {'match_all': {}}, 'sort': ['fielda', {'fieldb': {'order': 'desc'}}]} == s.to_dict()
s = s.sort()
assert [] == s._sort
assert search.Search().to_dict() == s.to_dict()
def test_slice():
s = search.Search()
assert {'query': {'match_all': {}}, 'from': 3, 'size': 7} == s[3:10].to_dict()
assert {'query': {'match_all': {}}, 'from': 0, 'size': 5} == s[:5].to_dict()
assert {'query': {'match_all': {}}, 'from': 3, 'size': 10} == s[3:].to_dict()
assert {'query': {'match_all': {}}, 'from': 0, 'size': 0} == s[0:0].to_dict()
def test_index():
s = search.Search()
assert {'query': {'match_all': {}}, 'from': 3, 'size': 1} == s[3].to_dict()
def test_search_to_dict():
s = search.Search()
assert {"query": {"match_all": {}}} == s.to_dict()
s = s.query('match', f=42)
assert {"query": {"match": {'f': 42}}} == s.to_dict()
assert {"query": {"match": {'f': 42}}, "size": 10} == s.to_dict(size=10)
s.aggs.bucket('per_tag', 'terms', field='f').metric('ma | x_score', 'max', field='score')
d = {
'aggs': {
'per_tag': {
'terms': {'field': 'f'},
'aggs': {'max_score': {'max': {'field': 'score'}}}
}
},
'query': {'match': {'f': 42}}
}
assert d == s.to_dict()
s = search.Search(extr | a={"size": 5})
assert {"query": {"match_all": {}}, "size": 5} == s.to_dict()
s = s.extra(from_=42)
assert {"query": {"match_all": {}}, "size": 5, "from": 42} == s.to_dict()
def test_complex_example():
s = search.Search()
s = s.query('match', title='python') \
.query(~Q('match', title='ruby')) \
.filter(Q('term', category='meetup') | Q('term', category='conference')) \
.post_filter('terms', tags=['prague', 'czech']) \
.script_fields(more_attendees="doc['attendees'].value + 42")
s.aggs.bucket('per_country', 'terms', field='country')\
.metric('avg_attendees', 'avg', field='attendees')
s.query.minimum_should_match = 2
s = s.highlight_options(order='score').highlight('title', 'body', fragment_size=50)
assert {
'query': {
'bool': {
'filter': [
{
'bool': {
'should': [
{'term': {'category': 'meetup'}},
|
sciunto/inforevealer | src/action.py | Python | gpl-2.0 | 5,161 | 0.045146 | # -*- coding: utf-8 -*-
# Inforevealer
# Copyright (C) 2010 Francois Boulogne <fboulogne at april dot org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import io, readconf, getinfo, pastebin
import os, sys, gettext,string, pexpect,getpass
gettext.textdomain('inforevealer')
_ = gettext.gettext
__version__="0.5.1"
def askYesNo(question,default='y'):
""" Yes/no question throught a console """
if string.lower(default) == 'y':
question = question + " [Y/n]"
else:
question = question + " [y/N]"
ret = string.lower(raw_input(question))
if ret == 'y' or ret == "":
answer=True
else:
answer=False
return answer
def RunAs(category_info,gui=False):
""" Check if root is needed, if user want to be root... """
if gui: from gui import yesNoDialog
run_as='user'
if os.getuid() == 0:
#we are root
run_as='root'
else:
#check if root is needed
root_needed=False
for i in category_info:
if i.root:
root_needed=True
break
if root_needed:
#ask if the user want to substitute
question=_("""To generate a complete report, root access is needed.
Do you want to substitute user?""")
if gui:
#substitute=yesNoDialog(question=question)
substitute=True #It seems more confortable to remove the question
else:
#substitute=askYesNo(question)
substitute=True #It seems more confortable to remove the question
if substitute:
run_as="substitute"
else:
run_as="user"
else:
run_as='user'
return run_as
def CompleteReportAsRoot(run_as,tmp_configfile,gui=False):
"""Run a new instance of inforevealer with root priviledge to complete tmp_configfile"""
if gui: from gui import askPassword
if run_as == "substitute":
| #find the substitute user command and run the script
if pexpect.which('su') != None:
message=_("Please, enter the root password.")
root_instance | = str(pexpect.which('su')) + " - -c \'"+ os.path.abspath(sys.argv[0])+" --runfile "+ tmp_configfile+"\'"
elif pexpect.which('sudo') != None: #TODO checkme
message=_("Please, enter your user password.")
root_instance = str(pexpect.which('sudo')) + ' ' + os.path.abspath(sys.argv[0])+' --runfile '+ tmp_configfile
else:
sys.stderr.write(_("Error: No substitute user command available.\n"))
return 1
ret=""
count=0
while ret!=[' \r\n'] and count <3:
#Get password
count+=1
if gui:
password=askPassword(question=message)
else:
print(message)
password=getpass.getpass()
if password != False: #askPassword could return False
#Run the command #TODO exceptions ?
child = pexpect.spawn(root_instance)
ret=child.expect([".*:",pexpect.EOF]) #Could we do more ?
child.sendline(password)
ret = child.readlines()
if ret ==[' \r\n']: return 0
message=_("Wrong password.\nThe log will be generated without root priviledge.")
if gui:
import gtk
md = gtk.MessageDialog(None, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_INFO, gtk.BUTTONS_CLOSE, message)
md.set_title(_("Error"))
md.run()
md.destroy()
else:
print(message)
def action(category,dumpfile,configfile,tmp_configfile,verbosity,gui=False):
if gui: from gui import yesNoDialog
#####################
# Write in dumpfile
#####################
dumpfile_handler= open(dumpfile,'w')
io.print_write_header(dumpfile_handler)
dumpfile_handler.write('Category: '+ category+'\n')
category_info = readconf.LoadCategoryInfo(configfile,category)
#need/want to run commands as...
run_as = RunAs(category_info,gui)
#detect which distribution the user uses
linux_distrib=getinfo.General_info(dumpfile_handler)
# In the case of run_as='substitute'
# a configuration file is generated
# su/sudo is used to run a new instance of inforevealer in append mode
# to complete the report
tmp_configfile_handler= open(tmp_configfile,'w')
for i in category_info:
i.write(linux_distrib,verbosity,dumpfile_handler,dumpfile,run_as,tmp_configfile_handler)
tmp_configfile_handler.close()
#Use su or sudo to complete the report
dumpfile_handler.close() #the next function will modify the report, close the dumpfile
CompleteReportAsRoot(run_as,tmp_configfile,gui)
# Message to close the report
dumpfile_handler= open(dumpfile,'a')
io.write_title("You didn\'t find what you expected?",dumpfile_handler)
dumpfile_handler.write( 'Please, open a bug report on\nhttp://github.com/sciunto/inforevealer\n')
dumpfile_handler.close()
print( _("The output has been dumped in %s") %dumpfile)
|
FrancoisRheaultUS/dipy | dipy/tracking/tests/test_propagation.py | Python | bsd-3-clause | 2,183 | 0 | import numpy as np
from dipy.data import default_sphere
from dipy.tracking.propspeed import ndarray_offset, eudx_both_directions
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_raises, run_module_suite)
def stepped_1d(arr_1d):
# Make a version of `arr_1d` which is not contiguous
return np.vstack((arr_1d, arr_1d)).ravel(order='F')[::2]
def test_offset():
# Test ndarray_offset function
for dt in (np.int32, np.float64):
index = np.array([1, 1], dtype=np.intp)
A = np.array([[1, 0, 0], [0, 2, 0], [0, 0, 3]], dtype=dt)
strides = np.array(A.strides, np.intp)
i_size = A.dtype.itemsize
assert_equal(ndarray_offset(index, strides, 2, i_size), 4)
assert_equal(A.ravel()[4], A[1, 1])
# Index and strides arrays must be C-continuous. Test this is enforced
# by using non-contiguous versions of the input arrays.
assert_raises(ValueError, ndarray_offset,
stepped_1d(index), strides, 2, i_size)
assert_raises(ValueError, ndarray_offset,
index, stepped_1d(strides), 2, i_size)
def test_eudx_both_directions_errors():
# Test error conditions for both directions function
sphere = default_sphere
seed = np.zeros(3, np.float64)
qa = np.zeros((4, 5, 6, 7), np.float64)
ind = qa.copy()
# All of seed, qa, ind, odf_vertices must be C-contiguous. Check by
# passing in versions that aren't C contiguous
assert_raises(ValueError, eudx_both_directions,
stepped_1d(seed), 0, qa, ind, sphere.vertices, 0.5, 0.1,
1., 1., 2)
assert_raises(ValueError, eudx_both_directions,
seed, 0, qa[..., ::2], ind, sp | here.vertices, 0.5, 0.1,
1., 1., 2)
assert_raises(ValueError, eudx_both_directions,
| seed, 0, qa, ind[..., ::2], sphere.vertices, 0.5, 0.1,
1., 1., 2)
assert_raises(ValueError, eudx_both_directions,
seed, 0, qa, ind, sphere.vertices[::2], 0.5, 0.1,
1., 1., 2)
if __name__ == '__main__':
run_module_suite()
|
ep0s/soulmaster | menu.py | Python | gpl-3.0 | 4,205 | 0.000476 | # -*- coding: utf-8 -*-
from sdl2 import SDL_Delay,\
SDL_GetTicks,\
SDL_KEYDOWN,\
SDL_KEYUP,\
SDL_QUIT,\
SDL_Rect,\
SDL_RenderCopy,\
SDLK_ESCAPE,\
SDLK_UP,\
SDLK_DOWN,\
SDLK_RETURN,\
SDL_Quit
from sdl2.ext import Resources,\
get_events
from const import WindowSize, Colors
from input import Input
from ui import DialogBox
from game import Game
FPS = 60 # units.FPS
MAX_FRAME_TIME = int(5 * (1000 / FPS))
RESOURCES = Resources(__file__, 'resources')
class Menu:
def __init__(self, window, world, renderer, factory):
self.window = window
self.renderer = renderer
self.world = world
self.factory = factory
self.rsystem = factory.create_sprite_render_system(window)
self.menu_bg = RESOURCES.get_path("menu_bg.png")
self.menu_cursor = RESOURCES.get_path("menu_cursor.png")
self.running = True
self.position = 460, 340
self.cursor_start_position = 370, 330
self.cursor_position = 0
self.cursor_sprite_size = 32
self.background_sprite = self.factory.from_image(self.menu_bg)
self.cursor_sprite = self.factory.from_image(self.menu_cursor)
self.text = {0: "START",
1: "OPTIONS",
2: "EXIT"}
self.dialog = DialogBox(self.factory,
font_size=32,
fg_color=Colors.WHITE,
bg_color=Colors.BLACK,
font_name="04B_20__.TTF",
text=self.text,
position=self.position,
renderer=self.renderer)
self.sprites = [self.background_sprite]
sprites = self.dialog.get_sprites()
for sprite in sprites:
self.sprites.append(sprite)
self.sprites.append(self.cursor_sprite)
def __del__(self):
SDL_Quit()
def update(self, elapsed_time):
self.cursor_sprite.position = self.cursor_start_position[0], self.cursor_start_position[1] \
+ self.cursor_position * self.cursor_sprite_size
def run(self):
menu_input = Input()
last_update_time = SDL_GetTicks() # units.MS
while self.running:
start_time = SDL_GetTicks() # units.MS
menu_input.begin_new_frame()
menu_events = get_events()
for event in menu_events:
if event.type == SDL_KEYDOWN:
menu_input.key_down_event(event)
elif event.type == SDL_KEYUP:
menu_input.key_up_event(event)
elif event.type == SDL_QUIT:
self.running = False
break
# Exit
if menu_input.was_key_pressed(SDLK_ESCAPE):
self.running = False
# Move the cu | rsor
elif menu_input.was_key_pressed(SDLK_UP):
if self.cursor_position != 0:
self.cursor_position -= 1
elif menu_input.was_key_pressed(SDLK_DOWN):
if self.cursor_position != 2:
self.cursor_position += 1
# Select option
elif menu_input.was_key_pressed(SDLK_RETURN):
self.running = False
| if self.cursor_position == 0:
self.launch_game()
current_time = SDL_GetTicks() # units.MS
elapsed_time = current_time - last_update_time # units.MS
self.update(min(elapsed_time, MAX_FRAME_TIME))
last_update_time = current_time
self.renderer.render(self.sprites)
# This loop lasts 1/60th of a second, or 1000/60th ms
ms_per_frame = 1000 // FPS # units.MS
elapsed_time = SDL_GetTicks() - start_time # units.MS
if elapsed_time < ms_per_frame:
SDL_Delay(ms_per_frame - elapsed_time)
def launch_game(self):
game = Game(self.world, self.window, self.renderer, self.factory)
game.run()
self.running = True
self.run()
|
Balannen/LSMASOMM | atom3/Models/TMWQuestDragonEggActions_MDL.py | Python | gpl-3.0 | 56,827 | 0.017632 | """
__TMWQuestDragonEggActions_MDL.py_____________________________________________________
Automatically generated AToM3 Model File (Do not modify directly)
Author: bogdan
Modified: Wed May 2 00:27:03 2018
______________________________________________________________________________________
"""
from stickylink import *
from widthXfillXdecoration import *
from OrgUnit import *
from Role import *
from Action import *
from IndividualKnArt import *
from Objective import *
from isPartOfOrgUnit import *
from canHaveRole import *
from hasActions import *
from canAccessKnArt import *
from isPartOfObjective import *
from hasObjective import *
from precedentTo import *
from graph_canHaveRole import *
from graph_canAccessKnArt import *
from graph_isPartOfOrgUnit import *
from graph_Action import *
from graph_precedentTo import *
from graph_Objective import *
from graph_hasObjective import *
from graph_Role import *
from graph_OrgUnit import *
from graph_IndividualKnArt import *
from graph_isPartOfObjective import *
from graph_hasActions import *
from ATOM3Enum import *
from ATOM3String import *
from ATOM3BottomType import *
from ATOM3Constraint import *
from ATOM3Attribute import *
from ATOM3Float import *
from ATOM3List import *
from ATOM3Link import *
from ATOM3Connection import *
from ATOM3Boolean import *
from ATOM3Appearance import *
from ATOM3Text import *
from ATOM3Action import *
from ATOM3Integer import *
from ATOM3Port import *
from ATOM3MSEnum import *
def TMWQuestDragonEggActions_MDL(self, rootNode, LSMASOMMRootNode=None):
# --- Generating attributes code for ASG LSMASOMM ---
if( LSMASOMMRootNode ):
# agentImplementation
LSMASOMMRootNode.agentImplementation.setValue( (['SPADE', 'Enmasse', 'EveJS'], 0) )
LSMASOMMRootNode.agentImplementation.config = 0
# author
LSMASOMMRootNode.author.setValue('Annonymous')
# description
LSMASOMMRootNode.description.setValue('\n')
LSMASOMMRootNode.description.setHeight(15)
# name
LSMASOMMRootNode.name.setValue('TMW')
# title
LSMASOMMRootNode.title.setValue('QuestDragonEgg')
# --- ASG attributes over ---
self.obj118=OrgUnit(self)
self.obj118.isGraphObjectVisual = True
if(hasattr(self.obj118, '_setHierarchicalLink')):
self.obj118._setHierarchicalLink(False)
# Individual
self.obj118.Individual.setValue(('1', 0))
self.obj118.Individual.config = 0
# hasActions
self.obj118.hasActions.setActionFlags([ 1, 1, 1, 0])
lcobj2 =[]
cobj2=ATOM3String('ChangeRole', 20)
lcobj2.append(cobj2)
self.obj118.hasActions.setValue(lcobj2)
# ID
self.obj118.ID.setValue('OU|0')
# name
self.obj118.name.setValue('Avatar')
# UnitSize
self.obj118.UnitSize.setValue('Individual')
self.obj118.graphClass_= graph_OrgUnit
if self.genGraphics:
new_obj = graph_OrgUnit(530.0,890.0,self.obj118)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("OrgUnit", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj118.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj118)
self.globalAndLocalPostcondition(self.obj118, rootNode)
self.obj118.postAction( rootNode.CREATE )
self.obj119=OrgUnit(self)
self.obj119.isGraphObjectVisual = True
if(hasattr(self.obj119, '_setHierarchicalLink')):
self.obj119._setHierarchicalLink(False)
# Individual
self.obj119.Individual.setValue(('1', 0))
self.obj119.Individual.config = 0
# hasActions
self.obj119.hasActions.setActionFlags([ 1, 1, 1, 0])
lcobj2 =[]
cobj2=ATOM3String('ChangeRole', 20)
lcobj2.append(cobj2)
self.obj119.hasActions.setValue(lcobj2)
# ID
self.obj119.ID.setValue('OU|1')
# name
self.obj119.name.setValue('Party')
# UnitSize
self.obj119.UnitSize.setValue('Group')
self.obj119.graphClass_= graph_OrgUnit
if self.genGraphics:
new_obj = graph_OrgUnit(370.0,890.0,self.obj119)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("OrgUnit", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj119.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj119)
self.globalAndLocalPostcondition(self.obj119, rootNode)
self.obj119.postAction( rootNode.CREATE )
self.obj104=Role(self)
self.obj104.isGraphObjectVisual = True
if(hasattr(self.obj104, '_setHierarchicalLink')):
self.obj104._setHierarchicalLink(False)
# isMetaRole
self.obj104.isMetaRole.setValue((None, 0))
self.obj104.isMetaRole.config = 0
# hasActions
self.obj104.hasActions.setActionFlags([ 0, 0, 1, 0])
lcobj2 =[]
cobj2=ATOM3String('move', 20)
lcobj2.append(cobj2)
self.obj104.hasActions.setValue(lcobj2)
# ID
| self.obj104.ID.setValue('R|0')
# | name
self.obj104.name.setValue('Scout')
self.obj104.graphClass_= graph_Role
if self.genGraphics:
new_obj = graph_Role(190.0,730.0,self.obj104)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Role", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj104.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj104)
self.globalAndLocalPostcondition(self.obj104, rootNode)
self.obj104.postAction( rootNode.CREATE )
self.obj105=Role(self)
self.obj105.isGraphObjectVisual = True
if(hasattr(self.obj105, '_setHierarchicalLink')):
self.obj105._setHierarchicalLink(False)
# isMetaRole
self.obj105.isMetaRole.setValue((None, 0))
self.obj105.isMetaRole.config = 0
# hasActions
self.obj105.hasActions.setActionFlags([ 0, 0, 1, 0])
lcobj2 =[]
cobj2=ATOM3String('harvestItem', 20)
lcobj2.append(cobj2)
cobj2=ATOM3String('craftItem', 20)
lcobj2.append(cobj2)
self.obj105.hasActions.setValue(lcobj2)
# ID
self.obj105.ID.setValue('R|1')
# name
self.obj105.name.setValue('Maker')
self.obj105.graphClass_= graph_Role
if self.genGraphics:
new_obj = graph_Role(340.0,730.0,self.obj105)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Role", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj105.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj105)
self.globalAndLocalPostcondition(self.obj105, rootNode)
self.obj105.postAction( rootNode.CREATE )
self.obj106=Role(self)
self.obj106.isGraphObjectVisual = True
if(hasattr(self.obj106, '_setHierarchicalLink')):
self.obj106._setHierarchicalLink(False)
# isMetaRole
self.obj106.isMetaRole.setValue((None, 0))
self.obj106.isMetaRole.config = 0
# hasActions
self.obj106.hasActions.setActionFlags([ 0, 0, 1, 0])
lcobj2 =[]
cobj2=ATOM3String('learnSpell', 20)
lcobj2.append(cobj2)
self.obj106.hasActions.setValue(lcobj2)
# ID
self.obj106.ID.setValue('R|2')
# name
self.obj106.name.setValue('Wizard')
self.obj106.graphClass_= graph_Role
if self.genGraphics:
new_obj = graph_Role(790.0,730.0,self.obj106)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Role", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj106.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj106)
self.globalAndLocalPostcondition(self.obj106, rootNode)
self.obj106.postAction( rootNode.CREATE )
self.obj125=Role |
brython-dev/brython | www/src/Lib/turtle.py | Python | bsd-3-clause | 51,962 | 0.002656 | # A revised version of CPython's turtle module written for Brython
#
# Note: This version is not intended to be used in interactive mode,
# nor use help() to look up methods/functions definitions. The docstrings
# have thus been shortened considerably as compared with the CPython's version.
#
# All public methods/functions of the CPython version should exist, if only
# to print out a warning that they are not implemented. The intent is to make
# it easier to "port" any existing turtle program from CPython to the browser.
#
# IMPORTANT: We use SVG for drawing turtles. If we have a turtle at an angle
# of 350 degrees and we rotate it by an additional 20 degrees, we will have
# a turtle at an angle of 370 degrees. For turtles drawn periodically on
# a screen (like typical animations, including the CPython turtle module),
# drawing a turtle with a rotation of 370 degrees is the same as a rotation of
# 10 degrees. However, using SVG, if we "slowly" animate an object,
# rotating it from 350 to 370 degrees, the result will not be the same
# as rotating it from 350 to 10 degrees. For this reason, we did not use the
# Vec2D class from the CPython module and handle the rotations quite differently.
import math
import sys
from math import cos, sin
from browser import console, document, html, timer
import _svg as svg
#import copy
# Even though it is a private object, use the same name for the configuration
# dict as the CPython's module.
# Commented out configuration items are those found on the CPython version
# Runtime configuration for this browser-based turtle implementation.
# Commented-out entries exist in CPython's turtle module but are not
# supported here (see the note above this dict).
_CFG = {
    # "width" : 0.5,               # Screen
    # "height" : 0.75,
    "canvwidth" : 500,
    "canvheight": 500,
    # "leftright": None,
    # "topbottom": None,
    "mode": "standard",
    # "colormode": 1.0,
    # "delay": 10,
    # "undobuffersize": 1000,
    "shape": "classic",
    "pencolor" : "black",
    "fillcolor" : "black",
    # "resizemode" : "noresize",
    "visible" : True,
    # "language": "english", # docstrings
    # "exampleturtle": "turtle",
    # "examplescreen": "screen",
    # "title": "Python Turtle Graphics",
    # "using_IDLE": False
    # Below are configuration items specific to this version
    "turtle_canvas_wrapper": None,
    "turtle_canvas_id": "turtle-canvas",
    "min_duration": "1ms"
}

# Snapshot of the configuration taken at import time, before any
# set_defaults() call can modify _CFG.
_cfg_copy = _CFG.copy()
def set_defaults(**params):
    """Override entries of the module configuration and reset the screen.

    Each keyword argument replaces the matching entry in the module-level
    ``_CFG`` dict; the singleton ``Screen`` is then reset so the new
    settings take effect.
    """
    for key, value in params.items():
        _CFG[key] = value
    Screen().reset()
class FormattedTuple(tuple):
    """Immutable 2-tuple whose repr shows both coordinates with two decimals,
    giving a nicer representation of a turtle position."""

    def __new__(cls, x, y):
        # Tuples are immutable, so both components must be supplied at
        # creation time via __new__ rather than __init__.
        return super(FormattedTuple, cls).__new__(cls, (x, y))

    def __repr__(self):
        return "({:.2f}, {:.2f})".format(self[0], self[1])
def create_circle(r):
    """Create an SVG circle of radius ``r`` centered at the origin."""
    # Use the SVG-standard cx/cy centre attributes (consistent with
    # Screen._dot below).  x/y are not valid <circle> attributes and were
    # silently ignored; that only worked because cx/cy default to 0.
    circle = svg.circle(cx=0, cy=0, r=r, stroke="black", fill="black")
    circle.setAttribute("stroke-width", 1)
    return circle
def create_polygon(points):
    """Create an SVG polygon from the given (x, y) point pairs."""
    # Each vertex becomes an "x,y " string, as expected by svg.polygon.
    vertex_strings = ["{},{} ".format(x, y) for x, y in points]
    polygon = svg.polygon(points=vertex_strings, stroke="black", fill="black")
    polygon.setAttribute("stroke-width", 1)
    return polygon
def create_rectangle(width=2, height=2, rx=None, ry=None):
    """Create an SVG rectangle of the given size centered at the origin.

    When ``rx``/``ry`` are given they are forwarded as the corner radii,
    producing rounded corners.
    """
    rectangle = svg.rect(x=-width / 2, y=-height / 2, width=width,
                         height=height, stroke="black", fill="black")
    rectangle.setAttribute("stroke-width", 1)
    # Only set the corner radii that were actually requested.
    for attribute, radius in (("rx", rx), ("ry", ry)):
        if radius is not None:
            rectangle.setAttribute(attribute, radius)
    return rectangle
def create_square(size=2, r=None):
    '''Creates a square of side length `size` centered at the origin.
    `r`, when given, is used as the corner radius for rounded corners.'''
    return create_rectangle(width=size, height=size, rx=r, ry=r)
class TurtleGraphicsError(Exception):
    """Exception raised for errors specific to turtle graphics."""
    pass
class Singleton(type):
    """Metaclass ensuring at most one instance per class.

    The first call to the class constructs and caches the instance; every
    later call returns that same cached object, ignoring its arguments.
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
class Screen(metaclass=Singleton):
def __init__(self):
self.shapes = {
'arrow': (create_polygon, ((-10, 0), (10, 0), (0, 10))),
'turtle': (create_polygon, ((0, 16), (-2, 14), (-1, 10), (-4, 7),
(-7, 9), (-9, 8), (-6, 5), (-7, 1), (-5, -3), (-8, -6),
(-6, -8), (-4, -5), (0, -7), (4, -5), (6, -8), (8, -6),
(5, -3), (7, 1), (6, 5), (9, 8), (7, 9), (4, 7), (1, 10),
(2, 14))),
'classic': (create_polygon, ((0, 0), (-5, -9), (0, -7), (5, -9))),
'triangle': (create_polygon, ((10, -5.77), (0, 11.55), (-10, -5.77))),
'square': (create_square, 20),
'circle': (create_circle, 10)
}
self.reset()
self._set_geometry()
def bgcolor(self, color=None):
"""sets the b | ackground with the given color if color is not None,
else return current background color.
"""
if color is None:
return self.background_color
self.background_color = color
width = _CFG['canvwidth']
height = _CFG['canvheight']
if self.mode() in ['logo', 'standard']:
x = -width // 2
y = -height / | / 2
else:
x = 0
y = -height
self.frame_index += 1
rect = svg.rect(x=x, y=y, width=width, height=height, fill=color,
style={'display': 'none'})
an = svg.animate(Id="animation_frame%s" % self.frame_index,
attributeName="display", attributeType="CSS",
From="block", to="block",
dur=_CFG["min_duration"], fill='freeze')
an.setAttribute('begin', "animation_frame%s.end" % (self.frame_index - 1))
rect <= an
self.background_canvas <= rect
def _convert_coordinates(self, x, y):
"""In the browser, the increasing y-coordinate is towards the
bottom of the screen; this is the opposite of what is assumed
normally for the methods in the CPython turtle module.
This method makes the necessary orientation. It should be called
just prior to creating any SVG element.
"""
return x * self.yscale, self.y_points_down * y * self.yscale
def create_svg_turtle(self, _turtle, name):
if name in self.shapes:
fn = self.shapes[name][0]
arg = self.shapes[name][1]
else:
print("Unknown turtle '%s'; the default turtle will be used")
fn = self.shapes[_CVG["shape"]][0]
arg = self.shapes[_CVG["shape"]][1]
shape = fn(arg)
if self._mode == 'standard' or self._mode == 'world':
rotation = -90
else:
rotation = 0
return shape, rotation
def _dot(self, pos, size, color):
"""Draws a filled circle of specified size and color"""
if color is None:
color = 'black'
if size is None or size < 1:
size = 1
self.frame_index += 1
# `size` represents the diameter, svg needs the radius
radius = size / 2
x, y = self._convert_coordinates(pos[0], pos[1])
circle = svg.circle(cx=x, cy=y, r=radius, fill=color,
style={'display': 'none'})
an = svg.animate(Id="animation_frame%s" % self.frame_index,
attributeName="display", attributeType="CSS",
From="block", to="block",
dur=_CFG["min_duration"], fill='freeze')
an.setAttribute('begin', "animation_frame%s.end" % (self.frame_index - 1))
circle <= an
self.canvas <= circle
def _drawline(self, _turtle, coordlist=None,
color=None, width=1, speed=None):
"" |
ceph/autotest | frontend/afe/frontend_test_utils.py | Python | gpl-2.0 | 7,043 | 0.00213 | import atexit, datetime, os, tempfile, unittest
import common
from autotest_lib.frontend import setup_test_environment
from autotest_lib.frontend import thread_local
from autotest_lib.frontend.afe import models, model_attributes
from autotest_lib.client.common_lib import global_config
from autotest_lib.client.common_lib.test_utils import mock
class FrontendTestMixin(object):
    """Mixin for AFE frontend unit tests.

    Provides a populated test database (hosts, labels, ACLs, atomic
    groups) plus helpers to create Job rows with various scheduling
    configurations.
    """

    def _fill_in_test_data(self):
        """Populate the test database with some hosts and labels."""
        if models.DroneSet.drone_sets_enabled():
            models.DroneSet.objects.create(
                    name=models.DroneSet.default_drone_set_name())

        acl_group = models.AclGroup.objects.create(name='my_acl')
        acl_group.users.add(models.User.current_user())

        self.hosts = [models.Host.objects.create(hostname=hostname)
                      for hostname in
                      ('host1', 'host2', 'host3', 'host4', 'host5', 'host6',
                       'host7', 'host8', 'host9')]
        acl_group.hosts = self.hosts
        models.AclGroup.smart_get('Everyone').hosts = []

        self.labels = [models.Label.objects.create(name=name) for name in
                       ('label1', 'label2', 'label3', 'label4', 'label5',
                        'label6', 'label7', 'label8')]

        platform = models.Label.objects.create(name='myplatform', platform=True)
        for host in self.hosts:
            host.labels.add(platform)

        atomic_group1 = models.AtomicGroup.objects.create(
                name='atomic1', max_number_of_machines=2)
        atomic_group2 = models.AtomicGroup.objects.create(
                name='atomic2', max_number_of_machines=2)

        self.label3 = self.labels[2]
        self.label3.only_if_needed = True
        self.label3.save()
        self.label4 = self.labels[3]
        self.label4.atomic_group = atomic_group1
        self.label4.save()
        self.label5 = self.labels[4]
        self.label5.atomic_group = atomic_group1
        self.label5.save()
        self.hosts[0].labels.add(self.labels[0])  # label1
        self.hosts[1].labels.add(self.labels[1])  # label2
        self.label6 = self.labels[5]
        self.label7 = self.labels[6]
        self.label8 = self.labels[7]
        self.label8.atomic_group = atomic_group2
        self.label8.save()
        for hostnum in xrange(4, 7):  # host5..host7
            self.hosts[hostnum].labels.add(self.label4)  # an atomic group label
            self.hosts[hostnum].labels.add(self.label6)  # a normal label
        self.hosts[6].labels.add(self.label7)
        for hostnum in xrange(7, 9):  # host8..host9
            self.hosts[hostnum].labels.add(self.label5)  # an atomic group label
            self.hosts[hostnum].labels.add(self.label6)  # a normal label
            self.hosts[hostnum].labels.add(self.label7)

    def _frontend_common_setup(self, fill_data=True):
        """Set up the mock god and test environment; optionally fill data."""
        self.god = mock.mock_god(ut=self)
        setup_test_environment.set_up()
        global_config.global_config.override_config_value(
                'AUTOTEST_WEB', 'parameterized_jobs', 'False')
        if fill_data:
            self._fill_in_test_data()

    def _frontend_common_teardown(self):
        """Tear down the test environment and unstub all mocks."""
        setup_test_environment.tear_down()
        thread_local.set_user(None)
        self.god.unstub_all()

    def _create_job(self, hosts=(), metahosts=(), priority=0, active=False,
                    synchronous=False, atomic_group=None, hostless=False,
                    drone_set=None, control_file='control',
                    parameterized_job=None):
        """
        Create a job row in the test database.

        @param hosts - A list of explicit host ids for this job to be
                scheduled on.
        @param metahosts - A list of label ids for each host that this job
                should be scheduled on (meta host scheduling).
        @param priority - The job priority (integer).
        @param active - bool, mark this job as running or not in the database?
        @param synchronous - bool, if True use synch_count=2 otherwise use
                synch_count=1.
        @param atomic_group - An atomic group id for this job to schedule on
                or None if atomic scheduling is not required.  Each metahost
                becomes a request to schedule an entire atomic group.
                This does not support creating an active atomic group job.
        @param hostless - if True, this job is intended to be hostless (in that
                case, hosts, metahosts, and atomic_group must all be empty)
        @param drone_set - A DroneSet to assign; defaults to the configured
                default drone set when drone sets are enabled.
        @param control_file - Control file contents for the job.
        @param parameterized_job - Optional parameterized job to attach.

        @returns A Django frontend.afe.models.Job instance.
        """
        # Immutable () defaults replace the shared-mutable [] defaults;
        # both parameters are only iterated / truth-tested below.
        if not drone_set:
            drone_set = (models.DroneSet.default_drone_set_name()
                         and models.DroneSet.get_default())

        assert not (atomic_group and active)  # TODO(gps): support this

        synch_count = 2 if synchronous else 1
        created_on = datetime.datetime(2008, 1, 1)
        status = models.HostQueueEntry.Status.QUEUED
        if active:
            status = models.HostQueueEntry.Status.RUNNING

        job = models.Job.objects.create(
            name='test', owner='autotest_system', priority=priority,
            synch_count=synch_count, created_on=created_on,
            reboot_before=model_attributes.RebootBefore.NEVER,
            drone_set=drone_set, control_file=control_file,
            parameterized_job=parameterized_job)

        for host_id in hosts:
            models.HostQueueEntry.objects.create(job=job, host_id=host_id,
                                                 status=status,
                                                 atomic_group_id=atomic_group)
            models.IneligibleHostQueue.objects.create(job=job, host_id=host_id)
        for label_id in metahosts:
            models.HostQueueEntry.objects.create(job=job, meta_host_id=label_id,
                                                 status=status,
                                                 atomic_group_id=atomic_group)

        if atomic_group and not (metahosts or hosts):
            # Create a single HQE to request the atomic group of hosts even if
            # no metahosts or hosts are supplied.
            models.HostQueueEntry.objects.create(job=job,
                                                 status=status,
                                                 atomic_group_id=atomic_group)

        if hostless:
            assert not (hosts or metahosts or atomic_group)
            models.HostQueueEntry.objects.create(job=job, status=status)
        return job

    def _create_job_simple(self, hosts, use_metahost=False,
                           priority=0, active=False, drone_set=None):
        """An alternative interface to _create_job"""
        args = {'hosts': [], 'metahosts': []}
        if use_metahost:
            args['metahosts'] = hosts
        else:
            args['hosts'] = hosts
        return self._create_job(priority=priority, active=active,
                                drone_set=drone_set, **args)
|
jbradberry/django-starsweb | starsweb/tests/test_plugins.py | Python | mit | 2,406 | 0 | from __future__ import absolute_import
from django | .contrib.auth.models import User
from django.test import TestCase
from .. import models, plugins
class TurnGenerationTestCase(TestCase):
    """Object-level permission checks for the TurnGeneration plugin.

    An active ambassador on a race must hold all pause/ready permissions
    for that race; an inactive ambassador, or a user with no ambassador
    at all, must hold none of them.
    """

    # The turngeneration permissions exercised by every test below.
    PERMS = (
        'turngeneration.add_pause',
        'turngeneration.change_pause',
        'turngeneration.delete_pause',
        'turngeneration.add_ready',
        'turngeneration.change_ready',
        'turngeneration.delete_ready',
    )

    def setUp(self):
        self.plugin = plugins.TurnGeneration()
        self.user = User.objects.create_user(username='test',
                                             password='password')
        self.game = models.Game(
            name="Foobar",
            slug="foobar",
            host=self.user,
            description="This *game* is foobared.",
        )
        self.game.save()
        models.GameOptions.objects.create(game=self.game)

    def _create_race(self):
        """Create and return the test race used by all test methods."""
        return self.game.races.create(name="Gestalti", plural_name="Gestalti",
                                      slug="gestalti")

    def _assert_perms(self, race, expected):
        """Assert that self.user holds (or lacks) every perm in PERMS."""
        for perm in self.PERMS:
            self.assertEqual(self.user.has_perm(perm, race), expected)

    def test_active_ambassador(self):
        race = self._create_race()
        race.ambassadors.create(name="KonTiki", user=self.user, active=True)
        self._assert_perms(race, True)

    def test_inactive_ambassador(self):
        race = self._create_race()
        race.ambassadors.create(name="Jeff", user=self.user, active=False)
        self._assert_perms(race, False)

    def test_user_not_on_race(self):
        race = self._create_race()
        self._assert_perms(race, False)
|
fzimmermann89/pyload | module/plugins/internal/Captcha.py | Python | gpl-3.0 | 4,078 | 0.006866 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import time
from module.plugins.internal.Plugin import Plugin
from module.plugins.internal.utils import encode
class Captcha(Plugin):
    """Base anti-captcha plugin.

    Downloads a captcha image and decrypts it, first trying OCR (when
    enabled) and then falling back to pyLoad's interactive captcha
    manager (user input or an external solving service).
    """
    __name__ = "Captcha"
    __type__ = "captcha"
    __version__ = "0.47"
    __status__ = "stable"

    __description__ = """Base anti-captcha plugin"""
    __license__ = "GPLv3"
    __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]

    def __init__(self, plugin):  #@TODO: Pass pyfile instead plugin, so store plugin's html in its associated pyfile as data
        self._init(plugin.pyload)

        # Hoster/crypter plugin on whose behalf we solve captchas.
        self.plugin = plugin
        self.task = None  #: captchaManager task

        self.init()

    def _log(self, level, plugintype, pluginname, messages):
        # Prefix our own name so log lines can be told apart from the
        # hoster plugin's output; delegate to the hoster's logger.
        messages = (self.__name__,) + messages
        return self.plugin._log(level, plugintype, self.plugin.__name__, messages)

    def recognize(self, image):
        """
        Extend to build your custom anti-captcha ocr.

        :param image: path of the captcha image file on disk
        :return: recognized text, or a falsy value to fall back to the
                 interactive captcha manager
        """
        pass

    def decrypt(self, url, get={}, post={}, ref=False, cookies=True, decode=False, req=None,
                input_type='jpg', output_type='textual', ocr=True, timeout=120):
        """Download the captcha image at `url` and decrypt it.

        See :meth:`decrypt_image` for the meaning of the remaining
        parameters and the return value.
        """
        # NOTE(review): the mutable {} defaults are shared across calls;
        # safe only as long as load() does not mutate them.
        img = self.load(url, get=get, post=post, ref=ref, cookies=cookies, decode=decode, req=req or self.plugin.req)
        return self.decrypt_image(img, input_type, output_type, ocr, timeout)

    def decrypt_image(self, data, input_type='jpg', output_type='textual', ocr=False, timeout=120):
        """
        Loads a captcha and decrypts it with ocr, plugin, user input

        :param data: image raw data
        :param input_type: file type of the image (e.g. 'jpg')
        :param output_type: 'textual' if text is written on the captcha\
        or 'positional' for captcha where the user have to click\
        on a specific region on the captcha
        :param ocr: if truthy, try OCR first: a plugin name string loads that
        OCR plugin, any other truthy value uses self.recognize()
        :param timeout: seconds to wait for an interactive result (min. 50)
        :return: result of decrypting
        """
        result = ""
        # Derive a quasi-unique filename suffix from the current time.
        time_ref = ("%.2f" % time.time())[-6:].replace(".", "")

        with open(os.path.join("tmp", "captcha_image_%s_%s.%s" % (self.plugin.__name__, time_ref, input_type)), "wb") as tmp_img:
            tmp_img.write(encode(data))

        if ocr:
            if isinstance(ocr, basestring):
                OCR = self.pyload.pluginManager.loadClass("captcha", ocr)  #: Rename `captcha` to `ocr` in 0.4.10
                result = OCR(self.plugin).recognize(tmp_img.name)
            else:
                result = self.recognize(tmp_img.name)

        # Fall back to the interactive captcha manager when OCR is
        # disabled or produced nothing.
        if not result:
            captchaManager = self.pyload.captchaManager

            try:
                self.task = captchaManager.newTask(data, input_type, tmp_img.name, output_type)

                captchaManager.handleCaptcha(self.task)

                self.task.setWaiting(max(timeout, 50))  #@TODO: Move to `CaptchaManager` in 0.4.10
                # Poll until the user / service answered or the task timed out.
                while self.task.isWaiting():
                    self.plugin.check_status()
                    time.sleep(1)

            finally:
                captchaManager.removeTask(self.task)

            if self.task.error:
                self.fail(self.task.error)

            elif not self.task.result:
                self.plugin.retry_captcha(msg=_("No captcha result obtained in appropriate time"))

            result = self.task.result

        # Keep the image around in debug mode for later inspection.
        if not self.pyload.debug:
            try:
                os.remove(tmp_img.name)

            except OSError, e:
                self.log_warning(_("Error removing `%s`") % tmp_img.name, e)

        # self.log_info(_("Captcha result: ") + result)  #@TODO: Remove from here?

        return result

    def invalid(self):
        """Report the previously returned captcha result as wrong."""
        if not self.task:
            return

        self.log_warning(_("Invalid captcha"))
        self.task.invalid()

    def correct(self):
        """Report the previously returned captcha result as correct."""
        if not self.task:
            return

        self.log_info(_("Correct captcha"))
        self.task.correct()
|
learningequality/kolibri | kolibri/core/analytics/management/commands/benchmark.py | Python | mit | 5,977 | 0.002008 | import sys
from django.conf import settings
from django.core.management.base import BaseCommand
from morango.models import InstanceIDModel
import kolibri
from kolibri.core.analytics import SUPPORTED_OS
from kolibri.core.analytics.measurements import get_channels_usage_info
from kolibri.core.analytics.measurements import get_db_info
from kolibri.core.analytics.measurements import get_kolibri_process_cmd
from kolibri.core.analytics.measurements import get_kolibri_use
from kolibri.core.analytics.measurements import get_machine_info
from kolibri.core.analytics.measurements import get_requests_info
from kolibri.utils.server import installation_type
from kolibri.utils.server import NotRunning
from kolibri.utils.system import get_free_space
from kolibri.utils.time_utils import local_now
def format_line(parameter, value, indented=False):
    """Return one report line of the form ``* <parameter>:   <value>``.

    The label is left-justified to a fixed width so values line up in
    columns; ``indented`` selects the narrower, nested variant (`` * ``).
    """
    label = "{}:".format(parameter)
    if indented:
        info = " * {:30}".format(label)
    else:
        info = "* {:32}".format(label)
    return "{}{}".format(info, value)
class Command(BaseCommand):
    """
    This command will output information about different parameters of the server running Kolibri

    Output example:

    Sessions
    * Active sessions (guests incl): 10
    * Active users in (10 min):      6
    * Active users in (1 min):       3

    CPU
    * Total processes:               351
    * Used CPU:                      33.6%
    * Kolibri CPU usage:             22.3%

    Memory
    * Used memory:                   9.3 GB
    * Total memory:                  16.0 GB
    * Kolibri memory usage:          56.8 MB

    Channels
    * Total Channels:                2
    * Khan Academy (English)
      * Accesses:                    150
      * Time spent:                  301.22 s
    * African Storybook
      * Accesses:                    3
      * Time spent:                  18.00 s

    Requests timing
    * Homepage:                      0.03 s
    * Recommended channels:          0.01 s
    * Channels:                      0.02 s

    Device info
    * Version:                       (version)
    * OS:                            (os)
    * Installer:                     (installer)
    * Database:                      (database_path)
    * Device name:                   (device_name)
    * Free disk space:               (content_storage_free_space)
    * Server time:                   (server_time)
    * Server timezone:               (server_timezone)
    """
    help = "Outputs performance info and statistics of usage for the running Kolibri instance in this server"

    def handle(self, *args, **options):
        """Collect all measurements and print the formatted report."""
        if not SUPPORTED_OS:
            print("This OS is not yet supported")
            sys.exit(1)

        # Bail out early if the Kolibri server is not running.
        try:
            get_kolibri_use()
        except NotRunning:
            sys.exit("Profile command executed while Kolibri server was not running")

        # NOTE(review): result discarded -- presumably warms up the
        # request-timing measurement before the real call below; confirm.
        get_requests_info()

        self.messages = []
        self.add_header("Sessions")
        session_parameters = (
            "Active sessions (guests incl)",
            "Active users in (10 min)",
            "Active users in (1 min)",
        )
        session_info = get_db_info()
        self.add_section(session_parameters, session_info)

        self.add_header("CPU")
        kolibri_cpu, kolibri_mem = get_kolibri_use()
        used_cpu, used_memory, total_memory, total_processes = get_machine_info()
        cpu_parameters = ("Total processes", "Used CPU", "Kolibri CPU usage")
        cpu_values = (
            total_processes,
            "{} %".format(used_cpu),
            "{} %".format(kolibri_cpu),
        )
        self.add_section(cpu_parameters, cpu_values)

        self.add_header("Memory")
        memory_parameters = ("Used memory", "Total memory", "Kolibri memory usage")
        memory_values = (
            "{} Mb".format(used_memory),
            "{} Mb".format(total_memory),
            "{} Mb".format(kolibri_mem),
        )
        self.add_section(memory_parameters, memory_values)

        self.add_header("Channels")
        channels_stats = get_channels_usage_info()
        self.messages.append(format_line("Total Channels", str(len(channels_stats))))
        for channel in channels_stats:
            # Channel name in magenta; per-channel stats indented below it.
            self.messages.append("\033[95m* {}\033[0m".format(channel.name))
            self.messages.append(format_line("Accesses", channel.accesses, True))
            self.messages.append(format_line("Time spent", channel.time_spent, True))

        self.add_header("Requests timing")
        requests_stats = get_requests_info()
        requests_parameters = ("Homepage", "Recommended channels", "Channels")
        self.add_section(requests_parameters, requests_stats)

        self.add_header("Device info")
        instance_model = InstanceIDModel.get_or_create_current_instance()[0]
        self.messages.append(format_line("Version", kolibri.__version__))
        self.messages.append(format_line("OS", instance_model.platform))
        self.messages.append(
            format_line("Installer", installation_type(get_kolibri_process_cmd()))
        )
        self.messages.append(
            format_line("Database", settings.DATABASES["default"]["NAME"])
        )
        self.messages.append(format_line("Device name", instance_model.hostname))
        self.messages.append(
            format_line(
                "Free disk space", "{} Mb".format(get_free_space() / pow(10, 6))
            )
        )
        self.messages.append(format_line("Server time", local_now()))
        self.messages.append(format_line("Server timezone", settings.TIME_ZONE))
        self.messages.append("")
        print("\n".join(self.messages))

    def add_header(self, header):
        """Append a blank line plus a bold (ANSI) section header."""
        self.messages.append("")
        self.messages.append("\033[1m{}\033[0m".format(header))

    def add_section(self, params, values):
        """Append one aligned "param: value" line per parameter."""
        for index, param in enumerate(params):
            self.messages.append(format_line(param, values[index]))
|
def fac(n):
    """Return n! (the factorial of a non-negative integer n)."""
    # Iterative product instead of recursion: avoids the interpreter's
    # recursion limit for large n; fac(0) == fac(1) == 1 falls out of
    # the empty/one-term product.
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
def smallfacs(T):
    """Read T integers from stdin, one per line, printing each factorial.

    Python 2 only: relies on `xrange`, `raw_input` and the `print`
    statement.
    """
    for t in xrange(T):
        print fac(int(raw_input()))

if __name__ == "__main__":
    # First stdin line is the number of test cases T.
    smallfacs(int(raw_input()))
|
lukasjuhrich/pycroft | setup.py | Python | apache-2.0 | 2,320 | 0 | """
Pycroft
-------
Pycroft is the user management system of the AG DSN
(Arbeitsgemeinschaft Dresdner Studentennetz)
Notes for developers
--------------------
When editing this file, you need to re-build the docker image for the
changes to take effect. On a running system, you can just execute
``pip install -e .`` to update e.g. console script names.
"""
from setuptools import setup, find_packages
# Package metadata and build configuration; two list entries below were
# repaired from extraction-garbled lines ('nose' and the license classifier).
setup(
    name="pycroft",
    author="The Pycroft Authors",
    description="AG DSN user management software",
    long_description=__doc__,
    version="0.1.0",
    url="http://github.com/agdsn/pycroft/",
    # Ship every package except the test suite.
    packages=find_packages(exclude=["tests", "tests.*"]),
    include_package_data=True,
    zip_safe=False,
    python_requires=">= 3.4",
    install_requires=[
        'alembic',
        'celery ~= 3.1.25',
        'Flask',
        'Flask-Babel',
        'Flask-Login',
        'Flask-WTF',
        'fints',
        'Jinja2',
        'MarkupSafe',
        'SQLAlchemy >= 1.1',
        'WTForms',
        'Werkzeug',
        'jsonschema',
        'ipaddr >= 2.2.0',
        'passlib',
        'psycopg2 >= 2.7.0',
        'reportlab',
        'simplejson',
        'wrapt',
    ],
    dependency_links=[
        'git+git://github.com/lukasjuhrich/sqlalchemy_schemadisplay.git'
        '@master#egg=sqlalchemy-schemadisplay',
    ],
    extras_require={
        'SchemaDisplay': [
            'sqlalchemy-schemadisplay',
        ]
    },
    tests_require=[
        'factory-boy',
        'Flask-Testing',
        'fixture',
        'nose',
        'pydot',
    ],
    entry_points={
        'console_scripts': [
            'pycroft = scripts.server_run:main',
            'pycroft_ldap_sync = ldap_sync.__main__:main',
        ]
    },
    license="Apache Software License",
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Flask',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Internet',
    ],
)
|
maxpumperla/hyperas | hyperas/optim.py | Python | mit | 11,301 | 0.002124 | import inspect
import os
import re
import sys
import nbformat
import numpy as np
from hyperopt import fmin
from nbconvert import PythonExporter
from .ensemble import VotingModel
from .utils import (
remove_imports, remove_all_comments, extract_imports, temp_string,
write_temp_files, determine_indent, with_line_numbers, unpack_hyperopt_vals,
eval_hyperopt_ | space, find_signature_end)
sys.path.append(".")
def minimize(model,
data,
algo,
max_evals,
trials,
functions=None,
rseed=1337,
notebook_name=None,
verbose=True,
| eval_space=False,
return_space=False,
keep_temp=False,
data_args=None):
"""
Minimize a keras model for given data and implicit hyperparameters.
Parameters
----------
model: A function defining a keras model with hyperas templates, which returns a
valid hyperopt results dictionary, e.g.
return {'loss': -acc, 'status': STATUS_OK}
data: A parameter-less function that defines and return all data needed in the above
model definition.
algo: A hyperopt algorithm, like tpe.suggest or rand.suggest
max_evals: Maximum number of optimization runs
trials: A hyperopt trials object, used to store intermediate results for all
optimization runs
rseed: Integer random seed for experiments
notebook_name: If running from an ipython notebook, provide filename (not path)
verbose: Print verbose output
eval_space: Evaluate the best run in the search space such that 'choice's contain actually meaningful values instead of mere indices
return_space: Return the hyperopt search space object (e.g. for further processing) as last return value
keep_temp: Keep temp_model.py file on the filesystem
data_args: Arguments to be passed to data function
Returns
-------
If `return_space` is False: A pair consisting of the results dictionary of the best run and the corresponding
keras model.
If `return_space` is True: The pair of best result and corresponding keras model, and the hyperopt search space
"""
best_run, space = base_minimizer(model=model,
data=data,
functions=functions,
algo=algo,
max_evals=max_evals,
trials=trials,
rseed=rseed,
full_model_string=None,
notebook_name=notebook_name,
verbose=verbose,
keep_temp=keep_temp,
data_args=data_args)
best_model = None
for trial in trials:
vals = trial.get('misc').get('vals')
# unpack the values from lists without overwriting the mutable dict within 'trial'
unpacked_vals = unpack_hyperopt_vals(vals)
# identify the best_run (comes with unpacked values from the hyperopt function `base.Trials.argmin`)
if unpacked_vals == best_run and 'model' in trial.get('result').keys():
best_model = trial.get('result').get('model')
if eval_space is True:
# evaluate the search space
best_run = eval_hyperopt_space(space, best_run)
if return_space is True:
# return the space as well
return best_run, best_model, space
else:
# the default case for backwards compatibility with expanded return arguments
return best_run, best_model
def base_minimizer(model, data, functions, algo, max_evals, trials,
rseed=1337, full_model_string=None, notebook_name=None,
verbose=True, stack=3, keep_temp=False, data_args=None):
if full_model_string is not None:
model_str = full_model_string
else:
model_str = get_hyperopt_model_string(model, data, functions, notebook_name, verbose, stack, data_args=data_args)
temp_file = './temp_model.py'
write_temp_files(model_str, temp_file)
if 'temp_model' in sys.modules:
del sys.modules["temp_model"]
try:
from temp_model import keras_fmin_fnct, get_space
except:
print("Unexpected error: {}".format(sys.exc_info()[0]))
raise
try:
if not keep_temp:
os.remove(temp_file)
os.remove(temp_file + 'c')
except OSError:
pass
try:
# for backward compatibility.
return (
fmin(keras_fmin_fnct,
space=get_space(),
algo=algo,
max_evals=max_evals,
trials=trials,
rseed=rseed,
return_argmin=True),
get_space()
)
except TypeError:
pass
return (
fmin(keras_fmin_fnct,
space=get_space(),
algo=algo,
max_evals=max_evals,
trials=trials,
rstate=np.random.RandomState(rseed),
return_argmin=True),
get_space()
)
def best_ensemble(nb_ensemble_models, model, data, algo, max_evals,
trials, voting='hard', weights=None, nb_classes=None, functions=None):
model_list = best_models(nb_models=nb_ensemble_models,
model=model,
data=data,
algo=algo,
max_evals=max_evals,
trials=trials,
functions=functions)
return VotingModel(model_list, voting, weights, nb_classes)
def best_models(nb_models, model, data, algo, max_evals, trials, functions=None, keep_temp=False):
base_minimizer(model=model,
data=data,
functions=functions,
algo=algo,
max_evals=max_evals,
trials=trials,
stack=4,
keep_temp=keep_temp)
if len(trials) < nb_models:
nb_models = len(trials)
scores = [trial.get('result').get('loss') for trial in trials]
cut_off = sorted(scores, reverse=True)[nb_models - 1]
model_list = [trial.get('result').get('model') for trial in trials if trial.get('result').get('loss') >= cut_off]
return model_list
def get_hyperopt_model_string(model, data, functions, notebook_name, verbose, stack, data_args):
model_string = inspect.getsource(model)
model_string = remove_imports(model_string)
if notebook_name:
notebook_path = os.getcwd() + "/{}.ipynb".format(notebook_name)
with open(notebook_path, 'r') as f:
notebook = nbformat.reads(f.read(), nbformat.NO_CONVERT)
exporter = PythonExporter()
source, _ = exporter.from_notebook_node(notebook)
else:
calling_script_file = os.path.abspath(inspect.stack()[stack][1])
with open(calling_script_file, 'r') as f:
source = f.read()
cleaned_source = remove_all_comments(source)
imports = extract_imports(cleaned_source, verbose)
parts = hyperparameter_names(model_string)
aug_parts = augmented_names(parts)
hyperopt_params = get_hyperparameters(model_string)
space = get_hyperopt_space(parts, hyperopt_params, verbose)
functions_string = retrieve_function_string(functions, verbose)
data_string = retrieve_data_string(data, verbose, data_args)
model = hyperopt_keras_model(model_string, parts, aug_parts, verbose)
temp_str = temp_string(imports, model, data_string, functions_string, space)
return temp_str
def get_hyperopt_space(parts, hyperopt_params, verbose=True):
space = "def get_space():\n return {\n"
for name, param in zip(parts, hyperopt_params):
param = re.sub(r"\(", "('" + name + "', ", param, 1)
space += " '" + name + "': hp." + param + ",\n"
space = space[:-1]
space += "\n }\n"
if verbose:
print('>>> Hyperas search space:\n')
print(space)
|
the-gigi/quote-service | grpc_demo/grpc_quote_server.py | Python | mit | 1,420 | 0.001408 | import random
from concurrent import futures
import time
from collections import defaultdict
import grpc
import sys
from pathlib import Path
sys.path.insert(0, '')
from quote_service_pb2 import QuoteReply
from quote_service_pb2_grpc import (add_QuoterServicer_to_server,
QuoterServicer)
script_dir = Path(__file__).parent
lines = open(str(script_dir / '../quotes.txt')).read().split('\n')
quotes = defaultdict(list)
for line in (x for x in lines if x):
q, a = line.split('~')
quotes[a.strip()].append(q.stri | p())
all_authors = tuple(quotes.keys())
class QuoteService(QuoterServicer):
def GetQuote(self, request, context):
# Choose random author if it re | quested author doesn't has quotes
if request.author in all_authors:
author = request.author
else:
author = random.choice(all_authors)
# Choose random quote from this author
quote = random.choice(quotes[author])
return QuoteReply(quote=quote, author=author)
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
add_QuoterServicer_to_server(QuoteService(), server)
server.add_insecure_port('[::]:5050')
server.start()
print('Started...')
try:
while True:
time.sleep(9999)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
serve()
|
horazont/aioxmpp | tests/presence/__init__.py | Python | lgpl-3.0 | 877 | 0.00114 | # | #######################################################################
# File name: __init__.py
# This file is part of: aioxmpp
#
# LICENSE
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# Li | cense, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
########################################################################
|
dimagi/commcare-hq | corehq/apps/data_interfaces/migrations/0006_case_rule_refactor.py | Python | bsd-3-clause | 4,903 | 0.003671 | # Generated by Django 1.10.6 on 2017-04-04 12:54
import django.db.models.deletion
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('data_interfaces', '0005_remove_match_type_choices'),
]
operations = [
migrations.CreateModel(
name='CaseRuleAction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='CaseRuleCriteria',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='ClosedParentDefinition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('identifier', models.CharField(default='parent', max_length=126)),
('relationship_id', models.PositiveSmallIntegerField(default=1)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CustomActionDefinition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=126)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CustomMatchDefinition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=126)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='MatchPropertyDefinition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('property_name', models.CharField(max_length=126)),
('property_value', models.CharField(max_length=126, null=True)),
('match_type', models.CharField(max_length=15)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='UpdateCaseDefinition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('properties_to_update', jsonfield.fields.JSONField(default=list)),
('close_case', models.BooleanField()),
| ],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='automaticupdaterule',
name='migrated',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='caserulecriteria',
name='closed_parent_definition',
field=models.ForeignK | ey(null=True, on_delete=django.db.models.deletion.CASCADE, to='data_interfaces.ClosedParentDefinition'),
),
migrations.AddField(
model_name='caserulecriteria',
name='custom_match_definition',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='data_interfaces.CustomMatchDefinition'),
),
migrations.AddField(
model_name='caserulecriteria',
name='match_property_definition',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='data_interfaces.MatchPropertyDefinition'),
),
migrations.AddField(
model_name='caserulecriteria',
name='rule',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='data_interfaces.AutomaticUpdateRule'),
),
migrations.AddField(
model_name='caseruleaction',
name='custom_action_definition',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='data_interfaces.CustomActionDefinition'),
),
migrations.AddField(
model_name='caseruleaction',
name='rule',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='data_interfaces.AutomaticUpdateRule'),
),
migrations.AddField(
model_name='caseruleaction',
name='update_case_definition',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='data_interfaces.UpdateCaseDefinition'),
),
]
|
honzamach/pynspect | pynspect/filters.py | Python | mit | 10,618 | 0.005086 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# This file is part of Pynspect package (https://pypi.python.org/pypi/pynspect).
# Originally part of Mentat system (https://mentat.cesnet.cz/).
#
# Copyright (C) since 2016 CESNET, z.s.p.o (http://www.ces.net/).
# Copyright (C) since 2016 Jan Mach <honza.mach.ml@gmail.com>
# Use of this source is governed by the MIT license, see LICENSE file.
#-------------------------------------------------------------------------------
"""
This module provides tools for data filtering based on filtering and query
grammar.
The filtering grammar is thoroughly described in following modules:
* :py:mod:`pynspect.lexer`
Lexical analyzer, descriptions of valid grammar tokens.
* :py:mod:`pynspect.gparser`
Grammar parser, language grammar description
* :py:mod:`pynspect.rules`
Object representation of grammar rules, interface definition
* :py:mod:`pynspect.jpath`
The addressing language JPath.
Please refer to appropriate module for more in-depth information.
There are following main tools in this package:
* :py:class:`DataObjectFilter`
Tool capable of filtering data structures according to given filtering rules.
Available filtering functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* ``size``
Return the size/length of given list. This enables writing rules like events
with more than five source addressess::
size(Source.IP4) > 5
* ``time``
Return current Unix timestamp as ``float``.
* ``utcnow``
Return current date and time in UTC timezone. This enables writing rules like
events with detection time older than two hours::
DetectTime < (utcnow() - 02:00:00)
Example filters
^^^^^^^^^^^^^^^
Following is a non exhaustive list of example filtering rules::
DetectTime < (utcnow() - 02:00:00)
exists EventTime and exists DetectTime and EventTime > DetectTime
Category in ['Anomaly.Connection'] and Source.Type in ['Booter']
Category in ['Attempt.Exploit'] and (Target.Port in [3306] or Source.Proto in ['mysql'] or Target.Proto in ['mysql'])
.. warning::
Be carefull with the grammar function names. Currently, there is a flaw in the expression
grammar that forbids using function names that begin with the same characters as
grammar keywords like 'and', 'le', 'like', etc. For example the name 'len' is not
a valid function name, because there is a collision with 'le' comparison operator.
.. todo::
There is quite a lot of code that needs to be written before actual filtering
can take place. In the future, there should be some kind of object, that will
be tailored for immediate processing and will take care of initializing
uderlying parser, compiler and filter. This object will be designed later.
"""
__author__ = "Jan Mach <jan.mach@cesnet.cz>"
__credits__ = "Pavel Kácha <pavel.kacha@cesnet.cz>"
import time
import datetime
from pynspect.rules import FilteringRuleException
from pynspect.traversers import BaseFilteringTreeTraverser
from pynspect.jpath import jpath_values
#-------------------------------------------------------------------------------
def grfcbk_size(args):
"""
Grammar rule function callback: **size**. This function will count the size of
first item in argument list.
:param list args: List of function arguments.
:return: Size of the first item in argument list.
:rtype: int
"""
return len(args[0])
def grfcbk_strlen(args):
"""
Grammar rule function callback: **strlen**. This function will measure the
string length of all subitems of the first item in argument list.
:param list args: List of function arguments.
:return: Length of all subitems of the first item in argument list.
:rtype: int or list
"""
if not args[0]:
return None
if isinstance(args[0], list):
return [len(x) for x in args[0]]
return len(args[0])
def grfcbk_time(args):
"""
Grammar rule function callback: **time**. This function will call the
:py:func:`time.time` function and return the result.
:param list args: List of function arguments | . Should be empty, but
:return: The time in seconds since the epoch as a floating point number.
:rtype: float
"""
if args:
raise FilteringRuleException("The 'time' function does not take any arguments.")
return time.time()
def grfcbk_utcnow(args):
"""
Grammar rule function callback: **utcnow**. This function will call the
:py:func:`datetime.datetime.utcnow` function and return the result.
:param l | ist args: List of function arguments. Should be empty, but
:return: Current datetime in UTC timezone.
:rtype: datetime.datetime
"""
if args:
raise FilteringRuleException("The 'utcnow' function does not take any arguments.")
return datetime.datetime.utcnow()
#-------------------------------------------------------------------------------
class DataObjectFilter(BaseFilteringTreeTraverser):
"""
Rule tree traverser implementing default object filtering logic.
Following example demonstrates DataObjectFilter usage in conjuction with
PynspectFilterParser::
>>> flt = DataObjectFilter()
>>> psr = PynspectFilterParser()
>>> psr.build()
>>> rule = psr.parse('ID like "e214d2d9"')
>>> result = flt.filter(rule, test_msg)
You may use the built-in shortcuts for parsing and compiling rules:
>>> flt = DataObjectFilter(
... parser = PynspectFilterParser,
... compiler = IDEAFilterCompiler
... )
>>> rule = flt.prepare('(Source.IP4 == 188.14.166.39)')
>>> result = flt.filter(rule, test_msg)
Rule tree can be created by hand/programatically:
>>> rule = ComparisonBinOpRule('OP_GT', VariableRule("ConnCount"), IntegerRule(1))
>>> result = flt.filter(rule, test_msg)
"""
def __init__(self, parser = None, compiler = None):
super(DataObjectFilter, self).__init__()
self.register_function('size', grfcbk_size)
self.register_function('strlen', grfcbk_strlen)
self.register_function('time', grfcbk_time)
self.register_function('utcnow', grfcbk_utcnow)
self.parser = parser
self.compiler = compiler
if callable(self.parser):
self.parser = self.parser()
self.parser.build()
if callable(self.compiler):
self.compiler = self.compiler()
def prepare(self, rule):
"""
Parse and/or compile given rule into rule tree.
:param rule: Filtering grammar rule.
:return: Parsed and/or compiled rule.
"""
if self.parser:
rule = self.parser.parse(rule)
if self.compiler:
rule = self.compiler.compile(rule)
return rule
def filter(self, rule, data):
"""
Apply given filtering rule to given data structure.
:param pynspect.rules.Rule rule: filtering rule to be checked
:param any data: data structure to check against rule, ussually dict
:return: True or False or expression result
:rtype: bool or any
"""
return rule.traverse(self, obj = data)
#---------------------------------------------------------------------------
def ipv4(self, rule, **kwargs):
"""
Implementation of :py:func:`pynspect.traversers.RuleTreeTraverser.ipv4` interface.
"""
return rule.value
def ipv6(self, rule, **kwargs):
"""
Implementation of :py:func:`pynspect.traversers.RuleTreeTraverser.ipv6` interface.
"""
return rule.value
def datetime(self, rule, **kwargs):
"""
Implementation of :py:func:`pynspect.traversers.RuleTreeTraverser.datetime` interface.
"""
return rule.value
def timedelta(self, rule, **kwargs):
"""
Implementation of :py:func:`pynspect.traversers.RuleTreeTraverser.timedelta` interface.
"""
return rule.value
def integer(self, rule, **kwargs):
"""
Implementation of :py:func:`pyn |
robertmattmueller/sdac-compiler | options.py | Python | gpl-3.0 | 1,765 | 0.001133 | # -*- coding: utf-8 -*-
import argparse
import sys
def parse_args():
argparser = argparse.ArgumentParser()
argparser.add_argument(
"domain", help="path to domain pddl file")
argparser.add_argument(
"task", help="path to task pddl file")
argparser.add_argument(
"--full-encoding",
dest="use_partial_encoding", action="store_false",
help="By default we represent facts that occur in multiple "
"mutex groups only in one variable. Using this parameter adds "
"these facts to multiple variables. This can make the meaning "
"of the variables clearer, but increases the number of facts.")
argparser.add_argument(
"--invariant-generation-max-candidates", default=100000, type=int,
help="max number of candidates for invariant generation "
"(default: %(default)d). Set to 0 to disable invariant "
"generation and obtain only binary variables. The limit is "
"needed for grounded input files that would otherwise produce "
"too many candidates.")
argparser.add_argument(
"--invariant-generation-max-time", default=300, type=int,
help="max time for invariant generation (default: %(default)ds)")
argparser.a | dd_argument(
"--viz", action="store_true",
help="visualization for evmdd based action cost transformation")
argparser.add_argument(
"--order", dest="order",
help="path to EVMDD variable ordering file")
return argparser.parse_args()
def copy_args_to_module(args):
module_dict = sys.modules[__name__].__dict__
for key, value in vars(args).items():
module_dict[ | key] = value
def setup():
args = parse_args()
copy_args_to_module(args)
setup()
|
fbradyirl/home-assistant | homeassistant/components/mqtt/vacuum/schema_legacy.py | Python | apache-2.0 | 19,586 | 0.000817 | """Support for Legacy MQTT vacuum."""
import logging
import json
import voluptuous as vol
from homeassistant.components import mqtt
from homeassistant.components.vacuum import (
SUPPORT_BATTERY,
SUPPORT_CLEAN_SPOT,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_STATUS,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
VacuumDevice,
)
from homeassistant.const import ATTR_SUPPORTED_FEATURES, CONF_DEVICE, CONF_NAME
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.icon import icon_for_battery_level
from homeassistant.components.mqtt import (
CONF_UNIQUE_ID,
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
subscription,
)
from . import MQTT_VACUUM_SCHEMA, services_to_strings, strings_to_services
_LOGGER = logging.getLogger(__name__)
SERVICE_TO_STRING = {
SUPPORT_TURN_ON: "turn_on",
SUPPORT_TURN_OFF: "turn_off",
SUPPORT_PAUSE: "pause",
SUPPORT_STOP: "stop",
SUPPORT_RETURN_HOME: "return_home",
SUPPORT_FAN_SPEED: "fan_speed",
SUPPORT_BATTERY: "battery",
SUPPORT_STATUS: "status",
SUPPORT_SEND_COMMAND: "send_comman | d",
SUPPORT_LOCATE: | "locate",
SUPPORT_CLEAN_SPOT: "clean_spot",
}
STRING_TO_SERVICE = {v: k for k, v in SERVICE_TO_STRING.items()}
DEFAULT_SERVICES = (
SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_STOP
| SUPPORT_RETURN_HOME
| SUPPORT_STATUS
| SUPPORT_BATTERY
| SUPPORT_CLEAN_SPOT
)
ALL_SERVICES = (
DEFAULT_SERVICES
| SUPPORT_PAUSE
| SUPPORT_LOCATE
| SUPPORT_FAN_SPEED
| SUPPORT_SEND_COMMAND
)
CONF_SUPPORTED_FEATURES = ATTR_SUPPORTED_FEATURES
CONF_BATTERY_LEVEL_TEMPLATE = "battery_level_template"
CONF_BATTERY_LEVEL_TOPIC = "battery_level_topic"
CONF_CHARGING_TEMPLATE = "charging_template"
CONF_CHARGING_TOPIC = "charging_topic"
CONF_CLEANING_TEMPLATE = "cleaning_template"
CONF_CLEANING_TOPIC = "cleaning_topic"
CONF_DOCKED_TEMPLATE = "docked_template"
CONF_DOCKED_TOPIC = "docked_topic"
CONF_ERROR_TEMPLATE = "error_template"
CONF_ERROR_TOPIC = "error_topic"
CONF_FAN_SPEED_LIST = "fan_speed_list"
CONF_FAN_SPEED_TEMPLATE = "fan_speed_template"
CONF_FAN_SPEED_TOPIC = "fan_speed_topic"
CONF_PAYLOAD_CLEAN_SPOT = "payload_clean_spot"
CONF_PAYLOAD_LOCATE = "payload_locate"
CONF_PAYLOAD_RETURN_TO_BASE = "payload_return_to_base"
CONF_PAYLOAD_START_PAUSE = "payload_start_pause"
CONF_PAYLOAD_STOP = "payload_stop"
CONF_PAYLOAD_TURN_OFF = "payload_turn_off"
CONF_PAYLOAD_TURN_ON = "payload_turn_on"
CONF_SEND_COMMAND_TOPIC = "send_command_topic"
CONF_SET_FAN_SPEED_TOPIC = "set_fan_speed_topic"
DEFAULT_NAME = "MQTT Vacuum"
DEFAULT_PAYLOAD_CLEAN_SPOT = "clean_spot"
DEFAULT_PAYLOAD_LOCATE = "locate"
DEFAULT_PAYLOAD_RETURN_TO_BASE = "return_to_base"
DEFAULT_PAYLOAD_START_PAUSE = "start_pause"
DEFAULT_PAYLOAD_STOP = "stop"
DEFAULT_PAYLOAD_TURN_OFF = "turn_off"
DEFAULT_PAYLOAD_TURN_ON = "turn_on"
DEFAULT_RETAIN = False
DEFAULT_SERVICE_STRINGS = services_to_strings(DEFAULT_SERVICES, SERVICE_TO_STRING)
PLATFORM_SCHEMA_LEGACY = (
mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend(
{
vol.Inclusive(CONF_BATTERY_LEVEL_TEMPLATE, "battery"): cv.template,
vol.Inclusive(
CONF_BATTERY_LEVEL_TOPIC, "battery"
): mqtt.valid_publish_topic,
vol.Inclusive(CONF_CHARGING_TEMPLATE, "charging"): cv.template,
vol.Inclusive(CONF_CHARGING_TOPIC, "charging"): mqtt.valid_publish_topic,
vol.Inclusive(CONF_CLEANING_TEMPLATE, "cleaning"): cv.template,
vol.Inclusive(CONF_CLEANING_TOPIC, "cleaning"): mqtt.valid_publish_topic,
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
vol.Inclusive(CONF_DOCKED_TEMPLATE, "docked"): cv.template,
vol.Inclusive(CONF_DOCKED_TOPIC, "docked"): mqtt.valid_publish_topic,
vol.Inclusive(CONF_ERROR_TEMPLATE, "error"): cv.template,
vol.Inclusive(CONF_ERROR_TOPIC, "error"): mqtt.valid_publish_topic,
vol.Optional(CONF_FAN_SPEED_LIST, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Inclusive(CONF_FAN_SPEED_TEMPLATE, "fan_speed"): cv.template,
vol.Inclusive(CONF_FAN_SPEED_TOPIC, "fan_speed"): mqtt.valid_publish_topic,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(
CONF_PAYLOAD_CLEAN_SPOT, default=DEFAULT_PAYLOAD_CLEAN_SPOT
): cv.string,
vol.Optional(
CONF_PAYLOAD_LOCATE, default=DEFAULT_PAYLOAD_LOCATE
): cv.string,
vol.Optional(
CONF_PAYLOAD_RETURN_TO_BASE, default=DEFAULT_PAYLOAD_RETURN_TO_BASE
): cv.string,
vol.Optional(
CONF_PAYLOAD_START_PAUSE, default=DEFAULT_PAYLOAD_START_PAUSE
): cv.string,
vol.Optional(CONF_PAYLOAD_STOP, default=DEFAULT_PAYLOAD_STOP): cv.string,
vol.Optional(
CONF_PAYLOAD_TURN_OFF, default=DEFAULT_PAYLOAD_TURN_OFF
): cv.string,
vol.Optional(
CONF_PAYLOAD_TURN_ON, default=DEFAULT_PAYLOAD_TURN_ON
): cv.string,
vol.Optional(CONF_SEND_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_SET_FAN_SPEED_TOPIC): mqtt.valid_publish_topic,
vol.Optional(
CONF_SUPPORTED_FEATURES, default=DEFAULT_SERVICE_STRINGS
): vol.All(cv.ensure_list, [vol.In(STRING_TO_SERVICE.keys())]),
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(mqtt.CONF_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(mqtt.CONF_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
}
)
.extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
.extend(mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
.extend(MQTT_VACUUM_SCHEMA.schema)
)
async def async_setup_entity_legacy(
config, async_add_entities, config_entry, discovery_hash
):
"""Set up a MQTT Vacuum Legacy."""
async_add_entities([MqttVacuum(config, config_entry, discovery_hash)])
# pylint: disable=too-many-ancestors
class MqttVacuum(
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
VacuumDevice,
):
"""Representation of a MQTT-controlled legacy vacuum."""
def __init__(self, config, config_entry, discovery_info):
"""Initialize the vacuum."""
self._cleaning = False
self._charging = False
self._docked = False
self._error = None
self._status = "Unknown"
self._battery_level = 0
self._fan_speed = "unknown"
self._fan_speed_list = []
self._sub_state = None
self._unique_id = config.get(CONF_UNIQUE_ID)
# Load config
self._setup_from_config(config)
device_config = config.get(CONF_DEVICE)
MqttAttributes.__init__(self, config)
MqttAvailability.__init__(self, config)
MqttDiscoveryUpdate.__init__(self, discovery_info, self.discovery_update)
MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
def _setup_from_config(self, config):
self._name = config[CONF_NAME]
supported_feature_strings = config[CONF_SUPPORTED_FEATURES]
self._supported_features = strings_to_services(
supported_feature_strings, STRING_TO_SERVICE
)
self._fan_speed_list = config[CONF_FAN_SPEED_LIST]
self._qos = config[mqtt.CONF_QOS]
self._retain = config[mqtt.CONF_RETAIN]
self._command_topic = config.get(mqtt.CONF_COMMAND_TOPIC)
self._set_fan_speed_topic = config.get(CONF_SET_FAN_SPEED_TOPIC)
self._send_command_topic = config.get(CONF_SEND_COMMAND_TOPIC)
self._payloads = {
key: config.get(key)
for key in (
CONF_PAYLOAD_TURN_ON,
CONF_PAYLOAD_TURN_OFF,
CONF_PAYLOAD_RETURN_TO_BASE,
CONF_PAYLOAD_STOP,
|
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractKoreanovelsCom.py | Python | bsd-3-clause | 933 | 0.030011 | def extractKoreanovelsCom(item):
'''
Parser for 'koreanovels.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
if item['title'].startswith("Link ") and item['tags'] == ['RSS']:
return buildReleaseMessageWithType(item, 'Level 1 Skel | eton', vol, chp, frag=frag, postfix=postfix, tl_type='translated')
if item['title'].startswith("MoS Link ") and item['tags'] == ['RSS']:
return buildReleaseMessageWithType(item, 'Master of Strength', vol, chp, frag=frag, postfix=postfix, tl_type='transl | ated')
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
crossbario/crossbar-examples | prompt/cbf.py | Python | apache-2.0 | 3,778 | 0.006617 | import click
from click_repl import register_repl
class CmdGlobal(object):
def __init__(self):
self.current_resource_type = None
self.current_resource = None
def __str__(self):
return 'CmdGlobal(current_resource_type={}, current_resource={})'.format(self.current_resource_type, self.current_resource)
class CmdConfig(object):
def __init__(self):
self.verbose = None
self.resource_type = None
self.resource = None
def __str__(self):
return 'CmdConfig(verbose={}, resource_type={}, resource={})'.format(self.verbose, self.resource_type, self.resource)
global_cfg = CmdGlobal()
@click.group()
@click.option(
'--verbose',
is_flag=True,
default=False
)
@click.pass_context
def cli(ctx, verbose):
print(ctx)
cfg = CmdConfig()
cfg.verbose = verbose
ctx.obj = cfg
@cli.command(name='say')
@click.option(
'--message',
default='Hello, world!',
help='Set the message to say hello',
)
@click.pass_obj
def cmd_say(cfg, message):
click.echo(message)
@cli.group(name='cd', help='change current resource')
@click.pass_obj
def cmd_cd(cfg):
pass
@cmd_cd.command(name='node', help='change current resource')
@click.argument('resource')
@click.pass_obj
def cmd_cd_node(cfg, resource):
"""
Change current resource
"""
global_cfg.current_resource_type = 'node'
global_cfg.current_resource = resource
click.echo(cfg)
click.echo(global_cfg)
@cmd_cd.command(name='worker')
@click.argument('resource')
@click.pass_obj
def cmd_cd_worker(cfg, resource):
global_cfg.current_resource_type = 'worker'
global_cfg.current_resource = resource
click.echo(cfg)
click.echo(global_cfg)
@cmd_cd.command(name='transport')
@click.argument('resource')
@click.pass_obj
def cmd_cd_transport(cfg, resource):
global_cfg.current_resource_type = 'transport'
global_cfg.current_resource = resource
click.echo(cfg)
click.echo(global_cfg)
@cli.group(name='stop', help='Stop a resource.')
@click.pass_obj
def cmd_stop(cfg):
pass
@cmd_stop.command(name='transport', help='Stop a router transport.')
@click.argument('resource')
@click.option(
'--mode',
help='graceful: wait for all clients to disconnect before stopping\n\nimmediate: stop transport forcefully disconnecting all clients',
type=click.Choice(['graceful', 'immediate']),
default='graceful'
)
@click.pass_obj
def cmd_stop_transport(cfg, resource, mode):
cfg.resource_type = 'transport'
cfg.resource = resource
click.echo(cfg)
click.echo(global_cfg)
#@cmd_stop.command(name='worker')
#@click.argument('node')
#@click.argument('worker')
#@click.pass_obj
#def cmd_stop_worker(cfg, node, worker):
# pass
#
#
#@cmd_stop.command(name='realm')
#@click.argument('node')
#@click.argument('worker')
#@click.argument('realm')
#@click.pass_obj
#def cmd_stop_realm(cfg, node, worker, realm):
# pass
@cli.group(name='start')
@click.pass_obj
def cmd_start(cfg):
pass
@cmd_start.command(name='worker')
@click.argument('resource')
@click.option(
    '--type',
    required=True
)
@click.pass_obj
def cmd_start_worker(cfg, resource, type):
    # Not implemented yet. `type` shadows the builtin, but the parameter
    # name is dictated by click's mapping of the `--type` option.
    pass
@cmd_start.command(name='realm')
@click.argument('resource')
@click.option(
    '--name',
    required=True
)
@click.pass_obj
def cmd_start_realm(cfg, resource, name):
    # Not implemented yet.
    pass
#@click.command()
#@click.option('--count', default=1, help='Number of greetings.')
#@click.option('--name', prompt='Your name',
| # help='The person to greet.')
#def hello(count, name):
# """Simple program that greets NAME for a total of COUNT times."""
# for x in range(count):
# click.echo('Hello %s!' % name)
# Register the interactive REPL mode (click-repl) on the top-level CLI group.
register_repl(cli)
if __name__ == '__main__':
    cli()
|
xuweiliang/Codelibrary | openstack_dashboard/dashboards/admin/systemlogs/trans.py | Python | apache-2.0 | 6,683 | 0.004938 | __author__ = 'Zero'
from django.utils.translation import pgettext_lazy
# Maps raw system-log action strings to lazily-translated display labels.
# FIX: the "DELETE VOLUME" and "CREATE VOLUME SNAPSHOT" msgid strings were
# corrupted by stray text; restored to match the surrounding naming pattern.
STATUS_DISPLAY_CHOICES = (
    ("CREATE INSTANCE", pgettext_lazy("Action of an Instance", u"Create Instance")),
    ("DELETE INSTANCE", pgettext_lazy("Action of an Instance", u"Delete Instance")),
    ("UPDATE INSTANCE", pgettext_lazy("Action of an Instance", u"Update Instance")),
    ("EVACUATE", pgettext_lazy("Action of an Instance", u"Evacuate")),
    ("RESTORE INSTANCE", pgettext_lazy("Action of an Instance", u"Restore Instance")),
    ("STOP INSTANCE", pgettext_lazy("Action of an Instance", u"Stop Instance")),
    ("START INSTANCE", pgettext_lazy("Action of an Instance", u"Start")),
    ("REBOOT INSTANCE", pgettext_lazy("Action of an Instance", u"Reboot Instance")),
    ("REBUILD INSTANCE", pgettext_lazy("Action of an Instance", u"Rebuild")),
    ("REVERT RESIZE", pgettext_lazy("Action of an Instance", u"Revert Resize")),
    ("CONFIRM RESIZE", pgettext_lazy("Action of an Instance", u"Confirm Resize")),
    ("RESIZE INSTANCE", pgettext_lazy("Action of an Instance", u"Resize")),
    ("MIGRATE", pgettext_lazy("Action of an Instance", u"Migrate")),
    ("PAUSE", pgettext_lazy("Action of an Instance", u"Pause")),
    ("UNPAUSE", pgettext_lazy("Action of an Instance", u"Unpause")),
    ("SUSPEND", pgettext_lazy("Action of an Instance", u"Suspend")),
    ("RESUME", pgettext_lazy("Action of an Instance", u"Resume")),
    ("RESCUE", pgettext_lazy("Action of an Instance", u"Rescue")),
    ("UNRESCUE", pgettext_lazy("Action of an Instance", u"Unrescue")),
    ("CHANGE PASSWORD", pgettext_lazy("Action of an Instance", u"Change Password")),
    ("SHELVE", pgettext_lazy("Action of an Instance", u"Shelve")),
    ("UNSHELVE", pgettext_lazy("Action of an Instance", u"Unshelve")),
    ("CREATE SNAPSHOT", pgettext_lazy("Action of an Instance", u"Create Snapshot")),
    ("CREATE DEVSNAPSHOT", pgettext_lazy("Action of an Instance", u"Create Devsnapshot")),
    ("REVERT DEVSNAPSHOT", pgettext_lazy("Action of an Instance", u"Revert Devsnapshot")),
    ("DELETE DEVSNAPSHOT", pgettext_lazy("Action of an Instance", u"Delete Devsnapshot")),
    ("UPDATE NAME", pgettext_lazy("Action of an Instance", u"Update Name")),
    ("UPDATE USB", pgettext_lazy("Action of an Instance", u"Update Usb")),
    ("UPDATE SCREEN", pgettext_lazy("Action of an Instance", u"Update Screen")),
    ("REALLOCATE INSTANCE", pgettext_lazy("Action of an Instance", u"Reallocate Instance")),
    ("ATTACH/DETACH CDROM", pgettext_lazy("Action of an Instance", u"Attach/Detach CDrom")),
    ("CREATE AGGREGATE", pgettext_lazy("Action of an Instance", u"Create Aggregate")),
    ("UPDATE AGGREGATE", pgettext_lazy("Action of an Instance", u"Update Aggregate")),
    ("DELETE AGGREGATE", pgettext_lazy("Action of an Instance", u"Delete Aggregate")),
    ("CREATE FLAVOR", pgettext_lazy("Action of an Instance", u"Create Flavor")),
    ("DELETE FLAVOR", pgettext_lazy("Action of an Instance", u"Delete Flavor")),
    ("CREATE VOLUME", pgettext_lazy("Action of a volume", u"Create Volume")),
    ("DELETE VOLUME", pgettext_lazy("Action of a volume", u"Delete Volume")),
    ("UPDATE VOLUME", pgettext_lazy("Action of a volume", u"Update Volume")),
    ("UPDATE VOLUME STATUS", pgettext_lazy("Action of a volume", u"Update Volume Status")),
    ("CREATE VOLUME SNAPSHOT", pgettext_lazy("Action of a volume", u"Create Volume Snapshot")),
    ("DELETE VOLUME SNAPSHOT", pgettext_lazy("Action of a volume", u"Delete Volume Snapshot")),
    ("UPLOAD IMAGE TO VOLUME", pgettext_lazy("Action of a volume", u"Upload Image To Volume")),
    ("REGISTER LICENSE", pgettext_lazy("Action of admin", u"Register License")),
    ("ADD AGENT", pgettext_lazy("Action of a Network", u"Add Agent")),
    ("DELETE AGENT", pgettext_lazy("Action of a Network", u"Delete Agent")),
    ("CREATE PORT", pgettext_lazy("Action of a Network", u"Create Port")),
    ("UPDATE PORT", pgettext_lazy("Action of a Network", u"Update Port")),
    ("DELETE PORT", pgettext_lazy("Action of a Network", u"Delete Port")),
    ("CREATE SUBNET", pgettext_lazy("Action of a Network", u"Create Subnet")),
    ("UPDATE SUBNET", pgettext_lazy("Action of a Network", u"Update Subnet")),
    ("DELETE SUBNET", pgettext_lazy("Action of a Network", u"Delete Subnet")),
    ("CREATE NETWORK", pgettext_lazy("Action of a Network", u"Create Network")),
    ("UPDATE NETWORK", pgettext_lazy("Action of a Network", u"Update Network")),
    ("DELETE NETWORK", pgettext_lazy("Action of a Network", u"Delete Network")),
    ("CREATE TENANT", pgettext_lazy("Action of a Pool", u"Create Tenant")),
    ("UPDATE TENANT", pgettext_lazy("Action of a Pool", u"Update Tenant")),
    ("DELETE TENANT", pgettext_lazy("Action of a Pool", u"Delete Tenant")),
    ("ADD INTERFACE", pgettext_lazy("Action of a Router", u"Add Interface")),
    ("SET GATEWAY", pgettext_lazy("Action of a Router", u"Set Gateway")),
    ("REMOVE INTERFACE", pgettext_lazy("Action of a Router", u"Remove Interface")),
    ("CREATE ROUTER", pgettext_lazy("Action of a Router", u"Create Router")),
    ("UPDATE ROUTER", pgettext_lazy("Action of a Router", u"Update Router")),
    ("DELETE ROUTER", pgettext_lazy("Action of a Router", u"Delete Router")),
    ("DELETE USER", pgettext_lazy("Action of an User", u"Delete User")),
    ("CLEAR GATEWAY", pgettext_lazy("Action of a Router", u"Clear Gateway")),
    ("CREATE USER", pgettext_lazy("Action of an User", u"Create User")),
    ("UPDATE USER", pgettext_lazy("Action of an User", u"Update User")),
    ("TOGGLE USER", pgettext_lazy("Action of an User", u"Toggle User")),
    ("DISABLE USER", pgettext_lazy("Action of an User", u"Disable User")),
    ("ENABLE USER", pgettext_lazy("Action of an User", u"Enable User")),
    ("DELETE IMAGE", pgettext_lazy("Action of an Image", u"Delete Image")),
    ("CREATE IMAGE", pgettext_lazy("Action of an Image", u"Create Image")),
    ("UPDATE IMAGE", pgettext_lazy("Action of an Image", u"Update Image")),
    ("DELETE TEMPLATE", pgettext_lazy("Action of an Image", u"Delete Template")),
    ("ATTACH VOLUME", pgettext_lazy("Action of a volume", u"Attach Volume")),
    ("DETACH VOLUME", pgettext_lazy("Action of a volume", u"Detach Volume")),
    ("LOGIN", pgettext_lazy("Action of admin", u"Login")),
    ("LOGOUT", pgettext_lazy("Action of admin", u"Logout")),
    ("REQUEST REMOTE ASSISTANCE", pgettext_lazy("Action of an Instance", u"Request Remote Assistance")),
    ("WAIT REMOTE ASSISTANCE", pgettext_lazy("Action of an Instance", u"Wait Remote Assistance")),
)
# Maps raw log outcome strings to lazily-translated display labels.
RESULT_DISPLAY_CHOICES = (
    ("SUCCESS", pgettext_lazy("Status of result", u"Success")),
    ("FAILURE", pgettext_lazy("Status of result", u"Failure")),
)
|
jmartinz/pyCrawler | 10.contratacionE/pce_extrae_detalle_contrato.py | Python | apache-2.0 | 7,393 | 0.010299 | # coding=utf-8
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from bs4 import BeautifulSoup
from datetime import datetime
from decimal import *
import sys
#phantonPath = "/home/jmartinz/00.py/phantomjs/phantomjs"
phantonPath = "../phantomjs/phantomjs"
contratacionPage = "https://contrataciondelestado.es/wps/portal/!ut/p/b1/lZDLDoIwEEU_aaYParssrwLxAVZQujEsjMH42Bi_30rcGCPq7CZz7pzkgoOWKC6kYBPYgDt3t37fXfvLuTs-die2PFlEUZpRlJbFSKdxXYvMrybwQOsB_DAah3xopdQh0YislqhFVUXK_0HFnvmARbwpmlLY3CDmWRpPaxKgoeI3_4jgxW_sjPhzwkRAkRhLn_mPAvqn_13wJb8GNyBjDQzAWMXjEgrz7HLaQeuxyVY3SaVzxXARLj1WlLNVaShB5LCCNoGTO6Z-VH7g3R2UoLEz/dl4/d5/L2dBISEvZ0FBIS9nQSEh/pw/Z7_AVEQAI930OBRD02JPMTPG21004/act/id=0/p=javax.servlet.include.path_info=QCPjspQCPbusquedaQCPBusquedaVIS_UOE.jsp/299420689304/-/"
#contratacionPage="https://contrataciondelestado.es"
class detalleContrato():
    """Scrape the details of one tender from contrataciondelestado.es.

    The tender is located by its number (``numExpediente``) and contracting
    body (``OrgContratacion``); the scraped fields are left on the instance.
        driverType: 1 -> Firefox (interactive) / 2 -> PhantomJS (headless)
    """
    driver = ""
    driverType = 1
    estadoLic = ""          # tender status
    procedimiento = ""      # procedure type
    enlacelic = ''          # link to the tender on PLACE
    codigocpv = ''          # CPV classification code
    resultado = ''          # award result (empty until awarded)
    adjudicatario = ''      # awarded company
    numlicitadores = 0      # number of bidders
    impadjudicacion = ''    # award amount (Decimal once parsed, 0 on parse error)
    def __init__(self, numExpediente, OrgContratacion, driverType=1):
        self.driverType = driverType
        self.numExpediente = numExpediente
        self.OrgContratacion = OrgContratacion
        if driverType == 1:
            self.driver = webdriver.Firefox()
        elif driverType == 2:
            self.driver = webdriver.PhantomJS(phantonPath, service_args=['--ignore-ssl-errors=true'])
            self.driver.set_window_size(1120, 550)
        else:
            # Fail fast instead of crashing later with an AttributeError on
            # the placeholder string driver.
            raise ValueError('driverType must be 1 (Firefox) or 2 (PhantomJS)')
        self.extraeDetalles()
    def cargaPagina(self):
        """Load the search page; PhantomJS needs explicit timeouts."""
        if self.driverType == 2:
            self.driver.implicitly_wait(10)
            self.driver.set_page_load_timeout(10)
        try:
            self.driver.get(contratacionPage)
        except TimeoutException as e:
            # Best effort: report the timeout and continue with whatever loaded.
            print(e)
    def debugPhanton(self):
        """Diagnostic helper: dump the raw page source PhantomJS fetched."""
        self.cargaPagina()
        print(self.driver.page_source)
    def extraeDetalles(self):
        """Run the search and populate the instance attributes."""
        self.cargaPagina()
        # Fill in the tender number and contracting body, then search.
        contrato = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:text71ExpMAQ')
        contrato.send_keys(self.numExpediente)
        orgcont = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:texoorganoMAQ')
        orgcont.send_keys(self.OrgContratacion)
        self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:button1').click()
        # Open the first result (only the first expediente is supported).
        self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:enlaceExpediente_0').click()
        # Fields that are always present.
        self.estadoLic = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_Estado').text
        self.procedimiento = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_Procedimiento').text
        self.enlacelic = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_EnlaceLicPLACE').text
        self.codigocpv = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_CPV').text
        # Fields that only exist once the tender has been awarded.
        try:
            self.resultado = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_Resultado').text
            self.adjudicatario = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_Adjudicatario').text
            # European number format: drop thousands dots, comma -> decimal point.
            importe_text = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_ImporteAdjudicacion').text.replace(".","").replace(",",".")
            try:
                self.impadjudicacion = Decimal(importe_text.strip(' "'))
            except (ValueError, TypeError, DecimalException):
                self.impadjudicacion = 0
            numlicitadores_text = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_NumeroLicitadores').text
            try:
                self.numlicitadores = int(numlicitadores_text)
            except ValueError:
                self.numlicitadores = 0
        except NoSuchElementException:
            # BUG FIX: these were previously assigned to throw-away locals
            # (e.g. `resultado = ''`), so the instance kept stale values.
            self.resultado = ''
            self.adjudicatario = ''
            self.numlicitadores = 0
            self.impadjudicacion = ''
        # Scrape the published-documents table from the page source.
        html_page = self.driver.page_source
        soup = BeautifulSoup(html_page, "html5lib")
        self.Documento = {}
        for row in soup.findAll("tr", {'class': ['rowClass1', 'rowClass2']}):
            try:
                fechadoc = datetime.strptime(row.find("td", {'class': 'fechaPubLeft'}).text, '%d/%m/%Y %H:%M:%S')
                tipodoc = row.find("td", {'class': 'tipoDocumento'}).text
                docs = row.find("td", {'class': 'documentosPub'}).findAll('div')
                enlacedoc = docs[0].find('a', href=True)['href']
                self.Documento[tipodoc] = [fechadoc, enlacedoc]
            except (AttributeError, KeyError, IndexError, TypeError, ValueError):
                # Fallback row layout used for "additional documents".
                try:
                    fechadoc = datetime.strptime(row.find(id='viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:TableEx1_Aux:0:textSfecha1PadreGen').text, '%d/%m/%Y %H:%M:%S')
                    tipodoc = row.find(id='viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:TableEx1_Aux:0:textStipo1PadreGen').text
                    enlace = row.find(id='viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:TableEx1_Aux:0:linkVerDocPadreGen')['href']
                    # BUG FIX: was storing `enlacedoc` (stale value from a
                    # previous row, or unbound) instead of `enlace`.
                    self.Documento[tipodoc] = [fechadoc, enlace]
                except (AttributeError, KeyError, TypeError, ValueError):
                    pass
        # Always release the browser once scraping is done.
        self.driver.quit()
# Sólo para probar que funcina
def main(nExp, orgCon):
    """Fetch one tender headlessly (PhantomJS) and dump its fields to stdout."""
    detalles = detalleContrato(numExpediente=nExp, OrgContratacion=orgCon, driverType=2)
    # Print the scalar fields, one per line, in their declaration order.
    for valor in (detalles.estadoLic,
                  detalles.procedimiento,
                  detalles.enlacelic,
                  detalles.codigocpv,
                  detalles.resultado,
                  detalles.adjudicatario,
                  detalles.numlicitadores,
                  detalles.impadjudicacion):
        print(valor)
    # Then each published document: name, date and link.
    for nombre, (fecha, enlace) in detalles.Documento.items():
        print(nombre, "-", fecha, enlace)
if __name__ == "__main__":
    # Expect exactly two CLI arguments: tender number and contracting body.
    if not len(sys.argv) == 3:
        print ('Usage: pce_extrae_detalle_contrato.py numExpediente orgContratacion')
        sys.exit(1)
    sys.exit(main(sys.argv[1], # TODO validate (1 or 2)
                  sys.argv[2], # TODO validate (between 6 and 20)
                  ))
|
putcn/Paddle | python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py | Python | apache-2.0 | 4,981 | 0.000201 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.dataset.flowers as flowers
import math
import paddle.fluid as fluid
import unittest
import numpy as np
import paddle
def Lenet(data, class_dim):
    """LeNet-style classifier: two conv/BN/pool stages plus two FC layers."""
    layers = fluid.layers
    # Stage 1: 32 5x5 filters, batch-norm with ReLU, 2x2 max-pool.
    x = layers.conv2d(data, 32, 5, 1, act=None)
    x = layers.batch_norm(x, act='relu')
    x = layers.pool2d(x, 2, 'max', 2)
    # Stage 2: 50 5x5 filters, batch-norm with ReLU, 2x2 max-pool.
    x = layers.conv2d(x, 50, 5, 1, act=None)
    x = layers.batch_norm(x, act='relu')
    x = layers.pool2d(x, 2, 'max', 2)
    # Classifier head: 500-wide ReLU layer, then softmax over class_dim.
    x = layers.fc(x, size=500, act='relu')
    return layers.fc(x, size=class_dim, act='softmax')
class TestFetchOp(unittest.TestCase):
    """Checks that ParallelExecutor can fetch (almost) every variable."""
    def parallel_exe(self, train_inputs, seed):
        """Train Lenet for a few batches, fetching all non-temporary vars
        and asserting none of the fetched values is NaN/Inf."""
        main = fluid.Program()
        startup = fluid.Program()
        startup.random_seed = seed
        with fluid.program_guard(main, startup):
            data = fluid.layers.data(
                name='image', shape=[3, 224, 224], dtype='float32')
            label = fluid.layers.data(name='label', shape=[1], dtype='int64')
            out = Lenet(data, class_dim=102)
            loss = fluid.layers.cross_entropy(input=out, label=label)
            loss = fluid.layers.mean(loss)
            opt = fluid.optimizer.Momentum(
                learning_rate=0.1,
                momentum=0.9,
                regularization=fluid.regularizer.L2Decay(1e-4))
            opt.minimize(loss)
        # TODO(zcd): I found that once the memory optimizer is open,
        # parallel_exe doesn't fetch some variable, such as conv2d_0.b_0@GRAD,
        # conv2d_1.b_0@GRAD. Those variables should not be pruned.
        # fluid.memory_optimize(main)
        place = fluid.CUDAPlace(0)
        exe = fluid.Executor(place)
        exe.run(startup)
        feeder = fluid.DataFeeder(place=place, feed_list=[data, label])
        pe = fluid.ParallelExecutor(
            use_cuda=True, loss_name=loss.name, main_program=main)
        # Fetch everything except temporaries and leading-underscore names,
        # but always fetch persistable variables.
        fetch_list = []
        all_vars = main.global_block().vars
        for k, v in all_vars.iteritems():  # Python 2 API, matching this file
            # FIX: compare the character with !=, not `is not`, which only
            # worked by accident of CPython string interning.
            if ('tmp' not in k and k[0] != '_') or v.persistable:
                fetch_list.append(k)
        for data in train_inputs:
            ret = pe.run(fetch_list, feed=feeder.feed(data))
            for i in range(len(fetch_list)):
                assert not math.isnan(np.sum(ret[i])) and \
                    not math.isinf(np.sum(ret[i]))
    def test_fetch_op(self):
        """Feed three batches of the flowers test set through parallel_exe."""
        tst_reader = paddle.batch(flowers.test(use_xmap=False), batch_size=16)
        tst_reader_iter = tst_reader()
        # Python 2 iterator protocol (.next()), matching the rest of the file.
        train_inputs = [tst_reader_iter.next() for _ in range(3)]
        self.parallel_exe(train_inputs, seed=1)
class TestFeedParallel(unittest.TestCase):
    # Checks that a multi-device decorated reader can feed ParallelExecutor.
    def test_main(self):
        """Build a Lenet training program and run three batches fed by a
        multi-device reader, printing the loss of each batch."""
        main = fluid.Program()
        startup = fluid.Program()
        startup.random_seed = 1
        with fluid.scope_guard(fluid.core.Scope()):
            with fluid.program_guard(main, startup):
                data = fluid.layers.data(
                    name='image', shape=[3, 224, 224], dtype='float32')
                label = fluid.layers.data(
                    name='label', shape=[1], dtype='int64')
                out = Lenet(data, class_dim=102)
                loss = fluid.layers.cross_entropy(input=out, label=label)
                loss = fluid.layers.mean(loss)
                opt = fluid.optimizer.Momentum(
                    learning_rate=0.1,
                    momentum=0.9,
                    regularization=fluid.regularizer.L2Decay(1e-4))
                opt.minimize(loss)
        place = fluid.CUDAPlace(0)
        feeder = fluid.DataFeeder(place=place, feed_list=[data, label])
        # Wrap the batched reader so it splits each batch across devices.
        reader = feeder.decorate_reader(
            paddle.batch(
                flowers.train(), batch_size=16), multi_devices=True)
        exe = fluid.Executor(place)
        exe.run(startup)
        pe = fluid.ParallelExecutor(
            use_cuda=True, loss_name=loss.name, main_program=main)
        # Run only the first three batches; this is a smoke test, not training.
        for batch_id, data in enumerate(reader()):
            loss_np = np.array(pe.run(feed=data, fetch_list=[loss.name])[0])
            print batch_id, loss_np
            if batch_id == 2:
                break
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
bastorer/SPINSpy | matpy/__init__.py | Python | mit | 506 | 0.011858 | # Initialization for pack | age.
| __author__ = "Ben Storer <bastorer@uwaterloo.ca>"
__date__ = "29th of April, 2015"
# Read in the defined functions. Not strictly necessary,
# but makes usage nicer. i.e. now we can use
# matpy.cheb(5) instead of matpy.cheb.cheb(5).
from .cheb import cheb
from .darkjet import darkjet
#from .circular_map import circular_map
from .FiniteDiff import FiniteDiff
from .lightbalance import lightbalance
# Define what happens when someone uses
# from matpy import *
# Nothing
|
aacoppa/inglorious-gangsters | weather.py | Python | mit | 601 | 0.021631 |
import urllib2,json
def almanac(zip):
    """Return record/normal high and low temperatures (F) for a US zip code.

    Queries the Weather Underground almanac API and returns a dict with the
    keys 'record low', 'record high', 'normal low' and 'normal high'.
    NOTE: `zip` shadows the builtin; the name is kept so existing
    keyword callers are not broken.
    """
    url = "http://api.wunderground.com/api/4997e70515d4cbbd/almanac/q/%d.json"%(zip)
    r = urllib2.urlopen(url)
    try:
        data = json.loads(r.read())
    finally:
        # FIX: the HTTP response was previously never closed.
        r.close()
    alm = data['almanac']
    result = {"record low":alm['temp_low']['record']['F'].encode("ascii"),
              "record high":alm['temp_high']['record']['F'].encode("ascii"),
              "normal low":alm['temp_low']['normal']['F'].encode("ascii"),
              "normal high":alm['temp_high']['normal']['F'].encode("ascii")
              }
    return result
# Ad-hoc smoke test: print the almanac for zip code 11214 on import/run.
print(almanac(11214))
|
JohnKendrick/PDielec | PDielec/GUI/NoteBook.py | Python | mit | 21,778 | 0.009505 | import sys
import copy
import psutil
import os
from PyQt5.QtWidgets import QWidget, QTabWidget
from PyQt5.QtWidgets import QVBoxLayout
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtCore import Qt
from PDielec.GUI.MainTab import MainTab
from PDielec.GUI.SettingsTab import SettingsTab
from PDielec.GUI.PowderScenarioTab import PowderScenarioTab
from PDielec.GUI.SingleCrystalScenarioTab import SingleCrystalScenarioTab
from PDielec.GUI.PlottingTab import PlottingTab
from PDielec.GUI.AnalysisTab import AnalysisTab
from PDielec.GUI.ViewerTab import ViewerTab
from PDielec.GUI.FitterTab import FitterTab
from PDielec.Utilities import Debug
from PDielec.GUI.SpreadSheetManager import SpreadSheetManager
import PDielec.Calculator as Calculator
class NoteBook(QWidget):
    def __init__(self, parent, program, filename, spreadsheet, debug=False, progressbar=None, scripting=False, default_scenario='powder',ncpus=0, threading=False):
        """Build the notebook widget and all of its tabs.

        parent           -- parent widget, also kept as self.app
        program          -- program name handed to MainTab
        filename         -- output file to load ('' means nothing to refresh yet)
        spreadsheet      -- spreadsheet argument handed to MainTab
        debug            -- enable debug printing throughout
        progressbar      -- optional progress bar widget to track
        scripting        -- True when driven by a script (defers refreshes)
        default_scenario -- 'powder' selects PowderScenarioTab, anything
                            else selects SingleCrystalScenarioTab
        ncpus            -- worker count (0 = number of physical cores)
        threading        -- passed to Calculator.get_pool to pick threads
                            over processes
        """
        super(QWidget, self).__init__(parent)
        global debugger
        debugger = Debug(debug,'NoteBook:')
        debugger.print('Start:: Initialising')
        self.app = parent
        self.reader = None
        self.progressbars=[progressbar]
        if progressbar is None:
            self.progressbars = [ ]
        self.progressbar_status = 0
        self.progressbar_maximum = 0
        self.spreadsheet = None
        self.threading = threading
        if default_scenario == 'powder':
            self.currentScenarioTab = PowderScenarioTab
        else:
            self.currentScenarioTab = SingleCrystalScenarioTab
        if ncpus == 0:
            # Default to the physical (not logical) core count.
            self.ncpus = psutil.cpu_count(logical=False)
        else:
            self.ncpus = ncpus
        self.pool = Calculator.get_pool(self.ncpus,self.threading, debugger = debugger)
        self.scripting = scripting
        # Overwriting of files is not allowed with a prompt
        # If scripting is used then overwriting is allowed
        self.overwriting = False
        self.debug = debug
        #jk self.old_tab_index = None
        self.layout = QVBoxLayout()
        # The number of tabs before we have scenarios
        self.tabOffSet = 2
        # Set the plotting tab to None in case a scenario tries to read it
        self.plottingTab = None
        self.settingsTab = None
        self.analysisTab = None
        self.viewerTab = None
        self.fitterTab = None
        self.scenarios = None
        #
        # Initialize tab screen
        #
        self.tabs = QTabWidget(self)
        self.tabs.currentChanged.connect(self.on_tabs_currentChanged)
        self.mainTab = MainTab(self, program, filename, spreadsheet, debug=debug)
        self.settingsTab = SettingsTab(self, debug=debug)
        # A refresh is only needed now when a file was given up front and we
        # are not inside a script (scripts trigger their own refreshes).
        if filename != '' and not self.scripting:
            debugger.print('Refreshing settingsTab in notebook initialisation - filename',filename)
            self.settingsTab.refresh()
        #
        # Open more windows
        #
        debugger.print('Initialising the first scenario')
        self.scenarios = []
        self.scenarios.append( self.currentScenarioTab(self, debug=debug ) )
        self.scenarios[0].setScenarioIndex(0)
        self.scenarios[0].settings['Legend'] = 'Scenario 1'
        debugger.print('Finished adding the first scenario')
        #
        # Open the plotting tab
        #
        self.plottingTab = PlottingTab(self, debug=debug)
        if filename != '' and not self.scripting:
            debugger.print('Refreshing plotting because filename is set')
            self.plottingTab.refresh()
        #
        # Open the Analysis tab
        #
        self.analysisTab = AnalysisTab(self, debug=debug)
        if filename != '' and not self.scripting:
            debugger.print('Refreshing analysis because filename is set')
            self.analysisTab.refresh()
        #
        # Open the Viewer tab
        #
        debugger.print('Initialising the viewer tab')
        self.viewerTab = ViewerTab(self, debug=debug)
        #
        # Open the Fitter tab
        #
        debugger.print('Initialising the fitter tab')
        self.fitterTab = FitterTab(self, debug=debug)
        #
        # Add tabs
        #
        debugger.print('Adding all tabs to the notebook')
        self.tabs.addTab(self.mainTab,'Main')
        self.tabs.addTab(self.settingsTab,'Settings')
        for i,tab in enumerate(self.scenarios):
            tab.requestRefresh()
            self.tabs.addTab(tab,'Scenario '+str(i+1))
        self.tabs.addTab(self.plottingTab,'Plotting')
        self.tabs.addTab(self.analysisTab,'Analysis')
        self.tabs.addTab(self.viewerTab,'3D Viewer')
        self.tabs.addTab(self.fitterTab,'Fitter')
        # Add the tab widget
        self.layout.addWidget(self.tabs)
        self.setLayout(self.layout)
        debugger.print('Finished:: Initialising')
        return
    def requestRefresh(self):
        """Flag the notebook as needing a refresh.

        Only sets self.refreshRequired; the refresh itself is presumably
        performed where the flag is consumed (not visible in this chunk) --
        confirm against the rest of the class.
        """
        debugger.print('Start:: requestRefresh')
        self.refreshRequired = True
        debugger.print('Finished:: requestRefresh')
        return
def addScenario(self,scenarioType=None,copyFromIndex=-2):
"""Add Scenario is used by the script to add a new scenario"""
debugger.print('Start:: addScenario for scenarioType', scenarioType,copyFromIndex)
if copyFromIndex != -2:
# If the copyFromIndex is not -2 then we override the scenarioType
last = self.scenarios[copyFromIndex]
scenarioType = last.scenarioType
debugger.print('scenario type has been set from copyFromIndex',scenarioType)
elif scenarioType == None:
# The default behaviour with no parameters in the call, use the last scenario in the list
last = self.scenarios[-1]
scenarioType = last.scenarioType
debugger.print('scenario type has been set from the last scenario',scenarioType)
else:
# copyFromIndex is default so we find the last scenario of scenarioType in the list
last = None
for scenario in self.scenarios:
if scenarioType == scenario.scenarioType:
last = scenario
# end for
if last is None:
debugger.print('Finished:: addScenario unable to add scenario')
return
# Create a new scenario
if scenarioType == 'Powder':
self.currentScenarioTab = PowderScenarioTab
else:
self.currentScenarioTab = SingleCrystalScenarioTab
# Add the scenario to the end of the list
debugger.print('Appending the new scenario')
self.scenarios.append(self.currentScenarioTab(self, self.debug))
# If we have found a previous scenario of the same time set the settings to it
debugger.print('Checking the value of last',last)
if last is not None:
debugger.print('Copying settings from old to new scenario')
self.scenarios[-1].settings = copy.deepcopy(last.settings)
self.scenarios[-1].refresh()
n = len(self.scenarios)
self.tabs.insertTab(self.tabOffSet+n-1,self.scenarios[-1],'Scenario '+str(n))
self.tabs.setCurrentIndex(self.tabOffSet+n-1)
for i,scenario in enumerate(self.scenarios):
scenario.setScenarioIndex(i)
self.tabs.setTabText(self.tabOffSet+i,'Scenario '+str(i+1))
debugger.print('Finished:: addScenario for scenarioType', scenarioType,copyFromIndex)
return
def print_settings(self, filename=None):
# Print the settings of all the settings that have been used to a file settings.py
debugger.print('Start:: print_settings, filename=',filename)
qf = QFileDialog()
qf.setWindowTitle('Save the program settings to a file')
debugger.print('print_setti |
pmaidens/CMPUT404-project | BloggingAPI/BloggingAPI/migrations/0004_auto_20160331_0018.py | Python | apache-2.0 | 432 | 0 | # -*- coding: utf-8 | -*-
# Generated by Django 1.9 on 2016-03-31 00:18
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Schema-only migration: renames Friend.display_name to Friend.displayName.
    # Generated by Django 1.9 makemigrations; do not edit the operations.
    dependencies = [
        ('BloggingAPI', '0003_post_image'),
    ]
    operations = [
        migrations.RenameField(
            model_name='friend',
            old_name='display_name',
            new_name='displayName',
        ),
    ]
|
alonbl/ovirt-host-deploy | src/plugins/ovirt-host-common/hosted-engine/configureha.py | Python | lgpl-2.1 | 3,975 | 0 | #
# ovirt-host-deploy -- ovirt host deployer
# Copyright (C) 2015 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
"""oVirt Hosted Engine configuration plugin."""
import gettext
import os
from otopi import constants as otopicons
from otopi import filetransaction
from otopi import plugin
from otopi import util
from ovirt_host_deploy import constants as odeploycons
def _(m):
    """Translate *m* using the ovirt-host-deploy gettext domain."""
    return gettext.dgettext('ovirt-host-deploy', m)
@util.export
class Plugin(plugin.PluginBase):
    """oVirt Hosted Engine configuration plugin.

    Maintains the hosted-engine configuration file: rewrites it on deploy
    and empties/deletes it on removal of the host.
    """
    def __init__(self, context):
        super(Plugin, self).__init__(context=context)
    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        condition=lambda self: (
            self.environment[
                odeploycons.HostedEngineEnv.ACTION
            ] == odeploycons.Const.HOSTED_ENGINE_ACTION_REMOVE and
            os.path.exists(odeploycons.FileLocations.HOSTED_ENGINE_CONF)
        )
    )
    def _clear_ha_conf(self):
        """Truncate the hosted-engine config file on host removal."""
        self.logger.info(_('Removing hosted-engine configuration'))
        # Go through a FileTransaction so the change is registered in
        # MODIFIED_FILES and participates in rollback.
        transaction = filetransaction.FileTransaction(
            name=odeploycons.FileLocations.HOSTED_ENGINE_CONF,
            content='',
            modifiedList=self.environment[otopicons.CoreEnv.MODIFIED_FILES],
        )
        self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(transaction)
    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        condition=lambda self: self.environment[
            odeploycons.HostedEngineEnv.ACTION
        ] == odeploycons.Const.HOSTED_ENGINE_ACTION_DEPLOY,
    )
    def _set_ha_conf(self):
        """Write the hosted-engine config file on deploy.

        The file always carries the spice CA certificate path plus one
        key=value line per HOSTED_ENGINE_CONFIG_PREFIX environment entry.
        """
        self.logger.info(_('Updating hosted-engine configuration'))
        prefix = odeploycons.HostedEngineEnv.HOSTED_ENGINE_CONFIG_PREFIX
        lines = [
            'ca_cert={ca_cert}'.format(
                ca_cert=os.path.join(
                    odeploycons.FileLocations.VDSM_TRUST_STORE,
                    odeploycons.FileLocations.VDSM_SPICE_CA_FILE
                ),
            ),
        ]
        for env_key in self.environment:
            if env_key.startswith(prefix):
                lines.append(
                    '{key}={value}'.format(
                        key=env_key.replace(prefix, ''),
                        value=self.environment[env_key],
                    )
                )
        transaction = filetransaction.FileTransaction(
            name=odeploycons.FileLocations.HOSTED_ENGINE_CONF,
            content='\n'.join(lines) + '\n',
            modifiedList=self.environment[otopicons.CoreEnv.MODIFIED_FILES],
        )
        self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(transaction)
    @plugin.event(
        stage=plugin.Stages.STAGE_CLOSEUP,
        condition=lambda self: self.environment[
            odeploycons.HostedEngineEnv.ACTION
        ] == odeploycons.Const.HOSTED_ENGINE_ACTION_REMOVE,
    )
    def _remove_conf(self):
        """At close-up of a removal, delete the (already truncated) file."""
        conf = odeploycons.FileLocations.HOSTED_ENGINE_CONF
        if os.path.exists(conf):
            os.unlink(conf)
# vim: expandtab tabstop=4 shiftwidth=4
|
michaelarnauts/home-assistant | homeassistant/components/light/demo.py | Python | mit | 1,834 | 0 | """
homeassistant.components.light.demo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Demo platform that implements lights.
"""
import random
from homeassistant.components.light import (
Light, ATTR_BRIGHTNESS, ATTR_XY_COLOR)
# Candidate xy colour values; one is picked at random for each light that
# is created without an explicit xy colour.
LIGHT_COLORS = [
    [0.861, 0.3259],
    [0.6389, 0.3028],
    [0.1684, 0.0416]
]
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
    """Register the three hard-coded demo lights with Home Assistant."""
    demo_lights = [
        DemoLight("Bed Light", False),
        DemoLight("Ceiling", True),
        DemoLight("Kitchen", True),
    ]
    add_devices_callback(demo_lights)
class DemoLight(Light):
    """Simulated light used by the demo platform (no polling required)."""
    def __init__(self, name, state, xy=None, brightness=180):
        self._name = name
        self._state = state
        # Fall back to a random preset colour when none is supplied.
        self._xy = xy or random.choice(LIGHT_COLORS)
        self._brightness = brightness
    @property
    def should_poll(self):
        """Demo lights push their own state; never poll them."""
        return False
    @property
    def name(self):
        """Name of the light."""
        return self._name
    @property
    def brightness(self):
        """Brightness of this light between 0..255."""
        return self._brightness
    @property
    def color_xy(self):
        """Current xy colour value."""
        return self._xy
    @property
    def is_on(self):
        """True when the light is on."""
        return self._state
    def turn_on(self, **kwargs):
        """Switch the light on, optionally updating colour and brightness."""
        self._state = True
        self._xy = kwargs.get(ATTR_XY_COLOR, self._xy)
        self._brightness = kwargs.get(ATTR_BRIGHTNESS, self._brightness)
        self.update_ha_state()
    def turn_off(self, **kwargs):
        """Switch the light off."""
        self._state = False
        self.update_ha_state()
davidcaste/fabtools | fabtools/require/python.py | Python | bsd-2-clause | 6,264 | 0.000798 | """
Python environments and packages
================================
This module provides high-level tools for using Python `virtual environments`_
and installing Python packages using the `pip`_ installer.
.. _virtual environments: http://www.virtualenv.org/
.. _pip: http://www.pip-installer.org/
"""
from fabtools.python import (
create_virtualenv,
install,
install_pip,
install_requirements,
is_installed,
is_pip_installed,
virtualenv_exists,
)
from fabtools.python_setuptools import (
install_setuptools,
is_setuptools_installed,
)
from fabtools | .system import UnsupportedFamily, distrib_family
MIN_SETUPTOOLS_VERSION = '0.7'
MIN_PIP_VE | RSION = '1.5'
def setuptools(version=MIN_SETUPTOOLS_VERSION, python_cmd='python'):
    """
    Require `setuptools`_ to be installed.

    If setuptools is not installed, or if a version older than *version*
    is installed, the latest version will be installed.

    .. _setuptools: http://pythonhosted.org/setuptools/
    """
    # NOTE(review): unlike pip() below, *version* is never forwarded to
    # is_setuptools_installed(), so the "older than *version*" promise in
    # the docstring is not actually enforced here -- confirm intent.
    from fabtools.require.deb import package as require_deb_package
    from fabtools.require.rpm import package as require_rpm_package
    if not is_setuptools_installed(python_cmd=python_cmd):
        family = distrib_family()
        # setuptools needs the Python development headers on deb/rpm systems.
        if family == 'debian':
            require_deb_package('python-dev')
        elif family == 'redhat':
            require_rpm_package('python-devel')
        elif family == 'arch':
            pass  # ArchLinux installs header with base package
        else:
            raise UnsupportedFamily(supported=['debian', 'redhat', 'arch'])
        install_setuptools(python_cmd=python_cmd)
def pip(version=MIN_PIP_VERSION, pip_cmd='pip', python_cmd='python'):
    """
    Require `pip`_ to be installed, at least at *version*.

    setuptools is required (and installed when needed) first; pip
    itself is (re)installed only when missing or too old.

    .. _pip: http://www.pip-installer.org/
    """
    setuptools(python_cmd=python_cmd)
    if is_pip_installed(version, pip_cmd=pip_cmd):
        return
    install_pip(python_cmd=python_cmd)
def package(pkg_name, url=None, pip_cmd='pip', python_cmd='python',
            allow_external=False, allow_unverified=False, **kwargs):
    """
    Require a single Python package, installing it with the
    `pip installer`_ when not already present.  Package names are
    case insensitive.

    Since pip 1.5, externally hosted and unverified files are refused
    by default; pass ``allow_external=True`` or
    ``allow_unverified=True`` to opt back in for this package.

    ::

        from fabtools.python import virtualenv
        from fabtools import require

        # Install package system-wide (not recommended)
        require.python.package('foo', use_sudo=True)

        # Install package in an existing virtual environment
        with virtualenv('/path/to/venv'):
            require.python.package('bar')

    .. _pip installer: http://www.pip-installer.org/
    """
    pip(MIN_PIP_VERSION, python_cmd=python_cmd)
    if is_installed(pkg_name, pip_cmd=pip_cmd):
        return
    # A direct URL, when given, takes precedence over the bare name.
    target = url or pkg_name
    install(target,
            pip_cmd=pip_cmd,
            allow_external=[target] if allow_external else [],
            allow_unverified=[target] if allow_unverified else [],
            **kwargs)
def packages(pkg_list, pip_cmd='pip', python_cmd='python',
             allow_external=None, allow_unverified=None, **kwargs):
    """
    Require several Python packages at once.  Names are case
    insensitive; packages that are already installed are skipped.

    ``allow_external`` / ``allow_unverified`` take lists of package
    names for which pip's (post-1.5) refusal of externally hosted or
    unverified files should be relaxed.
    """
    if allow_external is None:
        allow_external = []
    if allow_unverified is None:
        allow_unverified = []
    pip(MIN_PIP_VERSION, python_cmd=python_cmd)
    # Only hand pip the packages that are actually missing.
    missing = [pkg for pkg in pkg_list
               if not is_installed(pkg, pip_cmd=pip_cmd)]
    if missing:
        install(missing,
                pip_cmd=pip_cmd,
                allow_external=allow_external,
                allow_unverified=allow_unverified,
                **kwargs)
def requirements(filename, pip_cmd='pip', python_cmd='python',
                 allow_external=None, allow_unverified=None, **kwargs):
    """
    Require the Python packages listed in a pip `requirements file`_.

    ``allow_external`` / ``allow_unverified`` take lists of package
    names exempted from pip's (post-1.5) refusal of externally hosted
    or unverified files.

    ::

        from fabtools.python import virtualenv
        from fabtools import require

        # Install requirements in an existing virtual environment
        with virtualenv('/path/to/venv'):
            require.python.requirements('requirements.txt')

    .. _requirements file: http://www.pip-installer.org/en/latest/requirements.html
    """
    pip(MIN_PIP_VERSION, python_cmd=python_cmd)
    install_requirements(filename,
                         pip_cmd=pip_cmd,
                         allow_external=allow_external,
                         allow_unverified=allow_unverified,
                         **kwargs)
def virtualenv(directory, system_site_packages=False, venv_python=None,
               use_sudo=False, user=None, clear=False, prompt=None,
               virtualenv_cmd='virtualenv', pip_cmd='pip', python_cmd='python'):
    """
    Require a Python `virtual environment`_ rooted at *directory*,
    creating it (and the ``virtualenv`` tool itself) when missing.

    ::

        from fabtools import require

        require.python.virtualenv('/path/to/venv')

    .. _virtual environment: http://www.virtualenv.org/
    """
    # The virtualenv tool itself is installed system-wide (use_sudo=True).
    package('virtualenv', use_sudo=True, pip_cmd=pip_cmd, python_cmd=python_cmd)
    if virtualenv_exists(directory):
        return
    create_virtualenv(directory,
                      system_site_packages=system_site_packages,
                      venv_python=venv_python,
                      use_sudo=use_sudo,
                      user=user,
                      clear=clear,
                      prompt=prompt,
                      virtualenv_cmd=virtualenv_cmd)
|
ducky64/labelmaker | labelmaker.py | Python | gpl-2.0 | 5,429 | 0.016025 | import argparse
import csv
import codecs
import configparser
import xml.etree.ElementTree as ET
import re
from SvgTemplate import SvgTemplate, TextFilter, ShowFilter, BarcodeFilter, StyleFilter, SvgFilter
from SvgTemplate import clean_units, units_to_pixels, strip_tag
class LabelmakerInputException(Exception):
    """Raised when a required label-sheet configuration value is missing."""
    pass

def config_get(config, section, option, desc):
    """Return ``config[section][option]`` as a string, failing loudly when absent.

    config: a configparser.ConfigParser instance.
    section, option: the key to look up.
    desc: human-readable description used in the error message.

    Raises LabelmakerInputException when the option is not present.
    (The original used ``assert False``, which is stripped under
    ``python -O`` and silently returned None; the purpose-built
    exception class above was defined but never used.)
    """
    val = config.get(section, option, fallback=None)
    if val is None:
        raise LabelmakerInputException(
            "Configuration not specified for %s.%s (%s)" % (section, option, desc))
    return val
# Entry point: lays out one SVG label per CSV row onto fixed-size sheet
# pages, writing <output>_<page>.svg files.
# NOTE(review): the stray " | " fragments below are dataset-split
# artifacts, not valid Python.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Generate label sheet from SVG template")
    parser.add_argument('template', type=str,
                        help="SVG label template")
    parser.add_argument('config', type=str,
                        help="label sheet configuration")
    parser.add_argument('data', type=str,
                      | help="CSV data")
    parser.add_argument('output', type=str,
                        help="SVG generated labels output")
    parser.add_argument('--on | ly', type=str, default=None,
                        help="only process rows which have this key nonempty")
    parser.add_argument('--start_row', type=int, default=0,
                        help="starting row, zero is topmost")
    parser.add_argument('--start_col', type=int, default=0,
                        help="starting column, zero is leftmost")
    parser.add_argument('--dir', type=str, default='col',
                        choices=['col', 'row'],
                        help="direction labels are incremented in")
    args = parser.parse_args()
    ET.register_namespace('', "http://www.w3.org/2000/svg")
    data_reader = csv.DictReader(codecs.open(args.data, encoding='utf-8'))
    # --only accepts either "key" (row[key] must be nonempty) or
    # "key=value" (row[key] must equal value).
    if args.only:
        if '=' in args.only:
            split = args.only.split('=')
            assert len(split) == 2
            only_parse_key = split[0]
            only_parse_val = split[1]
        else:
            only_parse_key = args.only
            only_parse_val = None
    else:
        only_parse_key = None
    config = configparser.ConfigParser()
    config.read(args.config)
    template = SvgTemplate(args.template, [TextFilter(),
                                           ShowFilter(),
                                           BarcodeFilter(),
                                           StyleFilter(),
                                           SvgFilter(),
                                           ])
    # Get the filename without the SVG extension so the page number can be added
    if args.output[-4:].lower() == '.svg'.lower():
        output_name = args.output[:-4]
    else:
        output_name = args.output
    # Sheet geometry from the [sheet] section of the config file.
    num_rows = int(config_get(config, 'sheet', 'nrows', "number of rows (vertical elements)"))
    num_cols = int(config_get(config, 'sheet', 'ncols', "number of columns (horizontal elements)"))
    offx = units_to_pixels(config_get(config, 'sheet', 'offx', "initial horizontal offset"))
    offy = units_to_pixels(config_get(config, 'sheet', 'offy', "initial vertical offset"))
    incx = units_to_pixels(config_get(config, 'sheet', 'incx', "horizontal spacing"))
    incy = units_to_pixels(config_get(config, 'sheet', 'incy', "vertical spacing"))
    sheet_sizex = config_get(config, 'sheet', 'sizex', "sheet width")
    sheet_sizey = config_get(config, 'sheet', 'sizey', "sheet height")
    sheet_pixx = units_to_pixels(sheet_sizex)
    sheet_pixy = units_to_pixels(sheet_sizey)
    # "minor" axis advances fastest; "major" axis advances on wrap.
    if args.dir == 'row':
        min_spacing = incx
        maj_spacing = incy
        min_max = num_cols
        maj_max = num_rows
        curr_min = args.start_col
        curr_maj = args.start_row
    elif args.dir == 'col':
        min_spacing = incy
        maj_spacing = incx
        min_max = num_rows
        maj_max = num_cols
        curr_min = args.start_row
        curr_maj = args.start_col
    else:
        assert False
    assert curr_min < min_max, "starting position exceeds bounds"
    assert curr_maj < maj_max, "starting position exceeds bounds"
    curr_page = 0
    output = None
    for row in data_reader:
        if only_parse_key:
            if ((only_parse_val is None and not row[only_parse_key]) or
                (only_parse_val is not None and row[only_parse_key] != only_parse_val)):
                continue
        # Lazily open a new page the first time a label lands on it.
        if output == None:
            output = template.clone_base()
            svg_elt = output.getroot()
            assert strip_tag(svg_elt.tag) == 'svg'
            # TODO: support inputs which don't start at (0, 0)
            svg_elt.set('width', clean_units(sheet_sizex))
            svg_elt.set('height', clean_units(sheet_sizey))
            svg_elt.set('viewBox', '0 0 %s %s' %
                        (sheet_pixx * template.get_viewbox_correction(), sheet_pixy * template.get_viewbox_correction()))
        if args.dir == 'row':
            pos_x = offx + curr_min * incx
            pos_y = offy + curr_maj * incy
        elif args.dir == 'col':
            pos_y = offy + curr_min * incy
            pos_x = offx + curr_maj * incx
        else:
            assert False
        # TODO: make namespace parsing & handling general
        new_group = ET.SubElement(output.getroot(), "{http://www.w3.org/2000/svg}g",
                                  attrib={"transform": "translate(%f ,%f)" % (pos_x, pos_y)})
        for elt in template.generate(row):
            new_group.append(elt)
        # Advance the cursor; wrap minor -> major -> new page.
        curr_min += 1
        if curr_min == min_max:
            curr_min = 0
            curr_maj += 1
        if curr_maj == maj_max:
            output.write("%s_%i.svg" % (output_name, curr_page))
            curr_maj = 0
            curr_page += 1
            output = None
    # Flush a partially filled final page, if any.
    if output is not None:
        output.write("%s_%i.svg" % (output_name, curr_page))
|
iaddict/mercurial.rb | vendor/mercurial/tests/silenttestrunner.py | Python | mit | 593 | 0.003373 | import unittest, sys
# NOTE(review): Python 2 source (print statements); the " | " fragment
# below is a dataset-split artifact.
def main(modulename):
    '''run the tests found in module, printing nothing when all tests pass'''
    module = sys.modules[modulename]
    suite = unittest.defaultTestLoader.loadT | estsFromModule(module)
    results = unittest.TestResult()
    suite.run(results)
    # Report only on failure, then exit nonzero so callers can detect it.
    if results.errors or results.failures:
        for tc, exc in results.errors:
            print 'ERROR:', tc
            print
            sys.stdout.write(exc)
        for tc, exc in results.failures:
            print 'FAIL:', tc
            print
            sys.stdout.write(exc)
        sys.exit(1)
| |
yamt/tempest | tempest/services/compute/json/images_client.py | Python | apache-2.0 | 5,741 | 0 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from six.moves.urllib import parse as urllib
from tempest_lib import exceptions as lib_exc
from tempest.api_schema.response.compute.v2_1 import images as schema
from tempest.common import service_client
from tempest.common import waiters
class ImagesClientJSON(service_client.ServiceClient):
    """JSON REST client for the Nova (compute) images API.

    Every call validates the HTTP response against the v2.1 JSON
    schema before returning a ResponseBody/ResponseBodyList wrapper.
    NOTE(review): the " | " fragments further down are dataset-split
    artifacts, not part of the original source.
    """
    def create_image(self, server_id, name, meta=None):
        """Creates an image of the original server."""
        post_body = {
            'createImage': {
                'name': name,
            }
        }
        if meta is not None:
            post_body['createImage']['metadata'] = meta
        post_body = json.dumps(post_body)
        resp, body = self.post('servers/%s/action' % str(server_id),
                               post_body)
        self.validate_response(schema.create_image, resp, body)
        return service_client.ResponseBody(resp, body)
    def list_images(self, params=None):
        """Returns a list of all images filtered by any parameters."""
        url = 'images'
        if params:
            url += '?%s' % urllib.urlencode(params)
        resp, body = self.get(url)
        body = json.loads(body)
        self.validate_response(schema.list_images, resp, body)
        return service_client.ResponseBodyList(resp, body['images'])
    def list_images_with_detail(self, params=None):
        """Returns a detailed list of images filtered by any parameters."""
        url = 'images/detail'
        if params:
            url += '?%s' % urllib.urlencode(params)
        resp, body = self.get(url)
        body = json.loads(body)
        self.validate_response(schema.list_images_details, resp, body)
        return service_client.ResponseBodyList(resp, body['images'])
    def show_image(self, image_id):
        """Returns the details of a single image."""
        resp, body = self.get("images/%s" % str(image_id))
        self.expected_success(200, resp.status)
        body = json.loads(body)
        self.validate_response(schema.get_image, resp, body)
        return service_client.ResponseBody(resp, body['image'])
    def delete_image(self, image_id):
        """Deletes the provided image."""
        resp, body = self.delete("images/%s" % str(image_id))
        self.validate_response(schema.delete, resp, body)
        return service_client.ResponseBody(resp, body)
    def wait_for_image_status(self, image_id, status):
        """Waits for an image to reach a given status."""
        waiters.wait_for_image_status(self, image_id, status)
    def list_image_metadata(self, image_id):
        """Lists all metadata items for an image."""
        resp, body = self.get("images/%s/metadata" % str(image_id))
        body = json.loads(body)
        self.validate_response(schema.image_metadata, resp, body)
        return service_client.ResponseBody(resp, body['metadata'])
    def set_image_metadata(self, image_id, meta):
        """Sets the metadata for an image."""
        post_body = json.dumps({'metadata': meta})
        resp, body = self.put('images/%s/metadata' % str(image_id), post_body)
        body = json.loads(body)
        self.validate_response(schema.image_metadata, resp, body)
        return service_client.ResponseBody(resp, body['metadata'])
    def update_image_metadata(self, image_id, meta):
        """Updates the metadata for an image."""
        post_body = json.dumps({'metadata': meta})
        resp, body = self.post('images/%s/metadata' % str(image_id), post_body)
        body = json.loads(body)
        self.validate_response(schema.image_metadata, resp, body)
        return service_client.ResponseBody(resp, body['metadata'])
    def get_image_metadata_item(self, image_id, key):
        """Returns the value for a specific image metadata key."""
        resp, body = self.get("images/%s/metadata/%s" % (str(image_id), key))
        body = json.loads(body)
        self.validate_response(schema.image_meta_item, resp, body)
        return service_client.ResponseBody(resp, body['meta'])
    def set_image_metadata_item(self, image_id, key, meta):
        """Sets the value for a specific image metadata key."""
        post_body = json.dumps({'meta': meta})
        resp, body = self.put('images/%s/metadata/%s' % (str(image_id), key),
                              post_body)
        body = json.loads(body)
        self.validate_response(schema.image_meta_item, resp, body)
        return service_client.Re | sponseBody(resp, body['meta'])
    def delete_image_metadata_item(self, image_id, key):
        """Deletes a single image metadata key/value pair."""
        resp, body = self.delete("images/%s/metadata/%s" %
                                 (str(image_i | d), key))
        self.validate_response(schema.delete, resp, body)
        return service_client.ResponseBody(resp, body)
    def is_resource_deleted(self, id):
        # Used by the cleanup machinery: deletion is complete once the
        # image lookup raises NotFound.
        try:
            self.show_image(id)
        except lib_exc.NotFound:
            return True
        return False
    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'image'
|
dockerera/func | func/minion/AuthedXMLRPCServer.py | Python | gpl-2.0 | 5,696 | 0.00316 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Copyright 2005 Dan Williams <dcbw@redhat.com> and Red Hat, Inc.
# Modifications by Seth Vidal - 2007
import sys
import socket
import SimpleXMLRPCServer
from certmaster import SSLCommon
import OpenSSL
import So | cketServer
# NOTE(review): Python 2 source; the " | " fragment in the docstring
# below is a dataset-split artifact.
class AuthedSimpleXMLRPCRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
    # For some reason, httplib closes the connection right after headers
    # have been sent if the connection is _not_ HTTP/1.1, which results in
    # a "Bad file descriptor" error when the client tries to read from the socket
    protocol_version = "HTTP/1.1"
    def setup(self):
        """
        We need to use socket._fileobject Because SSL.Connection
        doesn't have a 'dup'. Not exactly | sure WHY this is, but
        this is backed up by comments in socket.py and SSL/connection.c
        """
        self.connection = self.request # for doPOST
        self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
        self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
    def do_POST(self):
        # Stash the raw request so the server's _dispatch() can pull the
        # peer certificate for authorization checks.
        self.server._this_request = (self.request, self.client_address)
        try:
            SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.do_POST(self)
        except socket.timeout:
            pass
        except (socket.error, OpenSSL.SSL.SysCallError), e:
            sys.stderr.write("Error (%s): socket error - '%s'\n" % (self.client_address, e))
class BaseAuthedXMLRPCServer(SocketServer.ThreadingMixIn):
    """Mixin holding shared state for the authed XML-RPC server variants."""
    def __init__(self, address, authinfo_callback=None):
        # collect_children is only called in process_request, so at least the last process
        # forked is not collected and becomes zombie. workaround it by setting the timeout
        # also see; http://bugs.python.org/issue11109
        self.timeout = 3
        self.allow_reuse_address = 1
        self.logRequests = 1
        # Optional callable(request, client_address) -> auth info.
        self.authinfo_callback = authinfo_callback
        self.funcs = {}
        self.instance = None
    def get_authinfo(self, request, client_address):
        # Delegates to the callback when one was supplied; None otherwise.
        if self.authinfo_callback:
            return self.authinfo_callback(request, client_address)
        return None
class AuthedSSLXMLRPCServer(BaseAuthedXMLRPCServer, SSLCommon.BaseSSLServer, SimpleXMLRPCServer.SimpleXMLRPCServer):
    """ Extension to allow more fine-tuned SSL handling """
    def __init__(self, address, pkey, cert, ca_cert, authinfo_callback=None, timeout=None):
        BaseAuthedXMLRPCServer.__init__(self, address, authinfo_callback)
        # allow_none only exists on Python >= 2.5; NOTE(review): a tuple
        # compare (sys.version_info < (2, 5)) would be the clearer test.
        if sys.version_info[0] <= 2 and sys.version_info[1] <= 4:
            SimpleXMLRPCServer.SimpleXMLRPCServer.__init__(self, address, AuthedSimpleXMLRPCRequestHandler)
        else:
            SimpleXMLRPCServer.SimpleXMLRPCServer.__init__(self, address, AuthedSimpleXMLRPCRequestHandler, allow_none=True)
        SSLCommon.BaseSSLServer.__init__(self, address, AuthedSimpleXMLRPCRequestHandler, pkey, cert, ca_cert, timeout=timeout)
class AuthedXMLRPCServer(BaseAuthedXMLRPCServer, SSLCommon.BaseServer, SimpleXMLRPCServer.SimpleXMLRPCServer):
    """Plain (non-SSL) variant of the authed XML-RPC server."""
    def __init__(self, address, authinfo_callback=None):
        BaseAuthedXMLRPCServer.__init__(self, address, authinfo_callback)
        SSLCommon.BaseServer.__init__(self, address, AuthedSimpleXMLRPCRequestHandler)
###########################################################
# Testing code only
###########################################################
# Test-only request handler exposing a single 'ping' RPC method.
class ReqHandler:
    def ping(self, callerid, trynum):
        print callerid
        print trynum
        return "pong %d / %d" % (callerid, trynum)
class TestServer(AuthedSSLXMLRPCServer):
    """
    SSL XMLRPC server that authenticates clients based on their certificate.
    """
    def __init__(self, address, pkey, cert, ca_cert):
        AuthedSSLXMLRPCServer.__init__(self, address, pkey, cert, ca_cert, self.auth_cb)
    def _dispatch(self, method, params):
        # Introspection hooks used by e.g. IPython tab completion.
        if method == 'trait_names' or method == '_getAttributeNames':
            return dir(self)
        # if we have _this_request then we get the peer cert from it
        # handling all the authZ checks in _dispatch() means we don't even call the method
        # for whatever it wants to do and we have the method name.
        if hasattr(self, '_this_request'):
            r,a = self._this_request
            p = r.get_peer_certificate()
            print dir(p)
            print p.get_subject()
        else:
            print 'no cert'
        # Demo stub: never calls the actual method.
        return "your mom"
    def auth_cb(self, request, client_address):
        # Identify the caller by the CN of its client certificate.
        peer_cert = request.get_peer_certificate()
        return peer_cert.get_subject().CN
# Manual test entry point: start the demo SSL XML-RPC server on
# localhost:51234 with the key/cert/CA paths given on the command line.
if __name__ == '__main__':
    if len(sys.argv) < 4:
        print "Usage: python AuthdXMLRPCServer.py key cert ca_cert"
        sys.exit(1)
    pkey = sys.argv[1]
    cert = sys.argv[2]
    ca_cert = sys.argv[3]
    print "Starting the server."
    server = TestServer(('localhost', 51234), pkey, cert, ca_cert)
    h = ReqHandler()
    server.register_instance(h)
    server.serve_forever()
|
Elbandi/PyMunin | pysysinfo/phpfpm.py | Python | gpl-3.0 | 2,487 | 0.006836 | """Implements PHPfpmInfo Class for gathering stats from PHP FastCGI Process
Manager using the status page.
The status interface of PHP FastCGI Process Manager must be enabled.
"""
import re
import util
__author__ = "Ali Onur Uyar"
__copyright__ = "Copyright 2011, Ali Onur Uyar"
__credits__ = []
__license__ = "GPL"
__version__ = "0.9.12"
__maintainer__ = "Ali Onur Uyar"
__email__ = "aouyar at gmail.com"
__status__ = "Development"
defaultHTTPport = 80
defaultHTTPSport = 443
class PHPfpmInfo:
    """Retrieve stats from the PHP FastCGI Process Manager status page.

    (The original docstring said "from APC" -- a copy-paste slip; this
    class scrapes the PHP-FPM status page.)
    NOTE(review): the " | " fragment mid-class is a dataset-split
    artifact, not part of the original source.
    """
    def __init__(self, host=None, port=None, user=None, password=None,
                 monpath=None, ssl=False):
        """Initialize URL for PHP FastCGI Process Manager status page.

        @param host:     Web Server Host. (Default: 127.0.0.1)
        @param port:     Web Server Port. (Default: 80, SSL: 443)
        @param user:     Username. (Only needed when the status page
                         requires authentication.)
        @param password: Password. (Only needed when the status page
                         requires authentication.)
        @param monpath:  PHP FPM path relative to Document Root.
                         (Default: fpm_status.php)
        @param ssl:      Use SSL if True. (Default: False)
        """
        if host is not None:
            self._host = host
        else:
            self._host = '127.0.0.1'
        if port is not None:
            self._port = int(port)
        else:
            # Default port follows the protocol choice.
            if ssl:
                self._port = defaultHTTPSport
            else:
                self._port = defaultHTTPport
         | self._user = user
        self._password = password
        if ssl:
            self._proto = 'https'
        else:
            self._proto = 'http'
        if monpath:
            self._monpath = monpath
        else:
            self._monpath = 'fpm_status.php'
    def getStats(self):
        """Query and parse Web Server Status Page.

        Returns a dict mapping each "key: value" line of the status
        page to its parsed value.
        """
        url = "%s://%s:%d/%s" % (self._proto, self._host, self._port,
                                 self._monpath)
        response = util.get_url(url, self._user, self._password)
        stats = {}
        for line in response.splitlines():
            mobj = re.match('([\w\s]+):\s+(\w+)$', line)
            if mobj:
                stats[mobj.group(1)] = util.parse_value(mobj.group(2))
        return stats
|
brahle/fitmarket-python-api | test/test_status_api.py | Python | apache-2.0 | 2,605 | 0.001543 | # coding: utf-8
"""
Fitmarket
Mali broj ljudi - donori - dijele dnevna mjerenja svoje težine. Iz dnevne težine jednog donora određujemo vrijednosti dviju dionica: - dionica X ima vrijednost koja odgovara težini donora na taj dan. - inverzna dionica ~X ima vrijednost (150 kg - X). Primjetimo da: - kako X raste, ~X pada. - X + ~X = 150 kg Svaki igrač počinje igru sa 10,000 kg raspoloživog novca. Igrač koristi taj novac za trgovanje dionicama. Ukupna vrijednost igrača je zbroj rapoloživog novca i aktualne vrijednosti svih dionica koje posjeduje. Cilj igre je maksimizirati ukupnu vrijednost dobrim predviđanjem kretanja vrijednosti dionica. Na primjer, u prvom danu igrac kupi 125 dionica \"X\" za 80 kg. U drugom danu, dionica naraste na 82 kg. Ako igrac proda sve dionice \"X\", zaradio je 2 kg * 125 = 250 kg! Igra ne dopušta donoru da trguje vlastitim dionicama.
OpenAPI spec version: 1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import fitmarket_api
from fitmarket_api.rest import ApiException
from fitmarket_api.apis.status_api import StatusApi
class TestStatusApi(unittest.TestCase):
    """ StatusApi unit test stubs (generated by swagger-codegen; bodies
    are placeholders to be filled in).  Docstrings translated from
    Croatian. """
    def setUp(self):
        self.api = fitmarket_api.apis.status_api.StatusApi()
    def tearDown(self):
        pass
    def test_actual_state_get(self):
        """
        Test case for actual_state_get

        Fetches JSON with the current prices of all stocks.
        """
        pass
    def test_mystate_get(self):
        """
        Test case for mystate_get

        Fetches JSON showing the user's total value, uninvested value,
        and the values invested in stocks.
        """
        pass
    def test_plot_txt_get(self):
        """
        Test case for plot_txt_get

        Fetches CSV with the prices of all stocks from all previous
        measurements.
        """
        pass
if __name__ == '__main__':
    unittest.main()
|
jenniferwx/Programming_Practice | FindNextHigherNumberWithSameDigits.py | Python | bsd-3-clause | 578 | 0.020761 | '''
Given a number, find the next higher number using only the digits in the given number.
For example if the given number is 1234, next higher number with same digits is 1243
'''
def FindNext(num):
    """Return the next higher number using exactly the digits of *num*.

    Classic "next lexicographic permutation" over the decimal digits:
    e.g. 1234 -> 1243.  When no higher arrangement exists (digits are
    already non-increasing, e.g. 4321), *num* itself is returned.

    Bug fixed: the original chose ``temp[temp.index(current) + 1]``,
    which with repeated digits can select another copy of the pivot
    digit instead of a strictly greater one (FindNext(1151) returned
    1115 -- *lower* than the input).  We now take the smallest suffix
    digit strictly greater than the pivot (correct: 1151 -> 1511).
    """
    digits = str(num)
    # Scan right-to-left for the first digit smaller than its right
    # neighbour (the "pivot").
    for i in range(len(digits) - 2, -1, -1):
        pivot = digits[i]
        if pivot < digits[i + 1]:
            suffix = sorted(digits[i:])
            # Smallest digit in the suffix strictly greater than pivot.
            successor = next(d for d in suffix if d > pivot)
            suffix.remove(successor)
            # Pivot replaced by its successor; remaining digits in
            # ascending order give the minimal higher number.
            return int(digits[:i] + successor + ''.join(suffix))
    return num  # digits non-increasing: no higher permutation exists
|
ThomasMarcel/webapp-course | resources/gae-boilerplate/bp_includes/external/babel/messages/tests/checkers.py | Python | apache-2.0 | 12,764 | 0.012614 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
from datetime import datetime
import time
import unittest
from StringIO import StringIO
from babel import __version__ as VERSION
from babel.core import Locale, UnknownLocaleError
from babel.dates import format_datetime
from babel.messages import checkers
from babel.messages.plurals import PLURALS
from babel.messages.pofile import read_po
from babel.util import LOCALTZ
class CheckersTestCase(unittest.TestCase):
# the last msgstr[idx] is always missing except for singular plural forms
def test_1_num_plurals_checkers(self):
for _locale in [p for p in PLURALS if PLURALS[p][0] == 1]:
try:
locale = Locale.parse(_locale)
except UnknownLocaleError:
# Just an alias? Not what we're testing here, let's continue
continue
po_file = (ur"""\
# %(english_name)s translations for TestProject.
# Copyright (C) 2007 FooBar, Inc.
# This file is distributed under the same license as the TestProject
# project.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2007.
#
msgid ""
msgstr ""
"Project-Id-Version: TestProject 0.1\n"
"Report-Msgid-Bugs-To: bugs.address@email.tld\n"
"POT-Creation-Date: 2007-04-01 15:30+0200\n"
"PO-Revision-Date: %(date)s\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: %(locale)s <LL@li.org>\n"
"Plural-Forms: nplurals=%(num_plurals)s; plural=%(plural_expr)s\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel %(version)s\n"
#. This will be a translator comment,
#. that will include several lines
#: project/file1.py:8
msgid "bar"
msgstr ""
#: project/file2.py:9
msgid "foobar"
msgid_plural "foobars"
msgstr[0] ""
""" % dict(locale = _locale,
english_name = locale.english_name,
version = VERSION,
year = time.strftime('%Y'),
date = format_datetime(datetime.now(LOCALTZ),
'yyyy-MM-dd HH:mmZ',
tzinfo=LOCALTZ, locale=_locale),
num_plurals = PLURALS[_locale][0],
plural_expr = PLURALS[_locale][0])).encode('utf-8')
# This test will fail for revisions <= 406 because so far
# catalog.num_plurals was neglected
catalog = read_po(StringIO(po_file), _locale)
message = catalog['foobar']
checkers.num_plurals(catalog, message)
def test_2_num_plurals_checkers(self):
# in this testcase we add an extra msgstr[idx], we should be
# disregarding it
for _locale in [p for p in PLURALS if PLURALS[p][0] == 2]:
if _locale in ['nn', 'no']:
_locale = 'nn_NO'
num_plurals = PLURALS[_locale.split('_')[0]][0]
plural_expr = PLURALS[_locale.split('_')[0]][1]
else:
num_plurals = PLURALS[_locale][0]
plural_expr = PLURALS[_locale][1]
try:
locale = Locale(_locale)
date = format_datetime(datetime.now(LOCALTZ),
'yyyy-MM-dd HH:mmZ',
tzinfo=LOCALTZ, locale=_locale)
except UnknownLocaleError:
# Just an alias? Not what we're testing here, let's continue
continue
po_file = (ur"""\
# %(english_name)s translations for TestProject.
# Copyright (C) 2007 FooBar, Inc.
# This file is distributed under the same license as th | e TestProject
# project.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2007.
#
msgid ""
msgstr ""
"Project-Id-Version: TestProject 0.1\n"
"Report-Msgid-Bugs-To: bugs.address@email.tld\n"
"POT-Creation-Date: 2007-04-01 15:30+0200\n"
"PO-Revision-Date: %(date)s\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: %(locale)s <LL@li.org>\n"
"Plural-Forms: nplurals=%(num_plurals)s; plural=%(plural_expr)s\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Conte | nt-Transfer-Encoding: 8bit\n"
"Generated-By: Babel %(version)s\n"
#. This will be a translator comment,
#. that will include several lines
#: project/file1.py:8
msgid "bar"
msgstr ""
#: project/file2.py:9
msgid "foobar"
msgid_plural "foobars"
msgstr[0] ""
msgstr[1] ""
msgstr[2] ""
""" % dict(locale = _locale,
english_name = locale.english_name,
version = VERSION,
year = time.strftime('%Y'),
date = date,
num_plurals = num_plurals,
plural_expr = plural_expr)).encode('utf-8')
# we should be adding the missing msgstr[0]
# This test will fail for revisions <= 406 because so far
# catalog.num_plurals was neglected
catalog = read_po(StringIO(po_file), _locale)
message = catalog['foobar']
checkers.num_plurals(catalog, message)
def test_3_num_plurals_checkers(self):
for _locale in [p for p in PLURALS if PLURALS[p][0] == 3]:
po_file = r"""\
# %(english_name)s translations for TestProject.
# Copyright (C) 2007 FooBar, Inc.
# This file is distributed under the same license as the TestProject
# project.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2007.
#
msgid ""
msgstr ""
"Project-Id-Version: TestProject 0.1\n"
"Report-Msgid-Bugs-To: bugs.address@email.tld\n"
"POT-Creation-Date: 2007-04-01 15:30+0200\n"
"PO-Revision-Date: %(date)s\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: %(locale)s <LL@li.org>\n"
"Plural-Forms: nplurals=%(num_plurals)s; plural=%(plural_expr)s\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel %(version)s\n"
#. This will be a translator comment,
#. that will include several lines
#: project/file1.py:8
msgid "bar"
msgstr ""
#: project/file2.py:9
msgid "foobar"
msgid_plural "foobars"
msgstr[0] ""
msgstr[1] ""
""" % dict(locale = _locale,
english_name = Locale.parse(_locale).english_name,
version = VERSION,
year = time.strftime('%Y'),
date = format_datetime(datetime.now(LOCALTZ),
'yyyy-MM-dd HH:mmZ',
tzinfo=LOCALTZ, locale=_locale),
num_plurals = PLURALS[_locale][0],
plural_expr = PLURALS[_locale][0])
# This test will fail for revisions <= 406 because so far
# catalog.num_plurals was neglected
catalog = read_po(StringIO(po_file), _locale)
message = catalog['foobar']
checkers.num_plurals(catalog, message)
def test_4_num_plurals_checkers(self):
for _locale in [p for p in PLURALS if PLURALS[p][0] == 4]:
po_file = r"""\
# %(english_name)s translations for TestProject.
# Copyright (C) 2007 FooBar, Inc.
# This file is distributed under the same license as the TestProject
# project.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2007.
#
msgid ""
msgstr ""
"Project-Id-Version: TestProject 0.1\n"
"Report-Msgid-Bugs-To: bugs.address@email.tld\n"
"POT-Creation-Date: 2007-04-01 15:30+0200\n"
"PO-Revision-Date: %(date)s\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: %(locale)s <LL@li.org>\n"
"Plural-Forms: nplurals=%(num_plurals)s; plural=%(plural_expr)s\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel %(version)s\n"
#. This will be a translator comment,
#. that will include several lines
#: project/file1.py:8
msgid "bar"
msgstr ""
#: project/file2.py:9
msgid "foobar"
msgid_plural "fo |
harikvpy/django-popupcrud | demo/library/migrations/0002_auto_20170919_0319.py | Python | bsd-3-clause | 2,833 | 0.003177 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-19 03:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('library', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AuthorRating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.CharField(choices=[('1', '1 Star'), ('2', '2 Stars'), ('3', '3 Stars'), ('4', '4 Stars')], max_length=1, verbose_name='Rating')),
],
options={
'ordering': ('author',),
| 'verbose_name': 'Author Ratings',
},
| ),
migrations.CreateModel(
name='BookRating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.CharField(choices=[('1', '1 Star'), ('2', '2 Stars'), ('3', '3 Stars'), ('4', '4 Stars')], max_length=1, verbose_name='Rating')),
],
options={
'ordering': ('book',),
'verbose_name': 'Book Ratings',
},
),
migrations.AlterModelOptions(
name='author',
options={'ordering': ('name',), 'verbose_name': 'Author', 'verbose_name_plural': 'Authors'},
),
migrations.AlterModelOptions(
name='book',
options={'ordering': ('title',), 'verbose_name': 'Book', 'verbose_name_plural': 'Books'},
),
migrations.AlterField(
model_name='author',
name='age',
field=models.SmallIntegerField(blank=True, null=True, verbose_name='Age'),
),
migrations.AlterField(
model_name='author',
name='name',
field=models.CharField(max_length=128, verbose_name='Name'),
),
migrations.AlterField(
model_name='author',
name='penname',
field=models.CharField(max_length=128, verbose_name='Pen Name'),
),
migrations.AlterField(
model_name='book',
name='title',
field=models.CharField(max_length=128, verbose_name='Title'),
),
migrations.AddField(
model_name='bookrating',
name='book',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='library.Book'),
),
migrations.AddField(
model_name='authorrating',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='library.Author'),
),
]
|
whav/hav | src/hav/apps/webassets/management/commands/recreate_webassets.py | Python | gpl-3.0 | 3,208 | 0.000935 | from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.template.defaultfilters import filesizeformat
from hav.apps.media.models import Media
from hav.apps.hav_collections.models import Collection
from hav.apps.archive.models import ArchiveFile
from ...tasks import create
from ...models import WebAsset
class Command(BaseCommand):
help = "Forces the recreation of webassets."
def add_arguments(self, parser):
# Named (optional) arguments
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="Only display which files would be affected.",
)
parser.add_argument(
"--media",
type=int,
default=[],
action="append",
help="Limit to media with given pk",
)
parser.add_argument(
"--collection",
type=str,
default=[],
action="append",
help="Limit to media in specific collection",
)
parser.add_argument(
"--extension",
type=str,
action="append",
default=[],
help="Filter by file extension (archived file)",
)
def get_queryset(self, media_ids, collection_slugs, extensions):
# start by filtering media
media = Media.objects.all()
if len(media_ids):
media = Media.objects.filter(pk__in=media_ids)
if len(collection_slugs):
collections = Collection.objects.filter(slug__in=collection_slugs)
media = media.filter(collection__in=collections)
# now move down to the archived files
archived_files = (
ArchiveFile.objects.filter(media__in=media)
.prefetch_related("media_set", "media_set__collection")
.order_by("media__set__id")
)
if len(extensions):
q = Q()
for ext in extensions:
q |= Q(original_filename__iendswith=ext) | Q(file__ends | with=ext)
archived_files = archived_files.filter(q)
return archived_files
def process_file(self, archived_file):
a | rchived_file.webasset_set.all().delete()
create.delay(archived_file.pk)
def handle(self, *args, **options):
# gather all options to limit the resulting queryset
media_ids = options.get("media", [])
collection_slugs = options.get("collection", [])
extensions = options.get("extension", [])
archived_files = self.get_queryset(media_ids, collection_slugs, extensions)
af_count = archived_files.count()
self.stdout.write(f"Operating {af_count} files.")
dry_run = options.get("dry_run")
for af in archived_files:
self.stdout.write(
f"Processing file {af.file} (original name: {af.original_filename}, media: {af.media_set.get().id}, size: {filesizeformat(af.size)}, collection: {af.media_set.get().collection.slug})"
)
if not dry_run:
self.process_file(af)
self.stdout.write(f"Processed {af_count} files.")
|
qedsoftware/commcare-hq | corehq/apps/accounting/migrations/0019_remove_softwareplanversion_product_rates.py | Python | bsd-3-clause | 392 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrati | ons.Migration):
dependencies = [
('accounting', '0018_datamigration_product_rates_to_ | product_rate'),
]
operations = [
migrations.RemoveField(
model_name='softwareplanversion',
name='product_rates',
),
]
|
BeenzSyed/tempest | tempest/services/compute/xml/tenant_usages_client.py | Python | apache-2.0 | 1,915 | 0 | # Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# | WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from lxml import etree
from tempest.common.rest_client import RestClientXML
from t | empest.services.compute.xml.common import xml_to_json
class TenantUsagesClientXML(RestClientXML):
    """XML client for the os-simple-tenant-usage compute API extension."""

    def __init__(self, config, username, password, auth_url, tenant_name=None):
        super(TenantUsagesClientXML, self).__init__(config, username,
                                                    password, auth_url,
                                                    tenant_name)
        self.service = self.config.compute.catalog_type

    def _parse_array(self, node):
        # Convert an lxml element tree into plain dict/list structures.
        return xml_to_json(node)

    def list_tenant_usages(self, params=None):
        """Return usage records for all tenants, optionally filtered."""
        url = 'os-simple-tenant-usage'
        if params:
            url = '%s?%s' % (url, urllib.urlencode(params))
        resp, body = self.get(url, self.headers)
        usages = self._parse_array(etree.fromstring(body))
        return resp, usages['tenant_usage']

    def get_tenant_usage(self, tenant_id, params=None):
        """Return usage records for a single tenant."""
        url = 'os-simple-tenant-usage/%s' % tenant_id
        if params:
            url = '%s?%s' % (url, urllib.urlencode(params))
        resp, body = self.get(url, self.headers)
        return resp, self._parse_array(etree.fromstring(body))
|
OtagoPolytechnic/LanguageCards | admin/wordproject/migrations/0002_auto_20160331_1111.py | Python | mit | 464 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-30 22:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migrat | ion):
dependencies = [
('wordproject', '0001_initial'),
]
operations = [
migrations.AlterFi | eld(
model_name='wordrecord',
name='description',
field=models.TextField(max_length=200, null=True),
),
]
|
IPFR33LY/EverwingHax | Everwing_data.py | Python | mit | 8,827 | 0.002606 | from ast import literal_eval
from json import dumps, loads
from urllib2 import Request, urlopen
from requests import post, get
from p3lzstring import LZString
def user_input(data):
    """Max out the value of every sidekick stat entry whose name slice
    contains 'xp' or 'maturity'; echoes each updated value. Returns data."""
    for entry in data:
        dragon = entry['dragondata']
        # The stat type is encoded after two fixed-width name prefixes.
        stat_name = dragon['sidekick_name'][9:][5:]
        if 'xp' in stat_name or 'maturity' in stat_name:
            dragon['value'] = dragon['maximum']
            print(dragon['value'])
    return data
def lz_string_decode(lzstring):
    """Decompress an LZString base64 blob back into its original text."""
    return LZString.decompressFromBase64(lzstring)
def dict_loop(p, check_list, scheme_pid):
    """Return the first instance in the global state_dict whose key contains *p*.

    check_list and scheme_pid are unused but kept so existing call sites
    (build_xpmat_list) keep working unchanged. Returns the string
    'Found Nothing' when no key matches.
    """
    # Bug fix: the original wrapped this scan in a while loop that re-ran
    # the full key iteration len(instances) times (O(n^2)) and used the
    # Python-2-only iterkeys(); a single pass is equivalent.
    for key in state_dict['instances']:
        if p in key:
            return state_dict['instances'][key]
    return 'Found Nothing'
def build_xpmat_list(state_dict):
    # Collect the Maturity and XP stat instances of every sidekick in
    # state_dict, as a flat list of {'Maturity': stat} / {'XP': stat} dicts.
    # NOTE(review): Python 2 code (iterkeys / print statement). `i` advances
    # once per key, so the outer while loop effectively runs a single pass.
    i = 0
    while i < len(state_dict['instances']):
        list = []
        for key in state_dict['instances'].iterkeys():
            # pg is a progress percentage; the reporting call is disabled.
            pg = float((float(i) / float(len(state_dict['instances'])) * float(100)))
            # update_progress(pg)
            schemePID = state_dict['instances'][str(key)]['schemaPrimativeID']
            dict_index = state_dict['instances'][str(key)]
            if 'stats' in dict_index.keys() and 'sidekick' in schemePID:
                check_list = []
                stat_keys = dict_index['stats']
                for stats in stat_keys:
                    # dict_loop resolves a stat key to its instance dict.
                    data = dict_loop(stats, check_list, schemePID)
                    check_list.append(schemePID)
                    if 'maturity' in data['schemaPrimativeID']:
                        list.append({'Maturity': data})
                    if 'xp' in data['schemaPrimativeID']:
                        list.append({'XP': data})
            i = i + 1
            print "%s / %s" % (i, len(state_dict['instances']))
    return list
def conv2Json(jsonString, *args, **kwargs):
    """Parse a Python-literal string into an object.

    Despite the name this uses ast.literal_eval, not a JSON parser; extra
    positional/keyword arguments are accepted and ignored.
    """
    return literal_eval(jsonString)
def conv2JsonStr(jsonObject):
    """Return the object double-JSON-encoded: a JSON string literal whose
    content is the JSON serialization of *jsonObject*."""
    inner = dumps(jsonObject)
    return dumps(inner)
def ever_wing_token():
    # Fetch a wintermute session token for the global user id (uID) and
    # return it parsed into a dict (conv2Json applies literal_eval to the
    # response body).
    # NOTE(review): Python 2 urllib2 API; no error handling on the request.
    req = Request("https://wintermute-151001.appspot.com/game/session/everwing/" + uID)
    response = urlopen(req)
    data = response.read()
    Token = conv2Json(data)
    return Token
def ever_wing_defaultaction():
    """Placeholder for the default EverWing action; intentionally does nothing."""
    pass
def lz_string_encode(object):
    """Compress *object* to an LZString base64 blob; echoes and returns it."""
    encoded = LZString.compressToBase64(object)
    print(encoded)
    return encoded
def default_state():
    """GET the default EverWing game state for the global uID; returns the
    raw response body."""
    endpoint = ('https://wintermute-151001.appspot.com'
                '/game/state/everwing/default/' + uID)
    response = get(endpoint)
    return response.content
def post_to_winter(user_data, Token):
    # POST a serialized game action to the wintermute backend, authenticated
    # via the x-wintermute-session header taken from Token['token'].
    # NOTE(review): Python 2 code (`unicode`); the two prints are debug
    # output and the response (post_data) is discarded.
    user_data = unicode(user_data)
    headers = {"Host": "wintermute-151001.appspot.com",
               "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0",
               "Accept": "application/json, text/plain, */*",
               "Accept-Language": "en-US,en;q=0.5",
               "Accept-Encoding": "gzip, deflate, br",
               "Content-Type": "application/json;charset=utf-8",
               "x-wintermute-session": str(Token['token']),
               "Connection": "keep-alive"}
    print (user_data)
    print (headers)
    post_data = post('https://wintermute-151001.appspot.com/game/action', data=user_data, headers=headers)
    return
def rebuild_loop(p, list, x, maturity, XP):
i = 0
if maturity == 'Maturity':
while i < len(state_dict):
for key in state_dict['instances'].iterkeys():
if p in key:
state_dict['instances'][key] = list[x][maturity]
i = i + 1
if XP == 'XP':
while i < len(state_dict):
for key in state_dict['instances'].iterkeys():
if p in key:
| state_dict['instances'][key] = list[x][XP]
i = i | + 1
return 'THIS IS IT'
def build_state_dict(list):
    # Write the (modified) stat entries in *list* back into the global
    # state_dict via rebuild_loop. Entries alternate Maturity / XP dicts;
    # a KeyError on the Maturity lookup means the entry is an XP one.
    # NOTE(review): Python 2 print statement; the maturity=/XP= sentinel
    # values (2 and 3) mirror rebuild_loop's odd argument convention.
    i = 0
    while i < len(list):
        try:
            if list[i]["Maturity"]:
                key_index = list[i]['Maturity']['key']
                rebuild_loop(key_index, list, i, maturity='Maturity', XP=2)
                pass
        except KeyError:
            if list[i]['XP']:
                key_index = list[i]['XP']['key']
                rebuild_loop(key_index, list, i, XP='XP', maturity=3)
        i = i + 1
        print '%s / %s' % (i, len(list))
    return
def fuck_dat_currency():
    # Set the trophy and coin balances in the global state_dict to 999999.
    # NOTE(review): Python 2 print statement in the error path; instances
    # without the expected keys are skipped via the broad except.
    for instance in state_dict['instances']:
        try:
            if state_dict['instances'][instance]['schemaPrimativeID'] == "currency:trophy":
                state_dict['instances'][instance]['balance'] = 999999
            if state_dict['instances'][instance]['schemaPrimativeID'] == "currency:coin":
                state_dict['instances'][instance]['balance'] = 999999
        except Exception as e:
            print "%s" % e
    return
def rebuild_state(list, state_dict):
    """Promote every (Maturity, XP) stat pair in *list* to max level.

    *list* alternates {'Maturity': stat} / {'XP': stat} entries (as built by
    build_xpmat_list). Any sidekick with a truthy maturity is promoted to
    maturity 3 and its XP value/maximum set to the level-3 cap (125800).
    Returns the same list, mutated in place.

    Note: the original code had branches for maturity 2 and 1 (62800 XP),
    but they were unreachable because maturity was forced to 3 first; this
    version keeps that effective behavior. *state_dict* is unused but kept
    for caller compatibility.
    """
    MAX_MATURITY = 3
    MAX_XP = 125800
    for idx in range(0, len(list), 2):
        maturity_stat = list[idx]['Maturity']
        if maturity_stat['value']:
            maturity_stat['value'] = MAX_MATURITY
            xp_stat = list[idx + 1]['XP']
            xp_stat['value'] = MAX_XP
            xp_stat['maximum'] = MAX_XP
    return list
def get_dat_toonies():
    """Unlock every playable character in the global state_dict and max its
    first stat.

    For each instance whose schemaPrimativeID names a character, the first
    entry in its 'stats' list is resolved and its value raised to its
    maximum; a 'locked' character is switched to 'idle'. Malformed
    instances are reported and skipped.
    """
    character_ids = {'character:lenore_item_character', 'character:coin_item_character',
                     'character:sophia_item_character', 'character:jade_item_character',
                     'character:arcana_item_character', 'character:fiona_item_character',
                     'character:standard_item_character', 'character:magnet_item_character'}
    instances = state_dict['instances']
    for key in instances:
        try:
            instance = instances[key]
            if instance['schemaPrimativeID'] in character_ids:
                stat_key = instance['stats'][0]
                stat = instances[stat_key]
                stat['value'] = stat['maximum']
                if instance['state'] == 'locked':
                    instance['state'] = 'idle'
        except Exception as exc:
            # Bug fix: the original printed Exception.message (a class
            # attribute that does not hold the error), losing the actual
            # failure; bind and print the caught exception instead.
            print(exc)
    return
if __name__ == '__main__':
uID = raw_input('Please Input User ID: ')
user_data = loads(default_state())
state = user_data['state'][11:]
print (state)
state = lz_string_decode(str(state))
state_json_str = conv2Json(state)
state_dict = loads(state_json_str)
input = raw_input('Do you wanna upgrade all current Dragons? (Y/n)')
if input == 'Y':
build_state_dict(rebuild_state(build_xpmat_list(state_dict), state_dict))
else:
print('-------------------------------')
print("You must enter a 'Y' or 'n'!!")
print('-------------------------------')
input = raw_input('Do you wanna fuck da currency? (Y/n)')
if input == 'Y':
fuck_dat_currency()
else:
print('-------------------------------')
print("You must enter a 'Y' or 'n'!!")
print('-------------------------------')
input = raw_input('Do you want all Characters / level 50? (Y/N)')
if input == 'Y':
get_dat_toonies()
else:
print('-------------------------------')
print("You must enter a 'Y' or 'n'!!")
print('-------------------------------')
a = open('statefile.txt', 'w')
a |
ramonsaraiva/sgce | sgce/person/views.py | Python | mit | 1,162 | 0.024138 | # -*- coding: utf-8 -*
from django.views.generic.edit import CreateView
from person.models import Person
from person.forms import PersonForm
from django.contrib.auth import authenticate, login as auth_login, logout as auth_logout
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
class PersonCreate(CreateView):
    """Signup view: renders PersonForm on signup.html and redirects to the
    login page after a successful Person creation."""
    model = Person
    form_class = PersonForm
    template_name = 'person/signup.html'
    success_url = '/person/login/'
def login(request):
context = {}
if request.method == 'POST':
auth_logout(request)
username = request.POST['userna | me']
password = request.POST['password']
if not request.user.is_authenticated():
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
auth_login(request, user);
if user.stype == 'P':
return redirect('/sgceusr/')
elif user.stype == 'O' or user.stype == 'R':
| return redirect('/sgceman/')
else:
context['error'] = 'Usuário inativo'
else:
context['error'] = 'Usuário ou senha incorretos'
return render_to_response('person/login.html', context, RequestContext(request))
|
fraserphysics/F_UNCLE | F_UNCLE/Models/Ptw.py | Python | gpl-2.0 | 14,166 | 0.000141 | # !/usr/bin/pthon2
"""Preston-Tonks-Wallace Flow Stress Model
This module implements the PTW flow stress model
Authors
-------
- Stephen A. Andrews (SA)
- Diane E. Vaughan (DEV)
Version History
---------------
0.0: 13/05/2016 - Initial class creation
References
----------
[1] Preston, D. L.; Tonks, D. L. & Wallace, D. C. Model of plastic deformation
for extreme loading conditions Journal of Applied Physics, 2003, 93, 211-220
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# =========================
# Python Standard Libraries
# =========================
import sys
import os
import unittest
from math import pi, erf, log
import pdb
# =========================
# Python Installed Packages
# =========================
import numpy as np
import matplotlib.pyplot as plt
# =========================
# Module Packages
# =========================
from ..Utils.Struc import Struc
from ..Utils.PhysicsModel import PhysicsModel
# =========================
# Main Code
# =========================
class Ptw(PhysicsModel):
"""PTW Flow stress model
**Usage**
1. Instantiate a Ptw object with desired options
2. *optional* set the options as desired
3. Call the Ptw object with a valid temperature, strain rate and material
specification
4. Call the object again, material must be specified each time
"""
def __init__(self, name="Ptw flow stress", *args, **kwargs):
"""Instantiates the structure
Args:
name(str): Name of the structure
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Return:
None
"""
def_opts = {
'materials': [list,
['Cu', 'U', 'Ta', 'V', 'Mo', 'Be', 'SS_304',
'SS_21-6-9'],
None, None, 'None', 'List of available materials'],
'temp_check': [float, 300.0, 0.0, 2E3, 'K',
'Dummy option to check temp input'],
'str_rt_chk': [float, 0.0, 0.0, 1E12, 's**-1',
'Dummy option to check strain rate input'],
'matname': [(str), 'Cu', None, None, '-',
'Current material specification'],
'rho': [float, float('nan'), 0, None, 'kg/m**3',
'Density'],
'm_weight': [float, float('nan'), 0, None, 'g/gmole',
'Molecular weight'],
'Tm': [float, float('nan'), 0, None, '??',
'Melt temperature'],
'theta': [float, float('nan'), 0, None, '??',
'Ptw theta'],
'p': [float, float('nan'), 0, None, '??',
'Ptw p'],
's_o': [float, float('nan'), 0, None, '??',
'Flow stress at zero temperature'],
's_inf': [float, float('nan'), 0, None, '??',
'Flow stress at infinite temperature'],
'kappa': [float, float('nan'), 0, None, '??',
'Ptw kappa'],
'gamma': [float, float('nan'), 0, None, '??',
'Ptw gamma'],
'y_o': [float, float('nan'), 0, None, '??',
'Yield stress at zero temperature'],
'y_inf': [float, float('nan'), 0, None, '??',
'Yield stress at infinite temperature'],
'y_1': [float, float('nan'), 0, None, '??',
'Ptw y_1'],
'y_2': [float, float('nan'), 0, None, '??',
'Ptw y_2'],
'beta': [float, float('nan'), 0, None, '??',
'Ptw beta'],
'G_o': [float, float('nan'), 0, None, '??',
'Shear modulus at zero temperature'],
'alpha': [float, float('nan'), 0, None, '??',
'Ptw alpha'],
'alpha_p': [float, float('nan'), 0, None, '??',
'Ptw alpha p']}
Struc.__init__(self, name, def_opts=def_opts, *args, **kwargs)
def __call__(self, temp, strain_rate, material, **overrides):
"""Solves for the yield stress and flow stress at the given condition
Args:
temp(float): Temperature, in degrees Kelvin
strain_rate(float): Strain rate, in sec**-1
material(str): Key for material type
Keyword Args:
**overrides(dict): Passed as a chain of keyword arguments. These
arguments override any material property
Return:
flow_stress(float): Flow stress in ??Pa??
yield_stress(float): Yield stress in ??Pa??
"""
g_modu, t_norm, psi_norm = self.__pre__(temp, strain_rate, material,
**overrides)
s_o = self.get_option('s_o')
s_inf = self.get_option('s_inf')
kappa = self.get_option('kappa')
beta = self.get_option('beta')
y_o = self.get_option('y_o')
y_inf = self.get_option('y_inf')
y_1 = self.get_option('y_1')
y_2 = self.get_option('y_2')
if psi_norm == 0.0:
erf_psi_norm = 0.0
else:
erf_psi_norm = erf(kappa * t_norm * log(psi_norm**(-1)))
# end
glide_flow_stress = s_o | - (s_o - s_inf) * erf_psi_norm
shock_flow_stress = s_o * psi_norm**beta
glide_yield_stress = y_o - (y_o - y_inf) * erf_psi_norm
shock_yield_stress = y_1 * psi_norm**y_2
flow_stress = max((glide_flow_stress, sh | ock_flow_stress))
yield_stress = max((glide_yield_stress,
min((shock_yield_stress, shock_flow_stress))))
flow_stress *= g_modu
yield_stress *= g_modu
return flow_stress, yield_stress
def get_stress_strain(self, temp, strain_rate, material, min_strain=0.05,
max_strain=0.7, **overrides):
"""Returns the stress strain relationship for the material
"""
t_s, t_y = self(temp, strain_rate, material, **overrides)
g_modu, t_norm, psi_norm = self.__pre__(temp, strain_rate, material,
**overrides)
t_s /= g_modu
t_y /= g_modu
s_o = self.get_option('s_o')
p = self.get_option('p')
theta = self.get_option('theta')
ratio = p * (t_s - t_y) / (s_o - t_y)
strain = np.linspace(min_strain, max_strain, 100)
stress = -p * theta * strain / ((s_o - t_y) * (np.exp(ratio) - 1))
stress = np.exp(stress)
stress *= (1 - np.exp(-ratio))
stress = np.log(1 - stress)
stress *= p**-1 * (s_o - t_y)
stress += t_s
stress *= g_modu
# if np.any(np.isnan(stress)): pdb.set_trace()
return stress, strain
def get_stress(self, strain, strain_rate, temp, material, **overrides):
"""Returns the stress in the material material
Args:
strain(float or np.array): The strain in the material
strain_rate(float): The train rate in the material
temp(float): The temperature of the material
mat(str): A valid material specifier
Keyword Args:
valid materials properties can be passed as kwargs to
override default values
Return:
(float or np.array): The stress in the material
"""
t_s, t_y = self(temp, strain_rate, material, **overrides)
g_modu = self.__pre__(temp, strain_rate, material, **overrides)[0]
t_s /= g_modu
t_y /= g_modu
s_o = self.get_option('s_o')
p = self.get_option('p')
theta = self.get_option('theta')
ratio = p * (t_s - t_y) / (s_o - t_y)
poisson = 0.23
yeild_strain = t_y / (g_modu * 2 * (1 + poisson))
if strain < yeild_strain:
stress = strain * g_modu * 2 * (1 + poisson)
else:
stress = -p * theta * strain / ((s_o - t_y) * (np.exp(ratio) - 1))
stress = np.ex |
dabura667/electrum | lib/transaction.py | Python | mit | 35,844 | 0.003348 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Note: The deserialization code originally comes from ABE.
from . import bitcoin
from .bitcoin import *
from .util import print_error, profiler, to_string
from . import bitcoin
from .bitcoin import *
import time
import sys
import struct
#
# Workalike python implementation of Bitcoin's CDataStream class.
#
import struct
import random
from .keystore import xpubkey_to_address, xpubkey_to_pubkey
NO_SIGNATURE = 'ff'
class SerializationError(Exception):
    """Raised when there is a problem serializing or deserializing a stream."""
class BCDataStream(object):
    """Byte-stream reader/writer mirroring Bitcoin's CDataStream.

    Data is accumulated in a bytearray (self.input) and consumed via
    self.read_cursor; numeric fields are little-endian (struct '<' formats)
    and variable-length data uses Bitcoin's compact-size prefix.
    """

    def __init__(self):
        self.input = None       # bytearray of buffered data, or None
        self.read_cursor = 0    # index of the next byte to read

    def clear(self):
        """Drop all buffered data and reset the read position."""
        self.input = None
        self.read_cursor = 0

    def write(self, _bytes):
        """Append raw bytes; initializes the buffer on first use."""
        if self.input is None:
            self.input = bytearray(_bytes)
        else:
            self.input += bytearray(_bytes)

    def read_string(self, encoding='ascii'):
        """Read a compact-size-prefixed string.

        Raises SerializationError if nothing was written yet or the buffer
        is truncated.
        """
        # Strings are encoded depending on length:
        # 0 to 252 :        1-byte-length followed by bytes (if any)
        # 253 to 65,535 :   byte '253' + 2-byte-length, then the bytes
        # up to 2**32 - 1 : byte '254' + 4-byte-length, then the bytes
        # up to 2**64 - 1 : byte '255' + 8-byte-length, then the bytes
        if self.input is None:
            raise SerializationError("call write(bytes) before trying to deserialize")
        try:
            length = self.read_compact_size()
        except IndexError:
            raise SerializationError("attempt to read past end of buffer")
        return self.read_bytes(length).decode(encoding)

    def write_string(self, string, encoding='ascii'):
        """Write a string with a compact-size length prefix."""
        string = to_bytes(string, encoding)
        self.write_compact_size(len(string))
        self.write(string)

    def read_bytes(self, length):
        """Return the next *length* bytes and advance the cursor."""
        # Bug fix: removed the unreachable `return ''` that followed the
        # try/except (both paths already return or raise).
        try:
            result = self.input[self.read_cursor:self.read_cursor + length]
            self.read_cursor += length
            return result
        except IndexError:
            raise SerializationError("attempt to read past end of buffer")

    def read_boolean(self):
        # Bug fix: the original compared a byte (an int in Python 3)
        # against chr(0) (a str), which was always unequal.
        return self.read_bytes(1)[0] != 0

    def read_int16(self): return self._read_num('<h')
    def read_uint16(self): return self._read_num('<H')
    def read_int32(self): return self._read_num('<i')
    def read_uint32(self): return self._read_num('<I')
    def read_int64(self): return self._read_num('<q')
    def read_uint64(self): return self._read_num('<Q')

    def write_boolean(self, val):
        # Bug fix: write real bytes; chr() produced a str, which bytearray
        # concatenation rejects in Python 3.
        return self.write(b'\x01' if val else b'\x00')

    def write_int16(self, val): return self._write_num('<h', val)
    def write_uint16(self, val): return self._write_num('<H', val)
    def write_int32(self, val): return self._write_num('<i', val)
    def write_uint32(self, val): return self._write_num('<I', val)
    def write_int64(self, val): return self._write_num('<q', val)
    def write_uint64(self, val): return self._write_num('<Q', val)

    def read_compact_size(self):
        """Read a Bitcoin compact-size integer (1, 3, 5 or 9 bytes)."""
        size = self.input[self.read_cursor]
        self.read_cursor += 1
        if size == 253:
            size = self._read_num('<H')
        elif size == 254:
            size = self._read_num('<I')
        elif size == 255:
            size = self._read_num('<Q')
        return size

    def write_compact_size(self, size):
        """Write a compact-size integer; rejects negative sizes."""
        if size < 0:
            raise SerializationError("attempt to write size < 0")
        elif size < 253:
            self.write(bytes([size]))
        elif size < 2**16:
            self.write(b'\xfd')
            self._write_num('<H', size)
        elif size < 2**32:
            self.write(b'\xfe')
            self._write_num('<I', size)
        elif size < 2**64:
            self.write(b'\xff')
            self._write_num('<Q', size)

    def _read_num(self, format):
        # Unpack one little-endian number at the cursor and advance past it.
        (i,) = struct.unpack_from(format, self.input, self.read_cursor)
        self.read_cursor += struct.calcsize(format)
        return i

    def _write_num(self, format, num):
        self.write(struct.pack(format, num))
# enum-like type
# From the Python Cookbook, downloaded from http://code.activestate.com/recipes/67107/
class EnumException(Exception):
    """Raised by Enumeration for duplicate or wrongly-typed enum entries."""
    pass
class Enumeration:
def __init__(self, name, enumList):
self.__doc__ = name
lookup = { }
reverseLookup = { }
i = 0
uniqueNames = [ ]
uniqueValues = [ ]
for x in enumList:
if isinstance(x, tuple):
x, i = x
if not isinstance(x, str):
raise EnumException("enum name is not a string: " + x)
if not isinstance(i, int):
raise EnumException("enum value is not an integer: " + i)
if x in uniqueNames:
raise EnumException("enum name is not unique: " + x)
if i in uniqueValues:
raise EnumException("enum value is not unique for " + x)
uniqueNames.append(x)
uniqueValues.append(i)
lookup[x] = i
reverseLookup[i] = x
i = i + 1
self.lookup = lookup
| self.reverseLookup = reverseLookup
def __getattr__(self, attr):
if attr not in self.lookup:
raise AttributeError
return self.lookup[attr]
def whatis(self, value):
return self.reverseLookup[value]
# This function comes from | bitcointools, bct-LICENSE.txt.
def long_hex(bytes):
    """Return the full lowercase hex encoding of *bytes*."""
    # Bug fix: bytes.encode('hex_codec') is Python 2 only (bytes has no
    # .encode and the hex codec is gone in Python 3); use bytes.hex().
    return bytes.hex()
# This function comes from bitcointools, bct-LICENSE.txt.
def short_hex(bytes):
    """Return an abbreviated hex encoding of *bytes*: the full string when
    shorter than 11 chars, otherwise first/last 4 chars with an ellipsis."""
    # Bug fix: bytes.encode('hex_codec') is Python 2 only; use bytes.hex().
    t = bytes.hex()
    if len(t) < 11:
        return t
    return t[0:4] + "..." + t[-4:]
opcodes = Enumeration("Opcodes", [
("OP_0", 0), ("OP_PUSHDATA1",76), "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE", "OP_RESERVED",
"OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7",
"OP_8", "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16",
"OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF", "OP_ELSE", "OP_ENDIF", "OP_VERIFY",
"OP_RETURN", "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP", "OP_2OVER", "OP_2ROT", "OP_2SWAP",
"OP_IFDUP", "OP_DEPTH", "OP_DROP", "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT",
"OP_SWAP", "OP_TUCK", "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE", "OP_INVERT", "OP_AND",
"OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY", "OP_RESERVED1", "OP_RESERVED2", "OP_1ADD", "OP_1SUB", "OP_2MUL",
"OP_2DIV", "OP_NEGATE", "OP_ABS", "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV",
"OP_MOD", "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR",
"OP_NUMEQUAL", "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN",
"OP_GREATERTHAN", "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQU |
dualphase90/Learning-Neural-Networks | deep_net.py | Python | mit | 2,387 | 0.016757 | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot = True)
n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_classes = 10
batch_size = 100
x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')
def neural_network_model(data):
hidden_1_layer = {'weights':tf.Variable(tf.random_normal([784, n_nodes_hl1])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl1]))}
hidden_2_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl2]))}
hidden_3_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl3]))}
output_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
'biases':tf.Variable(tf.random_normal([n_classes])),}
l1 = tf.add(tf.matmul(data,hidden_1_layer['weights']), hidden_1_layer['biases'])
l1 = tf.nn.relu(l1)
l2 = tf.add(tf.matmul(l1,hidde | n_2_layer['weights']), hidden_2_layer['biases'])
l2 = tf.nn.relu(l2)
l3 = tf.add(tf.matmul(l2,hidden_3_layer['weights']), hidden_3_layer['biases'])
l3 = tf.nn.relu(l3)
output = tf.matmul(l3,o | utput_layer['weights']) + output_layer['biases']
return output
def train_neural_network(x):
    """Train the 3-layer MNIST network with Adam and print test accuracy.

    x is the input placeholder; the label placeholder y, the mnist dataset
    and batch_size come from module globals. Runs hm_epochs passes over the
    training set.
    """
    prediction = neural_network_model(x)
    # Bug fix: softmax_cross_entropy_with_logits takes named labels=/logits=
    # arguments; the positional call relied on an old (logits, labels)
    # ordering that TF 1.x rejects.
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 10
    with tf.Session() as sess:
        # NOTE(review): initialize_all_variables is deprecated in favor of
        # global_variables_initializer, but kept for old-TF compatibility.
        sess.run(tf.initialize_all_variables())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost],
                                feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
train_neural_network(x)
|
mrachinskiy/jewelcraft | lib/ui_lib.py | Python | gpl-3.0 | 1,174 | 0 | # ##### BEGIN GP | L LICENSE BLOCK #####
#
# JewelCraft jewelry design toolkit for Blender.
# Copyright (C) 2015-2022 Mikhail Rachinskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed | in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
def popup_list(op, title: str, msgs: list, icon: str = "INFO") -> None:
    """Display *msgs* in a popup menu and mirror them to the operator report."""

    def draw(self, context):
        layout = self.layout
        for line in msgs:
            layout.label(text=line)

    op.report({"INFO"}, f"{title}:")
    for line in msgs:
        op.report({"INFO"}, line)

    bpy.context.window_manager.popup_menu(draw, title=title, icon=icon)
|
SNoiraud/gramps | gramps/gen/filters/rules/repository/_matchesnamesubstringof.py | Python | gpl-2.0 | 1,903 | 0.005255 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011 Helge Herz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
# "Repositories having a name that contain a substring"
#-------------------------------------------------------------------------
class MatchesNameSubstringOf(Rule):
    """Rule matching repositories whose name contains a given substring."""
    labels = [_('Text:')]
    name = _('Repositories with name containing <text>')
    description = _("Matches repositories whose name contains a certain substring")
    category = _('General filters')
    allow_regex = True
    def apply(self, db, repository):
        """Return True when the repository name contains the rule's text (value 0)."""
        return self.match_substring(0, repository.get_name())
|
DEVSENSE/PTVS | Python/Templates/Web/ProjectTemplates/Python/Web/PollsFlaskJade/PollsFlaskJade/models/azuretablestorage.py | Python | apache-2.0 | 4,285 | 0.0014 | """
Repository of polls that stores data in Azure Table Storage.
"""
from azure import WindowsAzureMissingResourceError
from azure.storage import TableService
from . import Poll, Choice, PollNotFound
from . import _load_samples_json
def _partition_and_row_to_key(partition, row):
"""Builds a poll/choice key out of azure table partition and row keys."""
return partition + '_' + row
def _key_to_partition_and_row(key):
"""Parses the azure table partition and row keys from the poll/choice
key."""
partition, _, row = key.partition('_')
return partition, row
def _poll_from_entity(entity):
    """Build a Poll model object from an Azure Table poll entity."""
    key = _partition_and_row_to_key(entity.PartitionKey, entity.RowKey)
    return Poll(key, entity.Text)
def _choice_from_entity(entity):
    """Build a Choice model object (including its vote count) from an entity."""
    key = _partition_and_row_to_key(entity.PartitionKey, entity.RowKey)
    return Choice(key, entity.Text, entity.Votes)
class Repository(object):
    """Azure Table Storage repository for polls and their choices."""
    def __init__(self, settings):
        """Initializes the repository with the specified settings dict.
        Required settings are:
        - STORAGE_NAME
        - STORAGE_KEY
        - STORAGE_TABLE_POLL
        - STORAGE_TABLE_CHOICE
        """
        self.name = 'Azure Table Storage'
        self.storage_name = settings['STORAGE_NAME']
        self.storage_key = settings['STORAGE_KEY']
        self.poll_table = settings['STORAGE_TABLE_POLL']
        self.choice_table = settings['STORAGE_TABLE_CHOICE']
        self.svc = TableService(self.storage_name, self.storage_key)
        # Idempotent: create_table succeeds if the tables already exist.
        self.svc.create_table(self.poll_table)
        self.svc.create_table(self.choice_table)
    def get_polls(self):
        """Returns all the polls from the repository (without their choices)."""
        poll_entities = self.svc.query_entities(self.poll_table)
        return [_poll_from_entity(entity) for entity in poll_entities]
    def get_poll(self, poll_key):
        """Returns a single poll, with its choices attached.

        Raises PollNotFound when the poll entity does not exist.
        """
        try:
            partition, row = _key_to_partition_and_row(poll_key)
            poll_entity = self.svc.get_entity(self.poll_table, partition, row)
            # Choices reference their poll via PollPartitionKey/PollRowKey.
            choice_entities = self.svc.query_entities(
                self.choice_table,
                "PollPartitionKey eq '{0}' and PollRowKey eq '{1}'" \
                    .format(partition, row)
            )
            poll = _poll_from_entity(poll_entity)
            poll.choices = [_choice_from_entity(choice_entity)
                            for choice_entity in choice_entities]
            return poll
        except WindowsAzureMissingResourceError:
            raise PollNotFound()
    def increment_vote(self, poll_key, choice_key):
        """Increment the choice vote count for the specified poll.

        Raises PollNotFound when the choice entity does not exist.
        NOTE(review): this read-modify-write is not atomic; two concurrent
        votes can lose an increment — confirm whether that is acceptable.
        """
        try:
            partition, row = _key_to_partition_and_row(choice_key)
            entity = self.svc.get_entity(self.choice_table, partition, row)
            entity.Votes += 1
            self.svc.update_entity(self.choice_table, partition, row, entity)
        except WindowsAzureMissingResourceError:
            raise PollNotFound()
    def add_sample_polls(self):
        """Adds a set of polls from data stored in a samples.json file."""
        poll_partition = '2014'
        poll_row = 0
        choice_partition = '2014'
        choice_row = 0
        for sample_poll in _load_samples_json():
            poll_entity = {
                'PartitionKey': poll_partition,
                'RowKey': str(poll_row),
                'Text': sample_poll['text'],
            }
            self.svc.insert_entity(self.poll_table, poll_entity)
            for sample_choice in sample_poll['choices']:
                choice_entity = {
                    'PartitionKey': choice_partition,
                    'RowKey': str(choice_row),
                    'Text': sample_choice,
                    'Votes': 0,
                    # Back-reference to the owning poll.
                    'PollPartitionKey': poll_partition,
                    'PollRowKey': str(poll_row),
                }
                self.svc.insert_entity(self.choice_table, choice_entity)
                choice_row += 1
            poll_row += 1
|
cpaulik/xray | xray/core/dataarray.py | Python | apache-2.0 | 41,458 | 0.000169 | import contextlib
import functools
import warnings
import numpy as np
import pandas as pd
from ..plot.plot import _PlotMethods
from . import indexing
from . import groupby
from . import ops
from . import utils
from . import variable
from .alignment import align
from .common import AbstractArray, BaseDataObject
from .coordinates import DataArrayCoordinates, Indexes
from .dataset import Dataset
from .pycompat import iteritems, basestring, OrderedDict, zip
from .utils import FrozenOrderedDict
from .variable import as_variable, _as_compatible_data, Coordinate
from .formatting import format_item
def _infer_coords_and_dims(shape, coords, dims):
    """All the logic for creating a new DataArray.

    Normalizes user-supplied ``coords``/``dims`` against the data ``shape``
    and returns ``(coords, dims)`` where ``coords`` is dict-like (or a list
    of Coordinate objects) and ``dims`` is a list of dimension names.
    """
    if (coords is not None and not utils.is_dict_like(coords)
            and len(coords) != len(shape)):
        raise ValueError('coords is not dict-like, but it has %s items, '
                         'which does not match the %s dimensions of the '
                         'data' % (len(coords), len(shape)))
    # A single dimension name is promoted to a one-element list.
    if isinstance(dims, basestring):
        dims = [dims]
    if dims is None:
        # No names supplied: fall back to dim_0 ... dim_n placeholders.
        dims = ['dim_%s' % n for n in range(len(shape))]
    if coords is not None and len(coords) == len(shape):
        # try to infer dimensions from coords
        if utils.is_dict_like(coords):
            dims = list(coords.keys())
        else:
            for n, (dim, coord) in enumerate(zip(dims, coords)):
                if getattr(coord, 'name', None) is None:
                    coord = as_variable(coord, key=dim).to_coord()
                dims[n] = coord.name
    else:
        # dims were supplied explicitly: validate them.
        for d in dims:
            if not isinstance(d, basestring):
                raise TypeError('dimension %s is not a string' % d)
        if coords is not None and not utils.is_dict_like(coords):
            # ensure coordinates have the right dimensions
            coords = [Coordinate(dim, coord, getattr(coord, 'attrs', {}))
                      for dim, coord in zip(dims, coords)]
    if coords is None:
        coords = {}
    elif not utils.is_dict_like(coords):
        # Pair positional coordinates with their dimension names.
        coords = OrderedDict(zip(dims, coords))
    return coords, dims
class _LocIndexer(object):
    """Label-based indexer backing ``DataArray.loc``.

    Translates label keys (scalar, tuple or dict) into positional keys and
    delegates to the wrapped DataArray's positional indexing.
    """
    def __init__(self, data_array):
        self.data_array = data_array
    def _remap_key(self, key):
        """Convert a label-based key into positional indexers, per dimension."""
        def lookup_positions(dim, labels):
            index = self.data_array.indexes[dim]
            return indexing.convert_label_indexer(index, labels)
        if utils.is_dict_like(key):
            return dict((dim, lookup_positions(dim, labels))
                        for dim, labels in iteritems(key))
        else:
            # expand the indexer so we can handle Ellipsis
            key = indexing.expanded_indexer(key, self.data_array.ndim)
            return tuple(lookup_positions(dim, labels) for dim, labels
                         in zip(self.data_array.dims, key))
    def __getitem__(self, key):
        return self.data_array[self._remap_key(key)]
    def __setitem__(self, key, value):
        self.data_array[self._remap_key(key)] = value
class DataArray(AbstractArray, BaseDataObject):
"""N-dimensional array with labeled coordinates and dimensions.
DataArray provides a wrapper around numpy ndarrays that uses labeled
    dimensions and coordinates to support metadata aware operations. The API is
similar to that for the pandas Series or DataFrame, but DataArray objects
can have any number of dimensions, and their contents have fixed data
types.
Additional features over raw numpy arrays:
- Apply operations over dimensions by name: ``x.sum('time')``.
- Select or assign values by integer location (like numpy): ``x[:10]``
or by label (like pandas): ``x.loc['2014-01-01']`` or
``x.sel(time='2014-01-01')``.
- Mathematical operations (e.g., ``x - y``) vectorize across multiple
dimensions (known in numpy as "broadcasting") based on dimension names,
regardless of their original order.
- Keep track of arbitrary metadata in the form of a Python dictionary:
``x.attrs``
- Convert to a pandas Series: ``x.to_series()``.
Getting items from or doing mathematical operations with a DataArray
always returns another DataArray.
Attributes
----------
dims : tuple
Dimension names associated with this array.
values : np.ndarray
Access or modify DataArray values as a numpy array.
coords : dict-like
Dictionary of Coordinate objects that label values along each dimension.
name : str or None
Name of this array.
attrs : OrderedDict
Dictionary for holding arbitrary metadata.
"""
groupby_cls = groupby.DataArrayGroupBy
def __init__(self, data, coords=None, dims=None, name=None,
attrs=None, encoding=None):
"""
Parameters
----------
data : array_like
Values for this array. Must be an ``numpy.ndarray``, ndarray like,
or castable to an ``ndarray``. If a self-described xray or pandas
object, attempts are made to use this array's metadata to fill in
other unspecified arguments. A view of the array's data is used
instead of a copy if possible.
coords : sequence or dict of array_like objects, optional
Coordinates (tick labels) to use for indexing along each dimension.
If dict-like, should be a mapping from dimension names to the
corresponding coordinates.
dims : str or sequence of str, optional
Name(s) of the the data dimension(s). Must be either a string (only
for 1D data) or a sequence of strings with length equal to the
number of dimensions. If this argument is omited, dimension names
are taken from ``coords`` (if possible) and otherwise default to
``['dim_0', ... 'dim_n']``.
name : str or None, optional
Name of this array.
attrs : dict_like or None, optional
Attributes to assign to the new variable. By default, an empty
attribute dictionary is initialized.
encoding : dict_like or None, optional
Dictionary specifying how to encode this array's data into a
serialized format like netCDF4. Currently used keys (for netCDF)
include '_FillValue', 'scale_factor', 'add_offset', 'dtype',
'units' and 'calendar' (the later two only for datetime arrays).
Unrecognized keys are ignored.
"""
# try to fill in arguments from data if they weren't supplied
if coords is None:
coords = getattr(data, 'coords', None)
if isinstance(data, pd.Series):
coords = [data.index]
elif isinstance(data, pd.DataFrame):
coords = [data.index, data.columns]
elif isinstance(data, (pd.Index, variable.Coordinate)):
coords = [data]
elif isinstance(data, pd.Panel):
coords = [data.items, data.major_axis, data.minor_axis]
if dims is None:
dims = getattr(data, 'dims', getattr(coords, 'dims', None))
if name is None:
name = getattr(data, 'name', None)
if attrs is None:
attrs = getattr(data, 'attrs', None)
if encoding is None:
encoding = getattr(data, 'encoding', None)
data = _as_compatible_data(data)
coords, dims = _infer_coords_and_dims(data.shape, coords, dims)
dataset = Dataset(coords=coords)
# insert data afterwards in case of redundant coords/data
dataset[name] = (dims, data, attrs, encoding)
for k, v in iteritems(dataset.coords):
if any(d not in dims for d in v.dims):
raise ValueError('coordinate %s has dimensions %s, but these '
'are not a subset of the DataArray '
'dimensions %s' % (k, v.dims, dims))
# these fully describe a DataArray
self._dataset = dataset
self._name = name
@classmethod
def _new_from_dataset(cls, or |
codeforanchorage/collective-development | app/mod_proposal/constants.py | Python | mit | 219 | 0.004566 | # Place within a proposal lif | e cycle
# Place within a proposal life cycle
LIFE_ORIGIN = 1
LIFE_PLANNING = 2
LIFE_CLASS = 3
LIFE_FINISHED = 4
IS_COLLECTION = 100
# Source of a proposal
SOURCE_UNKNOWN = 0
SOURCE_WEBSITE = 1
SOURCE_API = 2
SOURCE_OFFLINE = 3
wontfix-org/wtf | wtf/services.py | Python | apache-2.0 | 6,365 | 0.000314 | # -*- coding: ascii -*-
#
# Copyright 2006-2012
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Service Loading and Initialization
==================================
This module provides for service loading and initialization.
"""
__author__ = u"Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
from wtf import Error, WtfWarning
from wtf import util as _util
class ServiceError(Error):
    """ Service initialization failure (e.g. a global service name clash) """
class ServiceInterfaceWarning(WtfWarning):
    """ Warning emitted when a service does not fully implement the interface """
class ServiceInterface(object):
    """
    Interface for global and local services, initialized at startup time
    """
    def __init__(self, config, opts, args):
        """
        Initialization
        :Parameters:
        - `config`: Configuration
        - `opts`: Command line options
        - `args`: Positioned command line arguments
        :Types:
        - `config`: `wtf.config.Config`
        - `opts`: ``optparse.OptionContainer``
        - `args`: ``list``
        """
    def shutdown(self):
        """
        Shutdown the service
        This method is called when the services are no longer needed.
        It can be used to release external resources etc in a clean way.
        """
    def global_service(self):
        """
        Return the global service object
        If there's no global service provided, the method is expected to
        return ``None``
        :return: A tuple containing the global object the service provides
                 and the name which the object will be stored under in the
                 service module (``('name', any)``)
        :rtype: ``tuple``
        """
    def middleware(self, func):
        """
        Middleware factory
        :Parameters:
        - `func`: The function to wrap (WSGI compatible callable)
        :Types:
        - `func`: ``callable``
        :return: A WSGI callable. If the service does not
                 provide a WSGI middleware, the `func` argument should just
                 be returned, the initialized middleware (wrapping `func`)
                 otherwise.
        :rtype: ``callable``
        """
class ServiceManager(object):
    """
    Service manager
    :IVariables:
    - `_finalized`: Manager was finalized
    - `_down`: Manager was shut down
    - `_services`: List of services
    :Types:
    - `_finalized`: ``bool``
    - `_down`: ``bool``
    - `_services`: ``list``
    """
    _finalized, _down, _services = False, False, ()
    def __init__(self):
        """ Initialization """
        self._services = []
    def __del__(self):
        """ Destruction """
        self.shutdown()
    def finalize(self):
        """ Lock the manager. No more services can be added """
        # Reversing makes the first-registered service the OUTERMOST
        # middleware once apply() wraps them in list order.
        self._services.reverse()
        self._finalized = True
    def add(self, service):
        """ Add a new service """
        assert not self._finalized, "ServiceManager was already finalized"
        self._services.append(service)
    def apply(self, app):
        """
        Apply the middlewares to the application
        :Parameters:
        - `app`: The WSGI application callable to wrap
        :Types:
        - `app`: ``callable``
        :return: Wrapped application (if there are middlewares to apply, the
                 original callable otherwise)
        :rtype: ``callable``
        """
        assert self._finalized, "ServiceManager was not finalized yet"
        assert not self._down, "ServiceManager was already shutdown"
        for service in self._services:
            app = service.middleware(app)
        return app
    def shutdown(self):
        """ Shutdown the services """
        self._down = True
        # Swap the list out first so shutdown is re-entrant / idempotent.
        services, self._services = self._services, []
        for service in services:
            try:
                func = service.shutdown
            except AttributeError:
                ServiceInterfaceWarning.emit(
                    "Missing 'shutdown' method for service %r" % (service,)
                )
            else:
                func()
def init(config, opts, args, services, module='__svc__'):
    """
    Initialize services
    The function can only be called once (because the module will be only
    initialized once)
    :Parameters:
    - `config`: Configuration
    - `opts`: Command line options
    - `args`: Positioned command line arguments
    - `services`: List of services to initialize. The list items can either
      be classes (which are instanciated) or strings containing dotted class
      names (which will be loaded and instanciated). Service classes must
      implement the `ServiceInterface`.
    - `module`: Dotted module name, where global services are put into
    :Types:
    - `config`: `wtf.config.Config`
    - `opts`: ``optparse.OptionContainer``
    - `args`: ``list``
    - `services`: ``iterable``
    - `module`: ``str``
    :return: Service manager
    :rtype: `ServiceManager`
    """
    # make_dotted creates the container module; fresh is False if it existed.
    _, fresh = _util.make_dotted(module)
    assert fresh, "Services already initialized"
    module, manager = module.split('.'), ServiceManager()
    for service in services:
        if isinstance(service, basestring):
            service = _util.load_dotted(str(service))
        service = service(config, opts, args)
        manager.add(service)
        svc = service.global_service()
        if svc is not None:
            name, svc = svc
            name = module + name.split('.')
            if len(name) > 1:
                (prename, _), name = _util.make_dotted(
                    '.'.join(name[:-1])), name[-1]
            # NOTE(review): if len(name) == 1, `prename` is never assigned and
            # the getattr below raises NameError — confirm whether single-part
            # names can occur here.
            if getattr(prename, name, None) is not None:
                raise ServiceError("%s.%s already exists" % (prename, name))
            setattr(prename, name, svc)
    manager.finalize()
    return manager
|
ecreall/lagendacommun | lac/content/artist.py | Python | agpl-3.0 | 7,788 | 0.000257 | # Copyright (c) 2016 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import colander
import deform
import hashlib
from functools import reduce
from zope.interface import implementer
from substanced.schema import NameSchemaNode
from substanced.content import content
from substanced.util import get_oid
from dace.descriptors import (
CompositeUniqueProperty,
SharedUniqueProperty,
SharedMultipleProperty)
from dace.util import getSite
from pontus.core import VisualisableElementSchema
from pontus.widget import (
FileWidget
)
from pontus.file import ObjectData
from pontus.form import FileUploadTempStore
from lac import _
from lac.core import (
VisualisableElement,
SearchableEntity,
SearchableEntitySchema,
DuplicableEntity,
ParticipativeEntity)
from lac.content.interface import IArtistInformationSheet
from lac.file import Image
from lac.views.widget import RichTextWidget
from lac.utilities.duplicates_utility import (
find_duplicates_artist)
@colander.deferred
def picture_widget(node, kw):
    """Deferred factory returning the image FileWidget for the bound context."""
    bindings = node.bindings
    request = bindings['request']
    context = bindings['context']
    # Only an existing artist sheet (not the site root) can carry a picture.
    existing_picture = None
    if context is not getSite() and context.picture:
        existing_picture = context.picture
    return FileWidget(tmpstore=FileUploadTempStore(request),
                      source=existing_picture,
                      file_type=['image'])
def context_is_a_artist(context, request):
    """Schema predicate: True when the edited content object is an artist sheet."""
    return request.registry.content.istype(context, 'artist')
class ArtistInformationSheetSchema(VisualisableElementSchema, SearchableEntitySchema):
    """Colander schema for editing an artist information sheet."""
    name = NameSchemaNode(
        editing=context_is_a_artist,
        )
    # Hidden fields: populated programmatically, not edited by the user.
    id = colander.SchemaNode(
        colander.String(),
        widget=deform.widget.HiddenWidget(),
        title=_('Id'),
        missing=""
        )
    title = colander.SchemaNode(
        colander.String(),
        widget=deform.widget.HiddenWidget(),
        title=_('Title')
        )
    description = colander.SchemaNode(
        colander.String(),
        widget=deform.widget.TextAreaWidget(rows=4, cols=60),
        title=_('Description'),
        missing=""
        )
    biography = colander.SchemaNode(
        colander.String(),
        widget=RichTextWidget(),
        title=_("Biography"),
        missing=""
        )
    picture = colander.SchemaNode(
        ObjectData(Image),
        widget=picture_widget,
        title=_('Picture'),
        required=False,
        missing=None,
        )
    is_director = colander.SchemaNode(
        colander.Boolean(),
        widget=deform.widget.CheckboxWidget(),
        label=_('Is a director'),
        title='',
        default=False,
        missing=False
        )
    # OID of the artist this sheet was duplicated from (0 = none).
    origin_oid = colander.SchemaNode(
        colander.Int(),
        widget=deform.widget.HiddenWidget(),
        title=_('OID'),
        missing=0
        )
def get_artist_data(artists, artist_schema):
    """Serialize each artist through *artist_schema*, flattening the picture.

    When an artist has a picture object, it is replaced by that object's own
    serialized form (``picture.get_data(None)``).
    """
    serialized = []
    for sheet in artists:
        data = sheet.get_data(artist_schema)
        picture = data['picture']
        if picture:
            data['picture'] = picture.get_data(None)
        serialized.append(data)
    return serialized
@content(
    'artist',
    icon='glyphicon glyphicon-align-left',
    )
@implementer(IArtistInformationSheet)
class ArtistInformationSheet(VisualisableElement, DuplicableEntity,
                             ParticipativeEntity, SearchableEntity):
    """Artist information sheet content type.

    Tracks an MD5 fingerprint of its picture and of its textual data so
    duplicate sheets can be detected and merged (see replace_by/get_duplicates).
    """
    type_title = _('Artist information sheet')
    icon = 'glyphicon glyphicon-star'
    templates = {'default': 'lac:views/templates/artist_result.pt',
                 'bloc': 'lac:views/templates/artist_result.pt',
                 'diff': 'lac:views/templates/diff_artist_template.pt',
                 'duplicates': 'lac:views/templates/artist_duplicates.pt'}
    picture = CompositeUniqueProperty('picture')
    author = SharedUniqueProperty('author', 'contents')
    creations = SharedMultipleProperty('creations', 'artists')
    productions = SharedMultipleProperty('productions', 'artists')
    def __init__(self, **kwargs):
        super(ArtistInformationSheet, self).__init__(**kwargs)
        self.hash_picture = None
        self.hash_artist = None
        # Fingerprints must be computed in this order: the data hash
        # incorporates the picture hash.
        self.hash_picture_fp()
        self.hash_artist_data()
    @property
    def id(self):
        return self.get_id()
    def hash_picture_fp(self):
        """Store an MD5 digest of the picture bytes (None when no picture)."""
        if self.picture:
            m = hashlib.md5()
            picture_r = self.picture.fp.readall()
            # Rewind so later readers see the full stream again.
            self.picture.fp.seek(0)
            m.update(picture_r)
            self.hash_picture = m.digest()
        else:
            self.hash_picture = None
    @property
    def related_contents(self):
        """Creations followed by productions, as one list."""
        result = list(self.creations)
        result.extend(list(self.productions))
        return result
    @property
    def improved_artist(self):
        """The sheet this one was branched from, or None."""
        original = getattr(self, 'original', None)
        return original if original is not self else None
    def get_id(self):
        return str(get_oid(self, 0))
    def replace_by(self, source):
        """Merge this sheet into *source* and empty this one.

        Moves creations, productions, connections, branches, the original
        pointer and contributors over to *source*. Returns True when a merge
        happened, False when *source* is this very sheet.
        """
        if self is not source:
            creations = source.creations
            productions = source.productions
            connections_to = source.connections_to
            for creation in self.creations:
                if creation not in creations:
                    source.addtoproperty('creations', creation)
                    creation.reindex()
            self.setproperty('creations', [])
            for production in self.productions:
                if production not in productions:
                    source.addtoproperty('productions', production)
                    production.reindex()
            self.setproperty('productions', [])
            for connection in self.connections_to:
                if connection not in connections_to:
                    source.addtoproperty('connections_to', connection)
            self.setproperty('connections_to', [])
            for branch in self.branches:
                source.addtoproperty('branches', branch)
            original = self.original
            if original and original is not source:
                source.setproperty('original', original)
            self.setproperty('original', None)
            source.add_contributors(self.contributors)
            self.setproperty('branches', [])
            return True
        return False
    def reject(self):
        """Discard this branch by folding it back into its original."""
        original = self.original
        if original:
            self.replace_by(original)
    def hash_artist_data(self):
        """Store an MD5 digest of title/description/biography/director flag/picture hash."""
        result = self.title
        result += getattr(self, 'description', '')
        result += getattr(self, 'biography', '')
        result += str(getattr(self, 'is_director', False))
        result += str(self.hash_picture)
        # Whitespace-insensitive fingerprint.
        result = result.replace(' ', '').strip()
        m = hashlib.md5()
        m.update(result.encode())
        self.hash_artist = m.digest()
    def eq(self, other):
        """True when both sheets carry the same data fingerprint."""
        hash_artist = getattr(self, 'hash_artist', None)
        other_hash_artist = getattr(other, 'hash_artist', None)
        return hash_artist == other_hash_artist
    def get_more_contents_criteria(self):
        "return specific query, filter values"
        # All artists appearing on this artist's published creations.
        artists = reduce(lambda result, x: result + getattr(x, 'artists', []),
                       filter(lambda x: 'published' in x.state, self.creations), [])
        artists = filter(lambda x: 'published' in x.state, artists)
        return None, {'objects': set(artists)}
    def get_duplicates(self, states=('published', )):
        return find_duplicates_artist(self, states)
|
babbel/floto | tests/unit/specs/task/test_child_workflow.py | Python | mit | 2,677 | 0.00635 | import pytest
import floto.specs.task
@pytest.fixture
def child_workflow():
    """Fully-populated ChildWorkflow spec used across the tests below."""
    retry_strategy = floto.specs.retry_strategy.Strategy()
    cw = floto.specs.task.ChildWorkflow(workflow_type_name='wft_name',
                                        workflow_type_version='wft_version',
                                        id_='wid',
                                        domain='d',
                                        requires=['r_id'],
                                        input={'foo': 'bar'},
                                        retry_strategy=retry_strategy)
    return cw
@pytest.fixture
def serialized_child_workflow():
    """Expected serialized form of the `child_workflow` fixture."""
    return {'domain': 'd',
            'input': {'foo': 'bar'},
            'id_': 'wid',
            'requires': ['r_id'],
            'workflow_type_version': 'wft_version',
            'retry_strategy': {'type': 'floto.specs.retry_strategy.Strategy'},
            'workflow_type_name': 'wft_name',
            'type': 'floto.specs.task.ChildWorkflow'}
class TestChildWorkflow:
    """Unit tests for floto.specs.task.ChildWorkflow construction and (de)serialization."""
    def test_init(self, child_workflow):
        assert child_workflow.workflow_type_name == 'wft_name'
        assert child_workflow.workflow_type_version == 'wft_version'
        assert child_workflow.id_ == 'wid'
        assert child_workflow.requires == ['r_id']
        assert child_workflow.input == {'foo': 'bar'}
        assert child_workflow.retry_strategy
    def test_default_workflow_id(self, mocker):
        # When no id_ is given, ChildWorkflow derives one via Task._default_id.
        mocker.patch('floto.specs.task.Task._default_id', return_value='did')
        cw = floto.specs.task.ChildWorkflow(workflow_type_name='wft_name',
                                            workflow_type_version='wft_version', domain='d',
                                            input={'foo': 'bar'})
        floto.specs.task.Task._default_id.assert_called_once_with(name='wft_name',
                                                                  version='wft_version',
                                                                  domain='d',
                                                                  input={'foo': 'bar'})
        assert cw.id_ == 'did'
    def test_deserialized(self, serialized_child_workflow):
        cw = floto.specs.task.ChildWorkflow.deserialized(**serialized_child_workflow)
        assert isinstance(cw, floto.specs.task.ChildWorkflow)
        assert isinstance(cw.retry_strategy, floto.specs.retry_strategy.Strategy)
        assert cw.id_ == 'wid'
        assert cw.requires == ['r_id']
    def test_serializable(self, child_workflow, serialized_child_workflow):
        # Round-trip: serializing the fixture must match the expected dict.
        serializable = child_workflow.serializable()
        assert serializable == serialized_child_workflow
|
faithsws/WWR | WeixinPlugin/WeixinPlugin.py | Python | mit | 3,338 | 0.042241 | '''
Created on 2014-9-3
@author: songwensheng
'''
import WeixinIF
from lxml import etree
import sys
import os
import web
import time
import tts
class WxTextPlugin(WeixinIF.TextPlugin):
    # Text-message plugin: records the sender/receiver pair and the
    # web.py template renderer used to build reply XML.
    def __init__(self,xml,ctx,usr):
        WeixinIF.TextPlugin.__init__(self, xml, ctx)
        self.FromUser = usr
        # Replies go back from the service account (openID) to the user.
        self.ToUser = ctx.openID
        self.render = web.template.render('WeixinPlugin/templates')
class InitState(WeixinIF.State):
    """Default conversation state: replies via the InitState reply template."""
    def __init__(self,plugin,tips):
        WeixinIF.State.__init__(self,plugin,tips)
    def Enter(self,plugin,text):
        # Rendered when the state machine transitions into this state.
        return plugin.render.reply_InitState(plugin,"enter "+text,int(time.time()))
    def Process(self,plugin,text):
        return plugin.render.reply_InitState(plugin,text,int(time.time()))
    def Leave(self,plugin,text):
        return plugin.render.reply_InitState(plugin,"leave "+text,int(time.time()))
class TTSState(WeixinIF.State):
    """Text-to-speech state: converts incoming text to an AAC clip via tts.GetAac."""
    def __init__(self, plugin, tips):
        WeixinIF.State.__init__(self, plugin, tips)
    def Enter(self, plugin, text):
        return plugin.render.reply_FirstState(plugin, "enter " + text + " Mode", int(time.time()))
    def Process(self, plugin, text):
        # GetAac synthesizes the text and returns the generated file name.
        aac_name = tts.GetAac(text)
        # NOTE(review): a dedicated audio reply template exists but is
        # disabled; the file name is currently sent back as plain text:
        # xml = plugin.render.reply_TTS(plugin, text, "http://mc.faithsws.com/aac/" + aac_name, int(time.time()))
        return plugin.render.reply_FirstState(plugin, aac_name, int(time.time()))
    def Leave(self, plugin, text):
        return plugin.render.reply_FirstState(plugin, "leave " + text, int(time.time()))
class SecondState(WeixinIF.State):
    """Conversation state replying via the SecondState reply template."""
    def __init__(self,plugin,tips):
        WeixinIF.State.__init__(self,plugin,tips)
    def Enter(self,plugin,text):
        return plugin.render.reply_SecondState(plugin,"enter "+text,int(time.time()))
    def Process(self,plugin,text):
        return plugin.render.reply_SecondState(plugin,text,int(time.time()))
    def Leave(self,plugin,text):
        return plugin.render.reply_SecondState(plugin,"leave "+text,int(time.time()))
class ThirdState(WeixinIF.State):
    """Conversation state replying via the ThirdState reply template."""
    def __init__(self,plugin,tips):
        WeixinIF.State.__init__(self,plugin,tips)
    def Enter(self,plugin,text):
        return plugin.render.reply_ThirdState(plugin,"enter "+text,int(time.time()))
    def Process(self,plugin,text):
        return plugin.render.reply_ThirdState(plugin,text,int(time.time()))
    def Leave(self,plugin,text):
        return plugin.render.reply_ThirdState(plugin,"leave "+text,int(time.time()))
class FourthState(WeixinIF.State):
    """Conversation state replying via the FourthState reply template."""
    def __init__(self,plugin,tips):
        WeixinIF.State.__init__(self,plugin,tips)
    def Enter(self,plugin,text):
        return plugin.render.reply_FourthState(plugin,"enter "+text,int(time.time()))
    def Process(self,plugin,text):
        return plugin.render.reply_FourthState(plugin,text,int(time.time()))
    def Leave(self,plugin,text):
        return plugin.render.reply_FourthState(plugin,"leave "+text,int(time.time()))
class WxEventPlugin(WeixinIF.EventPlugin):
    # Event plugin for subscribe/unsubscribe/menu-click events.
    # NOTE(review): the handlers return the literal source text
    # "self.messages[...]" rather than evaluating it — these look like
    # placeholder/debug stubs; confirm before relying on them.
    def __init__(self,ctx):
        WeixinIF.EventPlugin.__init__(self, ctx)
    def OnSubscribe(self,usr,key):
        return "self.messages['subscribe'][key]";
    def OnUnsubscribe(self,usr,key):
        return "self.messages['unsubscribe'][key]";
    def OnClick(self,usr,key):
        return "self.messages['click'][key]";
|
phil65/script.home | TrailerWindow.py | Python | gpl-2.0 | 2,625 | 0.001524 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Philipp Temminghoff (philipptemminghoff@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import xbmcaddon
import xbmcgui
from Utils import *
__addon__ = xbmcaddon.Addon()
__addonid__ = __addon__.getAddonInfo('id')
__language__ = __addon__.getLocalizedString
__addonpath__ = __addon__.getAddonInfo('path')
# Kodi control id and action-id groups consumed by TrailerWindow.onAction.
# NOTE(review): values presumably mirror xbmcgui ACTION_* ids — confirm
# against the Kodi key map.
CONTROL_SLIDER = 101
ACTION_CONTEXT_MENU = [117]
ACTION_OSD = [122]
ACTION_PREVIOUS_MENU = [9, 92, 10]
ACTION_SHOW_INFO = [11]
ACTION_EXIT_SCRIPT = [13]
ACTION_DOWN = [4]
ACTION_UP = [3]
ACTION_LEFT = [1]
ACTION_RIGHT = [2]
ACTION_0 = [58, 18]
ACTION_PLAY = [79]
ACTION_SELECT_ITEM = [7]
# Control ids of the trailer list containers in the skin XML.
LISTS = [8001, 8002, 8003, 8004, 8005, 8006, 8007]
class TrailerWindow(xbmcgui.WindowXML):
    """Kodi skin window showing trailer lists.

    NOTE(review): __init__ deliberately does not call the base __init__ —
    xbmcgui.WindowXML consumes its constructor arguments natively; confirm
    this matches the targeted Kodi API version.
    """
    def __init__(self, skin_file, addon_path):
        log('__init__')
    def onInit(self, startGUI=True):
        pass
    def onAction(self, action):
        # Dispatch on the Kodi action id (see ACTION_* groups above).
        action_id = action.getId()
        if action_id in ACTION_PREVIOUS_MENU:
            self.close()
        elif action_id in ACTION_SHOW_INFO:
            focusedcontrol = self.getFocusId()
            MoveProperties(focusedcontrol, focusedcontrol)
            # Open the extendedinfo dialog for the focused movie.
            movieid = xbmc.getInfoLabel("Container(%i).ListItem.Property(ID)" % focusedcontrol)
            builtin = "RunScript(script.extendedinfo,info=extendedinfo,id=%s,imdbid=%s)" % (movieid, xbmc.getInfoLabel("Window(home).Property(imdbid)"))
            xbmc.executebuiltin(builtin)
        # elif action_id in ACTION_LEFT:
        #     for controlnumber in LISTS:
        #         if controlnumber != focusedcontrol:
        #             xbmc.executebuiltin("Control.Move(%i,-1)" % (controlnumber))
        # elif action_id in ACTION_RIGHT:
        #     for controlnumber in LISTS:
        #         if controlnumber != focusedcontrol:
        #             xbmc.executebuiltin("Control.Move(%i,1)" % (controlnumber))
    def onClick(self, controlId):
        pass
    def onFocus(self, controlId):
        pass
|
sgp715/simple_image_annotator | app.py | Python | mit | 3,289 | 0.008209 | import sys
from os import walk
import imghdr
import csv
import argparse
from flask import Flask, redirect, url_for, request
from flask import render_template
from flask import send_file
app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
@app.route('/tagger')
def tagger():
    """Render the tagging page for the current image, or finish when done."""
    # All images processed: show the goodbye page.
    if (app.config["HEAD"] == len(app.config["FILES"])):
        return redirect(url_for('bye'))
    directory = app.config['IMAGES']
    image = app.config["FILES"][app.config["HEAD"]]
    labels = app.config["LABELS"]
    # True while at least one more image follows the current one.
    not_end = not(app.config["HEAD"] == len(app.config["FILES"]) - 1)
    print(not_end)  # NOTE(review): leftover debug print
    return render_template('tagger.html', not_end=not_end, directory=directory, image=image, labels=labels, head=app.config["HEAD"] + 1, len=len(app.config["FILES"]))
@app.route('/next')
def next():
    """Append the current image's boxes to the CSV and advance to the next image."""
    image = app.config["FILES"][app.config["HEAD"]]
    app.config["HEAD"] = app.config["HEAD"] + 1
    # One CSV row per bounding box: image,id,name,xMin,xMax,yMin,yMax
    with open(app.config["OUT"],'a') as f:
        for label in app.config["LABELS"]:
            f.write(image + "," +
                    label["id"] + "," +
                    label["name"] + "," +
                    str(round(float(label["xMin"]))) + "," +
                    str(round(float(label["xMax"]))) + "," +
                    str(round(float(label["yMin"]))) + "," +
                    str(round(float(label["yMax"]))) + "\n")
    app.config["LABELS"] = []  # reset boxes for the next image
    return redirect(url_for('tagger'))
@app.route("/bye")
def bye():
    """Completion page shown after every image has been tagged."""
    return send_file("taf.gif", mimetype='image/gif')
@app.route('/add/<id>')
def add(id):
    """Register a new (unnamed) bounding box for the current image."""
    # Box coordinates come from the client-side drawing code as query args.
    xMin = request.args.get("xMin")
    xMax = request.args.get("xMax")
    yMin = request.args.get("yMin")
    yMax = request.args.get("yMax")
    # `id` is the 1-based index the client assigned to this box.
    app.config["LABELS"].append({"id":id, "name":"", "xMin":xMin, "xMax":xMax, "yMin":yMin, "yMax":yMax})
    return redirect(url_for('tagger'))
@app.route('/remove/<id>')
def remove(id):
    """Delete bounding box *id* and renumber the remaining boxes."""
    index = int(id) - 1
    del app.config["LABELS"][index]
    # Keep ids contiguous: shift every later box down by one.
    for label in app.config["LABELS"][index:]:
        label["id"] = str(int(label["id"]) - 1)
    return redirect(url_for('tagger'))
@app.route('/label/<id>')
def label(id):
    """Set the class name of bounding box *id* from the query string."""
    name = request.args.get("name")
    app.config["LABELS"][int(id) - 1]["name"] = name
    return redirect(url_for('tagger'))
# @app.route('/prev')
# def prev():
# app.config["HEAD"] = app.config["HEAD"] - 1
# return redirect(url_for('tagger'))
@app.route('/image/<f>')
def images(f):
    """Serve image file *f* from the configured images directory."""
    images = app.config['IMAGES']
    # Paths are joined by string concatenation; IMAGES always ends with "/".
    return send_file(images + f)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('dir', type=str, help='specify the images directory')
    parser.add_argument("--out")
    args = parser.parse_args()
    # Normalize: image URLs are built by plain concatenation, so the
    # directory must end with a separator.
    directory = args.dir
    if not directory.endswith("/"):
        directory += "/"
    app.config["IMAGES"] = directory
    app.config["LABELS"] = []
    # Collect files from the top level of the image directory only.
    files = None
    for (dirpath, dirnames, filenames) in walk(app.config["IMAGES"]):
        files = filenames
        break
    if files is None:
        print("No files")
        exit()
    app.config["FILES"] = files
    app.config["HEAD"] = 0  # index of the image currently being tagged
    if args.out is None:
        app.config["OUT"] = "out.csv"
    else:
        app.config["OUT"] = args.out
    print(files)
    # BUG FIX: the CSV header was always written to the hard-coded
    # "out.csv", ignoring --out; write it to the configured output file.
    with open(app.config["OUT"], 'w') as f:
        f.write("image,id,name,xMin,xMax,yMin,yMax\n")
    app.run(debug="True")
|
tiagochiavericosta/edx-platform | openedx/core/djangoapps/credit/models.py | Python | agpl-3.0 | 24,095 | 0.001494 | # -*- coding: utf-8 -*-
"""
Models for Credit Eligibility for courses.
Credit courses allow students to receive university credit for
successful completion of a course on EdX
"""
import datetime
from collections import defaultdict
import logging

import pytz
from django.conf import settings
from django.core.cache import cache
from django.dispatch import receiver
from django.db import models, transaction, IntegrityError
from django.core.validators import RegexValidator
from simple_history.models import HistoricalRecords
from jsonfield.fields import JSONField
from model_utils.models import TimeStampedModel
from xmodule_django.models import CourseKeyField
from django.utils.translation import ugettext_lazy

# Module-level logger.
log = logging.getLogger(__name__)
class CreditProvider(TimeStampedModel):
    """
    This model represents an institution that can grant credit for a course.
    Each provider is identified by unique ID (e.g., 'ASU'). CreditProvider also
    includes a `url` where the student will be sent when he/she will try to
    get credit for course. Eligibility duration will be used to set duration
    for which credit eligible message appears on dashboard.
    """
    provider_id = models.CharField(
        max_length=255,
        unique=True,
        validators=[
            RegexValidator(
                regex=r"^[a-z,A-Z,0-9,\-]+$",
                message="Only alphanumeric characters and hyphens (-) are allowed",
                code="invalid_provider_id",
            )
        ],
        help_text=ugettext_lazy(
            "Unique identifier for this credit provider. "
            "Only alphanumeric characters and hyphens (-) are allowed. "
            "The identifier is case-sensitive."
        )
    )
    active = models.BooleanField(
        default=True,
        help_text=ugettext_lazy("Whether the credit provider is currently enabled.")
    )
    display_name = models.CharField(
        max_length=255,
        help_text=ugettext_lazy("Name of the credit provider displayed to users")
    )
    enable_integration = models.BooleanField(
        default=False,
        help_text=ugettext_lazy(
            "When true, automatically notify the credit provider "
            "when a user requests credit. "
            "In order for this to work, a shared secret key MUST be configured "
            "for the credit provider in secure auth settings."
        )
    )
    provider_url = models.URLField(
        default="",
        help_text=ugettext_lazy(
            "URL of the credit provider.  If automatic integration is "
            "enabled, this will the the end-point that we POST to "
            "to notify the provider of a credit request.  Otherwise, the "
            "user will be shown a link to this URL, so the user can "
            "request credit from the provider directly."
        )
    )
    provider_status_url = models.URLField(
        default="",
        help_text=ugettext_lazy(
            "URL from the credit provider where the user can check the status "
            "of his or her request for credit.  This is displayed to students "
            "*after* they have requested credit."
        )
    )
    provider_description = models.TextField(
        default="",
        help_text=ugettext_lazy(
            "Description for the credit provider displayed to users."
        )
    )
    fulfillment_instructions = models.TextField(
        null=True,
        blank=True,
        help_text=ugettext_lazy(
            "Plain text or html content for displaying further steps on "
            "receipt page *after* paying for the credit to get credit for a "
            "credit course against a credit provider."
        )
    )
    eligibility_email_message = models.TextField(
        default="",
        help_text=ugettext_lazy(
            "Plain text or html content for displaying custom message inside "
            "credit eligibility email content which is sent when user has met "
            "all credit eligibility requirements."
        )
    )
    receipt_email_message = models.TextField(
        default="",
        help_text=ugettext_lazy(
            "Plain text or html content for displaying custom message inside "
            "credit receipt email content which is sent *after* paying to get "
            "credit for a credit course."
        )
    )
    thumbnail_url = models.URLField(
        default="",
        max_length=255,
        help_text=ugettext_lazy(
            "Thumbnail image url of the credit provider."
        )
    )
    # Cache key under which the serialized list of active providers is stored.
    # Invalidated by the post_save/post_delete signal handler below.
    CREDIT_PROVIDERS_CACHE_KEY = "credit.providers.list"
    @classmethod
    def get_credit_providers(cls, providers_list=None):
        """
        Retrieve a list of all credit providers or filter on providers_list, represented
        as dictionaries.
        Arguments:
            provider_list (list of strings or None): contains list of ids if required results
            to be filtered, None for all providers.
        Returns:
            list of providers represented as dictionaries.
        """
        # Attempt to retrieve the credit provider list from the cache if provider_list is None
        # The cache key is invalidated when the provider list is updated
        # (a post-save signal handler on the CreditProvider model)
        # This doesn't happen very often, so we would expect a *very* high
        # cache hit rate.
        credit_providers = cache.get(cls.CREDIT_PROVIDERS_CACHE_KEY)
        if credit_providers is None:
            # Cache miss: construct the provider list and save it in the cache
            credit_providers = CreditProvider.objects.filter(active=True)
            credit_providers = [
                {
                    "id": provider.provider_id,
                    "display_name": provider.display_name,
                    "url": provider.provider_url,
                    "status_url": provider.provider_status_url,
                    "description": provider.provider_description,
                    "enable_integration": provider.enable_integration,
                    "fulfillment_instructions": provider.fulfillment_instructions,
                    "thumbnail_url": provider.thumbnail_url,
                }
                for provider in credit_providers
            ]
            # No timeout: the entry lives until invalidate_provider_cache()
            # deletes it on the next provider save/delete.
            cache.set(cls.CREDIT_PROVIDERS_CACHE_KEY, credit_providers)
        if providers_list:
            # Filtering is done on the cached list, not in the database.
            credit_providers = [provider for provider in credit_providers if provider['id'] in providers_list]
        return credit_providers
    @classmethod
    def get_credit_provider(cls, provider_id):
        """
        Retrieve a credit provider with provided 'provider_id'.
        Returns None when no active provider matches.
        """
        try:
            return CreditProvider.objects.get(active=True, provider_id=provider_id)
        except cls.DoesNotExist:
            return None
    def __unicode__(self):
        """Unicode representation of the credit provider. """
        return self.provider_id
@receiver(models.signals.post_save, sender=CreditProvider)
@receiver(models.signals.post_delete, sender=CreditProvider)
def invalidate_provider_cache(sender, **kwargs):  # pylint: disable=unused-argument
    """Invalidate the cache of credit providers. """
    # Any save/delete of a CreditProvider drops the cached list so that
    # CreditProvider.get_credit_providers() rebuilds it on the next call.
    cache.delete(CreditProvider.CREDIT_PROVIDERS_CACHE_KEY)
class CreditCourse(models.Model):
"""
Model for tracking a credit course.
"""
course_key = CourseKeyField(max_length=255, db_index=True, unique=True)
enabled = models.BooleanField(default=False)
CREDIT_COURSES_CACHE_KEY = "credit.courses.set"
@classmethod
def is_credit_course(cls, course_key):
"""
Check whether the course has been configured for credit.
Args:
course_key (CourseKey): Identifier of the course.
Returns:
bool: True iff this is a credit course.
"""
credit_courses = cache.get(cls.CREDIT_COURSES_CACHE_KEY)
if credit_courses is None:
credit_courses = set(
unicode(course.course_key)
for course in cls.objects.filter(enabled=True)
)
cache.set(cls.CREDIT_COURSES_CACHE_KEY, credit_courses)
|
bmz0/project0 | test/world/circle.py | Python | gpl-3.0 | 817 | 0.00612 | # encoding: utf-8
from pygame import Surface, Rect
from pygame.draw import circle as drawCircle
from pygame.sprite import Sprite
from random import randint
from test.config import *
class circle(Sprite):
    """A randomly coloured, translucent filled circle with a white outline.

    Position (x, y) is the circle centre; radius starts at ``radiusMin``
    (from test.config). Call :meth:`redraw` after changing any of them.
    """
    def __init__(self, x=0, y=0, *args):
        Sprite.__init__(self, *args)
        self.x, self.y = x, y
        self.radius = radiusMin
        # RGBA colour; alpha in [92, 208] keeps the circle translucent.
        self.color = randint(1, 255), randint(1, 255), randint(1, 255), randint(92, 208)
        self.redraw()
    def redraw(self):
        """Rebuild image/rect from the current position, radius and colour."""
        side = self.radius * 2
        self.image = Surface((side, side)).convert_alpha()
        self.image.fill((0, 0, 0, 0))  # fully transparent background
        self.rect = self.image.get_rect()
        self.rect.centerx = self.x
        self.rect.centery = self.y
        center = (self.radius, self.radius)
        # White rim, then the coloured fill 2px inside it.
        drawCircle(self.image, (255, 255, 255), center, self.radius)
        drawCircle(self.image, self.color, center, self.radius - 2)
|
odtvince/APITaxi | APITaxi/tasks/clean_geoindex.py | Python | agpl-3.0 | 1,030 | 0.005825 | from ..extensions import celery, redis_store
from ..models.taxis import Taxi
import time
from flask import current_app
@celery.task
def clean_geoindex():
    """Remove stale taxis from the redis geo-index.

    Walks every ``taxi:*`` hash with SCAN and removes from the geo-index
    the taxis whose positions were all reported more than an hour ago.
    """
    # Taxis with no position newer than this are considered inactive.
    lower_bound = int(time.time()) - 60 * 60
    cursor = 0
    first_page = True
    # SCAN terminates when it returns cursor 0; the sentinel forces at
    # least one iteration because the initial cursor is also 0.
    while first_page or cursor != 0:
        first_page = False
        cursor, keys = redis_store.scan(cursor, 'taxi:*')
        # Fetch all taxi hashes of this page in a single round-trip.
        pipe = redis_store.pipeline()
        for key in keys:
            pipe.hvals(key)
        values = pipe.execute()
        # Queue removals for taxis with no recent timestamp at all.
        pipe = redis_store.pipeline()
        for key, taxi_values in zip(keys, values):
            if any(Taxi.parse_redis(v)['timestamp'] >= lower_bound
                   for v in taxi_values):
                continue
            pipe.zrem(current_app.config['REDIS_GEOINDEX'], key)
        pipe.execute()
#Maybe it'll more efficient to delete some of the taxis in the global map, but
#if we do it we'll lose the information of when this taxis was active for the
#last time, it will be great to log it in database.
|
thtrieu/essence | src/modules/recurring.py | Python | gpl-3.0 | 2,099 | 0.008576 | from .module import Module
from src.utils import xavier, guass
import numpy as np
from .activations import *
class Recurring(Module):
    """
    Recurring is a type of module that caches intermediate
    activations in a stack during unrolling of recurrent layers.
    Its backward() is expected to be called several times during
    Back Propagation Through Time, either full or truncated.
    """
    def __init__(self, *args, **kwargs):
        # LIFO cache of per-step activations: pushed during forward
        # unrolling, popped during BPTT.
        self._stack = list()
        self._setup(*args, **kwargs)

    def _setup(self, *args, **kwargs):
        # Subclass hook for parameter initialisation.
        # BUG FIX: the original stub omitted ``self`` and silently
        # swallowed the instance into *args; behaviour for subclasses
        # (which override with a proper signature) is unchanged.
        pass

    def _push(self, *objs):
        """Push one object (or a list of several) onto the cache stack."""
        objs = list(objs)
        if len(objs) == 1:
            objs = objs[0]  # a single object is stored unwrapped
        self._stack.append(objs)

    def _pop(self):
        """Pop and return the most recently pushed entry."""
        objs = self._stack[-1]
        del self._stack[-1]
        return objs

    def flush(self):
        """Empty the cache stack, then notify the subclass via _flush()."""
        self._stack = list()
        self._flush()

    def _flush(self):
        # Subclass hook invoked after the stack has been cleared.
        pass
class gate(Recurring):
    """
    Gates are special case of Recurring modules
    It starts with a linear transformation (matmul)
    And ends with a (typically) nonlinear activation
    """
    def _setup(self, server, w_shape, bias = None,
               act_class = sigmoid, transfer = None):
        # Either `transfer` supplies pre-trained (w, b), or we initialise
        # fresh: Xavier weights plus a bias that is constant (if `bias`
        # given) or small Gaussian noise.
        if transfer is None:
            b_shape = (w_shape[-1],)
            w_init = xavier(w_shape)
            if bias is not None:
                b_init = np.ones(b_shape) * bias
            elif bias is None:
                b_init = guass(0., 1e-1, b_shape)
        else: w_init, b_init = transfer
        self._act_class = act_class
        # Trainable parameter slots managed by the variable server;
        # the second argument presumably marks them trainable — confirm.
        self._w = server.issue_var_slot(w_init, True)
        self._b = server.issue_var_slot(b_init, True)
    def forward(self, x):
        # y = act(x . W + b); cache x and the activation object so the
        # matching backward() call can use them (LIFO order for BPTT).
        linear = x.dot(self._w.val) + self._b.val
        act = self._act_class(None, None)
        self._push(x, act)
        return act.forward(linear)
    def backward(self, grad):
        # Pop the cached step matching this backward call.
        x, act = self._pop()
        linear_grad = act.backward(grad)
        self._b.set_grad(linear_grad.sum(0))  # sum over the batch axis
        self._w.set_grad(x.T.dot(linear_grad))
        return linear_grad.dot(self._w.val.T)  # gradient w.r.t. the input
FermiParadox/ipy_student_exercises | never_importer.py | Python | mit | 384 | 0.002604 | # DO NOT IMPORT ANY PROJECT MODULES HERE
# to avoid circular imports.
# DO NOT MOVE THIS FILE; IT MUST REMAIN INSIDE THE PROJECT FOLDER (check below)
import os

# WARNING: project-path assumes this file is in the project folder itself.
# Absolute path of this module file.
CURRENT_FILE_PATH = os.path.abspath(__file__)
# The project folder is the directory containing this file.
PROJECT_PATH = os.path.dirname(CURRENT_FILE_PATH)
# And its parent, one level above the project.
PROJECT_PARENT_PATH = os.path.dirname(PROJECT_PATH)
nkoech/trialscompendium | trialscompendium/search/api/views.py | Python | mit | 109 | 0 | from .globalsearch.globalsearchviews imp | ort global_search_views
global_search_views = global | _search_views()
|
callidus/PyKMIP | kmip/tests/unit/core/factories/payloads/test_request.py | Python | apache-2.0 | 6,504 | 0 | # Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from kmip.core.enums import Operation
from kmip.core.factories.payloads.request import RequestPayloadFactory
from kmip.core.messages.payloads import activate
from kmip.core.messages.payloads import create
from kmip.core.messages.payloads import create_key_pair
from kmip.core.messages.payloads import destroy
from kmip.core.messages.payloads import discover_versions
from kmip.core.messages.payloads import get
from kmip.core.messages.payloads import locate
from kmip.core.messages.payloads import query
from kmip.core.messages.payloads import rekey_key_pair
from kmip.core.messages.payloads import register
from kmip.core.messages.payloads import revoke
class TestRequestPayloadFactory(testtools.TestCase):
    """Exercise RequestPayloadFactory across every KMIP operation.

    Implemented operations must return the matching request-payload class;
    operations the factory does not support must raise NotImplementedError.
    """

    def setUp(self):
        super(TestRequestPayloadFactory, self).setUp()
        self.factory = RequestPayloadFactory()

    def tearDown(self):
        super(TestRequestPayloadFactory, self).tearDown()

    def _test_not_implemented(self, func, args):
        """Assert that ``func(args)`` raises NotImplementedError."""
        self.assertRaises(NotImplementedError, func, args)

    def _test_payload_type(self, payload, payload_type):
        """Assert that ``payload`` is an instance of ``payload_type``."""
        msg = "expected {0}, received {1}".format(payload_type, payload)
        self.assertIsInstance(payload, payload_type, msg)

    def test_create_create_payload(self):
        payload = self.factory.create(Operation.CREATE)
        self._test_payload_type(payload, create.CreateRequestPayload)

    def test_create_create_key_pair_payload(self):
        payload = self.factory.create(Operation.CREATE_KEY_PAIR)
        self._test_payload_type(
            payload, create_key_pair.CreateKeyPairRequestPayload)

    def test_create_register_payload(self):
        payload = self.factory.create(Operation.REGISTER)
        self._test_payload_type(payload, register.RegisterRequestPayload)

    def test_create_rekey_payload(self):
        self._test_not_implemented(
            self.factory.create, Operation.REKEY)

    def test_create_derive_key_payload(self):
        self._test_not_implemented(
            self.factory.create, Operation.DERIVE_KEY)

    def test_create_certify_payload(self):
        self._test_not_implemented(
            self.factory.create, Operation.CERTIFY)

    def test_create_recertify_payload(self):
        self._test_not_implemented(
            self.factory.create, Operation.RECERTIFY)

    def test_create_locate_payload(self):
        payload = self.factory.create(Operation.LOCATE)
        self._test_payload_type(payload, locate.LocateRequestPayload)

    def test_create_check_payload(self):
        self._test_not_implemented(
            self.factory.create, Operation.CHECK)

    def test_create_get_payload(self):
        payload = self.factory.create(Operation.GET)
        self._test_payload_type(payload, get.GetRequestPayload)

    def test_create_get_attributes_payload(self):
        self._test_not_implemented(
            self.factory.create, Operation.GET_ATTRIBUTES)

    def test_create_get_attributes_list_payload(self):
        self._test_not_implemented(
            self.factory.create, Operation.GET_ATTRIBUTE_LIST)

    def test_create_add_attribute_payload(self):
        self._test_not_implemented(
            self.factory.create, Operation.ADD_ATTRIBUTE)

    def test_create_modify_attribute_payload(self):
        self._test_not_implemented(
            self.factory.create, Operation.MODIFY_ATTRIBUTE)

    def test_create_delete_attribute_payload(self):
        self._test_not_implemented(
            self.factory.create, Operation.DELETE_ATTRIBUTE)

    def test_create_obtain_lease_payload(self):
        self._test_not_implemented(
            self.factory.create, Operation.OBTAIN_LEASE)

    def test_create_get_usage_allocation_payload(self):
        self._test_not_implemented(
            self.factory.create, Operation.GET_USAGE_ALLOCATION)

    def test_create_activate_payload(self):
        payload = self.factory.create(Operation.ACTIVATE)
        self._test_payload_type(payload, activate.ActivateRequestPayload)

    def test_create_revoke_payload(self):
        payload = self.factory.create(Operation.REVOKE)
        self._test_payload_type(payload, revoke.RevokeRequestPayload)

    def test_create_destroy_payload(self):
        payload = self.factory.create(Operation.DESTROY)
        self._test_payload_type(payload, destroy.DestroyRequestPayload)

    def test_create_archive_payload(self):
        self._test_not_implemented(
            self.factory.create, Operation.ARCHIVE)

    def test_create_recover_payload(self):
        self._test_not_implemented(
            self.factory.create, Operation.RECOVER)

    def test_create_validate_payload(self):
        self._test_not_implemented(
            self.factory.create, Operation.VALIDATE)

    def test_create_query_payload(self):
        payload = self.factory.create(Operation.QUERY)
        self._test_payload_type(payload, query.QueryRequestPayload)

    def test_create_cancel_payload(self):
        self._test_not_implemented(
            self.factory.create, Operation.CANCEL)

    def test_create_poll_payload(self):
        self._test_not_implemented(
            self.factory.create, Operation.POLL)

    def test_create_notify_payload(self):
        self._test_not_implemented(
            self.factory.create, Operation.NOTIFY)

    def test_create_put_payload(self):
        self._test_not_implemented(
            self.factory.create, Operation.PUT)

    def test_create_rekey_key_pair_payload(self):
        payload = self.factory.create(Operation.REKEY_KEY_PAIR)
        self._test_payload_type(
            payload, rekey_key_pair.RekeyKeyPairRequestPayload)

    def test_create_discover_versions_payload(self):
        payload = self.factory.create(Operation.DISCOVER_VERSIONS)
        self._test_payload_type(
            payload, discover_versions.DiscoverVersionsRequestPayload)
|
bsipocz/astropy | astropy/visualization/stretch.py | Python | bsd-3-clause | 15,324 | 0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Classes that deal with stretching, i.e. mapping a range of [0:1] values onto
another set of [0:1] values with a transformation
"""
import numpy as np
from .transform import BaseTransform
from .transform import CompositeTransform
# Public API of this module.
__all__ = ["BaseStretch", "LinearStretch", "SqrtStretch", "PowerStretch",
           "PowerDistStretch", "SquaredStretch", "LogStretch", "AsinhStretch",
           "SinhStretch", "HistEqStretch", "ContrastBiasStretch",
           "CompositeStretch"]
def _logn(n, x, out=None):
"""Calculate the log base n of x."""
# We define this because numpy.lib.scimath.logn doesn't support out=
if out is | None:
return np.log(x) / np.log(n)
else:
np.log(x, out=out)
np.true_divide(out, np.log(n), out=out)
return out
def _prepare(values, clip=True, out=None):
"""
Prepare the data by optionally clipping and copying, and return the
array that should be subsequently used for in-place calculations.
"""
if clip:
return np.clip(values, 0., 1., out=out)
else:
if out is None:
return np.array(values, copy=True)
else:
out[:] = np.asarray(values)
return out
class BaseStretch(BaseTransform):
    """
    Base class for the stretch classes, which, when called with an array
    of values in the range [0:1], return an transformed array of values,
    also in the range [0:1].
    """
    def __add__(self, other):
        # Chain two stretches into a CompositeStretch.
        return CompositeStretch(other, self)
    def __call__(self, values, clip=True, out=None):
        """
        Transform values using this stretch.
        Parameters
        ----------
        values : array-like
            The input values, which should already be normalized to the
            [0:1] range.
        clip : bool, optional
            If `True` (default), values outside the [0:1] range are
            clipped to the [0:1] range.
        out : `~numpy.ndarray`, optional
            If specified, the output values will be placed in this array
            (typically used for in-place calculations).
        Returns
        -------
        result : `~numpy.ndarray`
            The transformed values.
        """
        # Abstract: concrete stretches implement the transformation.
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        # Abstract: concrete stretches return their inverse here.
class LinearStretch(BaseStretch):
    """
    A linear stretch with a slope and offset.
    The stretch is given by:
    .. math::
        y = slope x + intercept
    Parameters
    ----------
    slope : float, optional
        The ``slope`` parameter used in the above formula. Default is 1.
    intercept : float, optional
        The ``intercept`` parameter used in the above formula. Default is 0.
    """
    def __init__(self, slope=1, intercept=0):
        super().__init__()
        self.slope = slope
        self.intercept = intercept
    def __call__(self, values, clip=True, out=None):
        values = _prepare(values, clip=clip, out=out)
        # Skip the no-op multiply/add so the buffer is not touched needlessly.
        if self.slope != 1:
            np.multiply(values, self.slope, out=values)
        if self.intercept != 0:
            np.add(values, self.intercept, out=values)
        return values
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        # Solve y = slope * x + intercept for x: another linear stretch.
        return LinearStretch(1. / self.slope, - self.intercept / self.slope)
class SqrtStretch(BaseStretch):
    r"""
    A square root stretch.
    The stretch is given by:
    .. math::
        y = \sqrt{x}
    """
    def __call__(self, values, clip=True, out=None):
        values = _prepare(values, clip=clip, out=out)
        # Negative inputs (possible when clip=False) yield NaN; suppress
        # numpy's invalid-value warning for that case.
        with np.errstate(invalid='ignore'):
            np.sqrt(values, out=values)
        return values
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        # The inverse of sqrt on [0, 1] is squaring.
        return PowerStretch(2)
class PowerStretch(BaseStretch):
    r"""
    A power stretch.
    The stretch is given by:
    .. math::
        y = x^a
    Parameters
    ----------
    a : float
        The power index (see the above formula).
    """
    def __init__(self, a):
        super().__init__()
        self.power = a
    def __call__(self, values, clip=True, out=None):
        values = _prepare(values, clip=clip, out=out)
        # x ** a, computed in place on the prepared buffer.
        np.power(values, self.power, out=values)
        return values
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        # The inverse of x**a on [0, 1] is x**(1/a).
        return PowerStretch(1. / self.power)
class PowerDistStretch(BaseStretch):
    r"""
    An alternative power stretch.
    The stretch is given by:
    .. math::
        y = \frac{a^x - 1}{a - 1}
    Parameters
    ----------
    a : float, optional
        The ``a`` parameter used in the above formula. Default is 1000.
        ``a`` cannot be set to 1.
    """
    def __init__(self, a=1000.0):
        if a == 1: # singularity
            raise ValueError("a cannot be set to 1")
        super().__init__()
        self.exp = a
    def __call__(self, values, clip=True, out=None):
        values = _prepare(values, clip=clip, out=out)
        # (a**x - 1) / (a - 1), computed in place step by step.
        np.power(self.exp, values, out=values)
        np.subtract(values, 1, out=values)
        np.true_divide(values, self.exp - 1.0, out=values)
        return values
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return InvertedPowerDistStretch(a=self.exp)
class InvertedPowerDistStretch(BaseStretch):
    r"""
    Inverse transformation for
    `~astropy.image.scaling.PowerDistStretch`.
    The stretch is given by:
    .. math::
        y = \frac{\log{(x (a-1) + 1)}}{\log{a}}
    Parameters
    ----------
    a : float, optional
        The ``a`` parameter used in the above formula. Default is 1000.
        ``a`` cannot be set to 1.
    """
    def __init__(self, a=1000.0):
        if a == 1: # singularity
            raise ValueError("a cannot be set to 1")
        super().__init__()
        self.exp = a
    def __call__(self, values, clip=True, out=None):
        values = _prepare(values, clip=clip, out=out)
        # log_a(x * (a - 1) + 1), computed in place.
        np.multiply(values, self.exp - 1.0, out=values)
        np.add(values, 1, out=values)
        _logn(self.exp, values, out=values)
        return values
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return PowerDistStretch(a=self.exp)
class SquaredStretch(PowerStretch):
    r"""
    A convenience class for a power stretch of 2.
    The stretch is given by:
    .. math::
        y = x^2
    """
    def __init__(self):
        super().__init__(2)
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return SqrtStretch()
class LogStretch(BaseStretch):
    r"""
    A log stretch.
    The stretch is given by:
    .. math::
        y = \frac{\log{(a x + 1)}}{\log{(a + 1)}}.
    Parameters
    ----------
    a : float
        The ``a`` parameter used in the above formula. Default is 1000.
    """
    def __init__(self, a=1000.0):
        super().__init__()
        self.exp = a
    def __call__(self, values, clip=True, out=None):
        values = _prepare(values, clip=clip, out=out)
        # log(a * x + 1) / log(a + 1), computed in place step by step.
        np.multiply(values, self.exp, out=values)
        np.add(values, 1., out=values)
        np.log(values, out=values)
        np.true_divide(values, np.log(self.exp + 1.), out=values)
        return values
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return InvertedLogStretch(self.exp)
class InvertedLogStretch(BaseStretch):
r"""
Inverse transformation for `~astropy.image.scaling.LogStretch`.
The stretch is given by:
.. math::
y = \frac{e^{y} (a + 1) -1}{a}
Parameters
----------
a : float, optional
The ``a`` parameter used in the above formula. Default is 1000.
"""
def __init__(self, a):
super().__init__()
self.exp = a
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, ou |
fdiehl/apsis | code/apsis/tests/test_models/test_experiment.py | Python | mit | 4,273 | 0.000936 | __author__ = 'Frederik Diehl'
from apsis.models.experiment import Experiment
from nose.tools import assert_equal, assert_raises, assert_dict_equal, \
assert_true, assert_false
from apsis.models.candidate import Candidate
from apsis.models.parameter_definition import *
class TestExperiment(object):
    """Unit tests for apsis.models.experiment.Experiment."""

    exp = None

    def setup(self):
        """Build a small experiment; check constructor validation."""
        name = "test_experiment"
        param_def = {
            "x": MinMaxNumericParamDef(0, 1),
            "name": NominalParamDef(["A", "B", "C"])
        }
        # Invalid: a plain list is not a parameter definition object.
        param_def_wrong = {
            "x": MinMaxNumericParamDef(0, 1),
            "name": ["A", "B", "C"]
        }
        minimization = True
        self.exp = Experiment(name, param_def, minimization)
        assert_equal(self.exp.name, name)
        assert_equal(self.exp.parameter_definitions, param_def)
        assert_equal(self.exp.minimization_problem, minimization)
        with assert_raises(ValueError):
            Experiment("fails", False)
        with assert_raises(ValueError):
            Experiment("fails too", param_def_wrong)

    def test_add(self):
        """Candidates move through pending -> working -> finished; bad input raises."""
        cand = Candidate({"x": 1, "name": "A"})
        cand_invalid = Candidate({"x": 1})
        cand_invalid2 = Candidate({"x": 2, "name": "A"})
        with assert_raises(ValueError):
            self.exp.add_pending(cand_invalid)
        with assert_raises(ValueError):
            self.exp.add_pending(cand_invalid2)
        self.exp.add_pending(cand)
        assert cand in self.exp.candidates_pending
        with assert_raises(ValueError):
            self.exp.add_pending(False)
        self.exp.add_finished(cand)
        assert cand in self.exp.candidates_finished
        with assert_raises(ValueError):
            self.exp.add_finished(False)
        cand2 = Candidate({"x": 0, "name": "B"})
        self.exp.add_working(cand2)
        assert cand2 in self.exp.candidates_working
        with assert_raises(ValueError):
            self.exp.add_working(False)
        self.exp.add_pausing(cand2)
        assert cand2 in self.exp.candidates_pending
        with assert_raises(ValueError):
            self.exp.add_pausing(False)
        self.exp.add_working(cand2)
        assert cand2 in self.exp.candidates_working
        with assert_raises(ValueError):
            self.exp.add_working(False)
        self.exp.add_finished(cand2)
        assert cand2 in self.exp.candidates_finished
        with assert_raises(ValueError):
            self.exp.add_finished(False)

    def test_better_cand(self):
        """better_cand respects min/max mode, None handling and validation."""
        cand = Candidate({"x": 1, "name": "B"})
        cand2 = Candidate({"x": 0, "name": "A"})
        cand_none = Candidate({"x": 0.5, "name": "C"})
        # "D" is outside the nominal parameter's allowed values.
        cand_invalid = Candidate({"x": 0.5, "name": "D"})
        cand.result = 1
        cand2.result = 0
        assert_true(self.exp.better_cand(cand2, cand))
        assert_true(self.exp.better_cand(cand2, cand_none))
        self.exp.minimization_problem = False
        assert_true(self.exp.better_cand(cand, cand2))
        assert_false(self.exp.better_cand(cand2, cand))
        assert_true(self.exp.better_cand(cand, None))
        assert_false(self.exp.better_cand(None, cand))
        assert_false(self.exp.better_cand(None, None))
        with assert_raises(ValueError):
            self.exp.better_cand(cand, cand_invalid)
        with assert_raises(ValueError):
            self.exp.better_cand(cand_invalid, cand)
        with assert_raises(ValueError):
            self.exp.better_cand("fails", cand)
        with assert_raises(ValueError):
            self.exp.better_cand(cand, "fails")

    def test_warp(self):
        """warp_pt_in/warp_pt_out round-trip a parameter dict unchanged."""
        cand = Candidate({"x": 1})
        cand_out = self.exp.warp_pt_out(self.exp.warp_pt_in(cand.params))
        assert_dict_equal(cand.params, cand_out)

    def test_to_dict(self):
        """to_dict runs without error on an experiment with one result."""
        cand = Candidate({"x": 1, "name": "A"})
        self.exp.add_finished(cand)
        self.exp.to_dict()

    def test_check_param_dict(self):
        """_check_param_dict accepts only complete, in-range parameter dicts."""
        param_dict = {"x": 1}
        assert_false(self.exp._check_param_dict(param_dict))
        param_dict = {"x": 1,
                      "name": "D"}
        assert_false(self.exp._check_param_dict(param_dict))
        param_dict = {"x": 1,
                      "name": "A"}
        assert_true(self.exp._check_param_dict(param_dict))
akretion/delivery-carrier | base_delivery_carrier_label/tests/__init__.py | Python | agpl-3.0 | 30 | 0 | from . import test_ge | t_weight | |
Debian/debsources | lib/debsources/tests/test_archiver.py | Python | agpl-3.0 | 6,666 | 0.0012 | # Copyright (C) 2014-2021 The Debsources developers
# <qa-debsources@lists.alioth.debian.org>.
# See the AUTHORS file at the top-level directory of this distribution and at
# https://salsa.debian.org/qa/debsources/blob/master/AUTHORS
#
# This file is part of Debsources. Debsources is free software: you can
# redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version. For more information
# see the COPYING file at the top-level directory of this distribution and at
# https://salsa.debian.org/qa/debsources/blob/master/COPYING
import logging
import shutil
import tempfile
import unittest
from pathlib import Path
from nose.plugins.attrib import attr
from nose.tools import istest
from debsources import archiver, db_storage, debmirror, mainlib, statistics, updater
from debsources.consts import DEBIAN_RELEASES
from debsources.tests.db_testing import DbTestFixture
from debsources.tests.testdata import TEST_DATA_DIR
from debsources.tests.updater_testing import mk_conf
@attr("infra")
@attr("postgres")
class Archiver(unittest.TestCase, DbTestFixture):
TEST_STAGES = set([updater.STAGE_EXTRACT, updater.STAGE_SUITES, updater.STAGE_GC])
    def setUp(self):
        """Create a test DB, a scratch dir with the test sources, and load hooks."""
        self.db_setup()
        self.tmpdir = tempfile.mkdtemp(suffix=".debsources-test")
        self.conf = mk_conf(Path(self.tmpdir))
        self.conf["stages"] = self.TEST_STAGES
        self.longMessage = True
        self.maxDiff = None
        # Copy the reference source mirror into the scratch configuration.
        orig_sources = TEST_DATA_DIR / "sources"
        dest_sources = self.conf["sources_dir"]
        shutil.copytree(orig_sources, dest_sources)
        mainlib.init_logging(self.conf, console_verbosity=logging.WARNING)
        obs, exts = mainlib.load_hooks(self.conf)
        self.conf["observers"], self.conf["file_exts"] = obs, exts
        self.archive = debmirror.SourceMirrorArchive(self.conf["mirror_archive_dir"])
def tearDown(self):
self.db_teardown()
shutil.rmtree(self.tmpdir)
def assertHasPackage(self, package, version):
p = db_storage.lookup_package(self.session, package, version)
self.assertIsNotNone(p, msg="missing package %s/%s" % (package, version))
return p
def assertHasLivePackage(self, package, version):
p = self.assertHasPackage(package, version)
self.assertFalse(
p.sticky, msg="unexpected sticky bit | on package %s/%s" % (package, version)
)
def assertHasStickyPackage(self, p | ackage, version):
p = self.assertHasPackage(package, version)
self.assertTrue(
p.sticky, msg="missing sticky bit on package %s/%s" % (package, version)
)
def assertLacksStickyPackage(self, package, version):
p = db_storage.lookup_package(self.session, package, version)
self.assertIsNone(p, msg="missing sticky package %s/%s" % (package, version))
def assertHasStickySuite(self, suite):
s = db_storage.lookup_db_suite(self.session, suite, sticky=True)
self.assertIsNotNone(s, msg="missing sticky suite " + suite)
def assertLacksStickySuite(self, suite):
s = db_storage.lookup_db_suite(self.session, suite, sticky=True)
self.assertIsNone(s, msg="present sticky suite " + suite)
@istest
@attr("slow")
def addsStickySuites(self):
SUITES = ["hamm", "slink"]
PACKAGES = [
("3dchess", "0.8.1-3"), # hamm
("ed", "0.2-16"), # hamm
("WMRack", "1.0b3-1"),
] # slink, pkg w/ weird naming
for suite in SUITES:
archiver.add_suite(self.conf, self.session, suite, self.archive)
for suite in SUITES:
self.assertHasStickySuite(suite)
s = db_storage.lookup_db_suite(self.session, suite, sticky=True)
rel_info = DEBIAN_RELEASES[suite]
self.assertEqual(s.version, rel_info["version"])
self.assertEqual(s.release_date, rel_info["date"])
for pkg in PACKAGES:
self.assertHasStickyPackage(*pkg)
@istest
@attr("slow")
def removesStickySuite(self):
SARGE_PACKAGES = [("asm", "1.5.2-1"), ("zziplib", "0.12.83-4")]
stats_file = self.conf["cache_dir"] / "stats.data"
# to test stats.data cleanup
self.conf["stages"] = self.TEST_STAGES.union(set([updater.STAGE_STATS]))
archiver.add_suite(self.conf, self.session, "sarge", self.archive)
self.assertHasStickySuite("sarge")
for pkg in SARGE_PACKAGES:
self.assertHasStickyPackage(*pkg)
stats = statistics.load_metadata_cache(stats_file)
self.assertTrue("debian_sarge.sloccount" in stats)
archiver.remove_suite(self.conf, self.session, "sarge")
self.assertLacksStickySuite("sarge")
for pkg in SARGE_PACKAGES:
self.assertLacksStickyPackage(*pkg)
stats = statistics.load_metadata_cache(stats_file)
self.assertFalse("debian_sarge.sloccount" in stats)
@istest
@attr("slow")
def countsReferences(self):
DUP_PKG = ("2utf", "1.04") # in both hamm and slink
archiver.add_suite(self.conf, self.session, "hamm", self.archive)
self.assertHasStickyPackage(*DUP_PKG)
archiver.add_suite(self.conf, self.session, "slink", self.archive)
self.assertHasStickyPackage(*DUP_PKG)
archiver.remove_suite(self.conf, self.session, "hamm")
self.assertHasStickyPackage(*DUP_PKG)
archiver.remove_suite(self.conf, self.session, "slink")
self.assertLacksStickyPackage(*DUP_PKG)
@istest
@attr("slow")
def stayClearOfLiveSuites(self):
# in both lenny (sticky) and squeeze (live)
DUP_PKG = ("libcaca", "0.99.beta17-1")
self.assertHasLivePackage(*DUP_PKG)
archiver.add_suite(self.conf, self.session, "lenny", self.archive)
self.assertHasStickyPackage(*DUP_PKG)
archiver.remove_suite(self.conf, self.session, "lenny")
self.assertHasLivePackage(*DUP_PKG)
@istest
@attr("slow")
def guessAreaForSectionlessPkgs(self):
sectionless_pkg = ("tripwire", "1.2-15")
archiver.add_suite(self.conf, self.session, "slink", self.archive)
p = db_storage.lookup_package(self.session, *sectionless_pkg)
self.assertEqual("non-free", p.area)
@istest
@attr("slow")
def canAddPkgsWSpecialFiles(self):
pkg_w_pipe = ("freewrl", "0.20.a1-3")
archiver.add_suite(self.conf, self.session, "potato", self.archive)
self.assertHasStickyPackage(*pkg_w_pipe)
|
austinzheng/swift | utils/gyb_syntax_support/AvailabilityNodes.py | Python | apache-2.0 | 4,872 | 0 | from Child import Child
from Node import Node # noqa: I201
AVAILABILITY_NODES = [
    # availability-spec-list -> availability-entry availability-spec-list?
    Node('AvailabilitySpecList', kind='SyntaxCollection',
         element='AvailabilityArgument'),

    # Wrapper for all the different entries that may occur inside @available
    # availability-entry -> '*' ','?
    #                     | identifier ','?
    #                     | availability-version-restriction ','?
    #                     | availability-versioned-argument ','?
    Node('AvailabilityArgument', kind='Syntax',
         description='''
         A single argument to an `@available` argument like `*`, `iOS 10.1`, \
         or `message: "This has been deprecated"`.
         ''',
         children=[
             Child('Entry', kind='Syntax',
                   description='The actual argument',
                   node_choices=[
                       Child('Star', kind='SpacedBinaryOperatorToken',
                             text_choices=['*']),
                       Child('IdentifierRestriction',
                             kind='IdentifierToken'),
                       Child('AvailabilityVersionRestriction',
                             kind='AvailabilityVersionRestriction'),
                       Child('AvailabilityLabeledArgument',
                             kind='AvailabilityLabeledArgument'),
                   ]),
             Child('TrailingComma', kind='CommaToken', is_optional=True,
                   description='''
                   A trailing comma if the argument is followed by another \
                   argument
                   '''),
         ]),

    # Representation of 'deprecated: 2.3', 'message: "Hello world"' etc.
    # availability-versioned-argument -> identifier ':' version-tuple
    Node('AvailabilityLabeledArgument', kind='Syntax',
         description='''
         An argument to an `@available` attribute that consists of a label and \
         a value, e.g. `message: "This has been deprecated"`.
         ''',
         children=[
             Child('Label', kind='IdentifierToken',
                   description='The label of the argument'),
             Child('Colon', kind='ColonToken',
                   description='The colon separating label and value'),
             Child('Value', kind='Syntax',
                   node_choices=[
                       Child('String', 'StringLiteralToken'),
                       Child('Version', 'VersionTuple'),
                   ], description='The value of this labeled argument',),
         ]),

    # Representation for 'iOS 10', 'swift 3.4' etc.
    # availability-version-restriction -> identifier version-tuple
    Node('AvailabilityVersionRestriction', kind='Syntax',
         description='''
         An argument to `@available` that restricts the availability on a \
         certain platform to a version, e.g. `iOS 10` or `swift 3.4`.
         ''',
         children=[
             Child('Platform', kind='IdentifierToken',
                   classification='Keyword',
                   description='''
                   The name of the OS on which the availability should be \
                   restricted or 'swift' if the availability should be \
                   restricted based on a Swift version.
                   '''),
             Child('Version', kind='VersionTuple'),
         ]),

    # version-tuple -> integer-literal
    #                | float-literal
    #                | float-literal '.' integer-literal
    Node('VersionTuple', kind='Syntax',
         description='''
         A version number of the form major.minor.patch in which the minor \
         and patch part may be omitted.
         ''',
         children=[
             Child('MajorMinor', kind='Syntax',
                   node_choices=[
                       Child('Major', kind='IntegerLiteralToken'),
                       Child('MajorMinor', kind='FloatingLiteralToken')
                   ], description='''
                   In case the version consists only of the major version, an \
                   integer literal that specifies the major version. In case \
                   the version consists of major and minor version number, a \
                   floating literal in which the decimal part is interpreted \
                   as the minor version.
                   '''),
             Child('PatchPeriod', kind='PeriodToken', is_optional=True,
                   description='''
                   If the version contains a patch number, the period \
                   separating the minor from the patch number.
                   '''),
             Child('PatchVersion', kind='IntegerLiteralToken',
                   is_optional=True, description='''
                   The patch version if specified.
                   '''),
         ]),
]
|
photoninger/ansible | lib/ansible/modules/packaging/os/homebrew_cask.py | Python | gpl-3.0 | 18,051 | 0.000499 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
# (c) 2016, Indrajit Raychaudhuri <irc+code@indrajit.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: homebrew_cask
author:
- "Indrajit Raychaudhuri (@indrajitr)"
- "Daniel Jaouen (@danieljaouen)"
- "Enric Lluelles (@enriclluelles)"
requirements:
- "python >= 2.6"
short_description: Install/uninstall homebrew casks.
description:
- Manages Homebrew casks.
version_added: "1.6"
options:
name:
description:
- name of cask to install/remove
required: true
aliases: ['pkg', 'package', 'cask']
path:
description:
- "':' separated list of paths to search for 'brew' executable."
required: false
default: '/usr/local/bin'
state:
description:
- state of the cask
choices: [ 'present', 'absent' ]
required: false
default: present
update_homebrew:
description:
- update homebrew itself first. Note that C(brew cask update) is
a synonym for C(brew update).
required: false
default: no
choices: [ "yes", "no" ]
aliases: ['update-brew']
version_added: "2.2"
install_options:
description:
- options flags to install a package
required: false
default: null
aliases: ['options']
version_added: "2.2"
'''
EXAMPLES = '''
- homebrew_cask:
name: alfred
state: present
- homebrew_cask:
name: alfred
state: absent
- homebrew_cask:
name: alfred
state: present
install_options: 'appdir=/Applications'
- homebrew_cask:
name: alfred
state: present
install_options: 'debug,appdir=/Applications'
- homebrew_cask:
name: alfred
state: absent
install_options: force
'''
import os.path
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems, string_types
# exceptions -------------------------------------------------------------- {{{
class HomebrewCaskException(Exception):
    """Generic error raised by this module, e.g. when validation of a
    module, path, brew_path, cask name or state value fails."""
    pass
# /exceptions ------------------------------------------------------------- }}}
# utils ------------------------------------------------------------------- {{{
def _create_regex_group(s):
lines = (line.strip() for line in s.split('\n') if line.strip())
chars = filter(None, (line.split('#')[0].strip() for line in lines))
group = r'[^' + r''.join(chars) + r']'
return re.compile(group)
# /utils ------------------------------------------------------------------ }}}
class HomebrewCask(object):
'''A class to manage Homebrew casks.'''
# class regexes ------------------------------------------------ {{{
VALID_PATH_CHARS = r'''
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
\s # spaces
: # colons
{sep} # the OS-specific path separator
. # dots
- # dashes
'''.format(sep=os.path.sep)
VALID_BREW_PATH_CHARS = r'''
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
\s # spaces
{sep} # the OS-specific path separator
. # dots
- # d | ashes
'''.format(sep=os.path.sep)
VALID_CASK_CHARS = r'''
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
. # dots
/ | # slash (for taps)
- # dashes
'''
INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS)
INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS)
INVALID_CASK_REGEX = _create_regex_group(VALID_CASK_CHARS)
# /class regexes ----------------------------------------------- }}}
# class validations -------------------------------------------- {{{
@classmethod
def valid_path(cls, path):
'''
`path` must be one of:
- list of paths
- a string containing only:
- alphanumeric characters
- dashes
- dots
- spaces
- colons
- os.path.sep
'''
if isinstance(path, (string_types)):
return not cls.INVALID_PATH_REGEX.search(path)
try:
iter(path)
except TypeError:
return False
else:
paths = path
return all(cls.valid_brew_path(path_) for path_ in paths)
@classmethod
def valid_brew_path(cls, brew_path):
'''
`brew_path` must be one of:
- None
- a string containing only:
- alphanumeric characters
- dashes
- dots
- spaces
- os.path.sep
'''
if brew_path is None:
return True
return (
isinstance(brew_path, string_types)
and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
)
@classmethod
def valid_cask(cls, cask):
'''A valid cask is either None or alphanumeric + backslashes.'''
if cask is None:
return True
return (
isinstance(cask, string_types)
and not cls.INVALID_CASK_REGEX.search(cask)
)
@classmethod
def valid_state(cls, state):
'''
A valid state is one of:
- installed
- absent
'''
if state is None:
return True
else:
return (
isinstance(state, string_types)
and state.lower() in (
'installed',
'absent',
)
)
@classmethod
def valid_module(cls, module):
'''A valid module is an instance of AnsibleModule.'''
return isinstance(module, AnsibleModule)
# /class validations ------------------------------------------- }}}
# class properties --------------------------------------------- {{{
@property
def module(self):
return self._module
@module.setter
def module(self, module):
if not self.valid_module(module):
self._module = None
self.failed = True
self.message = 'Invalid module: {0}.'.format(module)
raise HomebrewCaskException(self.message)
else:
self._module = module
return module
@property
def path(self):
return self._path
@path.setter
def path(self, path):
if not self.valid_path(path):
self._path = []
self.failed = True
self.message = 'Invalid path: {0}.'.format(path)
raise HomebrewCaskException(self.message)
else:
if isinstance(path, string_types):
self._path = path.split(':')
else:
self._path = path
return path
@property
def brew_path(self):
return self._brew_path
@brew_path.setter
def brew_path(self, brew_path):
if not self.valid_brew_path(brew_path):
self._brew_path = None
self.failed = True
self.message = 'Invalid brew_path: {0}.'.format(brew_path)
raise HomebrewCaskException(self.message)
else:
self._brew_path = brew_path
return brew_path
@property
def params(self):
return self._params
@params.setter
def params(self, params):
self._params = self.module.params
return self._params
@property
def current_cask(self):
return self._current_cask
@current_cask.setter
def current_cask(self, cask):
if not self.valid_ |
bswartz/cinder | cinder/tests/unit/test_quota_utils.py | Python | apache-2.0 | 10,416 | 0 | # Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cinder import context
from cinder import exception
from cinder import quota_utils
from cinder import test
from keystoneclient import exceptions
from oslo_config im | port cfg
from oslo_config import fixture as config_fixture
CONF = cfg.CONF
class QuotaUtilsTest(test.TestCase):
class FakeProject(object):
def __init__(self, id='foo', parent_id=None):
self.id = id
self.parent_id = parent_id
self.subtree = None
self.parents = N | one
self.domain_id = 'default'
def setUp(self):
super(QuotaUtilsTest, self).setUp()
self.auth_url = 'http://localhost:5000'
self.context = context.RequestContext('fake_user', 'fake_proj_id')
self.fixture = self.useFixture(config_fixture.Config(CONF))
self.fixture.config(auth_uri=self.auth_url, group='keystone_authtoken')
@mock.patch('keystoneclient.client.Client')
@mock.patch('keystoneclient.session.Session')
def test_keystone_client_instantiation(self, ksclient_session,
ksclient_class):
quota_utils._keystone_client(self.context)
ksclient_class.assert_called_once_with(auth_url=self.auth_url,
session=ksclient_session(),
version=(3, 0))
@mock.patch('keystoneclient.client.Client')
def test_get_project_keystoneclient_v2(self, ksclient_class):
keystoneclient = ksclient_class.return_value
keystoneclient.version = 'v2.0'
expected_project = quota_utils.GenericProjectInfo(
self.context.project_id, 'v2.0')
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id)
self.assertEqual(expected_project.__dict__, project.__dict__)
@mock.patch('keystoneclient.client.Client')
def test_get_project_keystoneclient_v3(self, ksclient_class):
keystoneclient = ksclient_class.return_value
keystoneclient.version = 'v3'
returned_project = self.FakeProject(self.context.project_id, 'bar')
del returned_project.subtree
keystoneclient.projects.get.return_value = returned_project
expected_project = quota_utils.GenericProjectInfo(
self.context.project_id, 'v3', 'bar')
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id)
self.assertEqual(expected_project.__dict__, project.__dict__)
@mock.patch('keystoneclient.client.Client')
def test_get_project_keystoneclient_v3_with_subtree(self, ksclient_class):
keystoneclient = ksclient_class.return_value
keystoneclient.version = 'v3'
returned_project = self.FakeProject(self.context.project_id, 'bar')
subtree_dict = {'baz': {'quux': None}}
returned_project.subtree = subtree_dict
keystoneclient.projects.get.return_value = returned_project
expected_project = quota_utils.GenericProjectInfo(
self.context.project_id, 'v3', 'bar', subtree_dict)
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id, subtree_as_ids=True)
keystoneclient.projects.get.assert_called_once_with(
self.context.project_id, parents_as_ids=False, subtree_as_ids=True)
self.assertEqual(expected_project.__dict__, project.__dict__)
def _setup_mock_ksclient(self, mock_client, version='v3',
subtree=None, parents=None):
keystoneclient = mock_client.return_value
keystoneclient.version = version
proj = self.FakeProject(self.context.project_id)
proj.subtree = subtree
if parents:
proj.parents = parents
proj.parent_id = next(iter(parents.keys()))
keystoneclient.projects.get.return_value = proj
@mock.patch('keystoneclient.client.Client')
def test__filter_domain_id_from_parents_domain_as_parent(
self, mock_client):
# Test with a top level project (domain is direct parent)
self._setup_mock_ksclient(mock_client, parents={'default': None})
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id, parents_as_ids=True)
self.assertIsNone(project.parent_id)
self.assertIsNone(project.parents)
@mock.patch('keystoneclient.client.Client')
def test__filter_domain_id_from_parents_domain_as_grandparent(
self, mock_client):
# Test with a child project (domain is more than a parent)
self._setup_mock_ksclient(mock_client,
parents={'bar': {'default': None}})
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id, parents_as_ids=True)
self.assertEqual('bar', project.parent_id)
self.assertEqual({'bar': None}, project.parents)
@mock.patch('keystoneclient.client.Client')
def test__filter_domain_id_from_parents_no_domain_in_parents(
self, mock_client):
# Test that if top most parent is not a domain (to simulate an older
# keystone version) nothing gets removed from the tree
parents = {'bar': {'foo': None}}
self._setup_mock_ksclient(mock_client, parents=parents)
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id, parents_as_ids=True)
self.assertEqual('bar', project.parent_id)
self.assertEqual(parents, project.parents)
@mock.patch('keystoneclient.client.Client')
def test__filter_domain_id_from_parents_no_parents(
self, mock_client):
# Test that if top no parents are present (to simulate an older
# keystone version) things don't blow up
self._setup_mock_ksclient(mock_client)
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id, parents_as_ids=True)
self.assertIsNone(project.parent_id)
self.assertIsNone(project.parents)
@mock.patch('cinder.quota_utils._keystone_client')
def test_validate_nested_projects_with_keystone_v2(self, _keystone_client):
_keystone_client.side_effect = exceptions.VersionNotAvailable
self.assertRaises(exception.CinderException,
quota_utils.validate_setup_for_nested_quota_use,
self.context, [], None)
@mock.patch('cinder.quota_utils._keystone_client')
def test_validate_nested_projects_non_cloud_admin(self, _keystone_client):
# Covers not cloud admin or using old policy.json
_keystone_client.side_effect = exceptions.Forbidden
self.assertRaises(exception.CinderException,
quota_utils.validate_setup_for_nested_quota_use,
self.context, [], None)
def _process_reserve_over_quota(self, overs, usages, quotas,
expected_ex,
resource='volumes'):
ctxt = context.get_admin_context()
ctxt.project_id = 'fake'
size = 1
kwargs = {'overs': overs,
'usages': usages,
'quotas': quotas}
exc = exception.OverQuota(**kwargs)
self.assertRaises(expected_ex,
quota_utils.process_reserve_over_quota,
ctxt, exc,
resource=resource,
|
aluminiumgeek/psychedelizer | pynbome/filters/glass.py | Python | gpl-3.0 | 805 | 0.008696 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# pynbome library
# glass.py (c) Mikhail Mezyakov <mihail265@gmail.com>
#
# View through tiled glass
import os
import subprocess
from wand.image import Image
from . import prepare_filter
@prepare_filter
def apply_filter(input_filename, output_filename):
    """Apply a 'view through tiled glass' displacement effect.

    Shells out to the bundled ``glasseffects`` script to render
    *input_filename* into *output_filename*, then reloads the result.

    Returns a tuple ``(wand.image.Image, settings dict used)``.
    """
    settings = {
        'amount': 25,
        'granularity': 5
    }
    script_path = os.path.join(os.path.dirname(__file__), '../lib/glasseffects')
    # Pass the command as an argument list: unlike the previous
    # str.format() + split() round-trip, this is safe for file paths
    # that contain whitespace.
    command = [
        script_path,
        '-e', 'displace',
        '-a', str(settings['amount']),
        '-g', str(settings['granularity']),
        '-w', '0',
        '-n', '100',
        input_filename,
        output_filename,
    ]
    process = subprocess.Popen(command)
    process.wait()
    return Image(filename=output_filename), settings
|
lanyuwen/openthread | tests/scripts/thread-cert/Cert_5_1_06_RemoveRouterId.py | Python | bsd-3-clause | 4,328 | 0 | #!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIAB | ILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import command
import mle
import thread_cert
from command import | CheckType
LEADER = 1
ROUTER1 = 2
class Cert_5_1_06_RemoveRouterId(thread_cert.TestCase):
    # Two-node topology: Leader and Router1 are directly linked
    # (mutually whitelisted) on PAN id 0xface.
    TOPOLOGY = {
        LEADER: {
            'mode': 'rsdn',
            'panid': 0xface,
            'whitelist': [ROUTER1]
        },
        ROUTER1: {
            'mode': 'rsdn',
            'panid': 0xface,
            'router_selection_jitter': 1,
            'whitelist': [LEADER]
        },
    }

    def test(self):
        """Thread cert test 5.1.6: after the Leader releases Router1's
        router id, Router1 must re-attach, solicit a router id again
        (flagging that it previously was a router) and stay reachable.
        """
        # Bring up the Leader first and verify it takes the leader role.
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')

        # Router1 attaches and promotes itself to router.
        self.nodes[ROUTER1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')

        rloc16 = self.nodes[ROUTER1].get_addr16()

        # Baseline connectivity check before the id is released.
        for addr in self.nodes[ROUTER1].get_addrs():
            self.assertTrue(self.nodes[LEADER].ping(addr))

        # Release Router1's router id (top 6 bits of the RLOC16, hence
        # the >> 10); Router1 should recover and become a router again.
        self.nodes[LEADER].release_router_id(rloc16 >> 10)
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')

        leader_messages = self.simulator.get_messages_sent_by(LEADER)
        router1_messages = self.simulator.get_messages_sent_by(ROUTER1)

        # 1 - All: initial attach sequence (MLE attach + address solicit).
        leader_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)

        router1_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
        leader_messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        router1_messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
        leader_messages.next_mle_message(mle.CommandType.CHILD_ID_RESPONSE)

        # CoAP POST to /a/as is the initial Address Solicit, answered 2.04.
        msg = router1_messages.next_coap_message("0.02")
        msg.assertCoapMessageRequestUriPath("/a/as")
        leader_messages.next_coap_message("2.04")

        # 2 - N/A

        # 3 - Router1: re-attach after its router id was released.
        msg = router1_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
        command.check_parent_request(msg, is_first_request=True)

        msg = router1_messages.next_mle_message(
            mle.CommandType.CHILD_ID_REQUEST, sent_to_node=self.nodes[LEADER])
        command.check_child_id_request(
            msg,
            tlv_request=CheckType.CONTAIN,
            mle_frame_counter=CheckType.OPTIONAL,
            address_registration=CheckType.NOT_CONTAIN,
            active_timestamp=CheckType.OPTIONAL,
            pending_timestamp=CheckType.OPTIONAL,
        )

        # Second Address Solicit must indicate Router1 was a router before.
        msg = router1_messages.next_coap_message(code="0.02")
        command.check_address_solicit(msg, was_router=True)

        # 4 - Router1: end-to-end connectivity is restored.
        for addr in self.nodes[ROUTER1].get_addrs():
            self.assertTrue(self.nodes[LEADER].ping(addr))
unittest.main()
|
JonSteinn/Kattis-Solutions | src/Coloring Socks/Python 3/main.py | Python | gpl-3.0 | 689 | 0.024673 |
def machines_needed(socks,cap,diff,colors):
if colors[0]-colors[-1] <= diff and cap >= socks:
return 1
machines,curr_cap,i,j = (0,cap,0,0)
while True:
if curr_cap == 0:
machines += 1
curr_cap = cap
i = j
elif colors[i]-colors[j] > diff:
machines += 1
i = j
curr_cap = cap
elif j < socks-1:
j += 1
cur | r_cap -= 1
else:
return machines + 1
def main():
    """Read 'socks capacity diff' and the sock colors from stdin, then
    print the number of machines needed."""
    s, c, k = map(int, input().split())
    # Sort colors descending so machines_needed can measure each
    # machine's spread against its first (largest) sock.
    colors = sorted(map(int, input().split()), reverse=True)
    print(machines_needed(s, c, k, colors))
if __name__ == "__main__":
main()
|
mdaus/coda-oss | modules/python/math.linear/tests/test_math_linear.py | Python | lgpl-3.0 | 9,396 | 0.004151 | #!/usr/bin/env python
"""
* =========================================================================
* This file is part of math.linear-c++
* =========================================================================
*
* (C) Copyright 2004 - 2014, MDA Information Systems LLC
*
* math.linear-c++ is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; | either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesse | r General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; If not,
* see <http://www.gnu.org/licenses/>.
*
*
"""
import sys
import numpy as np
from copy import deepcopy
from coda.math_linear import *
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
if __name__ == '__main__':
####################################
# VectorN<2, double> Bindings Test #
####################################
vec2 = Vector2()
vec2[0] = 100
vec2[1] = 200
print("Vector2:")
print(vec2)
# Test deep copy
vec2_copy = deepcopy(vec2)
if vec2_copy[0] != 100 or vec2_copy[1] != 200:
sys.exit('Vector2 did not perform a deep copy')
vec2_copy[0] = 300
if vec2[0] != 100:
sys.exit('Vector2 did not perform a deep copy')
# Test pickling and unpickling
pvec2 = pickle.loads(pickle.dumps(vec2))
for i in range(2):
if pvec2[i] != vec2[i]:
sys.exit('Vector2 was not pickled correctly')
####################################
# VectorN<3, double> Bindings Test #
####################################
vec3 = Vector3()
vec3[0] = 10
vec3[1] = 20
vec3[2] = 30
print("Vector3:")
print(vec3)
vec3_copy = deepcopy(vec3)
if vec3_copy[0] != 10 or vec3_copy[1] != 20 or vec3_copy[2] != 30:
sys.exit('Vector3 did not perform a deep copy')
vec3_copy[0] = 40
if vec3[0] != 10:
sys.exit('Vector3 did not perform a deep copy')
# Test pickling and unpickling
pvec3 = pickle.loads(pickle.dumps(vec3))
for i in range(3):
if pvec3[i] != vec3[i]:
sys.exit('Vector3 was not pickled correctly')
################################
# Vector<double> Bindings Test #
################################
size = 3
vecD = VectorDouble(size)
vecD[0] = 10
vecD[1] = 20
vecD[2] = 30
print("\nVectorDouble:")
print(vecD)
# NumPy compatibility test
example = np.asarray([10,20,30])
converted = np.asarray(vecD.vals())
print("np.array_equal(example, converted):",np.array_equal(example, converted))
# Try to get the -1st element
threw = False
try:
foo = vecD[-1]
print(foo)
except ValueError:
threw = True
if threw:
print('\nGetting (OOB low) threw as expected')
else:
sys.exit('Getting (OOB low) did not throw!')
# Try to get the N+1th element
threw = False
try:
foo = vecD[size]
print(foo)
except ValueError:
threw = True
if threw:
print('Getting (OOB high) threw as expected')
else:
sys.exit('Getting (OOB high) did not throw!')
# Try to set the -1st element
threw = False
try:
vecD[-1] = 100
print(foo)
except ValueError:
threw = True
if threw:
print('Setting (OOB low) threw as expected')
else:
sys.exit('Setting (OOB low) did not throw!')
# Try to set the N+1th element
threw = False
try:
vecD[size] = 100
print(foo)
except ValueError:
threw = True
if threw:
print('Setting (OOB high) threw as expected')
else:
sys.exit('Setting (OOB high) did not throw!')
##################################
# Matrix2D<double> Bindings Test #
##################################
dim = 3
matrixD = MatrixDouble(dim, dim)
matrixD[0,0] = 0
matrixD[0,1] = 1
matrixD[0,2] = 2
matrixD[1,0] = 3
matrixD[1,1] = 4
matrixD[1,2] = 5
matrixD[2,0] = 6
matrixD[2,1] = 7
matrixD[2,2] = 8
print("\nMatrix2D:")
print(matrixD)
# NumPy compatibility test
example = np.asarray([[0,1,2],[3,4,5],[6,7,8]])
converted = np.asarray(matrixD.vals())
print(("np.array_equal(example, converted):",np.array_equal(example, converted)))
# Try to get a value in the -1st row
threw = False
try:
foo = matrixD[-1,0]
except ValueError:
threw = True
if threw:
print('\nGetting (OOB low row) threw as expected')
else:
sys.exit('Getting (OOB low row) did not throw!')
# Try to get a value in the N+1th row
threw = False
try:
foo = matrixD[dim,0]
except ValueError:
threw = True
if threw:
print('Getting (OOB high row) threw as expected')
else:
sys.exit('Getting (OOB high row) did not throw!')
# Try to get a value in the -1st column
threw = False
try:
foo = matrixD[0,-1]
except ValueError:
threw = True
if threw:
print('Getting (OOB low column) threw as expected')
else:
sys.exit('Getting (OOB low column) did not throw!')
# Try to get a value in the N+1th column
threw = False
try:
foo = matrixD[0,dim]
except ValueError:
threw = True
if threw:
print('Getting (OOB high column) threw as expected')
else:
sys.exit('Getting (OOB high column) did not throw!')
# Try to get a value in a non-integer row
threw = False
try:
foo = matrixD[0.5,0]
except TypeError:
threw = True
if threw:
print('Getting (Non-integer row) threw as expected')
else:
sys.exit('Getting (Non-integer row) did not throw!')
# Try to get a value in a non-integer column
threw = False
try:
foo = matrixD[0,0.5]
except TypeError:
threw = True
if threw:
print('Getting (Non-integer column) threw as expected')
else:
sys.exit('Getting (Non-integer column) did not throw!')
# Try to get a value with too few arguments
threw = False
try:
foo = matrixD[0]
except TypeError:
threw = True
if threw:
print('Getting (Too few arguments) threw as expected')
else:
sys.exit('Getting (Too few arguments) did not throw!')
# Try to get a value with too many arguments
threw = False
try:
foo = matrixD[0,1,2]
except TypeError:
threw = True
if threw:
print('Getting (Too many arguments) threw as expected')
else:
sys.exit('Getting (Too many arguments) did not throw!')
# Try to set a value in the -1st row
threw = False
try:
matrixD[-1,0] = 100
except ValueError:
threw = True
if threw:
print('Setting (OOB low row) threw as expected')
else:
sys.exit('Setting (OOB low row) did not throw!')
# Try to set a value in the N+1th row
threw = False
try:
matrixD[dim,0] = 100
except ValueError:
threw = True
if threw:
print('Setting (OOB high row) threw as expected')
else:
sys.exit('Setting (OOB high row) did not throw!')
# Try to set a value in the -1st column
threw = False
try:
matrixD[0,-1] = 100
except ValueError:
threw = True
if threw:
print('Setting (OOB low column) threw as expected')
else:
sys.exit('Setting (OOB low column) did not throw!')
# Try to set a value in the N+1th column
threw = False
try:
matrixD[0,dim] = 100
except ValueError:
threw = True
if threw:
print('Setting (OOB high column) threw as expected')
else:
sys.exit('Setting (OOB high c |
ksmit799/Toontown-Source | toontown/coghq/DistributedCogKartAI.py | Python | mit | 2,463 | 0.004872 | from direct.directnotify import DirectNotifyGlobal
from toontown.safezone import DistributedGolfKartAI
from toontown.building import DistributedElevatorExtAI
from toontown.building import ElevatorConstants
from toontown.toonbase import ToontownGlobals
class DistributedCogKartAI(DistributedElevatorExtAI.DistributedElevatorExtAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedCogKartAI')
def __init__(self, air, index, x, y, z, h, p, r, bldg, minLaff):
self.posHpr = (x,
y,
z,
h,
p,
r)
DistributedElevatorExtAI.DistributedElevatorExtAI.__init__(self, air, bldg, minLaff=minLaff)
self.type = ElevatorConstants.ELEVATOR_COUNTRY_CLUB
self. | courseIndex = index
if self.courseIndex == 0:
self.countryClubId = ToontownGlobals.BossbotCountryClubIntA
elif self.courseIndex == 1:
self.countryClubId = ToontownGlobals.BossbotCountryClubIntB
elif self.courseIndex == 2:
self.countryClubId = ToontownGlobals.BossbotCountryClubIntC
else:
self.countryClubId = 12500
def getPosHpr(self):
return self.posHpr
def elevatorClosed( | self):
numPlayers = self.countFullSeats()
if numPlayers > 0:
players = []
for i in self.seats:
if i not in [None, 0]:
players.append(i)
countryClubZone = self.bldg.createCountryClub(self.countryClubId, players)
for seatIndex in range(len(self.seats)):
avId = self.seats[seatIndex]
if avId:
self.sendUpdateToAvatarId(avId, 'setCountryClubInteriorZone', [countryClubZone])
self.clearFullNow(seatIndex)
else:
self.notify.warning('The elevator left, but was empty.')
self.fsm.request('closed')
return
def sendAvatarsToDestination(self, avIdList):
if len(avIdList) > 0:
countryClubZone = self.bldg.createCountryClub(self.countryClubId, avIdList)
for avId in avIdList:
if avId:
self.sendUpdateToAvatarId(avId, 'setCountryClubInteriorZoneForce', [countryClubZone])
def getCountryClubId(self):
return self.countryClubId
def enterClosed(self):
DistributedElevatorExtAI.DistributedElevatorExtAI.enterClosed(self)
self.fsm.request('opening')
|
mitchellcash/ion | contrib/linearize/linearize-hashes.py | Python | mit | 3,123 | 0.032341 | #!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin developers
# Copyright (c) 2015-2018 The PIVX developers
# Copyright (c) 2018 The Ion developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in se | ttings | :
settings['port'] = 51473
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
|
pearu/sympycore | sympycore/heads/neg.py | Python | bsd-3-clause | 4,464 | 0.001792 |
__all__ = ['NEG']
from .base import heads_precedence, ArithmeticHead
from ..core import init_module
init_module.import_heads()
init_module.import_lowlevel_operations()
init_module.import_numbers()
class NegHead(ArithmeticHead):
"""
NegHead represents negative unary operation where operand (data)
is an expression.
"""
op_mth = '__neg__'
def is_data_ok(self, cls, data):
if not isinstance(data, cls):
return '%s data part must be %s instance but got %s' % (self, cls, type(data))
def __repr__(self): return 'NEG'
def new(self, cls, expr, evaluate=True):
if not evaluate:
return cls(self, expr)
h, d = expr.pair
if h is NUMBER:
return cls(NUMBER, -d)
if h is TERM_COEFF:
t, c = d
return TERM_COEFF.new(cls, (t, -c))
return cls(NEG, expr)
def reevaluate(self, cls, expr):
return -expr
def data_to_str_and_precedence(self, cls, expr):
if cls.algebra_options.get('evaluate_addition'):
if expr.head is NEG:
expr = expr.data
return expr.head.data_to_str_and_precedence(cls, expr.data)
s, s_p = expr.head.data_to_str_and_precedence(cls, expr.data)
neg_p = heads_precedence.NEG
if s_p < neg_p:
return '-(' + s + ')', neg_p
return '-' + s, neg_p
def to_lowlevel(self, cls, data, pair):
if isinstance(data, numbertypes):
return -data
if data.head is NUMBER:
return -data.data
return cls(TERM_COEFF, (data, -1))
def term_coeff(self, cls, expr):
e = expr.data
t, c = e.head.term_coeff(cls, e)
return t, -c
def scan(self, proc, cls, expr, target):
expr.head.scan(proc, cls, expr.data, target)
proc(cls, self, expr, target)
def walk(self, func, cls, operand, target):
operand1 = operand.head.walk(func, cls, operand.data, operand)
if operand1 is operand:
return func(cls, self, operand, target)
r = self.new(cls, operand1)
return func(cls, r.head, r.data, r)
def to_TERM_COEFF_DICT(self, Algebra, data, expr):
return -data.head.to_TERM_COEFF_DICT(Algebra, data.data, data)
def to_ADD(self, Algebra, data, expr):
return -data.head.to_ADD(Algebra, data.data, data)
def algebra_pos(self, Algebra, expr):
return expr
def algebra_neg(self, Algebra, expr):
if Algebra.algebra_options.get('evaluate_addition'):
return expr.data
return Algebra(NEG, expr)
def algebra_add_number(self, Algebra, lhs, rhs, inplace):
return self.algebra_add( | Algebra, lhs, Algebra(NUMBER, rhs), inplace)
def algebra_add(self, Algebra, lhs, rhs, inplace):
rhead, rdata = rhs.pair
if rhead is ADD:
data = [lhs] + rdata
elif rhead is TERM_COEFF_DICT or rhead is EXP_COEFF_DICT:
data = [lhs] + rhs | .to(ADD).data
else:
data = [lhs, rhs]
if Algebra.algebra_options.get('evaluate_addition'):
ADD.combine_add_list(Algebra, data)
return add_new(Algebra, data)
def algebra_mul_number(self, Algebra, lhs, rhs, inplace):
if Algebra.algebra_options.get('is_additive_group_commutative'):
term, coeff = lhs.head.term_coeff(Algebra, lhs)
return term_coeff_new(Algebra, (term, coeff * rhs))
else:
if Algebra.algebra_options.get('evaluate_addition'):
if rhs == 0:
return Algebra(NUMBER, 0)
term, coeff = lhs.head.term_coeff(Algebra, lhs)
return term_coeff_new(Algebra, (term, coeff * rhs))
return mul_new(Algebra, [lhs, Algebra(NUMBER, rhs)])
def algebra_mul(self, Algebra, lhs, rhs, inplace):
ldata = lhs.data
if Algebra.algebra_options.get('is_additive_group_commutative'):
return super(type(self), self).algebra_mul(Algebra, lhs, rhs, inplace)
else:
if Algebra.algebra_options.get('evaluate_addition'):
rhead, rdata = rhs.pair
if rhead is NUMBER:
return ldata.head.algebra_mul_number(Algebra, ldata, -rdata, inplace)
return super(type(self), self).algebra_mul(Algebra, lhs, rhs, inplace)
return mul_new(Algebra, [lhs, rhs])
NEG = NegHead()
|
colloquium/spacewalk | client/rhel/yum-rhn-plugin/test/settestpath.py | Python | gpl-2.0 | 992 | 0 | # yum-rhn-plugin - RHN support for yum
#
# Copyright (C) 2006 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more detail | s.
| #
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import sys
# Adjust path so we can see the src modules running from branch as well
# as test dir:
sys.path.insert(0, './')
sys.path.insert(0, '../')
sys.path.insert(0, '../../')
|
cmr/cmr_rrt | src/map.py | Python | gpl-3.0 | 2,764 | 0.002894 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import random
from rtree import index
# A map representing a bounded, continuous 2D world with discrete rectangular
# obstacles.
class SimpleMap(object):
# obstacles should be a list of tuples representing the lower left and
# upper right bounds of the obstacle.
def __init__(self, xmin, xmax, ymin, ymax, obstacles):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
p = index.Property()
p.dimension = 2
self.spidx = index.Index(properties=p)
self.max = 0
self.invspidx = {}
self.obstimg = obstacles
def add_obstacle(self, mi, ma):
self.spidx.insert(self.max, mi + ma)
self.invspidx[self.max] = (mi, ma)
self.max += 1
def collides_img(self, pt):
if not self.inbounds((pt[0]-3, pt[1]-3), (pt[0]+3, pt[1]+3)):
return True
for x in range(-3, 3):
for y in range(-3, 3):
try:
if all(map(lambda c: c == 0, self.obstimg.getpixel((int(pt[0])+x, int(pt[1])+y))[:3])):
return Tr | ue
except IndexError:
print "Wtf IndexError? It's inbounds! %s" % str((pt[0], pt[1]))
return True
return False
def inbounds(self, mi, ma, dbg=True):
| if mi[0] < self.xmin or mi[0] >= self.xmax or ma[0] < self.xmin or ma[0] >= self.xmax:
return False
if mi[1] < self.ymin or mi[1] >= self.ymax or ma[1] < self.ymin or ma[1] >= self.ymax:
return False
return True
def collides_any(self, mi, ma, dbg=True):
self.inbounds(mi, ma, dbg)
l = list(self.spidx.intersection(mi, ma))
if l and dbg:
print "(%s, %s) intersected with these things: %s" % (mi, ma, l)
return bool(l)
def random_free_point(self):
x, y = random.uniform(self.xmin, self.xmax), random.uniform(self.ymin, self.ymax)
while self.collides_img((x, y)):
x, y = random.uniform(self.xmin, self.xmax), random.uniform(self.ymin, self.ymax)
return (x, y)
|
ugoertz/django-familio | comments/migrations/0001_initial.py | Python | bsd-3-clause | 1,415 | 0.003534 | # -*- coding: utf-8 -*-
from __future__ im | port unicode_literals
from django.db import models, migrations
from django.contrib.postgres.fields | import ArrayField
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content', models.TextField()),
('date', models.DateTimeField(auto_now_add=True)),
('path', ArrayField(size=None, base_field=models.IntegerField(), unique=True, editable=False, blank=True)),
('object_pk', models.TextField(verbose_name='object ID')),
('author', models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
('content_type', models.ForeignKey(related_name='content_type_set_for_comment', verbose_name='content type', to='contenttypes.ContentType', on_delete=models.CASCADE)),
('site', models.ForeignKey(to='sites.Site', on_delete=models.CASCADE)),
],
options={
},
bases=(models.Model,),
),
]
|
google/smbios-validation-tool | setup.py | Python | apache-2.0 | 296 | 0.033784 | from setupt | ools import setup
setup(
name = "smbios_validation_tool",
author = "Xu Han",
author_email = "",
license = "Apache",
url = "https://github.com/google/smbios-validation-tool",
packages=['smbios_validation_tool', ' | dmiparse'],
scripts=['smbios_validation'],
)
|
ClovisDj/Playconnect4 | Connect4/urls.py | Python | mit | 909 | 0.0022 | """Connect4 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from userprofile import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r | '^profile | /', include('userprofile.urls')),
url(r'^$', views.Home.as_view(), name = "home"),
]
|
kyleabeauchamp/EnsemblePaper | code/figures/old/plot_ALA3_karplus.py | Python | gpl-3.0 | 1,981 | 0.021706 | import matplotlib.pyplot as plt
import numpy as np
import schwalbe_couplings
import experiment_loader
import matplotlib
matplotlib.rcParams.update({'font.size': 20})
outdir = "/home/kyleb/src/pymd/Papers/maxent/figures/"
alpha = 0.2
num_grid = 500
phi = np.linspace(-180,180,num_grid)
O = np.ones(num_grid)
colors = ["b","g","r","y"]
simulation_data = {}
simulation_data["J3_HN_HA_2"] = schwalbe_couplings.J3_HN_HA(phi)
simulation_data["J3_HN_Cprime_2"] = schwalbe_couplings.J3_HN_Cprime(phi)
simulation_data["J3_HA_Cprime_2"] = schwalbe_couplings.J3_HA_Cprime(phi)
simulation_data["J3_HN_CB_2"] = schwalbe_couplings.J3_HN_CB(phi)
simulation_data["J1_N_CA_2"] = schwalbe_couplings.J1_N_CA(phi)
simulation_data["J2_N_CA_3"] = schwalbe_couplings.J2_N_CA(phi)
plt.plot(phi,simulation_data["J3_HN_HA_2"],'b',label="Karplus Curve")
yi = experiment_loader.experimental_data["J3_HN_HA_2"]
oi = experiment_loader.sigma_dict["J3_HN_HA"]
plt.plot(phi,O*yi,"k",label="Measured Average")
lower = yi - oi
upper = yi + oi
plt.fill_between(phi,lower*O,upper*O,color='k',alpha=alpha)
plt.xlabel(r"$\phi$ [$\circ$]")
plt.ylabel(r"$f_i(\phi) = $ J")
plt.title("Pr | ojecting $\phi$ onto J Couplings")
plt.legend(loc=0)
plt.xlim(-180,180)
plt.ylim(-0.5,10.5)
#plt.savefig(outdir+"/single_karplus.pdf",bbox_inches='tight')
plt.figure()
for i,key in enumerate(["J3_HN_HA_2","J3_HN_Cprime_2","J3_HA_Cprime_2","J3_HN_CB_2"]):
plt.plot(phi,simulation_data[key],"%s" % colors[i])
yi = experiment_loader.experimental_data[key]
oi = experiment_loader.sigma_dict[key[:-2]]
plt.plot(phi,O*yi,"%s" % colors[i])
plt | .plot(phi,O*yi,"%s" % colors[i])
lower = yi - oi
upper = yi + oi
plt.fill_between(phi,lower*O,upper*O,color=colors[i],alpha=alpha)
plt.xlabel(r"$\phi$ [$\circ$]")
plt.ylabel(r"$f_i(\phi) = $ J")
plt.xlim(-180,180)
plt.ylim(-0.5,10.5)
plt.title("Projecting $\phi$ onto J Couplings")
plt.savefig(outdir+"/multiple_karplus.pdf",bbox_inches='tight')
|
talonchandler/dipsim | notes/2017-10-10-voxel-reconstruction/figures/my_draw_scene.py | Python | mit | 5,943 | 0.004038 | import numpy as np
import subprocess
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def draw_scene(scene_string, filename='out.png', my_ax=None, dpi=500,
save_file=False, chop=True):
asy_string = """
import three;
import graph3;
settings.outformat = "pdf";
settings.prc = true;
settings.embed= true;
settings.render=16;
size(6cm,6cm);
currentprojection = orthographic(1, 1, 1);
void circle(real Theta, real Alpha, bool dash, triple color) {
triple normal = expi(Theta, 0);
real h = 1 - sqrt(2 - 2*cos(Alpha) - sin(Alpha)^2);
real radius = sin(Alpha);
path3 mycircle = circle(c=h*normal, r=radius, normal=normal);
if (dash) {
draw(mycircle, p=linetype(new real[] {8,8}, offset=xpart(color))+rgb(xpart(color), ypart(color), zpart(color)));
} else {
draw(mycircle, p=rgb(xpart(color), ypart(color), zpart(color)));
}
}
void ellipse(real Theta, real Phi, real a, real b, real theta, bool dash, triple color) {
triple normal = expi(Theta, Phi);
real a_scaled = a/max(a, b);
real b_scaled = b/max(a, b);
path3 mycircle = rotate(degrees(Phi), Z)*rotate(degrees(Theta), Y)*shift(Z)*rotate(degrees(theta), Z)*scale(a_scaled, b_scaled, 1)*circle(c=O, r=0.05, normal=Z);
if (dash) {
draw(mycircle, p=linetype(new real[] {8,8}, offset=xpart(color))+rgb(xpart(color), ypart(color), zpart(color)));
} else {
draw(mycircle, p=rgb(xpart(color), ypart(color), zpart(color)));
}
}
void mydot(real Theta, triple color) {
triple normal = expi(Theta, 0);
dot(normal, p=rgb(xpart(color), ypart(color), zpart(color)));
}
void arrow(real c, real Theta, real Phi_Pol, triple color, bool dash) {
if (dash) {
draw(rotate(Theta, Y)*rotate(Phi_Pol, Z)*(Z--(Z+0.2*X)), p=linetype(new real[] {4,4}, offset=xpart(color))+rgb(xpart(color), ypart(color), zpart(color)), arrow=Arrow3(emissive(rgb(xpart(color), ypart(color), zpart(color)))));
draw(rotate(Theta, Y)*rotate(Phi_Pol, Z)*(Z--(Z-0.2*X)), p=linetype(new real[] {4,4}, offset=xpart(color))+rgb(xpart(color), ypart(color), zpart(color)), arrow=Arrow3(emissive(rgb(xpart(color), ypart(color), zpart(color)))));
} else {
draw(rotate(Theta, Y)*rotate(Phi_Pol, Z)*(Z--(Z+0.2*X)), p=rgb(xpart(color), ypart(color), zpart(color)), arrow=Arrow3(emissive(rgb(xpart(color), ypart(color), zpart(color)))));
draw(rotate(Theta, Y)*rotate(Phi_Pol, Z)*(Z--(Z-0.2*X)), p=rgb(xpart(color), ypart(color), zpart(color)), arrow=Arrow3(emissive(rgb(xpart(color), ypart(color), zpart(color)))));
}
}
void dip_arrow(real X, real Y, real Theta, real Phi, real length, triple color) {
//draw(-expi(Theta, Phi)--expi(Theta, Phi));
draw(shift((X, Y, 0))*(O--length*expi(Theta, Phi)), p=rgb(xpart(color), ypart(color), zpart(color)), arrow=Arrow3(emissive(rgb(xpart(color), ypart(color), zpart(color)))));
draw(shift((X, Y, 0))*(O--(-length*expi(Theta, Phi))), p=rgb(xpart(color), ypart(color), zpart(color)), arrow=Arrow3(emissive(rgb(xpart(color), ypart(color), zpart(color)))));
dot((X,Y,0), p=rgb(xpart(color), ypart(color), zpart(color)));
}
void watson(real Theta, real Phi, real kappa, real x, real y, real z) {
int n_phi = 10;
int n_theta = 10;
real max_radius = 0;
if(kappa > 0){
max_radius = exp(kappa);
}
else{
max_radius = 1.0;
}
for(int i=0; i <= n_theta; ++i) {
real Theta_i = pi*i/n_theta;
real weight = exp(kappa*(cos(Theta_i)**2))/max_radius;
path3 mycircle = circle(c=Z*weight*cos(Theta_i), r=weight*sin(Theta_i));
draw(shift((x, y, z))*rotate(angle=degrees(Phi), u=O, v=Z)*rotate(angle=degrees(Theta), u=O, v=Y)*mycircle);
}
triple f(real t) {
real weight = exp(kappa*(cos(t)**2))/max_radius;
return (0, weight*sin(t), weight*cos(t));
}
path3 phi_path = graph(f, 0, 2pi, operator ..);
for(int i=0; i <= n_phi; ++i) {
real Phi_i = 2*pi*i/n_theta;
draw | (shift((x, y, z))*rotate(angle=degrees(Phi), u=O, v=Z)*rotate(angle=degrees | (Theta), u=O, v=Y)*rotate(angle=degrees(Phi_i), u=(0,0,0), v=(0,0,1))*phi_path);
}
}
real len = 50;
draw((-len,-len)--(len,-len)--(len,len)--(-len,len)--(-len,-len), white);
draw(scale(50, 50, 0)*unitplane, surfacepen=white+opacity(0.5));
defaultpen(fontsize(8pt));
draw((0, -10, 0)--(10, -10, 0), L=Label("$10$ px", position=MidPoint, align=NW));
dotfactor=2;
"""
asy_string += scene_string
asy_string += "dot(O);shipout(scale(4.0)*currentpicture.fit());"
text_file = open("temp.asy", "w")
text_file.write(asy_string)
text_file.close()
subprocess.call(['asy', 'temp.asy'])
subprocess.call(['convert', '-density', str(dpi), '-units', 'PixelsPerInch', 'temp.pdf', 'temp.png'])
im = mpimg.imread('temp.png')
# Chop top of im to make it square and fix asy error
if chop:
im = im[int(im.shape[1]*0.075):,:,:]
f = plt.figure(figsize=(5, 5), frameon=False)
local_ax = plt.axes([0, 0, 1, 1]) # x, y, width, height
if my_ax == None:
my_ax = local_ax
for ax in [local_ax, my_ax]:
#draw_axis(ax)
ax.spines['right'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
# Plot
ax.imshow(im, interpolation='none')
# Save
if save_file:
print("Saving: "+filename)
f.savefig(filename, dpi=dpi)
subprocess.call(['rm', 'temp.asy', 'temp.pdf', 'temp.png'])
return ax
|
ifwe/tasr | src/py/tasr/client_legacy.py | Python | apache-2.0 | 8,458 | 0.000946 | '''
Created on May 6, 2014
@author: cmills
The idea here is to provide client-side functions to interact with the TASR
repo. We use the requests package here. We provide both stand-alone functions
and a class with methods. The class is easier if you are using non-default
values for the host or port.
'''
import requests
import tasr.app
from tasr.registered_schema import RegisteredAvroSchema
from tasr.headers import SubjectHeaderBot, SchemaHeaderBot
from tasr.client import TASRError, reg_schema_from_url
APP = tasr.app.TASR_APP
APP.set_config_mode('local')
TASR_HOST = APP.config.host
TASR_PORT = APP.config.port
TIMEOUT = 2 # seconds
def get_active_topics(host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
''' GET /tasr/active_topics
Retrieves available metadata for active topics (i.e. -- groups) with
registered schemas. A dict of <topic name>:<topic metadata> is returned.
'''
url = 'http://%s:%s/tasr/active_topics' % (host, port)
resp = requests.get(url, timeout=timeout)
if resp == None:
raise TASRError('Timeout for request to %s' % url)
if not 200 == resp.status_code:
raise TASRError('Failed request to %s (status code: %s)' %
(url, resp.status_code))
topic_metas = SubjectHeaderBot.extract_metadata(resp)
return topic_metas
def get_all_topics(host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
''' GET /tasr/topic
Retrieves available metadata for all the topics (i.e. -- groups) with
registered schemas. A dict of <topic name>:<topic metadata> is returned.
'''
url = 'http://%s:%s/tasr/topic' % (host, port)
resp = requests.get(url, timeout=timeout)
if resp == None:
raise TASRError('Timeout for request to %s' % url)
if not 200 == resp.status_code:
raise TASRError('Failed request to %s (status code: %s)' %
(url, resp.status_code))
topic_metas = SubjectHeaderBot.extract_metadata(resp)
return topic_metas
def register_schema(topic_name, schema_str, host=TASR_HOST,
port=TASR_PORT, timeout=TIMEOUT):
''' PUT /tasr/topic/<topic name>
Register a schema string for a topic. Returns a SchemaMetadata object
with the topic-version, topic-timestamp and ID metadata.
'''
url = 'http://%s:%s/tasr/topic/%s' % (host, port, topic_name)
headers = {'content-type': 'application/json; charset=utf8', }
rs = reg_schema_from_url(url, method='PUT', data=schema_str,
headers=headers, timeout=timeout)
return rs
def get_latest_schema(topic_name, host=TASR_HOST,
port=TASR_PORT, timeout=TIMEOUT):
''' GET /tasr/topic/<topic name>
Retrieve the latest schema registered for the given topic name. Returns a
RegisteredSchema object back.
'''
return get_schema_version(topic_name, None, host, port, timeout)
def get_schema_version(topic_name, version, host=TASR_HOST,
port=TASR_PORT, timeout=TIMEOUT):
''' GET /tasr/topic/<topic name>/version/<version>
Retrieve a specific schema registered for the given topic name identified
by a version (a positive integer). Returns a RegisteredSchema object.
'''
url = ('http://%s:%s/tasr/topic/%s/version/%s' %
(host, port, topic_name, version))
return reg_schema_from_url(url, timeout=timeout,
err_404='No such version.')
def schema_for_id_str(id_str, host=TASR_HOST,
port=TASR_PORT, timeout=TIMEOUT):
''' GET /tasr/id/<ID string>
Retrieves a schema that has been registered for at least one topic name as
identified by a hash-based ID string. The ID string is a base64 encoded
byte sequence, starting with a 1-byte ID type and followed by fingerprint
bytes for the ID type. For example, with an SHA256-based ID, a fingerprint
is 32 bytes in length, so there would be 33 ID bytes, which would produce
an ID string of length 44 once base64-encoded. The MD5-based IDs are 17
bytes (1 + 16), producing ID strings of length 24. A RegisteredSchema
object is returned.
'''
url = 'http://%s:%s/tasr/id/%s' % (host, port, id_str)
return reg_schema_from_url(url, timeout=timeout,
err_404='No schema for id.')
def schema_for_schema_str(schema_str, object_on_miss=False,
host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
''' POST /tasr/schema
In essence this is very similar to the schema_for_id_str, but with the
calculation of the ID string being moved to the server. That is, the
client POSTs the schema JSON itself, the server canonicalizes it, then
calculates the SHA256-based ID string for what was sent, then looks for
a matching schema based on that ID string. This allows clients that do not
know how to canonicalize or hash the schemas to find the metadata (is it
registered, what version does it have for a topic) with what they have.
A RegisteredSchema object is returned if the schema string POSTed has been
registered for one or more topics.
If the schema string POSTed has yet to be registered for a topic and the
object_on_miss flag is True, a RegisteredSchema calculated for the POSTed
schema string is returned (it will have no topic-versions as there are
none). This provides an easy way for a client to get the ID strings to
use for subsequent requests.
If the object_on_miss flag is False (the default), then a request for a
previously unregistered schema will raise a TASRError.
'''
url = 'http://%s:%s/tasr/schema' % (host, port)
h | eaders = {'content-type': 'application/json; charset=utf8', }
resp = requests.post(url, data=schema_str, headers=headers,
timeout=timeout)
if resp == None:
raise TASRError('Timeout for request to %s' % url)
if 200 == resp.status_code:
# success -- return a normal reg schema
ras = RegisteredAvroSchema()
ras.schema_str = resp.context
sc | hema_meta = SchemaHeaderBot.extract_metadata(resp)
ras.update_from_schema_metadata(schema_meta)
return ras
elif 404 == resp.status_code and object_on_miss:
ras = RegisteredAvroSchema()
ras.schema_str = schema_str
schema_meta = SchemaHeaderBot.extract_metadata(resp)
ras.update_from_schema_metadata(schema_meta)
return ras
raise TASRError('Schema not registered to any topics.')
#############################################################################
# Wrapped in a class
#############################################################################
class TASRLegacyClient(object):
'''An object means you only need to specify the host settings once.
'''
def __init__(self, host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
self.host = host
self.port = port
self.timeout = timeout
# topic calls
def get_active_topics(self):
'''Returns a dict of <topic name>:<metadata> for active topics.'''
return get_active_topics(self.host, self.port, self.timeout)
def get_all_topics(self):
'''Returns a dict of <topic name>:<metadata> for all topics.'''
return get_all_topics(self.host, self.port, self.timeout)
# schema calls
def register_schema(self, topic_name, schema_str):
'''Register a schema for a topic'''
return register_schema(topic_name, schema_str)
def get_latest_schema(self, topic_name):
'''Get the latest schema registered for a topic'''
return get_latest_schema(topic_name,
self.host, self.port, self.timeout)
def get_schema_version(self, topic_name, version=None):
'''Get a schema by version for the topic'''
return get_schema_version(topic_name, version,
self.host, self.port, self.timeout)
def schema_for_id_str(self, id_str):
'''Get a schema identified by an ID str.'''
return schema_for_id_str(id_str,
self.ho |
alexlo03/ansible | lib/ansible/modules/cloud/google/gcp_storage_bucket_access_control.py | Python | gpl-3.0 | 12,479 | 0.003125 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------- | -----------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
############################################################ | ####################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_storage_bucket_access_control
description:
- The BucketAccessControls resource represents the Access Control Lists (ACLs) for
buckets within Google Cloud Storage. ACLs let you specify who has access to your
data and to what extent.
- 'There are three roles that can be assigned to an entity: READERs can get the bucket,
though no acl property will be returned, and list the bucket''s objects. WRITERs
are READERs, and they can insert objects into the bucket and delete the bucket''s
objects. OWNERs are WRITERs, and they can get the acl property of a bucket, update
a bucket, and call all BucketAccessControls methods on the bucket. For more information,
see Access Control, with the caveat that this API uses READER, WRITER, and OWNER
instead of READ, WRITE, and FULL_CONTROL.'
short_description: Creates a GCP BucketAccessControl
version_added: 2.6
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices: ['present', 'absent']
default: 'present'
bucket:
description:
- The name of the bucket.
- 'This field represents a link to a Bucket resource in GCP. It can be specified in
two ways. You can add `register: name-of-resource` to a gcp_storage_bucket task
and then set this bucket field to "{{ name-of-resource }}" Alternatively, you can
set this bucket to a dictionary with the name key where the value is the name of
your Bucket.'
required: true
entity:
description:
- 'The entity holding the permission, in one of the following forms: user-userId
user-email group-groupId group-email domain-domain project-team-projectId allUsers
allAuthenticatedUsers Examples: The user liz@example.com would be
user-liz@example.com.'
- The group example@googlegroups.com would be group-example@googlegroups.com.
- To refer to all members of the Google Apps for Business domain example.com, the
entity would be domain-example.com.
required: true
entity_id:
description:
- The ID for the entity.
required: false
project_team:
description:
- The project team associated with the entity.
required: false
suboptions:
project_number:
description:
- The project team associated with the entity.
required: false
team:
description:
- The team.
required: false
choices: ['editors', 'owners', 'viewers']
role:
description:
- The access permission for the entity.
required: false
choices: ['OWNER', 'READER', 'WRITER']
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: create a bucket
gcp_storage_bucket:
name: "bucket-bac"
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: bucket
- name: create a bucket access control
gcp_storage_bucket_access_control:
bucket: "{{ bucket }}"
entity: user-alexstephen@google.com
role: WRITER
project: "test_project"
auth_kind: "serviceaccount"
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
bucket:
description:
- The name of the bucket.
returned: success
type: dict
domain:
description:
- The domain associated with the entity.
returned: success
type: str
email:
description:
- The email address associated with the entity.
returned: success
type: str
entity:
description:
- 'The entity holding the permission, in one of the following forms: user-userId
user-email group-groupId group-email domain-domain project-team-projectId allUsers
allAuthenticatedUsers Examples: The user liz@example.com would be
user-liz@example.com.'
- The group example@googlegroups.com would be group-example@googlegroups.com.
- To refer to all members of the Google Apps for Business domain example.com, the
entity would be domain-example.com.
returned: success
type: str
entityId:
description:
- The ID for the entity.
returned: success
type: str
id:
description:
- The ID of the access-control entry.
returned: success
type: str
projectTeam:
description:
- The project team associated with the entity.
returned: success
type: complex
contains:
projectNumber:
description:
- The project team associated with the entity.
returned: success
type: str
team:
description:
- The team.
returned: success
type: str
role:
description:
- The access permission for the entity.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
bucket=dict(required=True, type='dict'),
entity=dict(required=True, type='str'),
entity_id=dict(type='str'),
project_team=dict(type='dict', options=dict(
project_number=dict(type='str'),
team=dict(type='str', choices=['editors', 'owners', 'viewers'])
)),
role=dict(type='str', choices=['OWNER', 'READER', 'WRITER'])
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/devstorage.full_control']
state = module.params['state']
kind = 'storage#bucketAccessControl'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update |
mahim97/zulip | tools/documentation_crawler/documentation_crawler/spiders/check_documentation.py | Python | apache-2.0 | 669 | 0.004484 | import os
import pathlib
from typing import List
from .common.spiders import BaseDocumentationSpider
def get_start_url() -> List[str]:
    """Return the crawler's start URL: the built docs' index.html as a file URI."""
    # This module lives four directories below the repo root; walk back up
    # to reach the Sphinx build output. (Fixed name garbled as `dir_p ath`.)
    dir_path = os.path.dirname(os.path.realpath(__file__))
    start_file = os.path.join(dir_path, os.path.join(*[os.pardir] * 4),
                              "docs/_build/html/index.html")
    # Path.as_uri() requires an absolute path, hence the abspath() call.
    return [
        pathlib.Path(os.path.abspath(start_file)).as_uri()
    ]
class DocumentationSpider(BaseDocumentationSpider):
    """Crawl the locally built Sphinx documentation, starting from index.html."""
    name = "documentation_crawler"
    # The local development server is not part of the documentation build.
    deny_domains = ['localhost:9991']
    # Raw string avoids invalid escape sequences ('\_' and '\/'); the value is
    # byte-identical to the original non-raw literal.
    deny = [r'\_sources\/.*\.txt']
    start_urls = get_start_url()
|
ryanbressler/pydec | pydec/math/volume.py | Python | bsd-3-clause | 2,450 | 0.014694 | __all__ = ['unsigned_volume','signed_volume']
from scipy import sqrt,inner,shape,asarray
from scipy.misc import factorial
from scipy.linalg import det
def unsigned_volume(pts):
    """Unsigned volume of a simplex

    Computes the unsigned volume of an M-simplex embedded in N-dimensional
    space. The points are stored row-wise in an array with shape (M+1,N).

    Parameters
    ----------
    pts : array
        Array with shape (M+1,N) containing the coordinates
        of the (M+1) vertices of the M-simplex.

    Returns
    -------
    volume : scalar
        Unsigned volume of the simplex

    Notes
    -----
    Zero-dimensional simplices (points) are assigned unit volumes.

    Examples
    --------
    >>> # 0-simplex point
    >>> unsigned_volume( [[0,0]] )
    1.0
    >>> # 1-simplex line segment
    >>> unsigned_volume( [[0,0],[1,0]] )
    1.0
    >>> # 2-simplex triangle
    >>> unsigned_volume( [[0,0,0],[0,1,0],[1,0,0]] )
    0.5

    References
    ----------
    [1] http://www.math.niu.edu/~rusin/known-math/97/volumes.polyh

    """
    pts = asarray(pts)

    M,N = pts.shape
    M -= 1  # the simplex order: M+1 vertices define an M-simplex

    if M < 0 or M > N:
        raise ValueError('array has invalid shape')
    if M == 0:
        # By convention a point has unit "volume".
        return 1.0

    # Edge vectors from vertex 0; the Gram determinant gives the squared
    # parallelepiped volume, and M! converts parallelepiped -> simplex.
    A = pts[1:] - pts[0]
    return sqrt(det(inner(A,A)))/factorial(M)
def signed_volume(pts):
    """Signed volume of a simplex

    Computes the signed volume of an M-simplex embedded in M-dimensional
    space. The points are stored row-wise in an array with shape (M+1,M).

    Parameters
    ----------
    pts : array
        Array with shape (M+1,M) containing the coordinates
        of the (M+1) vertices of the M-simplex.

    Returns
    -------
    volume : scalar
        Signed volume of the simplex

    Examples
    --------
    >>> # 1-simplex line segment
    >>> signed_volume( [[0],[1]] )
    1.0
    >>> # 2-simplex triangle
    >>> signed_volume( [[0,0],[1,0],[0,1]] )
    0.5
    >>> # 3-simplex tetrahedron
    >>> signed_volume( [[0,0,0],[3,0,0],[0,1,0],[0,0,1]] )
    0.5

    References
    ----------
    [1] http://www.math.niu.edu/~rusin/known-math/97/volumes.polyh

    """
    vertices = asarray(pts)

    num_rows, dim = vertices.shape
    order = num_rows - 1  # an M-simplex has M+1 vertices

    # A signed volume is only defined when the simplex spans the full space.
    if order != dim:
        raise ValueError('array has invalid shape')

    # Determinant of the edge matrix, rescaled from parallelepiped to simplex.
    edges = vertices[1:] - vertices[0]
    return det(edges)/factorial(order)
|
tgbugs/pyontutils | librdflib/setup.py | Python | mit | 1,448 | 0.000691 | import re
from setuptools import setup
def find_version(filename):
    """Return the version from a ``__version__ = '...'`` line in *filename*.

    Returns None when no such line is found.
    """
    _version_re = re.compile(r"__version__ = '(.*)'")
    # Context manager closes the handle deterministically; the original
    # relied on the garbage collector to close the leaked file object.
    with open(filename) as infile:
        for line in infile:
            version_match = _version_re.match(line)
            if version_match:
                return version_match.group(1)
    return None  # explicit: no version line present
# Package metadata; the author_email/license lines were garbled by the
# dataset field separators and are restored here.
__version__ = find_version('librdflib/__init__.py')

with open('README.md', 'rt') as f:
    long_description = f.read()

tests_require = ['pytest']

setup(
    name='librdflib',
    version=__version__,
    description='librdf parser for rdflib',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/tgbugs/pyontutils/tree/master/librdflib',
    author='Tom Gillespie',
    author_email='tgbugs@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
    ],
    keywords='rdflib librdf rdf parser parsing ttl rdfxml',
    packages=['librdflib'],
    python_requires='>=3',
    tests_require=tests_require,
    install_requires=[
        'rdflib',  # really 5.0.0 if my changes go in but dev < 5
    ],
    extras_require={'dev': ['pytest-cov', 'wheel'],
                    'test': tests_require,
                    },
    entry_points={
        'rdf.plugins.parser': [
            'librdfxml = librdflib:libRdfxmlParser',
            'libttl = librdflib:libTurtleParser',
        ],
    },
)
|
dronly/python | lxfpython/advance/do_iter.py | Python | apache-2.0 | 382 | 0.039267 | #!/bin/python3.5
# collections.Iterable was removed in Python 3.10; the abc module is the
# correct home since Python 3.3.
from collections.abc import Iterable

d = {'a': 1, 'b': 2, 'c': 3}
# Iterating a dict yields its keys.
for key in d:
    print(key)
for value in d.values():
    print(value)
# Strings are iterable character by character.
for ch in 'ABC':
    print(ch)
print(isinstance('abc', Iterable))
print(isinstance(123, Iterable))
# enumerate() pairs each element with its index.
for i, value in enumerate(['a', 'b', 'c']):
    print(i, value)
# Tuple unpacking in a for loop.
for x, y in [(1, 1), (2, 4), (3, 9)]:
    print(x, y)
# Bug fix: the original discarded list(range(1, 11)) and then printed the
# builtin `list` type; bind the result and print it instead.
numbers = list(range(1, 11))
print(numbers)
|
Udayraj123/dashboard_IITG | Binder/Binder/wsgi.py | Python | mit | 390 | 0 | """ |
WSGI config for Binder project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Binder.settings")

# The module-level WSGI callable that servers import as Binder.wsgi:application.
# (Fixed call garbled as `get_wsgi_appl ication()`.)
application = get_wsgi_application()
|
dwhswenson/contact_map | contact_map/frequency_task.py | Python | lgpl-2.1 | 4,417 | 0.000679 | """
Task-based implementation of :class:`.ContactFrequency`.
The overall algorithm is:
1. Identify how we're going to slice up the trajectory into task-based
chunks (:meth:`block_slices`, :meth:`default_slices`)
2. On each node
a. Load the trajectory segment (:meth:`load_trajectory_task`)
b. Run the analysis on the segment (:meth:`map_task`)
3. Once all the results have been collected, combine them
(:meth:`reduce_all_results`)
Notes
-----
Includes versions where messages are Python objects and versions (labelled
with _json) where messages have been JSON-serialized. However, we don't yet
have a solution for JSON serialization of MDTraj objects, so if JSON
serialization is the communication method, the loading of the trajectory and
the calculation of the contacts must be combined into a single task.
"""
import mdtraj as md
from contact_map import ContactFrequency
from contact_map.contact_trajectory import _build_contacts
def block_slices(n_total, n_per_block):
    """Determine slices for splitting the input array.

    Parameters
    ----------
    n_total : int
        total length of array
    n_per_block : int
        maximum number of items per block

    Returns
    -------
    list of slice
        slices to be applied to the array
    """
    # Full blocks of exactly n_per_block items, followed by one shorter
    # tail block when anything is left over.
    n_full, remainder = divmod(n_total, n_per_block)
    slices = [slice(start, start + n_per_block)
              for start in range(0, n_full * n_per_block, n_per_block)]
    if remainder:
        slices.append(slice(n_full * n_per_block, n_total))
    return slices
def default_slices(n_total, n_workers):
    """Calculate default slices from number of workers.

    Default behavior is (approximately) one task per worker.

    Parameters
    ----------
    n_total : int
        total number of items in array
    n_workers : int
        number of workers

    Returns
    -------
    list of slice
        slices to be applied to the array
    """
    # At least one frame per task, even when there are more workers than
    # frames.
    frames_per_task = n_total // n_workers
    if frames_per_task < 1:
        frames_per_task = 1
    return block_slices(n_total, frames_per_task)
def load_trajectory_task(subslice, file_name, **kwargs):
    """
    Task for loading a file. Reordered to take the per-task variable first.

    Parameters
    ----------
    subslice : slice
        the slice of the trajectory to use
    file_name : str
        trajectory file name
    kwargs :
        other parameters to mdtraj.load

    Returns
    -------
    md.Trajectory :
        subtrajectory for this slice
    """
    full_trajectory = md.load(file_name, **kwargs)
    return full_trajectory[subslice]
def map_task(subtrajectory, parameters):
    """Task to be mapped to all subtrajectories. Runs ContactFrequency.

    Parameters
    ----------
    subtrajectory : mdtraj.Trajectory
        single trajectory segment to calculate ContactFrequency for
    parameters : dict
        kwargs-style dict for the :class:`.ContactFrequency` object

    Returns
    -------
    :class:`.ContactFrequency` :
        contact frequency for the subtrajectory
    """
    frequency = ContactFrequency(subtrajectory, **parameters)
    return frequency
def contacts_per_frame_task(trajectory, contact_object):
    """Task that will mimic ContactTrajectory._build_contacts, but with
    a pre-initialized ContactObject instead of `self` to produce the contacts.

    Parameters
    ----------
    trajectory : mdtraj.Trajectory
        single trajectory segment to calculate contacts for every frame
    contact_object : ContactObject
        the instance that will replace ``self`` in ``_build_contacts``
        (the parameter was previously documented under the wrong name)

    Returns
    -------
    the per-frame contacts produced by ``_build_contacts``
    """
    return _build_contacts(contact_object, trajectory)
def reduce_all_results(contacts):
    """Combine multiple :class:`.ContactFrequency` objects into one

    Parameters
    ----------
    contacts : iterable of :class:`.ContactFrequency`
        the individual (partial) contact frequencies

    Returns
    -------
    :class:`.ContactFrequency` :
        total of all input contact frequencies (summing them)
    """
    # Fold every remaining result into the first one (mutates contacts[0]).
    total = contacts[0]
    for partial in contacts[1:]:
        total.add_contact_frequency(partial)
    return total
def map_task_json(subtrajectory, parameters):
    """JSON-serialized version of :meth:`map_task`"""
    frequency = map_task(subtrajectory, parameters)
    return frequency.to_json()
def reduce_all_results_json(results_of_map):
    """JSON-serialized version of :meth:`reduce_all_results`"""
    deserialized = [ContactFrequency.from_json(result)
                    for result in results_of_map]
    return reduce_all_results(deserialized)
|
coreycb/horizon | openstack_dashboard/enabled/_9020_resource_browser.py | Python | apache-2.0 | 799 | 0 | # (c) Copyright 2015 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Panel registration settings consumed by Horizon's plugin loader.
# (Fixed PANEL_GROUP assignment garbled by a stray field separator.)
PANEL = 'resource_browser'
PANEL_GROUP = 'default'
PANEL_DASHBOARD = 'developer'
ADD_PANEL = 'openstack_dashboard.contrib.developer.resource_browser.panel.ResourceBrowser'  # noqa
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.